diff --git a/BUILD.bazel b/BUILD.bazel
index 25cd3758..b9787e5d 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -1,6 +1,6 @@
 load("@bazel_gazelle//:def.bzl", "gazelle")
-load("@io_bazel_rules_go//proto:compiler.bzl", "go_proto_compiler")
 load("@io_bazel_rules_go//go:def.bzl", "nogo")
+load("@io_bazel_rules_go//proto:compiler.bzl", "go_proto_compiler")
 
 exports_files([
     "BUILD",
diff --git a/WORKSPACE b/WORKSPACE
index 7d775e1b..99097313 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -50,22 +50,22 @@ http_archive(
 http_archive(
     name = "bazel_gazelle",
-    integrity = "sha256-MpOL2hbmcABjA1R5Bj2dJMYO2o15/Uc5Vj9Q0zHLMgk=",
+    integrity = "sha256-12v3pg/YsFBEQJDfooN6Tq+YKeEWVhjuNdzspcvfWNU=",
     urls = [
-        "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.35.0/bazel-gazelle-v0.35.0.tar.gz",
-        "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.35.0/bazel-gazelle-v0.35.0.tar.gz",
+        "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.37.0/bazel-gazelle-v0.37.0.tar.gz",
+        "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.37.0/bazel-gazelle-v0.37.0.tar.gz",
     ],
 )
 
-load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
 load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository")
+load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
 load("//:repositories.bzl", "go_repositories")
 
 go_rules_dependencies()
 
 go_register_toolchains(
     nogo = "@peridot//:nogo",
-    version = "1.22.2",
+    version = "1.23.2",
 )
 
 # gazelle:repository_macro repositories.bzl%go_repositories
 
@@ -76,29 +76,29 @@ go_repository(
     build_file_generation = "on",
     build_file_proto_mode = "disable",
     importpath = "google.golang.org/grpc",
-    sum = "h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk=",
-    version = "v1.62.0",
+    sum = "h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=",
+    version = "v1.65.0",
 )
 
 go_repository(
     name = "org_golang_x_net",
     importpath = "golang.org/x/net",
-    sum = "h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=",
-    version = "v0.21.0",
+    sum = "h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=",
+    version = "v0.27.0",
 )
 
 go_repository(
     name = "org_golang_x_text",
     importpath = "golang.org/x/text",
-    sum = "h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=",
-    version = "v0.14.0",
+    sum = "h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=",
+    version = "v0.16.0",
 )
 
 go_repository(
     name = "org_golang_x_oauth2",
     importpath = "golang.org/x/oauth2",
-    sum = "h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ=",
-    version = "v0.17.0",
+    sum = "h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=",
+    version = "v0.21.0",
 )
 
 gazelle_dependencies()
diff --git a/apollo/proto/v1/BUILD.bazel b/apollo/proto/v1/BUILD.bazel
index 1ec0ece7..db3d4912 100644
--- a/apollo/proto/v1/BUILD.bazel
+++ b/apollo/proto/v1/BUILD.bazel
@@ -1,8 +1,8 @@
-load("@rules_proto//proto:defs.bzl", "proto_library")
+load("@com_github_grpc_ecosystem_grpc_gateway_v2//protoc-gen-openapiv2:defs.bzl", "protoc_gen_openapiv2")
 load("@io_bazel_rules_go//go:def.bzl", "go_library")
 load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
-load("@com_github_grpc_ecosystem_grpc_gateway_v2//protoc-gen-openapiv2:defs.bzl", "protoc_gen_openapiv2")
 load("@openapi_tools_generator_bazel//:defs.bzl", "openapi_generator")
+load("@rules_proto//proto:defs.bzl", "proto_library")
 
 proto_library(
     name = "apollopb_proto",
diff --git a/build/bazel/protobuf/BUILD.bazel b/build/bazel/protobuf/BUILD.bazel
index 65fd394b..643cc095 100644
--- a/build/bazel/protobuf/BUILD.bazel
+++ b/build/bazel/protobuf/BUILD.bazel
@@ -1,6 +1,6 @@
 load("@io_bazel_rules_go//go:def.bzl", "go_library")
-load("@rules_proto//proto:defs.bzl", "proto_library")
 load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+load("@rules_proto//proto:defs.bzl", "proto_library")
 
 proto_library(
     name = "bazelbuild_proto",
diff --git a/build/bazel/remote/execution/v2/BUILD b/build/bazel/remote/execution/v2/BUILD
index 5686fca3..417bddaf 100644
--- a/build/bazel/remote/execution/v2/BUILD
+++ b/build/bazel/remote/execution/v2/BUILD
@@ -1,10 +1,9 @@
 load("@io_bazel_rules_go//go:def.bzl", "go_library")
 load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+load("@rules_proto//proto:defs.bzl", "proto_library")
 
 package(default_visibility = ["//visibility:public"])
 
-load("@rules_proto//proto:defs.bzl", "proto_library")
-
 licenses(["notice"])
 
 proto_library(
diff --git a/build/bazel/semver/BUILD b/build/bazel/semver/BUILD
index 43ae4f56..890995b9 100644
--- a/build/bazel/semver/BUILD
+++ b/build/bazel/semver/BUILD
@@ -1,10 +1,9 @@
 load("@io_bazel_rules_go//go:def.bzl", "go_library")
 load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+load("@rules_proto//proto:defs.bzl", "proto_library")
 
 package(default_visibility = ["//visibility:public"])
 
-load("@rules_proto//proto:defs.bzl", "proto_library")
-
 licenses(["notice"])
 
 proto_library(
diff --git a/go.mod b/go.mod
index 8e27fb31..73da520e 100644
--- a/go.mod
+++ b/go.mod
@@ -7,17 +7,17 @@ require (
 	alexejk.io/go-xmlrpc v0.2.0
 	bazel.build/protobuf v0.0.0-00010101000000-000000000000
 	cirello.io/dynamolock v1.4.0
-	github.com/ProtonMail/go-crypto v0.0.0-20220113124808-70ae35bab23f
-	github.com/ProtonMail/gopenpgp/v2 v2.4.7
+	github.com/ProtonMail/go-crypto v1.0.0
+	github.com/ProtonMail/gopenpgp/v2 v2.7.5
 	github.com/authzed/authzed-go v0.3.0
 	github.com/authzed/grpcutil v0.0.0-20240123194739-2ea1e3d2d98b
-	github.com/aws/aws-sdk-go v1.44.129
+	github.com/aws/aws-sdk-go v1.54.19
 	github.com/cavaliergopher/rpm v1.2.0
 	github.com/coreos/go-oidc/v3 v3.0.0
 	github.com/fatih/color v1.15.0
 	github.com/go-chi/chi v4.1.2+incompatible
-	github.com/go-git/go-billy/v5 v5.3.1
-	github.com/go-git/go-git/v5 v5.2.0
+	github.com/go-git/go-billy/v5 v5.5.0
+	github.com/go-git/go-git/v5 v5.12.0
 	github.com/gobwas/glob v0.2.3
 	github.com/gogo/status v1.1.0
 	github.com/google/uuid v1.6.0
@@ -29,20 +29,20 @@
 	github.com/ory/hydra-client-go/v2 v2.2.0
 	github.com/pkg/errors v0.9.1
 	github.com/prometheus/client_golang v1.13.0
-	github.com/rocky-linux/srpmproc v0.4.3
-	github.com/sirupsen/logrus v1.8.1
-	github.com/spf13/cobra v1.1.3
+	github.com/rocky-linux/srpmproc v0.6.4
+	github.com/sirupsen/logrus v1.9.3
+	github.com/spf13/cobra v1.8.1
 	github.com/spf13/pflag v1.0.5
-	github.com/spf13/viper v1.7.1
+	github.com/spf13/viper v1.19.0
 	github.com/vbauerster/mpb/v7 v7.0.2
 	github.com/xanzy/go-gitlab v0.50.4
 	go.temporal.io/api v1.6.1-0.20211110205628-60c98e9cbfe2
 	go.temporal.io/sdk v1.13.1
-	golang.org/x/oauth2 v0.17.0
-	google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c
-	google.golang.org/grpc v1.62.0
-	google.golang.org/protobuf v1.32.0
+	golang.org/x/oauth2 v0.21.0
+	google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d
+	google.golang.org/grpc v1.65.0
+	google.golang.org/protobuf v1.34.2
 	gopkg.in/yaml.v3 v3.0.1
 	k8s.io/api v0.29.2
 	k8s.io/apimachinery v0.29.2
@@ -57,27 +57,32 @@ require (
 )
 
 require (
-	cloud.google.com/go v0.112.0 // indirect
-	cloud.google.com/go/compute v1.24.0 // indirect
-	cloud.google.com/go/compute/metadata v0.2.3 // indirect
-	cloud.google.com/go/iam v1.1.6 // indirect
-	cloud.google.com/go/storage v1.36.0 // indirect
-	github.com/ProtonMail/go-mime v0.0.0-20220302105931-303f85f7fe0f // indirect
+	cloud.google.com/go v0.115.0 // indirect
+	cloud.google.com/go/auth v0.7.1 // indirect
+	cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect
+	cloud.google.com/go/compute/metadata v0.5.0 // indirect
+	cloud.google.com/go/iam v1.1.11 // indirect
+	cloud.google.com/go/storage v1.43.0 // indirect
+	dario.cat/mergo v1.0.0 // indirect
+	github.com/Microsoft/go-winio v0.6.2 // indirect
+	github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/bluekeyes/go-gitdiff v0.5.0 // indirect
+	github.com/bluekeyes/go-gitdiff v0.7.3 // indirect
 	github.com/certifi/gocertifi v0.0.0-20210507211836-431795d63e8d // indirect
-	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/cespare/xxhash/v2 v2.3.0 // indirect
+	github.com/cloudflare/circl v1.3.9 // indirect
+	github.com/cyphar/filepath-securejoin v0.3.0 // indirect
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
-	github.com/emirpasic/gods v1.12.0 // indirect
+	github.com/emirpasic/gods v1.18.1 // indirect
 	github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect
 	github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
-	github.com/fsnotify/fsnotify v1.4.9 // indirect
-	github.com/go-git/gcfg v1.5.0 // indirect
-	github.com/go-logr/logr v1.4.1 // indirect
+	github.com/fsnotify/fsnotify v1.7.0 // indirect
+	github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
+	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-openapi/jsonpointer v0.19.6 // indirect
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
@@ -86,73 +91,75 @@ require (
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/mock v1.6.0 // indirect
-	github.com/golang/protobuf v1.5.3 // indirect
+	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/google/gnostic-models v0.6.8 // indirect
 	github.com/google/go-querystring v1.0.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 // indirect
 	github.com/google/s2a-go v0.1.7 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
-	github.com/googleapis/gax-go/v2 v2.12.1 // indirect
-	github.com/hashicorp/go-cleanhttp v0.5.1 // indirect
+	github.com/googleapis/gax-go/v2 v2.12.5 // indirect
+	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
 	github.com/hashicorp/go-retryablehttp v0.6.8 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
-	github.com/imdario/mergo v0.3.11 // indirect
-	github.com/inconshreveable/mousetrap v1.0.0 // indirect
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/jzelinskie/stringz v0.0.0-20210414224931-d6a8ce844a70 // indirect
-	github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd // indirect
-	github.com/magiconair/properties v1.8.1 // indirect
+	github.com/kevinburke/ssh_config v1.2.0 // indirect
+	github.com/magiconair/properties v1.8.7 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.19 // indirect
 	github.com/mattn/go-runewidth v0.0.13 // indirect
 	github.com/mattn/go-sqlite3 v1.14.16 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
-	github.com/mitchellh/go-homedir v1.1.0 // indirect
-	github.com/mitchellh/mapstructure v1.4.1 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/pborman/uuid v1.2.1 // indirect
-	github.com/pelletier/go-toml v1.8.1 // indirect
-	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/pelletier/go-toml/v2 v2.2.2 // indirect
+	github.com/pjbgf/sha1cd v0.3.0 // indirect
+	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/client_model v0.5.0 // indirect
 	github.com/prometheus/common v0.37.0 // indirect
 	github.com/prometheus/procfs v0.8.0 // indirect
 	github.com/rivo/uniseg v0.2.0 // indirect
 	github.com/robfig/cron v1.2.0 // indirect
-	github.com/sergi/go-diff v1.1.0 // indirect
-	github.com/spf13/afero v1.10.0 // indirect
-	github.com/spf13/cast v1.3.0 // indirect
-	github.com/spf13/jwalterweatherman v1.0.0 // indirect
-	github.com/stretchr/objx v0.5.0 // indirect
-	github.com/stretchr/testify v1.8.4 // indirect
-	github.com/subosito/gotenv v1.2.0 // indirect
-	github.com/xanzy/ssh-agent v0.2.1 // indirect
+	github.com/sagikazarmark/locafero v0.6.0 // indirect
+	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
+	github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
+	github.com/skeema/knownhosts v1.2.2 // indirect
+	github.com/sourcegraph/conc v0.3.0 // indirect
+	github.com/spf13/afero v1.11.0 // indirect
+	github.com/spf13/cast v1.6.0 // indirect
+	github.com/stretchr/objx v0.5.2 // indirect
+	github.com/stretchr/testify v1.9.0 // indirect
+	github.com/subosito/gotenv v1.6.0 // indirect
+	github.com/xanzy/ssh-agent v0.3.3 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
-	go.opentelemetry.io/otel v1.24.0 // indirect
-	go.opentelemetry.io/otel/metric v1.24.0 // indirect
-	go.opentelemetry.io/otel/trace v1.24.0 // indirect
-	go.uber.org/atomic v1.9.0 // indirect
-	golang.org/x/crypto v0.19.0 // indirect
-	golang.org/x/net v0.21.0 // indirect
-	golang.org/x/sync v0.6.0 // indirect
-	golang.org/x/sys v0.17.0 // indirect
-	golang.org/x/term v0.17.0 // indirect
-	golang.org/x/text v0.14.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
+	go.opentelemetry.io/otel v1.28.0 // indirect
+	go.opentelemetry.io/otel/metric v1.28.0 // indirect
+	go.opentelemetry.io/otel/trace v1.28.0 // indirect
+	go.uber.org/atomic v1.11.0 // indirect
+	go.uber.org/multierr v1.11.0 // indirect
+	golang.org/x/crypto v0.25.0 // indirect
+	golang.org/x/exp v0.0.0-20240716175740-e3f259677ff7 // indirect
+	golang.org/x/net v0.27.0 // indirect
+	golang.org/x/sync v0.7.0 // indirect
+	golang.org/x/sys v0.22.0 // indirect
+	golang.org/x/term v0.22.0 // indirect
+	golang.org/x/text v0.16.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
-	golang.org/x/tools v0.17.0 // indirect
-	google.golang.org/api v0.166.0 // indirect
-	google.golang.org/appengine v1.6.8 // indirect
-	google.golang.org/genproto v0.0.0-20240221002015-b0ce06bbee7c // indirect
+	google.golang.org/api v0.188.0 // indirect
+	google.golang.org/genproto v0.0.0-20240711142825-46eb208f015d // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
-	gopkg.in/ini.v1 v1.57.0 // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/square/go-jose.v2 v2.5.1 // indirect
 	gopkg.in/warnings.v0 v0.1.2 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
diff --git a/go.sum b/go.sum
index 35f8720a..ccc7f914 100644
--- a/go.sum
+++ b/go.sum
@@ -7,7 +7,6 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
 cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
 cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
 cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
 cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
 cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
@@ -18,26 +17,26 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
 cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
 cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
 cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
-cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
-cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
-cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
-cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4=
+cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14=
+cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU=
+cloud.google.com/go/auth v0.7.1 h1:Iv1bbpzJ2OIg16m94XI9/tlzZZl3cdeR3nGVGj78N7s=
+cloud.google.com/go/auth v0.7.1/go.mod h1:VEc4p5NNxycWQTMQEDQF0bd6aTMb6VgYDXEwiJJQAbs=
+cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI=
+cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
 cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
 cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
 cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg=
-cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40=
-cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
-cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=
+cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
-cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc=
-cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI=
+cloud.google.com/go/iam v1.1.11 h1:0mQ8UKSfdHLut6pH9FM3bI55KWR46ketn0PuXleDyxw=
+cloud.google.com/go/iam v1.1.11/go.mod h1:biXoiLWYIKntto2joP+62sd9uW5EpkZmKIvfNcTWlnQ=
+cloud.google.com/go/longrunning v0.5.9 h1:haH9pAuXdPAMqHvzX0zlWQigXT7B0+CL4/2nXXdBo5k=
+cloud.google.com/go/longrunning v0.5.9/go.mod h1:HD+0l9/OOW0za6UWdKJtXoFAX/BGg/3Wj8p10NeWF7c=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
 cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
 cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -47,36 +46,36 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
 cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
-cloud.google.com/go/storage v1.36.0 h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8=
-cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8=
+cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs=
+cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0=
+dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
+dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/ProtonMail/go-crypto v0.0.0-20220113124808-70ae35bab23f h1:J2FzIrXN82q5uyUraeJpLIm7U6PffRwje2ORho5yIik=
-github.com/ProtonMail/go-crypto v0.0.0-20220113124808-70ae35bab23f/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
-github.com/ProtonMail/go-mime v0.0.0-20220302105931-303f85f7fe0f h1:CGq7OieOz3wyQJ1fO8S0eO9TCW1JyvLrf8fhzz1i8ko=
-github.com/ProtonMail/go-mime v0.0.0-20220302105931-303f85f7fe0f/go.mod h1:NYt+V3/4rEeDuaev/zw1zCq8uqVEuPHzDPo3OZrlGJ4=
-github.com/ProtonMail/gopenpgp/v2 v2.4.7 h1:V3xeelvXgJiZXZuPtSSE+uYbtPw4RmbmyPqXDAESPhg=
-github.com/ProtonMail/gopenpgp/v2 v2.4.7/go.mod h1:ZW1KxHNG6q5LMgFKf9Ap/d2eVYeyGf5+fAUEAjJWtmo=
+github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
+github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78=
+github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
+github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k=
+github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f/go.mod h1:gcr0kNtGBqin9zDW9GOHcVntrwnjrK+qdJ06mWYBybw=
+github.com/ProtonMail/gopenpgp/v2 v2.7.5 h1:STOY3vgES59gNgoOt2w0nyHBjKViB/qSg7NjbQWPJkA=
+github.com/ProtonMail/gopenpgp/v2 v2.7.5/go.mod h1:IhkNEDaxec6NyzSI0PlxapinnwPVIESk8/76da3Ct3g=
 github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
 github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
-github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
-github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
-github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
+github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
 github.com/authzed/authzed-go v0.3.0 h1:m5eqPX9p1mhdbd8jrFhNORx5PvnHQ2e1bISweEwja+E=
@@ -85,17 +84,16 @@ github.com/authzed/grpcutil v0.0.0-20210913124023-cad23ae5a9e8/go.mod h1:HwO/KbR
 github.com/authzed/grpcutil v0.0.0-20240123194739-2ea1e3d2d98b h1:wbh8IK+aMLTCey9sZasO7b6BWLAJnHHvb79fvWCXwxw=
 github.com/authzed/grpcutil v0.0.0-20240123194739-2ea1e3d2d98b/go.mod h1:s3qC7V7XIbiNWERv7Lfljy/Lx25/V1Qlexb0WJuA8uQ=
 github.com/aws/aws-sdk-go v1.34.13/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
-github.com/aws/aws-sdk-go v1.44.129 h1:yld8Rc8OCahLtenY1mnve4w1jVeBu/rSiscGzodaDOs=
-github.com/aws/aws-sdk-go v1.44.129/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI=
+github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
 github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
-github.com/bluekeyes/go-gitdiff v0.5.0 h1:AXrIoy/VEA9Baz2lhwMlpdzDJ/sKof6C9yTt1oqw4hQ=
-github.com/bluekeyes/go-gitdiff v0.5.0/go.mod h1:QpfYYO1E0fTVHVZAZKiRjtSGY9823iCdvGXBcEzHGbM=
+github.com/bluekeyes/go-gitdiff v0.7.3 h1:SElKwtm/IQPOwKs0vdowW5uAlip+P+jatagmUU8E0r4=
+github.com/bluekeyes/go-gitdiff v0.7.3/go.mod h1:QpfYYO1E0fTVHVZAZKiRjtSGY9823iCdvGXBcEzHGbM=
+github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
 github.com/cavaliergopher/rpm v1.2.0 h1:s0h+QeVK252QFTolkhGiMeQ1f+tMeIMhGl8B1HUmGUc=
 github.com/cavaliergopher/rpm v1.2.0/go.mod h1:R0q3vTqa7RUvPofAZYrnjJ63hh2vngjFfphuXiExVos=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@@ -104,41 +102,41 @@ github.com/certifi/gocertifi v0.0.0-20210507211836-431795d63e8d/go.mod h1:sGbDF6
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
+github.com/cloudflare/circl v1.3.9 h1:QFrlgFYf2Qpi8bSpVPK1HBvWpx16v/1TZivyo7pGuBE=
+github.com/cloudflare/circl v1.3.9/go.mod h1:PDRU+oXvdD7KCtgKxW95M5Z8BpSCJXQORiZFnBQS5QU=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
 github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-oidc/v3 v3.0.0 h1:/mAA0XMgYJw2Uqm7WKGCsKnjitE/+A0FFbOmiRJm7LQ=
 github.com/coreos/go-oidc/v3 v3.0.0/go.mod h1:rEJ/idjfUyfkBit1eI1fvyr+64/g9dcKpAm8MJMesvo=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cyphar/filepath-securejoin v0.3.0 h1:tXpmbiaeBrS/K2US8nhgwdKYnfAOnVfkcLPKFgFHeA0=
+github.com/cyphar/filepath-securejoin v0.3.0/go.mod h1:F7i41x/9cBF7lzCrVsYs9fuzwRZm4NQsGTBdpp6mETc=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU=
+github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
 github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
 github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
-github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
+github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
+github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
 github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
@@ -148,29 +146,27 @@ github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU
 github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
 github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
 github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
 github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
-github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/gliderlabs/ssh v0.3.7 h1:iV3Bqi942d9huXnzEF2Mt+CY9gLu8DNM4Obd+8bODRE=
+github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8=
 github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec=
 github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
-github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=
-github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E=
-github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
-github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34=
-github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
-github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12 h1:PbKy9zOy4aAKrJ5pibIRpVO2BXnK1Tlcg+caKI7Ox5M=
-github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw=
-github.com/go-git/go-git/v5 v5.2.0 h1:YPBLG/3UK1we1ohRkncLjaXWLW+HKp5QNM/jTli2JgI=
-github.com/go-git/go-git/v5 v5.2.0/go.mod h1:kh02eMX+wdqqxgNMEyq8YgwlIOsDOa9homkUq1PoTMs=
+github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
+github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
+github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
+github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
+github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
+github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
+github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys=
+github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -184,8 +180,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
 github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
-github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
-github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
@@ -206,17 +202,12 @@ github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0
 github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/gogo/status v1.1.0 h1:+eIkrewn5q6b30y+g/BJINVVdi2xH7je5MPJ3ZPK3JA=
 github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM=
-github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
-github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
-github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -247,8 +238,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
 github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
@@ -260,7 +251,6 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
@@ -276,9 +266,8 @@ github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
 github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=
-github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc=
+github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@@ -286,9 +275,6 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
 github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ=
 github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
@@ -303,63 +289,35 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF
 github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.12.1 h1:9F8GV9r9ztXyAi00gsMQHNoF51xPZm8uj1dpYt2ZETM=
-github.com/googleapis/gax-go/v2 v2.12.1/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc=
-github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA=
+github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E=
 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.6.0/go.mod h1:qrJPVzv9YlhsrxJc3P/Q85nr0w1lIRikTl4JlhdDH5w=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM=
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
 github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
 github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
+github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
 github.com/hashicorp/go-retryablehttp v0.6.8 h1:92lWxgpa+fF3FozM4B3UZtHZMJX8T5XT+TFdCxsPyWs=
 github.com/hashicorp/go-retryablehttp v0.6.8/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
 github.com/iancoleman/strcase v0.0.0-20180726023541-3605ed457bf7/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
-github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
 github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
 github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
@@ -367,7 +325,6 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw
 github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
 github.com/jmoiron/sqlx v1.3.4 h1:wv+0IJZfL5z0uZoUjlpKgHkgaFSYD+r9CfrXjEXsO7w=
 github.com/jmoiron/sqlx v1.3.4/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
 github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
@@ -378,15 +335,12 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
 github.com/jzelinskie/stringz v0.0.0-20210414224931-d6a8ce844a70 h1:thTca5Eyouk5CEcJ75Cbw9CSAGE7TAc6rIi+WgHWpOE=
 github.com/jzelinskie/stringz v0.0.0-20210414224931-d6a8ce844a70/go.mod h1:hHYbgxJuNLRw91CmpuFsYEOyQqpDVFg8pvEh23vy4P0=
-github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY=
-github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
+github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -405,14 +359,12 @@ github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8=
 github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/lyft/protoc-gen-star v0.5.1/go.mod h1:9toiA3cC7z5uVbODF7kEQ91Xn7XNFkVUl+SrEe+ZORU=
-github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
-github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
 github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
 github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
@@ -423,18 +375,8 @@ github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwp
 github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
 github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
-github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -447,7 +389,6 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
 github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4=
 github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
 github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg=
@@ -455,23 +396,21 @@ github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8P
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/ory/hydra-client-go/v2 v2.2.0 h1:g8hw0YQD5Us1aAgZj7OyBmBGSDwlnY9/2Pb/pQQq8YE=
 github.com/ory/hydra-client-go/v2 v2.2.0/go.mod h1:h0DSI2kQA3S2fN7HyD8DNWcvbgDmYRSxfhwu/mSBhH8=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
 github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw=
 github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.8.1 h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM=
-github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
+github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
+github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
+github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
+github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
-github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
@@ -484,8 +423,6 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
 github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
@@ -493,65 +430,59 @@ github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+
 github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
 github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
 github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
 github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
 github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
-github.com/rocky-linux/srpmproc v0.4.3 h1:PnfuOQbGZEwOEr1u7Notz9Pm+Ol38Tu7mo8/63rgeaE=
-github.com/rocky-linux/srpmproc v0.4.3/go.mod h1:x8Z2wqhV2JqRnYMhYz3thOQkfsSWjJkyX8DVGDPOb48=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rocky-linux/srpmproc v0.6.4 h1:lX+Pp6HIsus7kp9xSuifcH7b6jrELfOnUpr87ia5KMc=
+github.com/rocky-linux/srpmproc v0.6.4/go.mod h1:UO3uWDMsQvj5Mz7YEa3QI1LuP8YYCc1B4ctoIXYPz2I=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
-github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
+github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk=
+github.com/sagikazarmark/locafero v0.6.0/go.mod h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0=
+github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
+github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
+github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
+github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
-github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A=
+github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
+github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
+github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
 github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
 github.com/spf13/afero v1.3.4/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
-github.com/spf13/afero v1.10.0
h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY= -github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= -github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk= -github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -561,45 +492,42 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tmc/grpc-websocket-proxy 
v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/vbauerster/mpb/v7 v7.0.2 h1:eN6AD/ytv1nqCO7Dm8MO0/pGMKmMyH/WMnTJhAUuc/w= github.com/vbauerster/mpb/v7 v7.0.2/go.mod h1:Mnq3gESXJ9eQhccbGZDggJ1faTCrmaA4iN57fUloRGE= github.com/xanzy/go-gitlab v0.50.4 h1:DA0+D9eTBnZvrGBOQ66XV4ZV9gOFXKbbgm9L99EKCUs= github.com/xanzy/go-gitlab v0.50.4/go.mod h1:Q+hQhV508bDPoBijv7YjK/Lvlb4PhVhJdKqXVQrUoAE= -github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= -github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= -go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= -go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= -go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= 
-go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= -go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= +go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.temporal.io/api v1.6.1-0.20211110205628-60c98e9cbfe2 h1:TN/PQNywCnOG/hXLHKkKKOQQtpi7JHBDD8fpv8H8JiA= go.temporal.io/api v1.6.1-0.20211110205628-60c98e9cbfe2/go.mod h1:IlUgOTGfmJuOkGrCZdptNxyXKE9CQz6oOx7/aH9bFY4= @@ -607,33 +535,32 @@ go.temporal.io/sdk v1.13.1 h1:4LRIe1WLM+m2pN6sNod4sMV+0bV8WTscVfRsipOP8N8= go.temporal.io/sdk v1.13.1/go.mod h1:TCof7U/xas2FyDnx/UUEv4c/O/S41Lnhva+6JVer+Jo= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto 
v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= @@ -641,6 +568,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20240716175740-e3f259677ff7 h1:wDLEX9a7YQoKdKNQt88rtydkqDxeGaBUTnIYc3iG/mA= +golang.org/x/exp v0.0.0-20240716175740-e3f259677ff7/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -653,28 +582,21 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint 
v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mobile v0.0.0-20200801112145-973feb4309de/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -702,10 +624,7 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -715,23 +634,23 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= -golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -744,16 +663,13 @@ 
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -763,7 +679,6 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -783,17 +698,11 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -804,41 +713,47 @@ golang.org/x/sys v0.0.0-20211110154304-99a53858aa08/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -849,7 +764,6 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -857,7 +771,6 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -878,19 +791,13 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -913,11 +820,8 @@ google.golang.org/api 
v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.166.0 h1:6m4NUwrZYhAaVIHZWxaKjw1L1vNAjtMwORmKRyEEo24= -google.golang.org/api v0.166.0/go.mod h1:4FcBc686KFi7QI/U51/2GKKevfZMpM17sCdibqe/bSA= +google.golang.org/api v0.188.0 h1:51y8fJ/b1AaaBRJr4yWm96fPcuxSo0JcegXE3DaHQHw= +google.golang.org/api v0.188.0/go.mod h1:VR0d+2SIiWOYG3r/jdm7adPW9hI2aRv9ETOSCQ9Beag= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -925,9 +829,6 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -960,22 +861,15 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod 
h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20211104193956-4c6863e31247/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20240221002015-b0ce06bbee7c h1:Zmyn5CV/jxzKnF+3d+xzbomACPwLQqVpLTpyXN5uTaQ= -google.golang.org/genproto v0.0.0-20240221002015-b0ce06bbee7c/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= -google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c h1:9g7erC9qu44ks7UK4gDNlnk4kOxZG707xKm4jVniy6o= -google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c h1:NUsgEN92SQQqzfA+YtqYNqYmB3DMMYLlIwUZAQFVFbo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= +google.golang.org/genproto v0.0.0-20240711142825-46eb208f015d h1:/hmn0Ku5kWij/kjGsrcJeC1T/MrJi2iNWwgAqrihFwc= +google.golang.org/genproto v0.0.0-20240711142825-46eb208f015d/go.mod h1:FfBgJBJg9GcpPvKIuHSZ/aE1g2ecGL74upMzGZjiGEY= +google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d h1:kHjw/5UfflP/L5EbledDrcG4C2597RtymmGRZvHiCuY= +google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d h1:JU0iKnSg02Gmb5ZdV8nYsKEKsP6o/FGVWTrw4i1DA9A= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -989,16 +883,13 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk= -google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= 
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1012,8 +903,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1024,15 +915,12 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww= -gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/obsidian/impl/v1/BUILD.bazel b/obsidian/impl/v1/BUILD.bazel index c9e4ad4c..2a146458 100644 --- a/obsidian/impl/v1/BUILD.bazel +++ b/obsidian/impl/v1/BUILD.bazel @@ -21,7 +21,7 @@ go_library( "//vendor/github.com/gogo/status", "//vendor/github.com/ory/hydra-client-go/v2:hydra-client-go", "//vendor/github.com/sirupsen/logrus", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//metadata", "@org_golang_google_grpc//status", diff --git a/obsidian/proto/v1/BUILD.bazel b/obsidian/proto/v1/BUILD.bazel index 4ae739ee..5c73f5ef 100644 --- a/obsidian/proto/v1/BUILD.bazel +++ b/obsidian/proto/v1/BUILD.bazel @@ -1,8 +1,8 @@ -load("@rules_proto//proto:defs.bzl", "proto_library") 
+load("@com_github_grpc_ecosystem_grpc_gateway_v2//protoc-gen-openapiv2:defs.bzl", "protoc_gen_openapiv2") load("@io_bazel_rules_go//go:def.bzl", "go_library") load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") -load("@com_github_grpc_ecosystem_grpc_gateway_v2//protoc-gen-openapiv2:defs.bzl", "protoc_gen_openapiv2") load("@openapi_tools_generator_bazel//:defs.bzl", "openapi_generator") +load("@rules_proto//proto:defs.bzl", "proto_library") proto_library( name = "obsidianpb_proto", diff --git a/patches/circl.patch b/patches/circl.patch new file mode 100644 index 00000000..7561cb51 --- /dev/null +++ b/patches/circl.patch @@ -0,0 +1,58 @@ +diff --git a/dh/x25519/BUILD.bazel b/dh/x25519/BUILD.bazel +index d569bc1..13a942d 100644 +--- a/dh/x25519/BUILD.bazel ++++ b/dh/x25519/BUILD.bazel +@@ -12,7 +12,9 @@ go_library( + "doc.go", + "key.go", + "table.go", ++ "//math/fp25519:fp_amd64.h", + ], ++ cgo = True, + importpath = "github.com/cloudflare/circl/dh/x25519", + visibility = ["//visibility:public"], + deps = [ +diff --git a/dh/x448/BUILD.bazel b/dh/x448/BUILD.bazel +index ed287c6..cdbd4f1 100644 +--- a/dh/x448/BUILD.bazel ++++ b/dh/x448/BUILD.bazel +@@ -12,7 +12,9 @@ go_library( + "doc.go", + "key.go", + "table.go", ++ "//math/fp448:fp_amd64.h", + ], ++ cgo = True, + importpath = "github.com/cloudflare/circl/dh/x448", + visibility = ["//visibility:public"], + deps = [ +diff --git a/math/fp25519/BUILD.bazel b/math/fp25519/BUILD.bazel +index c9973ac..e98be64 100644 +--- a/math/fp25519/BUILD.bazel ++++ b/math/fp25519/BUILD.bazel +@@ -1,5 +1,10 @@ + load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + ++exports_files( ++ ["fp_amd64.h"], ++ visibility = ["//dh/x25519:__pkg__"], ++) ++ + go_library( + name = "fp25519", + srcs = [ +diff --git a/math/fp448/BUILD.bazel b/math/fp448/BUILD.bazel +index c371ca5..ebc055d 100644 +--- a/math/fp448/BUILD.bazel ++++ b/math/fp448/BUILD.bazel +@@ -1,5 +1,10 @@ + load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + ++exports_files( ++ ["fp_amd64.h"], ++ visibility = ["//dh/x448:__pkg__"], ++) ++ + go_library( + name = "fp448", + srcs = [ diff --git a/peridot/admin/v1/BUILD.bazel b/peridot/admin/v1/BUILD.bazel index b55cc83b..b1f2b63f 100644 --- a/peridot/admin/v1/BUILD.bazel +++ b/peridot/admin/v1/BUILD.bazel @@ -18,7 +18,7 @@ go_library( "//utils", "//vendor/github.com/sirupsen/logrus", "//vendor/go.temporal.io/sdk/client", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//credentials/insecure", "@org_golang_google_grpc//status", diff --git a/peridot/builder/v1/workflow/import.go b/peridot/builder/v1/workflow/import.go index 4b9963ae..dbab7db0 100644 --- a/peridot/builder/v1/workflow/import.go +++ b/peridot/builder/v1/workflow/import.go @@ -1125,6 +1125,7 @@ func (c *Controller) UpstreamDistGitActivity(ctx context.Context, greq *Upstream Version: project.MajorVersion, StorageAddr: fmt.Sprintf("s3://%s", viper.GetString("s3-bucket")), Package: greq.Package.Name, + PackageGitName: gitlabify(greq.Package.Name), ModulePrefix: modulePrefix, RpmPrefix: rpmPrefix, HttpUsername: projectKeys.GitlabUsername, @@ -1169,6 +1170,7 @@ func (c *Controller) UpstreamDistGitActivity(ctx context.Context, greq *Upstream Version: project.MajorVersion, StorageAddr: fmt.Sprintf("s3://%s", viper.GetString("s3-bucket")), Package: greq.Package.Name, + PackageGitName: gitlabify(greq.Package.Name), ModulePrefix: modulePrefix, RpmPrefix: rpmPrefix, HttpUsername: 
diff --git a/peridot/cmd/v1/peridot/BUILD.bazel b/peridot/cmd/v1/peridot/BUILD.bazel
index 337b19ec..e7850a69 100644
--- a/peridot/cmd/v1/peridot/BUILD.bazel
+++ b/peridot/cmd/v1/peridot/BUILD.bazel
@@ -11,10 +11,10 @@ go_library(
         "lookaside_upload.go",
         "main.go",
         "project.go",
-        "project_info.go",
-        "project_list.go",
         "project_catalog_sync.go",
         "project_create_hashed_repos.go",
+        "project_info.go",
+        "project_list.go",
         "utils.go",
     ],
     data = [
diff --git a/peridot/cmd/v1/peridotbuilder/BUILD.bazel b/peridot/cmd/v1/peridotbuilder/BUILD.bazel
index 0d915f90..98fc9ed5 100644
--- a/peridot/cmd/v1/peridotbuilder/BUILD.bazel
+++ b/peridot/cmd/v1/peridotbuilder/BUILD.bazel
@@ -21,7 +21,7 @@ go_library(
         "//vendor/github.com/spf13/cobra",
         "//vendor/github.com/spf13/viper",
         "//vendor/go.temporal.io/sdk/client",
-        "@org_golang_google_grpc//:go_default_library",
+        "@org_golang_google_grpc//:grpc",
         "@org_golang_google_grpc//credentials/insecure",
         "@org_golang_google_protobuf//encoding/protojson",
         "@org_golang_google_protobuf//types/known/anypb",
diff --git a/peridot/cmd/v1/yumrepofsupdater/BUILD.bazel b/peridot/cmd/v1/yumrepofsupdater/BUILD.bazel
index a6b19a65..8e2e6f6d 100644
--- a/peridot/cmd/v1/yumrepofsupdater/BUILD.bazel
+++ b/peridot/cmd/v1/yumrepofsupdater/BUILD.bazel
@@ -19,7 +19,7 @@ go_library(
         "//vendor/github.com/sirupsen/logrus",
         "//vendor/github.com/spf13/cobra",
         "//vendor/go.temporal.io/sdk/client",
-        "@org_golang_google_grpc//:go_default_library",
+        "@org_golang_google_grpc//:grpc",
         "@org_golang_google_grpc//credentials/insecure",
     ],
 )
diff --git a/peridot/impl/v1/BUILD.bazel b/peridot/impl/v1/BUILD.bazel
index a575bf92..27fa9a47 100644
--- a/peridot/impl/v1/BUILD.bazel
+++ b/peridot/impl/v1/BUILD.bazel
@@ -29,7 +29,7 @@ go_library(
         "//vendor/github.com/sirupsen/logrus",
         "//vendor/go.temporal.io/sdk/client",
         "@org_golang_google_genproto_googleapis_api//httpbody",
-        "@org_golang_google_grpc//:go_default_library",
+        "@org_golang_google_grpc//:grpc",
         "@org_golang_google_grpc//codes",
         "@org_golang_google_grpc//credentials/insecure",
         "@org_golang_google_grpc//status",
diff --git a/peridot/keykeeper/v1/BUILD.bazel b/peridot/keykeeper/v1/BUILD.bazel
index e2853deb..6c50414b 100644
--- a/peridot/keykeeper/v1/BUILD.bazel
+++ b/peridot/keykeeper/v1/BUILD.bazel
@@ -34,7 +34,7 @@ go_library(
         "@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
         "@com_github_protonmail_go_crypto//openpgp",
         "@org_golang_google_genproto_googleapis_rpc//errdetails",
-        "@org_golang_google_grpc//:go_default_library",
+        "@org_golang_google_grpc//:grpc",
         "@org_golang_google_grpc//codes",
         "@org_golang_google_grpc//status",
         "@org_golang_google_protobuf//types/known/anypb",
diff --git a/peridot/proto/v1/BUILD.bazel b/peridot/proto/v1/BUILD.bazel
index b16ca842..380fc950 100644
--- a/peridot/proto/v1/BUILD.bazel
+++ b/peridot/proto/v1/BUILD.bazel
@@ -1,8 +1,8 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-load("@rules_proto//proto:defs.bzl", "proto_library")
-load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
 load("@com_github_grpc_ecosystem_grpc_gateway_v2//protoc-gen-openapiv2:defs.bzl", "protoc_gen_openapiv2")
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
 load("@openapi_tools_generator_bazel//:defs.bzl", "openapi_generator")
+load("@rules_proto//proto:defs.bzl", "proto_library")
 
 proto_library(
     name = "peridotpb_proto",
diff --git a/peridot/proto/v1/admin/BUILD.bazel b/peridot/proto/v1/admin/BUILD.bazel
index e6c38307..b33c0dfc 100644
--- a/peridot/proto/v1/admin/BUILD.bazel
+++ b/peridot/proto/v1/admin/BUILD.bazel
@@ -1,6 +1,6 @@
-load("@rules_proto//proto:defs.bzl", "proto_library")
 load("@io_bazel_rules_go//go:def.bzl", "go_library")
 load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+load("@rules_proto//proto:defs.bzl", "proto_library")
 
 proto_library(
     name = "adminpb_proto",
diff --git a/peridot/proto/v1/keykeeper/BUILD.bazel b/peridot/proto/v1/keykeeper/BUILD.bazel
index 23adb065..5b8a3fa1 100644
--- a/peridot/proto/v1/keykeeper/BUILD.bazel
+++ b/peridot/proto/v1/keykeeper/BUILD.bazel
@@ -1,6 +1,6 @@
-load("@rules_proto//proto:defs.bzl", "proto_library")
 load("@io_bazel_rules_go//go:def.bzl", "go_library")
 load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+load("@rules_proto//proto:defs.bzl", "proto_library")
 
 proto_library(
     name = "keykeeperpb_proto",
diff --git a/peridot/proto/v1/yumrepofs/BUILD.bazel b/peridot/proto/v1/yumrepofs/BUILD.bazel
index 0a27301b..46925dad 100644
--- a/peridot/proto/v1/yumrepofs/BUILD.bazel
+++ b/peridot/proto/v1/yumrepofs/BUILD.bazel
@@ -1,6 +1,6 @@
-load("@rules_proto//proto:defs.bzl", "proto_library")
 load("@io_bazel_rules_go//go:def.bzl", "go_library")
 load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+load("@rules_proto//proto:defs.bzl", "proto_library")
 
 proto_library(
     name = "yumrepofspb_proto",
diff --git a/peridot/yumrepofs/v1/BUILD.bazel b/peridot/yumrepofs/v1/BUILD.bazel
index cc0f3541..3171923e 100644
--- a/peridot/yumrepofs/v1/BUILD.bazel
+++ b/peridot/yumrepofs/v1/BUILD.bazel
@@ -26,7 +26,7 @@ go_library(
         "//vendor/github.com/sirupsen/logrus",
         "//vendor/github.com/spf13/viper",
         "@org_golang_google_genproto_googleapis_api//httpbody",
-        "@org_golang_google_grpc//:go_default_library",
+        "@org_golang_google_grpc//:grpc",
         "@org_golang_google_grpc//codes",
         "@org_golang_google_grpc//metadata",
         "@org_golang_google_grpc//status",
diff --git a/proto/BUILD.bazel b/proto/BUILD.bazel
index cd1b1195..f823b363 100644
--- a/proto/BUILD.bazel
+++ b/proto/BUILD.bazel
@@ -1,6 +1,6 @@
 load("@io_bazel_rules_go//go:def.bzl", "go_library")
-load("@rules_proto//proto:defs.bzl", "proto_library")
 load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+load("@rules_proto//proto:defs.bzl", "proto_library")
 
 proto_library(
     name = "commonpb_proto",
diff --git a/protoc-gen-openapiv2/options/BUILD b/protoc-gen-openapiv2/options/BUILD
index b49cb077..12c9af5a 100644
--- a/protoc-gen-openapiv2/options/BUILD
+++ b/protoc-gen-openapiv2/options/BUILD
@@ -1,6 +1,6 @@
-load("@rules_proto//proto:defs.bzl", "proto_library")
 load("@io_bazel_rules_go//go:def.bzl", "go_library")
 load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+load("@rules_proto//proto:defs.bzl", "proto_library")
 
 proto_library(
     name = "options_proto",
diff --git a/repositories.bzl b/repositories.bzl
index b659e1bd..afee6074 100644
--- a/repositories.bzl
+++ b/repositories.bzl
@@ -1,6 +1,12 @@
 load("@bazel_gazelle//:deps.bzl", "go_repository")
 
 def go_repositories():
+    go_repository(
+        name = "cat_dario_mergo",
+        importpath = "dario.cat/mergo",
+        sum = "h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=",
+        version = "v1.0.0",
+    )
     go_repository(
         name = "co_honnef_go_tools",
         importpath = "honnef.co/go/tools",
@@ -13,12 +19,6 @@ def go_repositories():
         sum = "h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=",
         version = "v0.0.0-20180116102854-5a71ef0e047d",
     )
-    go_repository(
-        name = "com_github_alcortesm_tgz",
-        importpath = "github.com/alcortesm/tgz",
"github.com/alcortesm/tgz", - sum = "h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=", - version = "v0.0.0-20161220082320-9c5fe88206d7", - ) go_repository( name = "com_github_alecthomas_template", importpath = "github.com/alecthomas/template", @@ -34,8 +34,8 @@ def go_repositories(): go_repository( name = "com_github_anmitsu_go_shlex", importpath = "github.com/anmitsu/go-shlex", - sum = "h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=", - version = "v0.0.0-20161002113705-648efa622239", + sum = "h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=", + version = "v0.0.0-20200514113438-38f4b401e2be", ) go_repository( name = "com_github_antihax_optional", @@ -43,23 +43,11 @@ def go_repositories(): sum = "h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg=", version = "v1.0.0", ) - go_repository( - name = "com_github_armon_circbuf", - importpath = "github.com/armon/circbuf", - sum = "h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA=", - version = "v0.0.0-20150827004946-bbbad097214e", - ) go_repository( name = "com_github_armon_go_metrics", importpath = "github.com/armon/go-metrics", - sum = "h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=", - version = "v0.0.0-20180917152333-f0300d1749da", - ) - go_repository( - name = "com_github_armon_go_radix", - importpath = "github.com/armon/go-radix", - sum = "h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to=", - version = "v0.0.0-20180808171621-7fddfc383310", + sum = "h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=", + version = "v0.4.1", ) go_repository( name = "com_github_armon_go_socks5", @@ -88,8 +76,8 @@ def go_repositories(): go_repository( name = "com_github_aws_aws_sdk_go", importpath = "github.com/aws/aws-sdk-go", - sum = "h1:yld8Rc8OCahLtenY1mnve4w1jVeBu/rSiscGzodaDOs=", - version = "v1.44.129", + sum = "h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI=", + version = "v1.54.19", ) go_repository( name = "com_github_benbjohnson_clock", @@ -103,23 +91,11 @@ def go_repositories(): sum = "h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=", version = "v1.0.1", ) - go_repository( - name = "com_github_bgentry_speakeasy", - importpath = "github.com/bgentry/speakeasy", - sum = "h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=", - version = "v0.1.0", - ) - go_repository( - name = "com_github_bketelsen_crypt", - importpath = "github.com/bketelsen/crypt", - sum = "h1:+0HFd5KSZ/mm3JmhmrDukiId5iR6w4+BdFtfSy4yWIc=", - version = "v0.0.3-0.20200106085610-5cbc8cc4026c", - ) go_repository( name = "com_github_bluekeyes_go_gitdiff", importpath = "github.com/bluekeyes/go-gitdiff", - sum = "h1:AXrIoy/VEA9Baz2lhwMlpdzDJ/sKof6C9yTt1oqw4hQ=", - version = "v0.5.0", + sum = "h1:SElKwtm/IQPOwKs0vdowW5uAlip+P+jatagmUU8E0r4=", + version = "v0.7.3", ) go_repository( name = "com_github_burntsushi_toml", @@ -133,6 +109,12 @@ def go_repositories(): sum = "h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=", version = "v0.0.0-20160522181843-27f122750802", ) + go_repository( + name = "com_github_bwesterb_go_ristretto", + importpath = "github.com/bwesterb/go-ristretto", + sum = "h1:1w53tCkGhCQ5djbat3+MH0BAQ5Kfgbt56UZQ/JMzngw=", + version = "v1.2.3", + ) go_repository( name = "com_github_cavaliergopher_rpm", importpath = "github.com/cavaliergopher/rpm", @@ -160,8 +142,8 @@ def go_repositories(): go_repository( name = "com_github_cespare_xxhash_v2", importpath = "github.com/cespare/xxhash/v2", - sum = "h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=", - version = "v2.2.0", + sum = "h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=", + version = "v2.3.0", ) go_repository( name = "com_github_chzyer_logex", @@ 
-187,29 +169,25 @@ def go_repositories(): sum = "h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=", version = "v0.3.4", ) + go_repository( + name = "com_github_cloudflare_circl", + importpath = "github.com/cloudflare/circl", + patch_args = ["-p1"], + patches = ["//patches:circl.patch"], + sum = "h1:QFrlgFYf2Qpi8bSpVPK1HBvWpx16v/1TZivyo7pGuBE=", + version = "v1.3.9", + ) go_repository( name = "com_github_cncf_udpa_go", importpath = "github.com/cncf/udpa/go", - sum = "h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk=", - version = "v0.0.0-20220112060539-c52dc94e7fbe", + sum = "h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI=", + version = "v0.0.0-20210930031921-04548b0d99d4", ) go_repository( name = "com_github_cncf_xds_go", importpath = "github.com/cncf/xds/go", - sum = "h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=", - version = "v0.0.0-20231128003011-0fa0005c9caa", - ) - go_repository( - name = "com_github_coreos_bbolt", - importpath = "github.com/coreos/bbolt", - sum = "h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s=", - version = "v1.3.2", - ) - go_repository( - name = "com_github_coreos_etcd", - importpath = "github.com/coreos/etcd", - sum = "h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ=", - version = "v3.3.13+incompatible", + sum = "h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw=", + version = "v0.0.0-20240423153145-555b57ec207b", ) go_repository( name = "com_github_coreos_go_oidc_v3", @@ -224,22 +202,16 @@ def go_repositories(): version = "v0.3.0", ) go_repository( - name = "com_github_coreos_go_systemd", - importpath = "github.com/coreos/go-systemd", - sum = "h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=", - version = "v0.0.0-20190321100706-95778dfbb74e", - ) - go_repository( - name = "com_github_coreos_pkg", - importpath = "github.com/coreos/pkg", - sum = "h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=", - version = "v0.0.0-20180928190104-399ea9e2e55f", + name = "com_github_coreos_go_systemd_v22", + importpath = "github.com/coreos/go-systemd/v22", + sum = "h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=", + version = "v22.3.2", ) go_repository( name = "com_github_cpuguy83_go_md2man_v2", importpath = "github.com/cpuguy83/go-md2man/v2", - sum = "h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=", - version = "v2.0.0", + sum = "h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=", + version = "v2.0.4", ) go_repository( name = "com_github_creack_pty", @@ -247,24 +219,23 @@ def go_repositories(): sum = "h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w=", version = "v1.1.9", ) + go_repository( + name = "com_github_cyphar_filepath_securejoin", + importpath = "github.com/cyphar/filepath-securejoin", + sum = "h1:tXpmbiaeBrS/K2US8nhgwdKYnfAOnVfkcLPKFgFHeA0=", + version = "v0.3.0", + ) go_repository( name = "com_github_davecgh_go_spew", importpath = "github.com/davecgh/go-spew", - sum = "h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=", - version = "v1.1.1", + sum = "h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=", + version = "v1.1.2-0.20180830191138-d8f796af33cc", ) go_repository( - name = "com_github_dgrijalva_jwt_go", - importpath = "github.com/dgrijalva/jwt-go", - replace = "github.com/golang-jwt/jwt/v4", - sum = "h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs=", - version = "v4.4.2", - ) - go_repository( - name = "com_github_dgryski_go_sip13", - importpath = "github.com/dgryski/go-sip13", - sum = "h1:RMLoZVzv4GliuWafOuPuQDKSm1SJph7uCRnnS61JAn4=", - version = "v0.0.0-20181026042036-e10d5fee7954", + name = "com_github_elazarl_goproxy", + importpath = "github.com/elazarl/goproxy", + sum = 
"h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU=", + version = "v0.0.0-20230808193330-2592e75ae04a", ) go_repository( name = "com_github_emicklei_go_restful_v3", @@ -275,8 +246,8 @@ def go_repositories(): go_repository( name = "com_github_emirpasic_gods", importpath = "github.com/emirpasic/gods", - sum = "h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=", - version = "v1.12.0", + sum = "h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=", + version = "v1.18.1", ) go_repository( name = "com_github_envoyproxy_go_control_plane", @@ -315,16 +286,16 @@ def go_repositories(): version = "v1.0.4", ) go_repository( - name = "com_github_flynn_go_shlex", - importpath = "github.com/flynn/go-shlex", - sum = "h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=", - version = "v0.0.0-20150515145356-3f9db97f8568", + name = "com_github_frankban_quicktest", + importpath = "github.com/frankban/quicktest", + sum = "h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=", + version = "v1.14.6", ) go_repository( name = "com_github_fsnotify_fsnotify", importpath = "github.com/fsnotify/fsnotify", - sum = "h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=", - version = "v1.4.9", + sum = "h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=", + version = "v1.7.0", ) go_repository( name = "com_github_ghodss_yaml", @@ -335,8 +306,8 @@ def go_repositories(): go_repository( name = "com_github_gliderlabs_ssh", importpath = "github.com/gliderlabs/ssh", - sum = "h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=", - version = "v0.2.2", + sum = "h1:iV3Bqi942d9huXnzEF2Mt+CY9gLu8DNM4Obd+8bODRE=", + version = "v0.3.7", ) go_repository( name = "com_github_go_chi_chi", @@ -347,26 +318,26 @@ def go_repositories(): go_repository( name = "com_github_go_git_gcfg", importpath = "github.com/go-git/gcfg", - sum = "h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=", - version = "v1.5.0", + sum = "h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=", + version = "v1.5.1-0.20230307220236-3a3c6141e376", ) go_repository( name = "com_github_go_git_go_billy_v5", importpath = "github.com/go-git/go-billy/v5", - sum = "h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34=", - version = "v5.3.1", + sum = "h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=", + version = "v5.5.0", ) go_repository( name = "com_github_go_git_go_git_fixtures_v4", importpath = "github.com/go-git/go-git-fixtures/v4", - sum = "h1:PbKy9zOy4aAKrJ5pibIRpVO2BXnK1Tlcg+caKI7Ox5M=", - version = "v4.0.2-0.20200613231340-f56387b50c12", + sum = "h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=", + version = "v4.3.2-0.20231010084843-55a94097c399", ) go_repository( name = "com_github_go_git_go_git_v5", importpath = "github.com/go-git/go-git/v5", - sum = "h1:YPBLG/3UK1we1ohRkncLjaXWLW+HKp5QNM/jTli2JgI=", - version = "v5.2.0", + sum = "h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys=", + version = "v5.12.0", ) go_repository( name = "com_github_go_gl_glfw", @@ -401,8 +372,8 @@ def go_repositories(): go_repository( name = "com_github_go_logr_logr", importpath = "github.com/go-logr/logr", - sum = "h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=", - version = "v1.4.1", + sum = "h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=", + version = "v1.4.2", ) go_repository( name = "com_github_go_logr_stdr", @@ -473,8 +444,8 @@ def go_repositories(): go_repository( name = "com_github_golang_glog", importpath = "github.com/golang/glog", - sum = "h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=", - version = "v1.2.0", + sum = "h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4=", + version = "v1.2.1", ) go_repository( name = 
"com_github_golang_groupcache", @@ -491,8 +462,8 @@ def go_repositories(): go_repository( name = "com_github_golang_protobuf", importpath = "github.com/golang/protobuf", - sum = "h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=", - version = "v1.5.3", + sum = "h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=", + version = "v1.5.4", ) go_repository( name = "com_github_golang_snappy", @@ -545,8 +516,8 @@ def go_repositories(): go_repository( name = "com_github_google_martian_v3", importpath = "github.com/google/martian/v3", - sum = "h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=", - version = "v3.3.2", + sum = "h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc=", + version = "v3.3.3", ) go_repository( name = "com_github_google_pprof", @@ -585,20 +556,14 @@ def go_repositories(): ], build_file_proto_mode = "disable", importpath = "github.com/googleapis/gax-go/v2", - sum = "h1:9F8GV9r9ztXyAi00gsMQHNoF51xPZm8uj1dpYt2ZETM=", - version = "v2.12.1", + sum = "h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA=", + version = "v2.12.5", ) go_repository( name = "com_github_googleapis_google_cloud_go_testing", importpath = "github.com/googleapis/google-cloud-go-testing", - sum = "h1:tlyzajkF3030q6M8SvmJSemC9DTHL/xaMa18b65+JM4=", - version = "v0.0.0-20200911160855-bcd43fbb19e8", - ) - go_repository( - name = "com_github_gopherjs_gopherjs", - importpath = "github.com/gopherjs/gopherjs", - sum = "h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=", - version = "v0.0.0-20181017120253-0766667cb4d1", + sum = "h1:zC34cGQu69FG7qzJ3WiKW244WfhDC3xxYMeNOX2gtUQ=", + version = "v0.0.0-20210719221736-1c9a4c676720", ) go_repository( name = "com_github_gorilla_websocket", @@ -639,56 +604,38 @@ def go_repositories(): go_repository( name = "com_github_hashicorp_consul_api", importpath = "github.com/hashicorp/consul/api", - sum = "h1:BNQPM9ytxj6jbjjdRPioQ94T6YXriSopn0i8COv6SRA=", - version = "v1.1.0", - ) - go_repository( - name = "com_github_hashicorp_consul_sdk", - importpath = "github.com/hashicorp/consul/sdk", - sum = "h1:LnuDWGNsoajlhGyHJvuWW6FVqRl8JOTPqS6CPTsYjhY=", - version = "v0.1.1", + sum = "h1:mXfkRHrpHN4YY3RqL09nXU1eHKLNiuAN4kHvDQ16k/8=", + version = "v1.28.2", ) go_repository( name = "com_github_hashicorp_errwrap", importpath = "github.com/hashicorp/errwrap", - sum = "h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=", - version = "v1.0.0", + sum = "h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=", + version = "v1.1.0", ) go_repository( name = "com_github_hashicorp_go_cleanhttp", importpath = "github.com/hashicorp/go-cleanhttp", - sum = "h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=", - version = "v0.5.1", + sum = "h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=", + version = "v0.5.2", ) go_repository( name = "com_github_hashicorp_go_hclog", importpath = "github.com/hashicorp/go-hclog", - sum = "h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=", - version = "v0.9.2", + sum = "h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=", + version = "v1.5.0", ) go_repository( name = "com_github_hashicorp_go_immutable_radix", importpath = "github.com/hashicorp/go-immutable-radix", - sum = "h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=", - version = "v1.0.0", - ) - go_repository( - name = "com_github_hashicorp_go_msgpack", - importpath = "github.com/hashicorp/go-msgpack", - sum = "h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=", - version = "v0.5.3", + sum = "h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=", + version = "v1.3.1", ) go_repository( name = "com_github_hashicorp_go_multierror", importpath = 
"github.com/hashicorp/go-multierror", - sum = "h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=", - version = "v1.0.0", - ) - go_repository( - name = "com_github_hashicorp_go_net", - importpath = "github.com/hashicorp/go.net", - sum = "h1:sNCoNyDEvN1xa+X0baata4RdcpKwcMS6DH+xwfqPgjw=", - version = "v0.0.1", + sum = "h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=", + version = "v1.1.1", ) go_repository( name = "com_github_hashicorp_go_retryablehttp", @@ -699,32 +646,14 @@ def go_repositories(): go_repository( name = "com_github_hashicorp_go_rootcerts", importpath = "github.com/hashicorp/go-rootcerts", - sum = "h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI=", - version = "v1.0.0", - ) - go_repository( - name = "com_github_hashicorp_go_sockaddr", - importpath = "github.com/hashicorp/go-sockaddr", - sum = "h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs=", - version = "v1.0.0", - ) - go_repository( - name = "com_github_hashicorp_go_syslog", - importpath = "github.com/hashicorp/go-syslog", - sum = "h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=", - version = "v1.0.0", - ) - go_repository( - name = "com_github_hashicorp_go_uuid", - importpath = "github.com/hashicorp/go-uuid", - sum = "h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=", - version = "v1.0.1", + sum = "h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=", + version = "v1.0.2", ) go_repository( name = "com_github_hashicorp_golang_lru", importpath = "github.com/hashicorp/golang-lru", - sum = "h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=", - version = "v0.5.1", + sum = "h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=", + version = "v0.5.4", ) go_repository( name = "com_github_hashicorp_hcl", @@ -732,29 +661,11 @@ def go_repositories(): sum = "h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=", version = "v1.0.0", ) - go_repository( - name = "com_github_hashicorp_logutils", - importpath = "github.com/hashicorp/logutils", - sum = "h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=", - version = "v1.0.0", - ) - go_repository( - name = "com_github_hashicorp_mdns", - importpath = "github.com/hashicorp/mdns", - sum = "h1:WhIgCr5a7AaVH6jPUwjtRuuE7/RDufnUvzIr48smyxs=", - version = "v1.0.0", - ) - go_repository( - name = "com_github_hashicorp_memberlist", - importpath = "github.com/hashicorp/memberlist", - sum = "h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M=", - version = "v0.1.3", - ) go_repository( name = "com_github_hashicorp_serf", importpath = "github.com/hashicorp/serf", - sum = "h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0=", - version = "v0.8.2", + sum = "h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=", + version = "v0.10.1", ) go_repository( name = "com_github_iancoleman_strcase", @@ -771,14 +682,14 @@ def go_repositories(): go_repository( name = "com_github_imdario_mergo", importpath = "github.com/imdario/mergo", - sum = "h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=", - version = "v0.3.11", + sum = "h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=", + version = "v0.3.6", ) go_repository( name = "com_github_inconshreveable_mousetrap", importpath = "github.com/inconshreveable/mousetrap", - sum = "h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=", - version = "v1.0.0", + sum = "h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=", + version = "v1.1.0", ) go_repository( name = "com_github_jbenet_go_context", @@ -786,12 +697,6 @@ def go_repositories(): sum = "h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=", version = "v0.0.0-20150711004518-d14ea06fba99", ) - go_repository( - name = "com_github_jessevdk_go_flags", - importpath = 
"github.com/jessevdk/go-flags", - sum = "h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=", - version = "v1.4.0", - ) go_repository( name = "com_github_jmespath_go_jmespath", importpath = "github.com/jmespath/go-jmespath", @@ -810,12 +715,6 @@ def go_repositories(): sum = "h1:wv+0IJZfL5z0uZoUjlpKgHkgaFSYD+r9CfrXjEXsO7w=", version = "v1.3.4", ) - go_repository( - name = "com_github_jonboulle_clockwork", - importpath = "github.com/jonboulle/clockwork", - sum = "h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=", - version = "v0.1.0", - ) go_repository( name = "com_github_josharian_intern", importpath = "github.com/josharian/intern", @@ -840,12 +739,6 @@ def go_repositories(): sum = "h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=", version = "v0.9.1", ) - go_repository( - name = "com_github_jtolds_gls", - importpath = "github.com/jtolds/gls", - sum = "h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=", - version = "v4.20.0+incompatible", - ) go_repository( name = "com_github_julienschmidt_httprouter", importpath = "github.com/julienschmidt/httprouter", @@ -861,8 +754,8 @@ def go_repositories(): go_repository( name = "com_github_kevinburke_ssh_config", importpath = "github.com/kevinburke/ssh_config", - sum = "h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY=", - version = "v0.0.0-20190725054713-01f96b0aa0cd", + sum = "h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=", + version = "v1.2.0", ) go_repository( name = "com_github_kisielk_errcheck", @@ -876,6 +769,12 @@ def go_repositories(): sum = "h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=", version = "v1.0.0", ) + go_repository( + name = "com_github_klauspost_compress", + importpath = "github.com/klauspost/compress", + sum = "h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=", + version = "v1.17.2", + ) go_repository( name = "com_github_konsorten_go_windows_terminal_sequences", importpath = "github.com/konsorten/go-windows-terminal-sequences", @@ -933,8 +832,8 @@ def go_repositories(): go_repository( name = "com_github_magiconair_properties", importpath = "github.com/magiconair/properties", - sum = "h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=", - version = "v1.8.1", + sum = "h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=", + version = "v1.8.7", ) go_repository( name = "com_github_mailru_easyjson", @@ -973,16 +872,10 @@ def go_repositories(): version = "v1.0.1", ) go_repository( - name = "com_github_miekg_dns", - importpath = "github.com/miekg/dns", - sum = "h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=", - version = "v1.0.14", - ) - go_repository( - name = "com_github_mitchellh_cli", - importpath = "github.com/mitchellh/cli", - sum = "h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y=", - version = "v1.0.0", + name = "com_github_microsoft_go_winio", + importpath = "github.com/Microsoft/go-winio", + sum = "h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=", + version = "v0.6.2", ) go_repository( name = "com_github_mitchellh_go_homedir", @@ -990,29 +883,17 @@ def go_repositories(): sum = "h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=", version = "v1.1.0", ) - go_repository( - name = "com_github_mitchellh_go_testing_interface", - importpath = "github.com/mitchellh/go-testing-interface", - sum = "h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=", - version = "v1.0.0", - ) - go_repository( - name = "com_github_mitchellh_gox", - importpath = "github.com/mitchellh/gox", - sum = "h1:lfGJxY7ToLJQjHHwi0EX6uYBdK78egf954SQl13PQJc=", - version = "v0.4.0", - ) - go_repository( - name = "com_github_mitchellh_iochan", - importpath = "github.com/mitchellh/iochan", 
- sum = "h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY=", - version = "v1.0.0", - ) go_repository( name = "com_github_mitchellh_mapstructure", importpath = "github.com/mitchellh/mapstructure", - sum = "h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=", - version = "v1.4.1", + sum = "h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=", + version = "v1.5.0", + ) + go_repository( + name = "com_github_mmcloughlin_avo", + importpath = "github.com/mmcloughlin/avo", + sum = "h1:nAco9/aI9Lg2kiuROBY6BhCI/z0t5jEvJfjWbL8qXLU=", + version = "v0.5.0", ) go_repository( name = "com_github_moby_spdystream", @@ -1050,6 +931,24 @@ def go_repositories(): sum = "h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=", version = "v0.0.0-20140419014527-cca7078d478f", ) + go_repository( + name = "com_github_nats_io_nats_go", + importpath = "github.com/nats-io/nats.go", + sum = "h1:fnxnPCNiwIG5w08rlMcEKTUw4AV/nKyGCOJE8TdhSPk=", + version = "v1.34.0", + ) + go_repository( + name = "com_github_nats_io_nkeys", + importpath = "github.com/nats-io/nkeys", + sum = "h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=", + version = "v0.4.7", + ) + go_repository( + name = "com_github_nats_io_nuid", + importpath = "github.com/nats-io/nuid", + sum = "h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=", + version = "v1.0.1", + ) go_repository( name = "com_github_niemeyer_pretty", importpath = "github.com/niemeyer/pretty", @@ -1062,12 +961,6 @@ def go_repositories(): sum = "h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0=", version = "v0.0.0-20170623195520-56545f4a5d46", ) - go_repository( - name = "com_github_oklog_ulid", - importpath = "github.com/oklog/ulid", - sum = "h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=", - version = "v1.3.1", - ) go_repository( name = "com_github_oneofone_xxhash", importpath = "github.com/OneOfOne/xxhash", @@ -1104,12 +997,6 @@ def go_repositories(): sum = "h1:g8hw0YQD5Us1aAgZj7OyBmBGSDwlnY9/2Pb/pQQq8YE=", version = "v2.2.0", ) - go_repository( - name = "com_github_pascaldekloe_goe", - importpath = "github.com/pascaldekloe/goe", - sum = "h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs=", - version = "v0.0.0-20180627143212-57f6aae5913c", - ) go_repository( name = "com_github_pborman_uuid", importpath = "github.com/pborman/uuid", @@ -1119,8 +1006,14 @@ def go_repositories(): go_repository( name = "com_github_pelletier_go_toml", importpath = "github.com/pelletier/go-toml", - sum = "h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM=", - version = "v1.8.1", + sum = "h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=", + version = "v1.9.5", + ) + go_repository( + name = "com_github_pelletier_go_toml_v2", + importpath = "github.com/pelletier/go-toml/v2", + sum = "h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=", + version = "v2.2.2", ) go_repository( name = "com_github_peterbourgon_diskv", @@ -1128,6 +1021,12 @@ def go_repositories(): sum = "h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=", version = "v2.0.1+incompatible", ) + go_repository( + name = "com_github_pjbgf_sha1cd", + importpath = "github.com/pjbgf/sha1cd", + sum = "h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=", + version = "v0.3.0", + ) go_repository( name = "com_github_pkg_errors", importpath = "github.com/pkg/errors", @@ -1137,20 +1036,14 @@ def go_repositories(): go_repository( name = "com_github_pkg_sftp", importpath = "github.com/pkg/sftp", - sum = "h1:I2qBYMChEhIjOgazfJmV3/mZM256btk6wkCDRmW7JYs=", - version = "v1.13.1", + sum = "h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo=", + version = "v1.13.6", ) go_repository( name = 
"com_github_pmezard_go_difflib", importpath = "github.com/pmezard/go-difflib", - sum = "h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=", - version = "v1.0.0", - ) - go_repository( - name = "com_github_posener_complete", - importpath = "github.com/posener/complete", - sum = "h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w=", - version = "v1.1.1", + sum = "h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=", + version = "v1.0.1-0.20181226105442-5d4384ee4fb2", ) go_repository( name = "com_github_prometheus_client_golang", @@ -1176,29 +1069,23 @@ def go_repositories(): sum = "h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=", version = "v0.8.0", ) - go_repository( - name = "com_github_prometheus_tsdb", - importpath = "github.com/prometheus/tsdb", - sum = "h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=", - version = "v0.7.1", - ) go_repository( name = "com_github_protonmail_go_crypto", importpath = "github.com/ProtonMail/go-crypto", - sum = "h1:J2FzIrXN82q5uyUraeJpLIm7U6PffRwje2ORho5yIik=", - version = "v0.0.0-20220113124808-70ae35bab23f", + sum = "h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78=", + version = "v1.0.0", ) go_repository( name = "com_github_protonmail_go_mime", importpath = "github.com/ProtonMail/go-mime", - sum = "h1:CGq7OieOz3wyQJ1fO8S0eO9TCW1JyvLrf8fhzz1i8ko=", - version = "v0.0.0-20220302105931-303f85f7fe0f", + sum = "h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k=", + version = "v0.0.0-20230322103455-7d82a3887f2f", ) go_repository( name = "com_github_protonmail_gopenpgp_v2", importpath = "github.com/ProtonMail/gopenpgp/v2", - sum = "h1:V3xeelvXgJiZXZuPtSSE+uYbtPw4RmbmyPqXDAESPhg=", - version = "v2.4.7", + sum = "h1:STOY3vgES59gNgoOt2w0nyHBjKViB/qSg7NjbQWPJkA=", + version = "v2.7.5", ) go_repository( name = "com_github_rivo_uniseg", @@ -1215,8 +1102,8 @@ def go_repositories(): go_repository( name = "com_github_rocky_linux_srpmproc", importpath = "github.com/rocky-linux/srpmproc", - sum = "h1:PnfuOQbGZEwOEr1u7Notz9Pm+Ol38Tu7mo8/63rgeaE=", - version = "v0.4.3", + sum = "h1:lX+Pp6HIsus7kp9xSuifcH7b6jrELfOnUpr87ia5KMc=", + version = "v0.6.4", ) go_repository( name = "com_github_rogpeppe_fastuuid", @@ -1227,62 +1114,56 @@ def go_repositories(): go_repository( name = "com_github_rogpeppe_go_internal", importpath = "github.com/rogpeppe/go-internal", - sum = "h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=", - version = "v1.10.0", + sum = "h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=", + version = "v1.11.0", ) go_repository( name = "com_github_russross_blackfriday_v2", importpath = "github.com/russross/blackfriday/v2", - sum = "h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=", - version = "v2.0.1", + sum = "h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=", + version = "v2.1.0", ) go_repository( - name = "com_github_ryanuber_columnize", - importpath = "github.com/ryanuber/columnize", - sum = "h1:UFr9zpz4xgTnIE5yIMtWAMngCdZ9p/+q6lTbgelo80M=", - version = "v0.0.0-20160712163229-9b3edd62028f", + name = "com_github_sagikazarmark_crypt", + importpath = "github.com/sagikazarmark/crypt", + sum = "h1:WMyLTjHBo64UvNcWqpzY3pbZTYgnemZU8FBZigKc42E=", + version = "v0.19.0", ) go_repository( - name = "com_github_sean_seed", - importpath = "github.com/sean-/seed", - sum = "h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=", - version = "v0.0.0-20170313163322-e2103e2c3529", + name = "com_github_sagikazarmark_locafero", + importpath = "github.com/sagikazarmark/locafero", + sum = "h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk=", + version = "v0.6.0", + ) + go_repository( + name = 
"com_github_sagikazarmark_slog_shim", + importpath = "github.com/sagikazarmark/slog-shim", + sum = "h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=", + version = "v0.1.0", ) go_repository( name = "com_github_sergi_go_diff", importpath = "github.com/sergi/go-diff", - sum = "h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=", - version = "v1.1.0", - ) - go_repository( - name = "com_github_shurcool_sanitized_anchor_name", - importpath = "github.com/shurcooL/sanitized_anchor_name", - sum = "h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=", - version = "v1.0.0", + sum = "h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=", + version = "v1.3.2-0.20230802210424-5b0b94c5c0d3", ) go_repository( name = "com_github_sirupsen_logrus", importpath = "github.com/sirupsen/logrus", - sum = "h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=", - version = "v1.8.1", + sum = "h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=", + version = "v1.9.3", ) go_repository( - name = "com_github_smartystreets_assertions", - importpath = "github.com/smartystreets/assertions", - sum = "h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=", - version = "v0.0.0-20180927180507-b2de0cb4f26d", + name = "com_github_skeema_knownhosts", + importpath = "github.com/skeema/knownhosts", + sum = "h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A=", + version = "v1.2.2", ) go_repository( - name = "com_github_smartystreets_goconvey", - importpath = "github.com/smartystreets/goconvey", - sum = "h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=", - version = "v1.6.4", - ) - go_repository( - name = "com_github_soheilhy_cmux", - importpath = "github.com/soheilhy/cmux", - sum = "h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=", - version = "v0.1.4", + name = "com_github_sourcegraph_conc", + importpath = "github.com/sourcegraph/conc", + sum = "h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=", + version = "v0.3.0", ) go_repository( name = "com_github_spaolacci_murmur3", @@ -1293,26 +1174,26 @@ def go_repositories(): go_repository( name = "com_github_spf13_afero", importpath = "github.com/spf13/afero", - sum = "h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY=", - version = "v1.10.0", + sum = "h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=", + version = "v1.11.0", ) go_repository( name = "com_github_spf13_cast", importpath = "github.com/spf13/cast", - sum = "h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=", - version = "v1.3.0", + sum = "h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=", + version = "v1.6.0", ) go_repository( name = "com_github_spf13_cobra", importpath = "github.com/spf13/cobra", - sum = "h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M=", - version = "v1.1.3", + sum = "h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=", + version = "v1.8.1", ) go_repository( name = "com_github_spf13_jwalterweatherman", importpath = "github.com/spf13/jwalterweatherman", - sum = "h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=", - version = "v1.0.0", + sum = "h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=", + version = "v1.1.0", ) go_repository( name = "com_github_spf13_pflag", @@ -1323,32 +1204,26 @@ def go_repositories(): go_repository( name = "com_github_spf13_viper", importpath = "github.com/spf13/viper", - sum = "h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk=", - version = "v1.7.1", + sum = "h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=", + version = "v1.19.0", ) go_repository( name = "com_github_stretchr_objx", importpath = "github.com/stretchr/objx", - sum = "h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=", - version = "v0.5.0", + sum = 
"h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=", + version = "v0.5.2", ) go_repository( name = "com_github_stretchr_testify", importpath = "github.com/stretchr/testify", - sum = "h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=", - version = "v1.8.4", + sum = "h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=", + version = "v1.9.0", ) go_repository( name = "com_github_subosito_gotenv", importpath = "github.com/subosito/gotenv", - sum = "h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=", - version = "v1.2.0", - ) - go_repository( - name = "com_github_tmc_grpc_websocket_proxy", - importpath = "github.com/tmc/grpc-websocket-proxy", - sum = "h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=", - version = "v0.0.0-20190109142713-0ad062ec5ee5", + sum = "h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=", + version = "v1.6.0", ) go_repository( name = "com_github_vbauerster_mpb_v7", @@ -1371,14 +1246,8 @@ def go_repositories(): go_repository( name = "com_github_xanzy_ssh_agent", importpath = "github.com/xanzy/ssh-agent", - sum = "h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70=", - version = "v0.2.1", - ) - go_repository( - name = "com_github_xiang90_probing", - importpath = "github.com/xiang90/probing", - sum = "h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=", - version = "v0.0.0-20190116061207-43a291ad63a2", + sum = "h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=", + version = "v0.3.3", ) go_repository( name = "com_github_yuin_goldmark", @@ -1389,710 +1258,722 @@ def go_repositories(): go_repository( name = "com_google_cloud_go", importpath = "cloud.google.com/go", - sum = "h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=", - version = "v0.112.0", + sum = "h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14=", + version = "v0.115.0", ) go_repository( name = "com_google_cloud_go_accessapproval", importpath = "cloud.google.com/go/accessapproval", - sum = "h1:uzmAMSgYcnlHa9X9YSQZ4Q1wlfl4NNkZyQgho1Z6p04=", - version = "v1.7.5", + sum = "h1:mp1X2FsNRdTYTVw4b6eF4OQ+7l6EpLnZlcatXiFWJTg=", + version = "v1.7.9", ) go_repository( name = "com_google_cloud_go_accesscontextmanager", importpath = "cloud.google.com/go/accesscontextmanager", - sum = "h1:2GLNaNu9KRJhJBFTIVRoPwk6xE5mUDgD47abBq4Zp/I=", - version = "v1.8.5", + sum = "h1:oVjc3eFQP92zezKsof5ly6ENhuNSsgadRdFKhUn7L9g=", + version = "v1.8.9", ) go_repository( name = "com_google_cloud_go_aiplatform", importpath = "cloud.google.com/go/aiplatform", - sum = "h1:0cSrii1ZeLr16MbBoocyy5KVnrSdiQ3KN/vtrTe7RqE=", - version = "v1.60.0", + sum = "h1:EPPqgHDJpBZKRvv+OsB3cr0jYz3EL2pZ+802rBPcG8U=", + version = "v1.68.0", ) go_repository( name = "com_google_cloud_go_analytics", importpath = "cloud.google.com/go/analytics", - sum = "h1:Q+y94XH84jM8SK8O7qiY/PJRexb6n7dRbQ6PiUa4YGM=", - version = "v0.23.0", + sum = "h1:5c425wSQBb+YAGr7ukgRFRAKa8SwlqTSapbb+CTJAEA=", + version = "v0.23.4", ) go_repository( name = "com_google_cloud_go_apigateway", importpath = "cloud.google.com/go/apigateway", - sum = "h1:sPXnpk+6TneKIrjCjcpX5YGsAKy3PTdpIchoj8/74OE=", - version = "v1.6.5", + sum = "h1:vxZBKroYYCplsNrjggtniokb83Rk9mDitaiBN9nppdQ=", + version = "v1.6.9", ) go_repository( name = "com_google_cloud_go_apigeeconnect", importpath = "cloud.google.com/go/apigeeconnect", - sum = "h1:CrfIKv9Go3fh/QfQgisU3MeP90Ww7l/sVGmr3TpECo8=", - version = "v1.6.5", + sum = "h1:WO8XlUGugxvdKBj5hQnv8l7+SsVXgJVA97iNXyFgUb8=", + version = "v1.6.9", ) go_repository( name = "com_google_cloud_go_apigeeregistry", importpath = "cloud.google.com/go/apigeeregistry", - sum = 
"h1:C+QU2K+DzDjk4g074ouwHQGkoff1h5OMQp6sblCVreQ=", - version = "v0.8.3", + sum = "h1:K05SFNKzwvApZqVUcwg/6oFzn/b9WUrDN8pIdfD51qU=", + version = "v0.8.7", ) go_repository( name = "com_google_cloud_go_appengine", importpath = "cloud.google.com/go/appengine", - sum = "h1:l2SviT44zWQiOv8bPoMBzW0vOcMO22iO0s+nVtVhdts=", - version = "v1.8.5", + sum = "h1:rI/aezyrwereUE0i/umbA6rZIgpJpBImFcy3JJEcQd0=", + version = "v1.8.9", ) go_repository( name = "com_google_cloud_go_area120", importpath = "cloud.google.com/go/area120", - sum = "h1:vTs08KPLN/iMzTbxpu5ciL06KcsrVPMjz4IwcQyZ4uY=", - version = "v0.8.5", + sum = "h1:38uHviqcdB2S83yPfOXzDxf0KTG/W2DsXMuY/uf2T8c=", + version = "v0.8.9", ) go_repository( name = "com_google_cloud_go_artifactregistry", importpath = "cloud.google.com/go/artifactregistry", - sum = "h1:W9sVlyb1VRcUf83w7aM3yMsnp4HS4PoyGqYQNG0O5lI=", - version = "v1.14.7", + sum = "h1:NZzHn5lPKyi2kgtM9Atu6IBvqslL7Fu1+5EkOYZd+yk=", + version = "v1.14.11", ) go_repository( name = "com_google_cloud_go_asset", importpath = "cloud.google.com/go/asset", - sum = "h1:xgFnBP3luSbUcC9RWJvb3Zkt+y/wW6PKwPHr3ssnIP8=", - version = "v1.17.2", + sum = "h1:vl8wy3jpRa3ATctym5tiICp70iymSyOVbpKb3tKA668=", + version = "v1.19.3", ) go_repository( name = "com_google_cloud_go_assuredworkloads", importpath = "cloud.google.com/go/assuredworkloads", - sum = "h1:gCrN3IyvqY3cP0wh2h43d99CgH3G+WYs9CeuFVKChR8=", - version = "v1.11.5", + sum = "h1:xMjLtM24zy8yWGZlNtYxXo9fBj7ArWTsNkXKlRBZlqw=", + version = "v1.11.9", + ) + go_repository( + name = "com_google_cloud_go_auth", + importpath = "cloud.google.com/go/auth", + sum = "h1:Iv1bbpzJ2OIg16m94XI9/tlzZZl3cdeR3nGVGj78N7s=", + version = "v0.7.1", + ) + go_repository( + name = "com_google_cloud_go_auth_oauth2adapt", + importpath = "cloud.google.com/go/auth/oauth2adapt", + sum = "h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI=", + version = "v0.2.3", ) go_repository( name = "com_google_cloud_go_automl", importpath = "cloud.google.com/go/automl", - sum = "h1:ijiJy9sYWh75WrqImXsfWc1e3HR3iO+ef9fvW03Ig/4=", - version = "v1.13.5", + sum = "h1:GzYpU33Zo2tQ+8amLasjeBPawpKfBYnLGHVMQcyiFv4=", + version = "v1.13.9", ) go_repository( name = "com_google_cloud_go_baremetalsolution", importpath = "cloud.google.com/go/baremetalsolution", - sum = "h1:LFydisRmS7hQk9P/YhekwuZGqb45TW4QavcrMToWo5A=", - version = "v1.2.4", + sum = "h1:mM8zaxertfV5gaNGloJdJY87z7l8WcNkhw96VB1IGTQ=", + version = "v1.2.8", ) go_repository( name = "com_google_cloud_go_batch", importpath = "cloud.google.com/go/batch", - sum = "h1:2HK4JerwVaIcCh/lJiHwh6+uswPthiMMWhiSWLELayk=", - version = "v1.8.0", + sum = "h1:WlOqpQMOtWvOLIs7vCxBwYZGaB76i3olsBCVUvszY3M=", + version = "v1.9.0", ) go_repository( name = "com_google_cloud_go_beyondcorp", importpath = "cloud.google.com/go/beyondcorp", - sum = "h1:qs0J0O9Ol2h1yA0AU+r7l3hOCPzs2MjE1d6d/kaHIKo=", - version = "v1.0.4", + sum = "h1:Dw9VO8fzZ2GWsfgx4wST03hjaZmRNv09lt0GZuzyVxM=", + version = "v1.0.8", ) go_repository( name = "com_google_cloud_go_bigquery", importpath = "cloud.google.com/go/bigquery", - sum = "h1:CpT+/njKuKT3CEmswm6IbhNu9u35zt5dO4yPDLW+nG4=", - version = "v1.59.1", + sum = "h1:w2Goy9n6gh91LVi6B2Sc+HpBl8WbWhIyzdvVvrAuEIw=", + version = "v1.61.0", ) go_repository( name = "com_google_cloud_go_billing", importpath = "cloud.google.com/go/billing", - sum = "h1:oWUEQvuC4JvtnqLZ35zgzdbuHt4Itbftvzbe6aEyFdE=", - version = "v1.18.2", + sum = "h1:1Y7DdC2i8JQctWpd1ycra5iK+2LzwgFi+TxTqF3Yyp8=", + version = "v1.18.7", ) go_repository( name = "com_google_cloud_go_binaryauthorization", 
importpath = "cloud.google.com/go/binaryauthorization", - sum = "h1:1jcyh2uIUwSZkJ/JmL8kd5SUkL/Krbv8zmYLEbAz6kY=", - version = "v1.8.1", + sum = "h1:ly5gQoJGHbuOM7E+pND38pTiQ0pZ4zTEOfJlfyfIIew=", + version = "v1.8.5", ) go_repository( name = "com_google_cloud_go_certificatemanager", importpath = "cloud.google.com/go/certificatemanager", - sum = "h1:UMBr/twXvH3jcT5J5/YjRxf2tvwTYIfrpemTebe0txc=", - version = "v1.7.5", + sum = "h1:feyxS5Q8eWQNXQcVAcdooQEKGT/1B/qCcYvamOen7fc=", + version = "v1.8.3", ) go_repository( name = "com_google_cloud_go_channel", importpath = "cloud.google.com/go/channel", - sum = "h1:/omiBnyFjm4S1ETHoOmJbL7LH7Ljcei4rYG6Sj3hc80=", - version = "v1.17.5", + sum = "h1:rqF5CjW6KnOmlVZ75PNkuXYh5nh8dIsIWQjHLLwPy3Y=", + version = "v1.17.9", ) go_repository( name = "com_google_cloud_go_cloudbuild", importpath = "cloud.google.com/go/cloudbuild", - sum = "h1:ZB6oOmJo+MTov9n629fiCrO9YZPOg25FZvQ7gIHu5ng=", - version = "v1.15.1", + sum = "h1:BIT0cFWQDT4XTVMyyZsjXvltVqBwvJ/RAKIRBqkgXf0=", + version = "v1.16.3", ) go_repository( name = "com_google_cloud_go_clouddms", importpath = "cloud.google.com/go/clouddms", - sum = "h1:Sr0Zo5EAcPQiCBgHWICg3VGkcdS/LLP1d9SR7qQBM/s=", - version = "v1.7.4", + sum = "h1:WEz8ECgv4ZinRc84xcW1wTsFfLNb60yrSsIdsEJFRDk=", + version = "v1.7.8", ) go_repository( name = "com_google_cloud_go_cloudtasks", importpath = "cloud.google.com/go/cloudtasks", - sum = "h1:EUt1hIZ9bLv8Iz9yWaCrqgMnIU+Tdh0yXM1MMVGhjfE=", - version = "v1.12.6", + sum = "h1:2mdGqvYFm9HwPh//ckbcX8mZJgyG+F1TWk+82+eLuwM=", + version = "v1.12.10", ) go_repository( name = "com_google_cloud_go_compute", importpath = "cloud.google.com/go/compute", - sum = "h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg=", - version = "v1.24.0", + sum = "h1:5cE5hdrwJV/92ravlwIFRGnyH9CpLGhh4N0ZDVTU+BA=", + version = "v1.27.2", ) go_repository( name = "com_google_cloud_go_compute_metadata", importpath = "cloud.google.com/go/compute/metadata", - sum = "h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=", - version = "v0.2.3", + sum = "h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=", + version = "v0.5.0", ) go_repository( name = "com_google_cloud_go_contactcenterinsights", importpath = "cloud.google.com/go/contactcenterinsights", - sum = "h1:6Vs/YnDG5STGjlWMEjN/xtmft7MrOTOnOZYUZtGTx0w=", - version = "v1.13.0", + sum = "h1:BvtC33BbX+3p+v+VB0AZ6djRrcXP+qPqfWsIR+kz5v8=", + version = "v1.13.4", ) go_repository( name = "com_google_cloud_go_container", importpath = "cloud.google.com/go/container", - sum = "h1:MAaNH7VRNPWEhvqOypq2j+7ONJKrKzon4v9nS3nLZe0=", - version = "v1.31.0", + sum = "h1:g5zm1SUBZ+q7IvtI5hM/6xcpf2C/bFfN2EXzS07Iz9k=", + version = "v1.37.2", ) go_repository( name = "com_google_cloud_go_containeranalysis", importpath = "cloud.google.com/go/containeranalysis", - sum = "h1:doJ0M1ljS4hS0D2UbHywlHGwB7sQLNrt9vFk9Zyi7vY=", - version = "v0.11.4", + sum = "h1:1rkYgK2szbRH311mRw/3lkeEOqrjN+2gOD7AZhMUxZw=", + version = "v0.11.8", ) go_repository( name = "com_google_cloud_go_datacatalog", importpath = "cloud.google.com/go/datacatalog", - sum = "h1:A0vKYCQdxQuV4Pi0LL9p39Vwvg4jH5yYveMv50gU5Tw=", - version = "v1.19.3", + sum = "h1:lzMtWaUlaz9Bd9anvq2KBZwcFujzhVuxhIz1MsqRJv8=", + version = "v1.20.3", ) go_repository( name = "com_google_cloud_go_dataflow", importpath = "cloud.google.com/go/dataflow", - sum = "h1:RYHtcPhmE664+F0Je46p+NvFbG8z//KCXp+uEqB4jZU=", - version = "v0.9.5", + sum = "h1:7qTWGXfpM2z3assRznIXJLw+XJNlucHFcvFAYhclQ+o=", + version = "v0.9.9", ) go_repository( name = "com_google_cloud_go_dataform", importpath = 
"cloud.google.com/go/dataform", - sum = "h1:5e4eqGrd0iDTCg4Q+VlAao5j2naKAA7xRurNtwmUknU=", - version = "v0.9.2", + sum = "h1:8BMoPO9CD3qmPqnunVi73JcvwQrkjLILPXZKFExsjZc=", + version = "v0.9.6", ) go_repository( name = "com_google_cloud_go_datafusion", importpath = "cloud.google.com/go/datafusion", - sum = "h1:HQ/BUOP8OIGJxuztpYvNvlb+/U+/Bfs9SO8tQbh61fk=", - version = "v1.7.5", + sum = "h1:ZwicZskyu64L8Y6+zvZQjIav5A1xYwM0nqpk88HKLmY=", + version = "v1.7.9", ) go_repository( name = "com_google_cloud_go_datalabeling", importpath = "cloud.google.com/go/datalabeling", - sum = "h1:GpIFRdm0qIZNsxqURFJwHt0ZBJZ0nF/mUVEigR7PH/8=", - version = "v0.8.5", + sum = "h1:4ndOrLlhYErzhJGciRJx+s33+6P4cS23GnROnfaJ6hE=", + version = "v0.8.9", ) go_repository( name = "com_google_cloud_go_dataplex", importpath = "cloud.google.com/go/dataplex", - sum = "h1:fxIfdU8fxzR3clhOoNI7XFppvAmndxDu1AMH+qX9WKQ=", - version = "v1.14.2", + sum = "h1:kXCHm9TqTr5BhZnsSD32iCRmf1S+Hho+UDqXr3Gdw7s=", + version = "v1.18.0", ) go_repository( name = "com_google_cloud_go_dataproc_v2", importpath = "cloud.google.com/go/dataproc/v2", - sum = "h1:/u81Fd+BvCLp+xjctI1DiWVJn6cn9/s3Akc8xPH02yk=", - version = "v2.4.0", + sum = "h1:AYq1wJCKHrSG4KtxMQPkn1b0/uaHULHbXXTukCgou90=", + version = "v2.5.1", ) go_repository( name = "com_google_cloud_go_dataqna", importpath = "cloud.google.com/go/dataqna", - sum = "h1:9ybXs3nr9BzxSGC04SsvtuXaHY0qmJSLIpIAbZo9GqQ=", - version = "v0.8.5", + sum = "h1:7kiDfd4c/pSW8jmeeOac/H+PYgwLrIt4L88s4JiFRZU=", + version = "v0.8.9", ) go_repository( name = "com_google_cloud_go_datastore", importpath = "cloud.google.com/go/datastore", - sum = "h1:0P9WcsQeTWjuD1H14JIY7XQscIPQ4Laje8ti96IC5vg=", - version = "v1.15.0", + sum = "h1:6Me8ugrAOAxssGhSo8im0YSuy4YvYk4mbGvCadAH5aE=", + version = "v1.17.1", ) go_repository( name = "com_google_cloud_go_datastream", importpath = "cloud.google.com/go/datastream", - sum = "h1:o1QDKMo/hk0FN7vhoUQURREuA0rgKmnYapB+1M+7Qz4=", - version = "v1.10.4", + sum = "h1:INvIDTnuti68Lmdizp2GUyRFjN9k3X7IowX0Ixy9Vto=", + version = "v1.10.8", ) go_repository( name = "com_google_cloud_go_deploy", importpath = "cloud.google.com/go/deploy", - sum = "h1:m27Ojwj03gvpJqCbodLYiVmE9x4/LrHGGMjzc0LBfM4=", - version = "v1.17.1", + sum = "h1:C8T/Hna2lDE8qbwP75G+iJclnrIj7oblBoQoc1cfDWc=", + version = "v1.19.2", ) go_repository( name = "com_google_cloud_go_dialogflow", importpath = "cloud.google.com/go/dialogflow", - sum = "h1:KqG0oxGE71qo0lRVyAoeBozefCvsMfcDzDjoLYSY0F4=", - version = "v1.49.0", + sum = "h1:uS7IDkXIUR5EduLfyPmgTpZ27RcUIHby7JKsk4fBPdo=", + version = "v1.54.2", ) go_repository( name = "com_google_cloud_go_dlp", importpath = "cloud.google.com/go/dlp", - sum = "h1:lTipOuJaSjlYnnotPMbEhKURLC6GzCMDDzVbJAEbmYM=", - version = "v1.11.2", + sum = "h1:oR15Jcd/grn//eftZ/B0DJ99lTaeN8vOf8TK5xhKEvc=", + version = "v1.14.2", ) go_repository( name = "com_google_cloud_go_documentai", importpath = "cloud.google.com/go/documentai", - sum = "h1:lI62GMEEPO6vXJI9hj+G9WjOvnR0hEjvjokrnex4cxA=", - version = "v1.25.0", + sum = "h1:D75r7hqnc9Zz6aRV8fzs/1V94R5YIv+FDJivUT4r+n4=", + version = "v1.30.3", ) go_repository( name = "com_google_cloud_go_domains", importpath = "cloud.google.com/go/domains", - sum = "h1:Mml/R6s3vQQvFPpi/9oX3O5dRirgjyJ8cksK8N19Y7g=", - version = "v0.9.5", + sum = "h1:kIqgwkIph6Mw+m1nWafdEBrGqPPZ1J98hqO11gkL4BM=", + version = "v0.9.9", ) go_repository( name = "com_google_cloud_go_edgecontainer", importpath = "cloud.google.com/go/edgecontainer", - sum = "h1:tBY32km78ScpK2aOP84JoW/+wtpx5WluyPUSEE3270U=", - 
version = "v1.1.5", + sum = "h1:F5UsQ/A4GjkV9dTBi3KMFGXPa/6OdTk5/Dce2bdYonM=", + version = "v1.2.3", ) go_repository( name = "com_google_cloud_go_errorreporting", importpath = "cloud.google.com/go/errorreporting", - sum = "h1:kj1XEWMu8P0qlLhm3FwcaFsUvXChV/OraZwA70trRR0=", - version = "v0.3.0", + sum = "h1:E/gLk+rL7u5JZB9oq72iL1bnhVlLrnfslrgcptjJEUE=", + version = "v0.3.1", ) go_repository( name = "com_google_cloud_go_essentialcontacts", importpath = "cloud.google.com/go/essentialcontacts", - sum = "h1:13eHn5qBnsawxI7mIrv4jRIEmQ1xg0Ztqw5ZGqtUNfA=", - version = "v1.6.6", + sum = "h1:zI+3LgjRcv7StB7O35sWqCg79OKDx5sRR4GAq36fi+s=", + version = "v1.6.10", ) go_repository( name = "com_google_cloud_go_eventarc", importpath = "cloud.google.com/go/eventarc", - sum = "h1:ORkd6/UV5FIdA8KZQDLNZYKS7BBOrj0p01DXPmT4tE4=", - version = "v1.13.4", + sum = "h1:2sbz7e95cv6zm2mNrMJlAQ6J93qQsGCQzw4lYa5GWJQ=", + version = "v1.13.8", ) go_repository( name = "com_google_cloud_go_filestore", importpath = "cloud.google.com/go/filestore", - sum = "h1:X5G4y/vrUo1B8Nsz93qSWTMAcM8LXbGUldq33OdcdCw=", - version = "v1.8.1", + sum = "h1:yAHY3pGq6/IX4sLQqPpfaqfnSk1LmCdVkWNwzIP4X7c=", + version = "v1.8.5", ) go_repository( name = "com_google_cloud_go_firestore", importpath = "cloud.google.com/go/firestore", - sum = "h1:8aLcKnMPoldYU3YHgu4t2exrKhLQkqaXAGqT0ljrFVw=", - version = "v1.14.0", + sum = "h1:/k8ppuWOtNuDHt2tsRV42yI21uaGnKDEQnRFeBpbFF8=", + version = "v1.15.0", ) go_repository( name = "com_google_cloud_go_functions", importpath = "cloud.google.com/go/functions", - sum = "h1:IWVylmK5F6hJ3R5zaRW7jI5PrWhCvtBVU4axQLmXSo4=", - version = "v1.16.0", + sum = "h1:+mNEYegIO1ToQXsWEhEI6cI1lm+VAeu0pAmc+atYOaY=", + version = "v1.16.4", ) go_repository( name = "com_google_cloud_go_gkebackup", importpath = "cloud.google.com/go/gkebackup", - sum = "h1:iuE8KNtTsPOc79qeWoNS8zOWoXPD9SAdOmwgxtlCmh8=", - version = "v1.3.5", + sum = "h1:sdGeTG6O+JPI7rRiVNy7wO4r4CELChfNe7C8BWPOJRM=", + version = "v1.5.2", ) go_repository( name = "com_google_cloud_go_gkeconnect", importpath = "cloud.google.com/go/gkeconnect", - sum = "h1:17d+ZSSXKqG/RwZCq3oFMIWLPI8Zw3b8+a9/BEVlwH0=", - version = "v0.8.5", + sum = "h1:cXA4NWFlB174ub2kIaGLGrKxgTFjDWPzEs766i6Frww=", + version = "v0.8.9", ) go_repository( name = "com_google_cloud_go_gkehub", importpath = "cloud.google.com/go/gkehub", - sum = "h1:RboLNFzf9wEMSo7DrKVBlf+YhK/A/jrLN454L5Tz99Q=", - version = "v0.14.5", + sum = "h1:fWHBKtPwH7Wp5JjNxlPLanYYmXj6XuHjIRk6oa4yqkY=", + version = "v0.14.9", ) go_repository( name = "com_google_cloud_go_gkemulticloud", importpath = "cloud.google.com/go/gkemulticloud", - sum = "h1:rsSZAGLhyjyE/bE2ToT5fqo1qSW7S+Ubsc9jFOcbhSI=", - version = "v1.1.1", + sum = "h1:Msgg//raevqYlNZ+N8HFfO707wYVCyUnPKQPkt1g288=", + version = "v1.2.2", ) go_repository( name = "com_google_cloud_go_gsuiteaddons", importpath = "cloud.google.com/go/gsuiteaddons", - sum = "h1:CZEbaBwmbYdhFw21Fwbo+C35HMe36fTE0FBSR4KSfWg=", - version = "v1.6.5", + sum = "h1:uezUQ2jCcW4jkvB0tbJkMCNVdIa/qGgqnxEqOF8IvwY=", + version = "v1.6.9", ) go_repository( name = "com_google_cloud_go_iam", importpath = "cloud.google.com/go/iam", - sum = "h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc=", - version = "v1.1.6", + sum = "h1:0mQ8UKSfdHLut6pH9FM3bI55KWR46ketn0PuXleDyxw=", + version = "v1.1.11", ) go_repository( name = "com_google_cloud_go_iap", importpath = "cloud.google.com/go/iap", - sum = "h1:94zirc2r4t6KzhAMW0R6Dme005eTP6yf7g6vN4IhRrA=", - version = "v1.9.4", + sum = "h1:oqS5GMxyEDFndqwURKMIaRJ0GXygLJf/2bzue0WkrOU=", + version 
= "v1.9.8", ) go_repository( name = "com_google_cloud_go_ids", importpath = "cloud.google.com/go/ids", - sum = "h1:xd4U7pgl3GHV+MABnv1BF4/Vy/zBF7CYC8XngkOLzag=", - version = "v1.4.5", + sum = "h1:JIYwGad3q7kADDAIMw0E/3OR3vtDqjSliRBlWAm+WNk=", + version = "v1.4.9", ) go_repository( name = "com_google_cloud_go_iot", importpath = "cloud.google.com/go/iot", - sum = "h1:munTeBlbqI33iuTYgXy7S8lW2TCgi5l1hA4roSIY+EE=", - version = "v1.7.5", + sum = "h1:dsroR14QUU7i2/GC4AcEv1MvKS0VZCYWWTCxxyq2iYo=", + version = "v1.7.9", ) go_repository( name = "com_google_cloud_go_kms", importpath = "cloud.google.com/go/kms", - sum = "h1:7caV9K3yIxvlQPAcaFffhlT7d1qpxjB1wHBtjWa13SM=", - version = "v1.15.7", + sum = "h1:EGgD0B9k9tOOkbPhYW1PHo2W0teamAUYMOUIcDRMfPk=", + version = "v1.18.2", ) go_repository( name = "com_google_cloud_go_language", importpath = "cloud.google.com/go/language", - sum = "h1:iaJZg6K4j/2PvZZVcjeO/btcWWIllVRBhuTFjGO4LXs=", - version = "v1.12.3", + sum = "h1:b8Ilb9pBrXj6aMMD0s8EEp28MSiBMo3FWPHAPNImIy4=", + version = "v1.12.7", ) go_repository( name = "com_google_cloud_go_lifesciences", importpath = "cloud.google.com/go/lifesciences", - sum = "h1:gXvN70m2p+4zgJFzaz6gMKaxTuF9WJ0USYoMLWAOm8g=", - version = "v0.9.5", + sum = "h1:b9AaxLtWOu9IShII4fdLVDOS03CVCsqWX5zXufyRrDU=", + version = "v0.9.9", ) go_repository( name = "com_google_cloud_go_logging", importpath = "cloud.google.com/go/logging", - sum = "h1:iEIOXFO9EmSiTjDmfpbRjOxECO7R8C7b8IXUGOj7xZw=", - version = "v1.9.0", + sum = "h1:f+ZXMqyrSJ5vZ5pE/zr0xC8y/M9BLNzQeLBwfeZ+wY4=", + version = "v1.10.0", ) go_repository( name = "com_google_cloud_go_longrunning", importpath = "cloud.google.com/go/longrunning", - sum = "h1:GOE6pZFdSrTb4KAiKnXsJBtlE6mEyaW44oKyMILWnOg=", - version = "v0.5.5", + sum = "h1:haH9pAuXdPAMqHvzX0zlWQigXT7B0+CL4/2nXXdBo5k=", + version = "v0.5.9", ) go_repository( name = "com_google_cloud_go_managedidentities", importpath = "cloud.google.com/go/managedidentities", - sum = "h1:+bpih1piZVLxla/XBqeSUzJBp8gv9plGHIMAI7DLpDM=", - version = "v1.6.5", + sum = "h1:ktrpu0TWbtLm2wHUUOxXCftD2e8qZvtQZlFLjKyQXUA=", + version = "v1.6.9", ) go_repository( name = "com_google_cloud_go_maps", importpath = "cloud.google.com/go/maps", - sum = "h1:EVCZAiDvog9So46460BGbCasPhi613exoaQbpilMVlk=", - version = "v1.6.4", + sum = "h1:Un4DDZMLfvQT0kAne82lScQib5QJoBg2NDRVJkBokMg=", + version = "v1.11.3", ) go_repository( name = "com_google_cloud_go_mediatranslation", importpath = "cloud.google.com/go/mediatranslation", - sum = "h1:c76KdIXljQHSCb/Cy47S8H4s05A4zbK3pAFGzwcczZo=", - version = "v0.8.5", + sum = "h1:ptRvYRCZPwEk1oHIlSUg7a74czyS7VUP8869PXeaIT8=", + version = "v0.8.9", ) go_repository( name = "com_google_cloud_go_memcache", importpath = "cloud.google.com/go/memcache", - sum = "h1:yeDv5qxRedFosvpMSEswrqUsJM5OdWvssPHFliNFTc4=", - version = "v1.10.5", + sum = "h1:Wks0hJCdprJkYn0kYTW5oto3NodsGqn98Urvj3fJgX4=", + version = "v1.10.9", ) go_repository( name = "com_google_cloud_go_metastore", importpath = "cloud.google.com/go/metastore", - sum = "h1:dR7vqWXlK6IYR8Wbu9mdFfwlVjodIBhd1JRrpZftTEg=", - version = "v1.13.4", + sum = "h1:aGLOZ6tPsGveXVST2c6tf2mjFm5bEcBij8qh4qInz+I=", + version = "v1.13.8", ) go_repository( name = "com_google_cloud_go_monitoring", importpath = "cloud.google.com/go/monitoring", - sum = "h1:NfkDLQDG2UR3WYZVQE8kwSbUIEyIqJUPl+aOQdFH1T4=", - version = "v1.18.0", + sum = "h1:XmM6uk4+mI2ZhWdI2n/2GNhJdpeQN+1VdG2UWEDhX48=", + version = "v1.20.1", ) go_repository( name = "com_google_cloud_go_networkconnectivity", importpath = 
"cloud.google.com/go/networkconnectivity", - sum = "h1:GBfXFhLyPspnaBE3nI/BRjdhW8vcbpT9QjE/4kDCDdc=", - version = "v1.14.4", + sum = "h1:PSOYigOrl3pTFfRBPQk5uRlxSxn0G1HY7FNZPGz5Quw=", + version = "v1.14.8", ) go_repository( name = "com_google_cloud_go_networkmanagement", importpath = "cloud.google.com/go/networkmanagement", - sum = "h1:aLV5GcosBNmd6M8+a0ekB0XlLRexv4fvnJJrYnqeBcg=", - version = "v1.9.4", + sum = "h1:CUX6YYtC6DpV0BzsaovqWExieVPDxmUxvQVlEjf0mwQ=", + version = "v1.13.4", ) go_repository( name = "com_google_cloud_go_networksecurity", importpath = "cloud.google.com/go/networksecurity", - sum = "h1:+caSxBTj0E8OYVh/5wElFdjEMO1S/rZtE1152Cepchc=", - version = "v0.9.5", + sum = "h1:DDqzpqx1u1vDiYW2bBr0r3A5kIw3D5f4RtQkWiRd7Jg=", + version = "v0.9.9", ) go_repository( name = "com_google_cloud_go_notebooks", importpath = "cloud.google.com/go/notebooks", - sum = "h1:FH48boYmrWVQ6k0Mx/WrnNafXncT5iSYxA8CNyWTgy0=", - version = "v1.11.3", + sum = "h1:/SeTEbFaU3cwzvc0ycM3nJ+8DvSTS8oeOWKi0bzEItM=", + version = "v1.11.7", ) go_repository( name = "com_google_cloud_go_optimization", importpath = "cloud.google.com/go/optimization", - sum = "h1:63NZaWyN+5rZEKHPX4ACpw3BjgyeuY8+rCehiCMaGPY=", - version = "v1.6.3", + sum = "h1:HFaCNq1upokZP4cPelqszhUShkmIypWma5IGe4fh4CA=", + version = "v1.6.7", ) go_repository( name = "com_google_cloud_go_orchestration", importpath = "cloud.google.com/go/orchestration", - sum = "h1:YHgWMlrPttIVGItgGfuvO2KM7x+y9ivN/Yk92pMm1a4=", - version = "v1.8.5", + sum = "h1:xwqKYWlnDMLETKpZmPg+edCehC7w4G11d/8JSqutC5I=", + version = "v1.9.4", ) go_repository( name = "com_google_cloud_go_orgpolicy", importpath = "cloud.google.com/go/orgpolicy", - sum = "h1:2JbXigqBJVp8Dx5dONUttFqewu4fP0p3pgOdIZAhpYU=", - version = "v1.12.1", + sum = "h1:NEbK9U6HuhjXOUI1+fJVdIEh0FHiJtGVq4kYQQ5B8t8=", + version = "v1.12.5", ) go_repository( name = "com_google_cloud_go_osconfig", importpath = "cloud.google.com/go/osconfig", - sum = "h1:Mo5jGAxOMKH/PmDY7fgY19yFcVbvwREb5D5zMPQjFfo=", - version = "v1.12.5", + sum = "h1:k+nAmaTcJ08BSR1yGadRZyLwRSvk5XgaZJinS1sEz4Q=", + version = "v1.13.0", ) go_repository( name = "com_google_cloud_go_oslogin", importpath = "cloud.google.com/go/oslogin", - sum = "h1:1K4nOT5VEZNt7XkhaTXupBYos5HjzvJMfhvyD2wWdFs=", - version = "v1.13.1", + sum = "h1:IptgM0b9yNJzEbC5rEetbRAcxsuRXDMuSX/65qASvE8=", + version = "v1.13.5", ) go_repository( name = "com_google_cloud_go_phishingprotection", importpath = "cloud.google.com/go/phishingprotection", - sum = "h1:DH3WFLzEoJdW/6xgsmoDqOwT1xddFi7gKu0QGZQhpGU=", - version = "v0.8.5", + sum = "h1:Gg3XeqWW0g97MKvexeMytrxu31UHDjUd0bbzHa40D8o=", + version = "v0.8.9", ) go_repository( name = "com_google_cloud_go_policytroubleshooter", importpath = "cloud.google.com/go/policytroubleshooter", - sum = "h1:c0WOzC6hz964QWNBkyKfna8A2jOIx1zzZa43Gx/P09o=", - version = "v1.10.3", + sum = "h1:A3KZBrc2Qzq5jQI8M8hW4GscOBZzIvoOhwRiE41pqcY=", + version = "v1.10.7", ) go_repository( name = "com_google_cloud_go_privatecatalog", importpath = "cloud.google.com/go/privatecatalog", - sum = "h1:UZ0assTnATXSggoxUIh61RjTQ4P9zCMk/kEMbn0nMYA=", - version = "v0.9.5", + sum = "h1:fV9+FuZuN6pup4h3qh/0HGpssJrkI5EyZVLQEEvzrA4=", + version = "v0.9.9", ) go_repository( name = "com_google_cloud_go_pubsub", importpath = "cloud.google.com/go/pubsub", - sum = "h1:dfEPuGCHGbWUhaMCTHUFjfroILEkx55iUmKBZTP5f+Y=", - version = "v1.36.1", + sum = "h1:0LdP+zj5XaPAGtWr2V6r88VXJlmtaB/+fde1q3TU8M0=", + version = "v1.40.0", ) go_repository( name = "com_google_cloud_go_pubsublite", importpath = 
"cloud.google.com/go/pubsublite", - sum = "h1:pX+idpWMIH30/K7c0epN6V703xpIcMXWRjKJsz0tYGY=", - version = "v1.8.1", + sum = "h1:jLQozsEVr+c6tOU13vDugtnaBSUy/PD5zK6mhm+uF1Y=", + version = "v1.8.2", ) go_repository( name = "com_google_cloud_go_recaptchaenterprise_v2", importpath = "cloud.google.com/go/recaptchaenterprise/v2", - sum = "h1:U3Wfq12X9cVMuTpsWDSURnXF0Z9hSPTHj+xsnXDRLsw=", - version = "v2.9.2", + sum = "h1:revhoyewcQrpKccogfKNO2ul3aQbD11BU+ZsRpOWlgw=", + version = "v2.14.0", ) go_repository( name = "com_google_cloud_go_recommendationengine", importpath = "cloud.google.com/go/recommendationengine", - sum = "h1:ineqLswaCSBY0csYv5/wuXJMBlxATK6Xc5jJkpiTEdM=", - version = "v0.8.5", + sum = "h1:jewIoRtf1F4WgtIDdPEKDqpvPU+utN02sFw3iYbmvwM=", + version = "v0.8.9", ) go_repository( name = "com_google_cloud_go_recommender", importpath = "cloud.google.com/go/recommender", - sum = "h1:LVLYS3r3u0MSCxQSDUtLSkporEGi9OAE6hGvayrZNPs=", - version = "v1.12.1", + sum = "h1:91NMrObmes2zA+gI0+QhCFH1oTPHlMGFTAJy5MTD2eg=", + version = "v1.12.5", ) go_repository( name = "com_google_cloud_go_redis", importpath = "cloud.google.com/go/redis", - sum = "h1:QF0maEdVv0Fj/2roU8sX3NpiDBzP9ICYTO+5F32gQNo=", - version = "v1.14.2", + sum = "h1:QbarPMu22tuUOqi3ynNKk2mQWl7xitMTxAaAUaBUFsE=", + version = "v1.16.2", ) go_repository( name = "com_google_cloud_go_resourcemanager", importpath = "cloud.google.com/go/resourcemanager", - sum = "h1:AZWr1vWVDKGwfLsVhcN+vcwOz3xqqYxtmMa0aABCMms=", - version = "v1.9.5", + sum = "h1:9JgRo4uBdCLJpWb6c+1+q7QPyWzH0LSCKUcF/IliKNk=", + version = "v1.9.9", ) go_repository( name = "com_google_cloud_go_resourcesettings", importpath = "cloud.google.com/go/resourcesettings", - sum = "h1:BTr5MVykJwClASci/7Og4Qfx70aQ4n3epsNLj94ZYgw=", - version = "v1.6.5", + sum = "h1:Q3udMNHhYLrzVNrCYEpZ6f70Rf6nHpiPFay1ILwcQ80=", + version = "v1.7.2", ) go_repository( name = "com_google_cloud_go_retail", importpath = "cloud.google.com/go/retail", - sum = "h1:Fn1GuAua1c6crCGqfJ1qMxG1Xh10Tg/x5EUODEHMqkw=", - version = "v1.16.0", + sum = "h1:RovE7VK3TEFDECBXwVWItL21+QQ4WY6otLCZHqExMBQ=", + version = "v1.17.2", ) go_repository( name = "com_google_cloud_go_run", importpath = "cloud.google.com/go/run", - sum = "h1:m9WDA7DzTpczhZggwYlZcBWgCRb+kgSIisWn1sbw2rQ=", - version = "v1.3.4", + sum = "h1:De6XlIBjzEFXPzDQ/hJgvieh4H/105mhkkwxL5DmH0o=", + version = "v1.3.9", ) go_repository( name = "com_google_cloud_go_scheduler", importpath = "cloud.google.com/go/scheduler", - sum = "h1:5U8iXLoQ03qOB+ZXlAecU7fiE33+u3QiM9nh4cd0eTE=", - version = "v1.10.6", + sum = "h1:KYdENFZip7O2Jk/zuNzEPIv+ZQokkWnNZ5AnrIuooYo=", + version = "v1.10.10", ) go_repository( name = "com_google_cloud_go_secretmanager", importpath = "cloud.google.com/go/secretmanager", - sum = "h1:82fpF5vBBvu9XW4qj0FU2C6qVMtj1RM/XHwKXUEAfYY=", - version = "v1.11.5", + sum = "h1:VqUVYY3U6uFXOhPdZgAoZH9m8E6p7eK02TsDRj2SBf4=", + version = "v1.13.3", ) go_repository( name = "com_google_cloud_go_security", importpath = "cloud.google.com/go/security", - sum = "h1:wTKJQ10j8EYgvE8Y+KhovxDRVDk2iv/OsxZ6GrLP3kE=", - version = "v1.15.5", + sum = "h1:pEkUeR1PFNwoFAIXPMa4PBCYb75UT8LmNfjQy1fm/Co=", + version = "v1.17.2", ) go_repository( name = "com_google_cloud_go_securitycenter", importpath = "cloud.google.com/go/securitycenter", - sum = "h1:/5jjkZ+uGe8hZ7pvd7pO30VW/a+pT2MrrdgOqjyucKQ=", - version = "v1.24.4", + sum = "h1:UJvalA9NoLhU0DWLa10qMSvMucEe+iQOqxC4/KGqMys=", + version = "v1.32.0", ) go_repository( name = "com_google_cloud_go_servicedirectory", importpath = 
"cloud.google.com/go/servicedirectory", - sum = "h1:da7HFI1229kyzIyuVEzHXip0cw0d+E0s8mjQby0WN+k=", - version = "v1.11.4", + sum = "h1:KivmF5S9i6av+7tgkHgcosC51jEtmC9UvgayezP2Uqo=", + version = "v1.11.9", ) go_repository( name = "com_google_cloud_go_shell", importpath = "cloud.google.com/go/shell", - sum = "h1:3Fq2hzO0ZSyaqBboJrFkwwf/qMufDtqwwA6ep8EZxEI=", - version = "v1.7.5", + sum = "h1:CPn8dHSJgZsIaMtGw5iMoF/6Ab7l5A2g34CIjVxlU3c=", + version = "v1.7.9", ) go_repository( name = "com_google_cloud_go_spanner", importpath = "cloud.google.com/go/spanner", - sum = "h1:fJq+ZfQUDHE+cy1li0bJA8+sy2oiSGhuGqN5nqVaZdU=", - version = "v1.57.0", + sum = "h1:ltyPbHA/nRAtAhU/o742dXBCI1eNHPeaRY09Ja8B+hM=", + version = "v1.64.0", ) go_repository( name = "com_google_cloud_go_speech", importpath = "cloud.google.com/go/speech", - sum = "h1:nuFc+Kj5B8de75nN4FdPyUbI2SiBoHZG6BLurXL56Q0=", - version = "v1.21.1", + sum = "h1:zuiX3ExV9jv1rrTFFyYZF5DvYys0/JByeErC50Hyw+g=", + version = "v1.23.3", ) go_repository( name = "com_google_cloud_go_storage", importpath = "cloud.google.com/go/storage", - sum = "h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8=", - version = "v1.36.0", + sum = "h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs=", + version = "v1.43.0", ) go_repository( name = "com_google_cloud_go_storagetransfer", importpath = "cloud.google.com/go/storagetransfer", - sum = "h1:dy4fL3wO0VABvzM05ycMUPFHxTPbJz9Em8ikAJVqSbI=", - version = "v1.10.4", + sum = "h1:hFCYNbls3DoAA49BZ8bWfmdUPfwLa708h1F6gPy76OE=", + version = "v1.10.8", ) go_repository( name = "com_google_cloud_go_talent", importpath = "cloud.google.com/go/talent", - sum = "h1:JssV0CE3FNujuSWn7SkosOzg7qrMxVnt6txOfGcMSa4=", - version = "v1.6.6", + sum = "h1:Zc1FO2NTLjCNztqnyll7DwKobFYomyCijRlqbJj+7mc=", + version = "v1.6.10", ) go_repository( name = "com_google_cloud_go_texttospeech", importpath = "cloud.google.com/go/texttospeech", - sum = "h1:dxY2Q5mHCbrGa3oPR2O3PCicdnvKa1JmwGQK36EFLOw=", - version = "v1.7.5", + sum = "h1:wn9UNRlEw+vCDFd2NBVPrNGFwB+n/cV20i81MBlbwas=", + version = "v1.7.9", ) go_repository( name = "com_google_cloud_go_tpu", importpath = "cloud.google.com/go/tpu", - sum = "h1:C8YyYda8WtNdBoCgFwwBzZd+S6+EScHOxM/z1h0NNp8=", - version = "v1.6.5", + sum = "h1:e6TbpIGmKdFFjW/OH8uQl0U0+t0K4TVN5mO2C+zBBtQ=", + version = "v1.6.9", ) go_repository( name = "com_google_cloud_go_trace", importpath = "cloud.google.com/go/trace", - sum = "h1:0pr4lIKJ5XZFYD9GtxXEWr0KkVeigc3wlGpZco0X1oA=", - version = "v1.10.5", + sum = "h1:Cy6D1Zdz8up4mIPUWModTuIGDr3fh7AZaCnR+uyxpgA=", + version = "v1.10.9", ) go_repository( name = "com_google_cloud_go_translate", importpath = "cloud.google.com/go/translate", - sum = "h1:upovZ0wRMdzZvXnu+RPam41B0mRJ+coRXFP2cYFJ7ew=", - version = "v1.10.1", + sum = "h1:HGFw8dhEp6xYCDWG5fRNwZHfY6MiyCh97RHBBkzsuNM=", + version = "v1.10.5", ) go_repository( name = "com_google_cloud_go_video", importpath = "cloud.google.com/go/video", - sum = "h1:TXwotxkShP1OqgKsbd+b8N5hrIHavSyLGvYnLGCZ7xc=", - version = "v1.20.4", + sum = "h1:f/Ez6k2aeN+1+XoAaFCTTqOD+oq8c38fHDi8vd9D3tg=", + version = "v1.21.2", ) go_repository( name = "com_google_cloud_go_videointelligence", importpath = "cloud.google.com/go/videointelligence", - sum = "h1:mYaWH8uhUCXLJCN3gdXswKzRa2+lK0zN6/KsIubm6pE=", - version = "v1.11.5", + sum = "h1:fGlVXtrk3mIh2DFIggTQ4xoA2VruiTkXZHCl6IDY0Bk=", + version = "v1.11.9", ) go_repository( name = "com_google_cloud_go_vision_v2", importpath = "cloud.google.com/go/vision/v2", - sum = "h1:W52z1b6LdGI66MVhE70g/NFty9zCYYcjdKuycqmlhtg=", - version = 
"v2.8.0", + sum = "h1:kBZ62LquS8V8u+N8wWTLgn2tHqaC4poQuGjRaaR+WGE=", + version = "v2.8.4", ) go_repository( name = "com_google_cloud_go_vmmigration", importpath = "cloud.google.com/go/vmmigration", - sum = "h1:5v9RT2vWyuw3pK2ox0HQpkoftO7Q7/8591dTxxQc79g=", - version = "v1.7.5", + sum = "h1:+X5Frseyehz8ZvnVSRZYXAwEEQXjS4oKK4EV/0KbS9s=", + version = "v1.7.9", ) go_repository( name = "com_google_cloud_go_vmwareengine", importpath = "cloud.google.com/go/vmwareengine", - sum = "h1:EGdDi9QbqThfZq3ILcDK5g+m9jTevc34AY5tACx5v7k=", - version = "v1.1.1", + sum = "h1:tzqTbh5CAqZDVJrEgbRGDFgPyCx5bjIPH5Cm0xqVamA=", + version = "v1.1.5", ) go_repository( name = "com_google_cloud_go_vpcaccess", importpath = "cloud.google.com/go/vpcaccess", - sum = "h1:XyL6hTLtEM/eE4F1GEge8xUN9ZCkiVWn44K/YA7z1rQ=", - version = "v1.7.5", + sum = "h1:LbQaXRQMTPCPmJKoVIW/2vvj80FCiGG+lAyOzNpKs6M=", + version = "v1.7.9", ) go_repository( name = "com_google_cloud_go_webrisk", importpath = "cloud.google.com/go/webrisk", - sum = "h1:251MvGuC8wisNN7+jqu9DDDZAi38KiMXxOpA/EWy4dE=", - version = "v1.9.5", + sum = "h1:WmSWTAIpQEKscbnbVUeWWdq+p11Q8P1Gn6ADI8yAQCI=", + version = "v1.9.9", ) go_repository( name = "com_google_cloud_go_websecurityscanner", importpath = "cloud.google.com/go/websecurityscanner", - sum = "h1:YqWZrZYabG88TZt7364XWRJGhxmxhony2ZUyZEYMF2k=", - version = "v1.6.5", + sum = "h1:4tbX6llT8kBqUJbpB4Wjj9sqWNYwCUGt3WP6uVVv00w=", + version = "v1.6.9", ) go_repository( name = "com_google_cloud_go_workflows", importpath = "cloud.google.com/go/workflows", - sum = "h1:uHNmUiatTbPQ4H1pabwfzpfEYD4BBnqDHqMm2IesOh4=", - version = "v1.12.4", + sum = "h1:n5SOGamA/HtlpWAIXxKXpuGq1ta3wDpyOftDgjIcNHU=", + version = "v1.12.8", ) go_repository( name = "com_shuralyov_dmitri_gpu_mtl", @@ -2100,6 +1981,12 @@ def go_repositories(): sum = "h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY=", version = "v0.0.0-20190408044501-666a987793e9", ) + go_repository( + name = "dev_cel_expr", + importpath = "cel.dev/expr", + sum = "h1:O1jzfJCQBfL5BFoYktaxwIhuttaQPsVWerH9/EEKx0w=", + version = "v0.15.0", + ) go_repository( name = "in_gopkg_alecthomas_kingpin_v2", importpath = "gopkg.in/alecthomas/kingpin.v2", @@ -2127,14 +2014,8 @@ def go_repositories(): go_repository( name = "in_gopkg_ini_v1", importpath = "gopkg.in/ini.v1", - sum = "h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww=", - version = "v1.57.0", - ) - go_repository( - name = "in_gopkg_resty_v1", - importpath = "gopkg.in/resty.v1", - sum = "h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI=", - version = "v1.12.0", + sum = "h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=", + version = "v1.67.0", ) go_repository( name = "in_gopkg_square_go_jose_v2", @@ -2173,10 +2054,28 @@ def go_repositories(): version = "v1.4.0", ) go_repository( - name = "io_etcd_go_bbolt", - importpath = "go.etcd.io/bbolt", - sum = "h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=", - version = "v1.3.2", + name = "io_etcd_go_etcd_api_v3", + importpath = "go.etcd.io/etcd/api/v3", + sum = "h1:W4sw5ZoU2Juc9gBWuLk5U6fHfNVyY1WC5g9uiXZio/c=", + version = "v3.5.12", + ) + go_repository( + name = "io_etcd_go_etcd_client_pkg_v3", + importpath = "go.etcd.io/etcd/client/pkg/v3", + sum = "h1:EYDL6pWwyOsylrQyLp2w+HkQ46ATiOvoEdMarindU2A=", + version = "v3.5.12", + ) + go_repository( + name = "io_etcd_go_etcd_client_v2", + importpath = "go.etcd.io/etcd/client/v2", + sum = "h1:0m4ovXYo1CHaA/Mp3X/Fak5sRNIWf01wk/X1/G3sGKI=", + version = "v2.305.12", + ) + go_repository( + name = "io_etcd_go_etcd_client_v3", + importpath = "go.etcd.io/etcd/client/v3", 
+ sum = "h1:v5lCPXn1pf1Uu3M4laUE2hp/geOTc5uPcYYsNe1lDxg=", + version = "v3.5.12", ) go_repository( name = "io_k8s_api", @@ -2247,38 +2146,38 @@ def go_repositories(): go_repository( name = "io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc", importpath = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc", - sum = "h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg=", - version = "v0.49.0", + sum = "h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g=", + version = "v0.53.0", ) go_repository( name = "io_opentelemetry_go_contrib_instrumentation_net_http_otelhttp", importpath = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp", - sum = "h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=", - version = "v0.49.0", + sum = "h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=", + version = "v0.53.0", ) go_repository( name = "io_opentelemetry_go_otel", importpath = "go.opentelemetry.io/otel", - sum = "h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=", - version = "v1.24.0", + sum = "h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=", + version = "v1.28.0", ) go_repository( name = "io_opentelemetry_go_otel_metric", importpath = "go.opentelemetry.io/otel/metric", - sum = "h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=", - version = "v1.24.0", + sum = "h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=", + version = "v1.28.0", ) go_repository( name = "io_opentelemetry_go_otel_sdk", importpath = "go.opentelemetry.io/otel/sdk", - sum = "h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=", - version = "v1.21.0", + sum = "h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw=", + version = "v1.24.0", ) go_repository( name = "io_opentelemetry_go_otel_trace", importpath = "go.opentelemetry.io/otel/trace", - sum = "h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=", - version = "v1.24.0", + sum = "h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=", + version = "v1.28.0", ) go_repository( name = "io_opentelemetry_go_proto_otlp", @@ -2319,8 +2218,8 @@ def go_repositories(): go_repository( name = "org_golang_google_api", importpath = "google.golang.org/api", - sum = "h1:6m4NUwrZYhAaVIHZWxaKjw1L1vNAjtMwORmKRyEEo24=", - version = "v0.166.0", + sum = "h1:51y8fJ/b1AaaBRJr4yWm96fPcuxSo0JcegXE3DaHQHw=", + version = "v0.188.0", ) go_repository( name = "org_golang_google_appengine", @@ -2331,44 +2230,45 @@ def go_repositories(): go_repository( name = "org_golang_google_genproto", importpath = "google.golang.org/genproto", - sum = "h1:Zmyn5CV/jxzKnF+3d+xzbomACPwLQqVpLTpyXN5uTaQ=", - version = "v0.0.0-20240221002015-b0ce06bbee7c", + sum = "h1:/hmn0Ku5kWij/kjGsrcJeC1T/MrJi2iNWwgAqrihFwc=", + version = "v0.0.0-20240711142825-46eb208f015d", ) go_repository( name = "org_golang_google_genproto_googleapis_api", importpath = "google.golang.org/genproto/googleapis/api", - sum = "h1:9g7erC9qu44ks7UK4gDNlnk4kOxZG707xKm4jVniy6o=", - version = "v0.0.0-20240221002015-b0ce06bbee7c", + sum = "h1:kHjw/5UfflP/L5EbledDrcG4C2597RtymmGRZvHiCuY=", + version = "v0.0.0-20240711142825-46eb208f015d", ) go_repository( name = "org_golang_google_genproto_googleapis_bytestream", importpath = "google.golang.org/genproto/googleapis/bytestream", - sum = "h1:SlwxrP5Neh11ftq/nw7ooqXcJNSqf0VM7wFiGRk8J9Y=", - version = "v0.0.0-20240213162025-012b6fc9bca9", + sum = "h1:HD1mcQ5mRhp3kzY9/QUKQEg4f0UnbPXt94Ljd1l8PiQ=", + version = "v0.0.0-20240708141625-4ad9e859172b", ) go_repository( name = "org_golang_google_genproto_googleapis_rpc", importpath = "google.golang.org/genproto/googleapis/rpc", - sum = 
"h1:NUsgEN92SQQqzfA+YtqYNqYmB3DMMYLlIwUZAQFVFbo=", - version = "v0.0.0-20240221002015-b0ce06bbee7c", + sum = "h1:JU0iKnSg02Gmb5ZdV8nYsKEKsP6o/FGVWTrw4i1DA9A=", + version = "v0.0.0-20240711142825-46eb208f015d", ) go_repository( name = "org_golang_google_protobuf", + build_directives = ["gazelle:proto disable"], importpath = "google.golang.org/protobuf", - sum = "h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=", - version = "v1.32.0", + sum = "h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=", + version = "v1.34.2", ) go_repository( name = "org_golang_x_crypto", importpath = "golang.org/x/crypto", - sum = "h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=", - version = "v0.19.0", + sum = "h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=", + version = "v0.25.0", ) go_repository( name = "org_golang_x_exp", importpath = "golang.org/x/exp", - sum = "h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y=", - version = "v0.0.0-20200224162631-6cc2880d07d6", + sum = "h1:wDLEX9a7YQoKdKNQt88rtydkqDxeGaBUTnIYc3iG/mA=", + version = "v0.0.0-20240716175740-e3f259677ff7", ) go_repository( name = "org_golang_x_image", @@ -2385,32 +2285,32 @@ def go_repositories(): go_repository( name = "org_golang_x_mobile", importpath = "golang.org/x/mobile", - sum = "h1:OVJ6QQUBAesB8CZijKDSsXX7xYVtUhrkY0gwMfbi4p4=", - version = "v0.0.0-20200801112145-973feb4309de", + sum = "h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs=", + version = "v0.0.0-20190719004257-d2bd2a29d028", ) go_repository( name = "org_golang_x_mod", importpath = "golang.org/x/mod", - sum = "h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=", - version = "v0.14.0", + sum = "h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=", + version = "v0.19.0", ) go_repository( name = "org_golang_x_sync", importpath = "golang.org/x/sync", - sum = "h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=", - version = "v0.6.0", + sum = "h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=", + version = "v0.7.0", ) go_repository( name = "org_golang_x_sys", importpath = "golang.org/x/sys", - sum = "h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=", - version = "v0.17.0", + sum = "h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=", + version = "v0.22.0", ) go_repository( name = "org_golang_x_term", importpath = "golang.org/x/term", - sum = "h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=", - version = "v0.17.0", + sum = "h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=", + version = "v0.22.0", ) go_repository( name = "org_golang_x_time", @@ -2421,8 +2321,8 @@ def go_repositories(): go_repository( name = "org_golang_x_tools", importpath = "golang.org/x/tools", - sum = "h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=", - version = "v0.17.0", + sum = "h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=", + version = "v0.23.0", ) go_repository( name = "org_golang_x_xerrors", @@ -2433,8 +2333,8 @@ def go_repositories(): go_repository( name = "org_uber_go_atomic", importpath = "go.uber.org/atomic", - sum = "h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=", - version = "v1.9.0", + sum = "h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=", + version = "v1.11.0", ) go_repository( name = "org_uber_go_goleak", @@ -2445,12 +2345,12 @@ def go_repositories(): go_repository( name = "org_uber_go_multierr", importpath = "go.uber.org/multierr", - sum = "h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=", - version = "v1.6.0", + sum = "h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=", + version = "v1.11.0", ) go_repository( name = "org_uber_go_zap", importpath = "go.uber.org/zap", - sum = 
"h1:CSUJ2mjFszzEWt4CdKISEuChVIXGBn3lAPwkRGyVrc4=", - version = "v1.18.1", + sum = "h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=", + version = "v1.21.0", ) diff --git a/rules_resf/internal/resf_bundle/BUILD b/rules_resf/internal/resf_bundle/BUILD index 68d56da9..6d91dc5b 100644 --- a/rules_resf/internal/resf_bundle/BUILD +++ b/rules_resf/internal/resf_bundle/BUILD @@ -1,8 +1,8 @@ -package(default_visibility = ["//rules_resf/internal:__subpackages__"]) - load("@build_bazel_rules_nodejs//:index.bzl", "nodejs_binary") load(":resf_bundle.bzl", "WEBPACK_DATA") +package(default_visibility = ["//rules_resf/internal:__subpackages__"]) + exports_files([ "assembler.js", "webpack.config.js", diff --git a/servicecatalog/BUILD.bazel b/servicecatalog/BUILD.bazel index a5d07088..560ae9af 100644 --- a/servicecatalog/BUILD.bazel +++ b/servicecatalog/BUILD.bazel @@ -13,6 +13,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/authzed/grpcutil", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", ], ) diff --git a/temporalutils/BUILD.bazel b/temporalutils/BUILD.bazel index 6ddfb505..82a94d9b 100644 --- a/temporalutils/BUILD.bazel +++ b/temporalutils/BUILD.bazel @@ -11,7 +11,7 @@ go_library( "//vendor/github.com/spf13/viper", "//vendor/go.temporal.io/api/workflowservice/v1:workflowservice", "//vendor/go.temporal.io/sdk/client", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//credentials", ], ) diff --git a/utils/BUILD.bazel b/utils/BUILD.bazel index 5edf4f3a..6a87eb61 100644 --- a/utils/BUILD.bazel +++ b/utils/BUILD.bazel @@ -39,7 +39,7 @@ go_library( "//vendor/github.com/vbauerster/mpb/v7:mpb", "//vendor/github.com/vbauerster/mpb/v7/decor", "@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//credentials/insecure", "@org_golang_google_grpc//metadata", diff --git a/vendor/cloud.google.com/go/auth/BUILD.bazel b/vendor/cloud.google.com/go/auth/BUILD.bazel new file mode 100644 index 00000000..d76f6ed6 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/BUILD.bazel @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "auth", + srcs = [ + "auth.go", + "threelegged.go", + ], + importmap = "peridot.resf.org/vendor/cloud.google.com/go/auth", + importpath = "cloud.google.com/go/auth", + visibility = ["//visibility:public"], + deps = [ + "//vendor/cloud.google.com/go/auth/internal", + "//vendor/cloud.google.com/go/auth/internal/jwt", + ], +) diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md new file mode 100644 index 00000000..aaabc68e --- /dev/null +++ b/vendor/cloud.google.com/go/auth/CHANGES.md @@ -0,0 +1,197 @@ +# Changelog + +## [0.7.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.0...auth/v0.7.1) (2024-07-10) + + +### Bug Fixes + +* **auth:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5)) + +## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.6.1...auth/v0.7.0) (2024-07-09) + + +### Features + +* **auth:** Add workload X509 cert provider as a default cert provider ([#10479](https://github.com/googleapis/google-cloud-go/issues/10479)) 
([c51ee6c](https://github.com/googleapis/google-cloud-go/commit/c51ee6cf65ce05b4d501083e49d468c75ac1ea63)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) +* **auth:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) +* **auth:** Check len of slices, not non-nil ([#10483](https://github.com/googleapis/google-cloud-go/issues/10483)) ([0a966a1](https://github.com/googleapis/google-cloud-go/commit/0a966a183e5f0e811977216d736d875b7233e942)) + +## [0.6.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.6.0...auth/v0.6.1) (2024-07-01) + + +### Bug Fixes + +* **auth:** Support gRPC API keys ([#10460](https://github.com/googleapis/google-cloud-go/issues/10460)) ([daa6646](https://github.com/googleapis/google-cloud-go/commit/daa6646d2af5d7fb5b30489f4934c7db89868c7c)) +* **auth:** Update http and grpc transports to support token exchange over mTLS ([#10397](https://github.com/googleapis/google-cloud-go/issues/10397)) ([c6dfdcf](https://github.com/googleapis/google-cloud-go/commit/c6dfdcf893c3f971eba15026c12db0a960ae81f2)) + +## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.2...auth/v0.6.0) (2024-06-25) + + +### Features + +* **auth:** Add non-blocking token refresh for compute MDS ([#10263](https://github.com/googleapis/google-cloud-go/issues/10263)) ([9ac350d](https://github.com/googleapis/google-cloud-go/commit/9ac350da11a49b8e2174d3fc5b1a5070fec78b4e)) + + +### Bug Fixes + +* **auth:** Return error if envvar detected file returns an error ([#10431](https://github.com/googleapis/google-cloud-go/issues/10431)) ([e52b9a7](https://github.com/googleapis/google-cloud-go/commit/e52b9a7c45468827f5d220ab00965191faeb9d05)) + +## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.1...auth/v0.5.2) (2024-06-24) + + +### Bug Fixes + +* **auth:** Fetch initial token when CachedTokenProviderOptions.DisableAutoRefresh is true ([#10415](https://github.com/googleapis/google-cloud-go/issues/10415)) ([3266763](https://github.com/googleapis/google-cloud-go/commit/32667635ca2efad05cd8c087c004ca07d7406913)), refs [#10414](https://github.com/googleapis/google-cloud-go/issues/10414) + +## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.0...auth/v0.5.1) (2024-05-31) + + +### Bug Fixes + +* **auth:** Pass through client to 2LO and 3LO flows ([#10290](https://github.com/googleapis/google-cloud-go/issues/10290)) ([685784e](https://github.com/googleapis/google-cloud-go/commit/685784ea84358c15e9214bdecb307d37aa3b6d2f)) + +## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.2...auth/v0.5.0) (2024-05-28) + + +### Features + +* **auth:** Adds X509 workload certificate provider ([#10233](https://github.com/googleapis/google-cloud-go/issues/10233)) ([17a9db7](https://github.com/googleapis/google-cloud-go/commit/17a9db73af35e3d1a7a25ac4fd1377a103de6150)) + +## [0.4.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.1...auth/v0.4.2) (2024-05-16) + + +### Bug Fixes + +* **auth:** Enable client certificates by default only for GDU ([#10151](https://github.com/googleapis/google-cloud-go/issues/10151)) ([7c52978](https://github.com/googleapis/google-cloud-go/commit/7c529786275a39b7e00525f7d5e7be0d963e9e15)) +* **auth:** Handle non-Transport DefaultTransport 
([#10162](https://github.com/googleapis/google-cloud-go/issues/10162)) ([fa3bfdb](https://github.com/googleapis/google-cloud-go/commit/fa3bfdb23aaa45b34394a8b61e753b3587506782)), refs [#10159](https://github.com/googleapis/google-cloud-go/issues/10159) +* **auth:** Have refresh time match docs ([#10147](https://github.com/googleapis/google-cloud-go/issues/10147)) ([bcb5568](https://github.com/googleapis/google-cloud-go/commit/bcb5568c07a54dd3d2e869d15f502b0741a609e8)) +* **auth:** Update compute token fetching error with named prefix ([#10180](https://github.com/googleapis/google-cloud-go/issues/10180)) ([4573504](https://github.com/googleapis/google-cloud-go/commit/4573504828d2928bebedc875d87650ba227829ea)) + +## [0.4.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.0...auth/v0.4.1) (2024-05-09) + + +### Bug Fixes + +* **auth:** Don't try to detect default creds it opt configured ([#10143](https://github.com/googleapis/google-cloud-go/issues/10143)) ([804632e](https://github.com/googleapis/google-cloud-go/commit/804632e7c5b0b85ff522f7951114485e256eb5bc)) + +## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.3.0...auth/v0.4.0) (2024-05-07) + + +### Features + +* **auth:** Enable client certificates by default ([#10102](https://github.com/googleapis/google-cloud-go/issues/10102)) ([9013e52](https://github.com/googleapis/google-cloud-go/commit/9013e5200a6ec0f178ed91acb255481ffb073a2c)) + + +### Bug Fixes + +* **auth:** Get s2a logic up to date ([#10093](https://github.com/googleapis/google-cloud-go/issues/10093)) ([4fe9ae4](https://github.com/googleapis/google-cloud-go/commit/4fe9ae4b7101af2a5221d6d6b2e77b479305bb06)) + +## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.2...auth/v0.3.0) (2024-04-23) + + +### Features + +* **auth/httptransport:** Add ability to customize transport ([#10023](https://github.com/googleapis/google-cloud-go/issues/10023)) ([72c7f6b](https://github.com/googleapis/google-cloud-go/commit/72c7f6bbec3136cc7a62788fc7186bc33ef6c3b3)), refs [#9812](https://github.com/googleapis/google-cloud-go/issues/9812) [#9814](https://github.com/googleapis/google-cloud-go/issues/9814) + + +### Bug Fixes + +* **auth/credentials:** Error on bad file name if explicitly set ([#10018](https://github.com/googleapis/google-cloud-go/issues/10018)) ([55beaa9](https://github.com/googleapis/google-cloud-go/commit/55beaa993aaf052d8be39766afc6777c3c2a0bdd)), refs [#9809](https://github.com/googleapis/google-cloud-go/issues/9809) + +## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.1...auth/v0.2.2) (2024-04-19) + + +### Bug Fixes + +* **auth:** Add internal opt to skip validation on transports ([#9999](https://github.com/googleapis/google-cloud-go/issues/9999)) ([9e20ef8](https://github.com/googleapis/google-cloud-go/commit/9e20ef89f6287d6bd03b8697d5898dc43b4a77cf)), refs [#9823](https://github.com/googleapis/google-cloud-go/issues/9823) +* **auth:** Set secure flag for gRPC conn pools ([#10002](https://github.com/googleapis/google-cloud-go/issues/10002)) ([14e3956](https://github.com/googleapis/google-cloud-go/commit/14e3956dfd736399731b5ee8d9b178ae085cf7ba)), refs [#9833](https://github.com/googleapis/google-cloud-go/issues/9833) + +## [0.2.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.0...auth/v0.2.1) (2024-04-18) + + +### Bug Fixes + +* **auth:** Default gRPC token type to Bearer if not set ([#9800](https://github.com/googleapis/google-cloud-go/issues/9800)) 
([5284066](https://github.com/googleapis/google-cloud-go/commit/5284066670b6fe65d79089cfe0199c9660f87fc7)) + +## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.1.1...auth/v0.2.0) (2024-04-15) + +### Breaking Changes + +In the below mentioned commits there were a few large breaking changes since the +last release of the module. + +1. The `Credentials` type has been moved to the root of the module as it is + becoming the core abstraction for the whole module. +2. Because of the above mentioned change many functions that previously + returned a `TokenProvider` now return `Credentials`. Similarly, these + functions have been renamed to be more specific. +3. Most places that used to take an optional `TokenProvider` now accept + `Credentials`. You can make a `Credentials` from a `TokenProvider` using the + constructor found in the `auth` package. +4. The `detect` package has been renamed to `credentials`. With this change some + function signatures were also updated for better readability. +5. Derivative auth flows like `impersonate` and `downscope` have been moved to + be under the new `credentials` package. + +Although these changes are disruptive we think that they are for the best of the +long-term health of the module. We do not expect any more large breaking changes +like these in future revisions, even before 1.0.0. This version will be the +first version of the auth library that our client libraries start to use and +depend on. + +### Features + +* **auth/credentials/externalaccount:** Add default TokenURL ([#9700](https://github.com/googleapis/google-cloud-go/issues/9700)) ([81830e6](https://github.com/googleapis/google-cloud-go/commit/81830e6848ceefd055aa4d08f933d1154455a0f6)) +* **auth:** Add downscope.Options.UniverseDomain ([#9634](https://github.com/googleapis/google-cloud-go/issues/9634)) ([52cf7d7](https://github.com/googleapis/google-cloud-go/commit/52cf7d780853594291c4e34302d618299d1f5a1d)) +* **auth:** Add universe domain to grpctransport and httptransport ([#9663](https://github.com/googleapis/google-cloud-go/issues/9663)) ([67d353b](https://github.com/googleapis/google-cloud-go/commit/67d353beefe3b607c08c891876fbd95ab89e5fe3)), refs [#9670](https://github.com/googleapis/google-cloud-go/issues/9670) +* **auth:** Add UniverseDomain to DetectOptions ([#9536](https://github.com/googleapis/google-cloud-go/issues/9536)) ([3618d3f](https://github.com/googleapis/google-cloud-go/commit/3618d3f7061615c0e189f376c75abc201203b501)) +* **auth:** Make package externalaccount public ([#9633](https://github.com/googleapis/google-cloud-go/issues/9633)) ([a0978d8](https://github.com/googleapis/google-cloud-go/commit/a0978d8e96968399940ebd7d092539772bf9caac)) +* **auth:** Move credentials to base auth package ([#9590](https://github.com/googleapis/google-cloud-go/issues/9590)) ([1a04baf](https://github.com/googleapis/google-cloud-go/commit/1a04bafa83c27342b9308d785645e1e5423ea10d)) +* **auth:** Refactor public sigs to use Credentials ([#9603](https://github.com/googleapis/google-cloud-go/issues/9603)) ([69cb240](https://github.com/googleapis/google-cloud-go/commit/69cb240c530b1f7173a9af2555c19e9a1beb56c5)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a)) +* **auth:** Fix uint32 conversion ([9221c7f](https://github.com/googleapis/google-cloud-go/commit/9221c7fa12cef9d5fb7ddc92f41f1d6204971c7b)) +* **auth:** Port sts expires fix 
([#9618](https://github.com/googleapis/google-cloud-go/issues/9618)) ([7bec97b](https://github.com/googleapis/google-cloud-go/commit/7bec97b2f51ed3ac4f9b88bf100d301da3f5d1bd)) +* **auth:** Read universe_domain from all credentials files ([#9632](https://github.com/googleapis/google-cloud-go/issues/9632)) ([16efbb5](https://github.com/googleapis/google-cloud-go/commit/16efbb52e39ea4a319e5ee1e95c0e0305b6d9824)) +* **auth:** Remove content-type header from idms get requests ([#9508](https://github.com/googleapis/google-cloud-go/issues/9508)) ([8589f41](https://github.com/googleapis/google-cloud-go/commit/8589f41599d265d7c3d46a3d86c9fab2329cbdd9)) +* **auth:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a)) + +## [0.1.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.1.0...auth/v0.1.1) (2024-03-10) + + +### Bug Fixes + +* **auth/impersonate:** Properly send default detect params ([#9529](https://github.com/googleapis/google-cloud-go/issues/9529)) ([5b6b8be](https://github.com/googleapis/google-cloud-go/commit/5b6b8bef577f82707e51f5cc5d258d5bdf90218f)), refs [#9136](https://github.com/googleapis/google-cloud-go/issues/9136) +* **auth:** Update grpc-go to v1.56.3 ([343cea8](https://github.com/googleapis/google-cloud-go/commit/343cea8c43b1e31ae21ad50ad31d3b0b60143f8c)) +* **auth:** Update grpc-go to v1.59.0 ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7)) + +## 0.1.0 (2023-10-18) + + +### Features + +* **auth:** Add base auth package ([#8465](https://github.com/googleapis/google-cloud-go/issues/8465)) ([6a45f26](https://github.com/googleapis/google-cloud-go/commit/6a45f26b809b64edae21f312c18d4205f96b180e)) +* **auth:** Add cert support to httptransport ([#8569](https://github.com/googleapis/google-cloud-go/issues/8569)) ([37e3435](https://github.com/googleapis/google-cloud-go/commit/37e3435f8e98595eafab481bdfcb31a4c56fa993)) +* **auth:** Add Credentials.UniverseDomain() ([#8654](https://github.com/googleapis/google-cloud-go/issues/8654)) ([af0aa1e](https://github.com/googleapis/google-cloud-go/commit/af0aa1ed8015bc8fe0dd87a7549ae029107cbdb8)) +* **auth:** Add detect package ([#8491](https://github.com/googleapis/google-cloud-go/issues/8491)) ([d977419](https://github.com/googleapis/google-cloud-go/commit/d977419a3269f6acc193df77a2136a6eb4b4add7)) +* **auth:** Add downscope package ([#8532](https://github.com/googleapis/google-cloud-go/issues/8532)) ([dda9bff](https://github.com/googleapis/google-cloud-go/commit/dda9bff8ec70e6d104901b4105d13dcaa4e2404c)) +* **auth:** Add grpctransport package ([#8625](https://github.com/googleapis/google-cloud-go/issues/8625)) ([69a8347](https://github.com/googleapis/google-cloud-go/commit/69a83470bdcc7ed10c6c36d1abc3b7cfdb8a0ee5)) +* **auth:** Add httptransport package ([#8567](https://github.com/googleapis/google-cloud-go/issues/8567)) ([6898597](https://github.com/googleapis/google-cloud-go/commit/6898597d2ea95d630fcd00fd15c58c75ea843bff)) +* **auth:** Add idtoken package ([#8580](https://github.com/googleapis/google-cloud-go/issues/8580)) ([a79e693](https://github.com/googleapis/google-cloud-go/commit/a79e693e97e4e3e1c6742099af3dbc58866d88fe)) +* **auth:** Add impersonate package ([#8578](https://github.com/googleapis/google-cloud-go/issues/8578)) ([e29ba0c](https://github.com/googleapis/google-cloud-go/commit/e29ba0cb7bd3888ab9e808087027dc5a32474c04)) +* **auth:** Add support for external accounts 
in detect ([#8508](https://github.com/googleapis/google-cloud-go/issues/8508)) ([62210d5](https://github.com/googleapis/google-cloud-go/commit/62210d5d3e56e8e9f35db8e6ac0defec19582507)) +* **auth:** Port external account changes ([#8697](https://github.com/googleapis/google-cloud-go/issues/8697)) ([5823db5](https://github.com/googleapis/google-cloud-go/commit/5823db5d633069999b58b9131a7f9cd77e82c899)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) +* **auth:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) diff --git a/vendor/cloud.google.com/go/compute/LICENSE b/vendor/cloud.google.com/go/auth/LICENSE similarity index 100% rename from vendor/cloud.google.com/go/compute/LICENSE rename to vendor/cloud.google.com/go/auth/LICENSE diff --git a/vendor/cloud.google.com/go/auth/README.md b/vendor/cloud.google.com/go/auth/README.md new file mode 100644 index 00000000..36de276a --- /dev/null +++ b/vendor/cloud.google.com/go/auth/README.md @@ -0,0 +1,4 @@ +# auth + +This module is currently EXPERIMENTAL and under active development. It is not +yet intended to be used. diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go new file mode 100644 index 00000000..58af9318 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/auth.go @@ -0,0 +1,591 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package auth + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/jwt" +) + +const ( + // Parameter keys for AuthCodeURL method to support PKCE. + codeChallengeKey = "code_challenge" + codeChallengeMethodKey = "code_challenge_method" + + // Parameter key for Exchange method to support PKCE. + codeVerifierKey = "code_verifier" + + // 3 minutes and 45 seconds before expiration. The shortest MDS cache is 4 minutes, + // so we give it 15 seconds to refresh its cache before attempting to refresh a token. + defaultExpiryDelta = 225 * time.Second + + universeDomainDefault = "googleapis.com" +) + +// tokenState represents different states for a [Token]. +type tokenState int + +const ( + // fresh indicates that the [Token] is valid. It is not expired or close to + // expired, or the token has no expiry. + fresh tokenState = iota + // stale indicates that the [Token] is close to expired, and should be + // refreshed. The token can be used normally. + stale + // invalid indicates that the [Token] is expired or invalid. The token + // cannot be used for a normal operation.
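+ // Token requests fall back to the blocking refresh path in this state.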
+ invalid +) + +var ( + defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" + defaultHeader = &jwt.Header{Algorithm: jwt.HeaderAlgRSA256, Type: jwt.HeaderType} + + // for testing + timeNow = time.Now +) + +// TokenProvider specifies an interface for anything that can return a token. +type TokenProvider interface { + // Token returns a Token or an error. + // The Token returned must be safe to use + // concurrently. + // The returned Token must not be modified. + // The context provided must be sent along to any requests that are made in + // the implementing code. + Token(context.Context) (*Token, error) +} + +// Token holds the credential token used to authorized requests. All fields are +// considered read-only. +type Token struct { + // Value is the token used to authorize requests. It is usually an access + // token but may be other types of tokens such as ID tokens in some flows. + Value string + // Type is the type of token Value is. If uninitialized, it should be + // assumed to be a "Bearer" token. + Type string + // Expiry is the time the token is set to expire. + Expiry time.Time + // Metadata may include, but is not limited to, the body of the token + // response returned by the server. + Metadata map[string]interface{} // TODO(codyoss): maybe make a method to flatten metadata to avoid []string for url.Values +} + +// IsValid reports that a [Token] is non-nil, has a [Token.Value], and has not +// expired. A token is considered expired if [Token.Expiry] has passed or will +// pass in the next 225 seconds. +func (t *Token) IsValid() bool { + return t.isValidWithEarlyExpiry(defaultExpiryDelta) +} + +func (t *Token) isValidWithEarlyExpiry(earlyExpiry time.Duration) bool { + if t.isEmpty() { + return false + } + if t.Expiry.IsZero() { + return true + } + return !t.Expiry.Round(0).Add(-earlyExpiry).Before(timeNow()) +} + +func (t *Token) isEmpty() bool { + return t == nil || t.Value == "" +} + +// Credentials holds Google credentials, including +// [Application Default Credentials](https://developers.google.com/accounts/docs/application-default-credentials). +type Credentials struct { + json []byte + projectID CredentialsPropertyProvider + quotaProjectID CredentialsPropertyProvider + // universeDomain is the default service domain for a given Cloud universe. + universeDomain CredentialsPropertyProvider + + TokenProvider +} + +// JSON returns the bytes associated with the the file used to source +// credentials if one was used. +func (c *Credentials) JSON() []byte { + return c.json +} + +// ProjectID returns the associated project ID from the underlying file or +// environment. +func (c *Credentials) ProjectID(ctx context.Context) (string, error) { + if c.projectID == nil { + return internal.GetProjectID(c.json, ""), nil + } + v, err := c.projectID.GetProperty(ctx) + if err != nil { + return "", err + } + return internal.GetProjectID(c.json, v), nil +} + +// QuotaProjectID returns the associated quota project ID from the underlying +// file or environment. +func (c *Credentials) QuotaProjectID(ctx context.Context) (string, error) { + if c.quotaProjectID == nil { + return internal.GetQuotaProject(c.json, ""), nil + } + v, err := c.quotaProjectID.GetProperty(ctx) + if err != nil { + return "", err + } + return internal.GetQuotaProject(c.json, v), nil +} + +// UniverseDomain returns the default service domain for a given Cloud universe. +// The default value is "googleapis.com". 
+func (c *Credentials) UniverseDomain(ctx context.Context) (string, error) { + if c.universeDomain == nil { + return universeDomainDefault, nil + } + v, err := c.universeDomain.GetProperty(ctx) + if err != nil { + return "", err + } + if v == "" { + return universeDomainDefault, nil + } + return v, err +} + +// CredentialsPropertyProvider provides an implementation to fetch a property +// value for [Credentials]. +type CredentialsPropertyProvider interface { + GetProperty(context.Context) (string, error) +} + +// CredentialsPropertyFunc is a type adapter to allow the use of ordinary +// functions as a [CredentialsPropertyProvider]. +type CredentialsPropertyFunc func(context.Context) (string, error) + +// GetProperty loads the property value for the given context. +func (p CredentialsPropertyFunc) GetProperty(ctx context.Context) (string, error) { + return p(ctx) +} + +// CredentialsOptions are used to configure [Credentials]. +type CredentialsOptions struct { + // TokenProvider is a means of sourcing a token for the credentials. Required. + TokenProvider TokenProvider + // JSON is the raw contents of the credentials file if sourced from a file. + JSON []byte + // ProjectIDProvider resolves the project ID associated with the + // credentials. + ProjectIDProvider CredentialsPropertyProvider + // QuotaProjectIDProvider resolves the quota project ID associated with the + // credentials. + QuotaProjectIDProvider CredentialsPropertyProvider + // UniverseDomainProvider resolves the universe domain with the credentials. + UniverseDomainProvider CredentialsPropertyProvider +} + +// NewCredentials returns new [Credentials] from the provided options. Most users +// will want to build this object using a function from the +// [cloud.google.com/go/auth/credentials] package. +func NewCredentials(opts *CredentialsOptions) *Credentials { + creds := &Credentials{ + TokenProvider: opts.TokenProvider, + json: opts.JSON, + projectID: opts.ProjectIDProvider, + quotaProjectID: opts.QuotaProjectIDProvider, + universeDomain: opts.UniverseDomainProvider, + } + + return creds +} + +// CachedTokenProviderOptions provides options for configuring a +// CachedTokenProvider. +type CachedTokenProviderOptions struct { + // DisableAutoRefresh makes the TokenProvider always return the same token, + // even if it is expired. The default is false. Optional. + DisableAutoRefresh bool + // ExpireEarly configures the amount of time before a token expires that it + // should be refreshed. If unset, the default value is 3 minutes and 45 + // seconds. Optional. + ExpireEarly time.Duration + // DisableAsyncRefresh configures a synchronous workflow that refreshes + // stale tokens while blocking. The default is false. Optional. + DisableAsyncRefresh bool +} + +func (ctpo *CachedTokenProviderOptions) autoRefresh() bool { + if ctpo == nil { + return true + } + return !ctpo.DisableAutoRefresh +} + +func (ctpo *CachedTokenProviderOptions) expireEarly() time.Duration { + if ctpo == nil { + return defaultExpiryDelta + } + return ctpo.ExpireEarly +} + +func (ctpo *CachedTokenProviderOptions) blockingRefresh() bool { + if ctpo == nil { + return false + } + return ctpo.DisableAsyncRefresh +} + +// NewCachedTokenProvider wraps a [TokenProvider] to cache the tokens returned +// by the underlying provider. By default it will refresh tokens asynchronously +// (non-blocking mode) within a window that starts 3 minutes and 45 seconds +// before they expire.
The asynchronous (non-blocking) refresh can be changed to +// a synchronous (blocking) refresh using the +// CachedTokenProviderOptions.DisableAsyncRefresh option. The time-before-expiry +// duration can be configured using the CachedTokenProviderOptions.ExpireEarly +// option. +func NewCachedTokenProvider(tp TokenProvider, opts *CachedTokenProviderOptions) TokenProvider { + if ctp, ok := tp.(*cachedTokenProvider); ok { + return ctp + } + return &cachedTokenProvider{ + tp: tp, + autoRefresh: opts.autoRefresh(), + expireEarly: opts.expireEarly(), + blockingRefresh: opts.blockingRefresh(), + } +} + +type cachedTokenProvider struct { + tp TokenProvider + autoRefresh bool + expireEarly time.Duration + blockingRefresh bool + + mu sync.Mutex + cachedToken *Token + // isRefreshRunning ensures that the non-blocking refresh will only be + // attempted once, even if multiple callers enter the Token method. + isRefreshRunning bool + // isRefreshErr ensures that the non-blocking refresh will only be attempted + // once per refresh window if an error is encountered. + isRefreshErr bool +} + +func (c *cachedTokenProvider) Token(ctx context.Context) (*Token, error) { + if c.blockingRefresh { + return c.tokenBlocking(ctx) + } + return c.tokenNonBlocking(ctx) +} + +func (c *cachedTokenProvider) tokenNonBlocking(ctx context.Context) (*Token, error) { + switch c.tokenState() { + case fresh: + c.mu.Lock() + defer c.mu.Unlock() + return c.cachedToken, nil + case stale: + c.tokenAsync(ctx) + // Return the stale token immediately to not block customer requests to Cloud services. + c.mu.Lock() + defer c.mu.Unlock() + return c.cachedToken, nil + default: // invalid + return c.tokenBlocking(ctx) + } +} + +// tokenState reports the token's validity. +func (c *cachedTokenProvider) tokenState() tokenState { + c.mu.Lock() + defer c.mu.Unlock() + t := c.cachedToken + if t == nil || t.Value == "" { + return invalid + } else if t.Expiry.IsZero() { + return fresh + } else if timeNow().After(t.Expiry.Round(0)) { + return invalid + } else if timeNow().After(t.Expiry.Round(0).Add(-c.expireEarly)) { + return stale + } + return fresh +} + +// tokenAsync uses a bool to ensure that only one non-blocking token refresh +// happens at a time, even if multiple callers have entered this function +// concurrently. This avoids creating an arbitrary number of concurrent +// goroutines. Retries should be attempted and managed within the Token method. +// If the refresh attempt fails, no further attempts are made until the refresh +// window expires and the token enters the invalid state, at which point the +// blocking call to Token should likely return the same error on the main goroutine. +func (c *cachedTokenProvider) tokenAsync(ctx context.Context) { + fn := func() { + c.mu.Lock() + c.isRefreshRunning = true + c.mu.Unlock() + t, err := c.tp.Token(ctx) + c.mu.Lock() + defer c.mu.Unlock() + c.isRefreshRunning = false + if err != nil { + // Discard errors from the non-blocking refresh, but prevent further + // attempts. 
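+ // The flag is cleared again by the next call to tokenBlocking.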
+ c.isRefreshErr = true + return + } + c.cachedToken = t + } + c.mu.Lock() + defer c.mu.Unlock() + if !c.isRefreshRunning && !c.isRefreshErr { + go fn() + } +} + +func (c *cachedTokenProvider) tokenBlocking(ctx context.Context) (*Token, error) { + c.mu.Lock() + defer c.mu.Unlock() + c.isRefreshErr = false + if c.cachedToken.IsValid() || (!c.autoRefresh && !c.cachedToken.isEmpty()) { + return c.cachedToken, nil + } + t, err := c.tp.Token(ctx) + if err != nil { + return nil, err + } + c.cachedToken = t + return t, nil +} + +// Error is an error associated with retrieving a [Token]. It can hold useful +// additional details for debugging. +type Error struct { + // Response is the HTTP response associated with the error. The body will always + // be already closed and consumed. + Response *http.Response + // Body is the HTTP response body. + Body []byte + // Err is the underlying wrapped error. + Err error + + // code returned in the token response + code string + // description returned in the token response + description string + // uri returned in the token response + uri string +} + +func (e *Error) Error() string { + if e.code != "" { + s := fmt.Sprintf("auth: %q", e.code) + if e.description != "" { + s += fmt.Sprintf(" %q", e.description) + } + if e.uri != "" { + s += fmt.Sprintf(" %q", e.uri) + } + return s + } + return fmt.Sprintf("auth: cannot fetch token: %v\nResponse: %s", e.Response.StatusCode, e.Body) +} + +// Temporary returns true if the error is considered temporary and the request +// may be retried. +func (e *Error) Temporary() bool { + if e.Response == nil { + return false + } + sc := e.Response.StatusCode + return sc == http.StatusInternalServerError || sc == http.StatusServiceUnavailable || sc == http.StatusRequestTimeout || sc == http.StatusTooManyRequests +} + +func (e *Error) Unwrap() error { + return e.Err +} + +// Style describes how the token endpoint wants to receive the ClientID and +// ClientSecret. +type Style int + +const ( + // StyleUnknown means the value has not been initialized. Sending this in + // a request will cause the token exchange to fail. + StyleUnknown Style = iota + // StyleInParams sends client info in the body of a POST request. + StyleInParams + // StyleInHeader sends client info using Basic Authorization header. + StyleInHeader +) + +// Options2LO holds the configuration settings for doing a 2-legged JWT OAuth2 flow. +type Options2LO struct { + // Email is the OAuth2 client ID. This value is set as the "iss" in the + // JWT. + Email string + // PrivateKey contains the contents of an RSA private key or the + // contents of a PEM file that contains a private key. It is used to sign + // the JWT created. + PrivateKey []byte + // TokenURL is the URL the JWT is sent to. Required. + TokenURL string + // PrivateKeyID is the ID of the key used to sign the JWT. It is used as the + // "kid" in the JWT header. Optional. + PrivateKeyID string + // Subject is the user to impersonate. It is used as the "sub" in + // the JWT. Optional. + Subject string + // Scopes specifies requested permissions for the token. Optional. + Scopes []string + // Expires specifies the lifetime of the token. Optional. + Expires time.Duration + // Audience specifies the "aud" in the JWT. Optional. + Audience string + // PrivateClaims allows specifying any custom claims for the JWT. Optional. + PrivateClaims map[string]interface{} + + // Client is the client to be used to make the underlying token requests. + // Optional.
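+ // If nil, a clone of the default HTTP client is used.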
+ Client *http.Client + // UseIDToken requests that the token returned be an ID token if one is + // returned from the server. Optional. + UseIDToken bool +} + +func (o *Options2LO) client() *http.Client { + if o.Client != nil { + return o.Client + } + return internal.CloneDefaultClient() +} + +func (o *Options2LO) validate() error { + if o == nil { + return errors.New("auth: options must be provided") + } + if o.Email == "" { + return errors.New("auth: email must be provided") + } + if len(o.PrivateKey) == 0 { + return errors.New("auth: private key must be provided") + } + if o.TokenURL == "" { + return errors.New("auth: token URL must be provided") + } + return nil +} + +// New2LOTokenProvider returns a [TokenProvider] from the provided options. +func New2LOTokenProvider(opts *Options2LO) (TokenProvider, error) { + if err := opts.validate(); err != nil { + return nil, err + } + return tokenProvider2LO{opts: opts, Client: opts.client()}, nil +} + +type tokenProvider2LO struct { + opts *Options2LO + Client *http.Client +} + +func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) { + pk, err := internal.ParseKey(tp.opts.PrivateKey) + if err != nil { + return nil, err + } + claimSet := &jwt.Claims{ + Iss: tp.opts.Email, + Scope: strings.Join(tp.opts.Scopes, " "), + Aud: tp.opts.TokenURL, + AdditionalClaims: tp.opts.PrivateClaims, + Sub: tp.opts.Subject, + } + if t := tp.opts.Expires; t > 0 { + claimSet.Exp = time.Now().Add(t).Unix() + } + if aud := tp.opts.Audience; aud != "" { + claimSet.Aud = aud + } + h := *defaultHeader + h.KeyID = tp.opts.PrivateKeyID + payload, err := jwt.EncodeJWS(&h, claimSet, pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", defaultGrantType) + v.Set("assertion", payload) + req, err := http.NewRequestWithContext(ctx, "POST", tp.opts.TokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + resp, body, err := internal.DoRequest(tp.Client, req) + if err != nil { + return nil, fmt.Errorf("auth: cannot fetch token: %w", err) + } + if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { + return nil, &Error{ + Response: resp, + Body: body, + } + } + // tokenRes is the JSON response body. 
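+ // Only the fields read below are decoded here; the raw body is separately unmarshaled into Token.Metadata.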
+ var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + IDToken string `json:"id_token"` + ExpiresIn int64 `json:"expires_in"` + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("auth: cannot fetch token: %w", err) + } + token := &Token{ + Value: tokenRes.AccessToken, + Type: tokenRes.TokenType, + } + token.Metadata = make(map[string]interface{}) + json.Unmarshal(body, &token.Metadata) // no error checks for optional fields + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + if v := tokenRes.IDToken; v != "" { + // decode returned id token to get expiry + claimSet, err := jwt.DecodeJWS(v) + if err != nil { + return nil, fmt.Errorf("auth: error decoding JWT token: %w", err) + } + token.Expiry = time.Unix(claimSet.Exp, 0) + } + if tp.opts.UseIDToken { + if tokenRes.IDToken == "" { + return nil, fmt.Errorf("auth: response doesn't have JWT token") + } + token.Value = tokenRes.IDToken + } + return token, nil +} diff --git a/vendor/cloud.google.com/go/auth/credentials/BUILD.bazel b/vendor/cloud.google.com/go/auth/credentials/BUILD.bazel new file mode 100644 index 00000000..7876e8ec --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/BUILD.bazel @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "credentials", + srcs = [ + "compute.go", + "detect.go", + "doc.go", + "filetypes.go", + "selfsignedjwt.go", + ], + importmap = "peridot.resf.org/vendor/cloud.google.com/go/auth/credentials", + importpath = "cloud.google.com/go/auth/credentials", + visibility = ["//visibility:public"], + deps = [ + "//vendor/cloud.google.com/go/auth", + "//vendor/cloud.google.com/go/auth/credentials/internal/externalaccount", + "//vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser", + "//vendor/cloud.google.com/go/auth/credentials/internal/gdch", + "//vendor/cloud.google.com/go/auth/credentials/internal/impersonate", + "//vendor/cloud.google.com/go/auth/internal", + "//vendor/cloud.google.com/go/auth/internal/credsfile", + "//vendor/cloud.google.com/go/auth/internal/jwt", + "//vendor/cloud.google.com/go/compute/metadata", + ], +) diff --git a/vendor/cloud.google.com/go/auth/credentials/compute.go b/vendor/cloud.google.com/go/auth/credentials/compute.go new file mode 100644 index 00000000..6f70fa35 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/compute.go @@ -0,0 +1,86 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package credentials + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/url" + "strings" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/compute/metadata" +) + +var ( + computeTokenMetadata = map[string]interface{}{ + "auth.google.tokenSource": "compute-metadata", + "auth.google.serviceAccount": "default", + } + computeTokenURI = "instance/service-accounts/default/token" +) + +// computeTokenProvider creates a [cloud.google.com/go/auth.TokenProvider] that +// uses the metadata service to retrieve tokens. +func computeTokenProvider(opts *DetectOptions) auth.TokenProvider { + return auth.NewCachedTokenProvider(computeProvider{scopes: opts.Scopes}, &auth.CachedTokenProviderOptions{ + ExpireEarly: opts.EarlyTokenRefresh, + DisableAsyncRefresh: opts.DisableAsyncRefresh, + }) +} + +// computeProvider fetches tokens from the google cloud metadata service. +type computeProvider struct { + scopes []string +} + +type metadataTokenResp struct { + AccessToken string `json:"access_token"` + ExpiresInSec int `json:"expires_in"` + TokenType string `json:"token_type"` +} + +func (cs computeProvider) Token(ctx context.Context) (*auth.Token, error) { + tokenURI, err := url.Parse(computeTokenURI) + if err != nil { + return nil, err + } + if len(cs.scopes) > 0 { + v := url.Values{} + v.Set("scopes", strings.Join(cs.scopes, ",")) + tokenURI.RawQuery = v.Encode() + } + tokenJSON, err := metadata.GetWithContext(ctx, tokenURI.String()) + if err != nil { + return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) + } + var res metadataTokenResp + if err := json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res); err != nil { + return nil, fmt.Errorf("credentials: invalid token JSON from metadata: %w", err) + } + if res.ExpiresInSec == 0 || res.AccessToken == "" { + return nil, errors.New("credentials: incomplete token received from metadata") + } + return &auth.Token{ + Value: res.AccessToken, + Type: res.TokenType, + Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), + Metadata: computeTokenMetadata, + }, nil + +} diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go new file mode 100644 index 00000000..2d9a73ed --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/detect.go @@ -0,0 +1,262 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package credentials + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" + "cloud.google.com/go/compute/metadata" +) + +const ( + // jwtTokenURL is Google's OAuth 2.0 token URL to use with the JWT(2LO) flow. + jwtTokenURL = "https://oauth2.googleapis.com/token" + + // Google's OAuth 2.0 default endpoints. 
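+	// These are used by the user-credential (3LO) flow; see
+	// handleUserCredential in filetypes.go.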
+ googleAuthURL = "https://accounts.google.com/o/oauth2/auth" + googleTokenURL = "https://oauth2.googleapis.com/token" + + // GoogleMTLSTokenURL is Google's default OAuth2.0 mTLS endpoint. + GoogleMTLSTokenURL = "https://oauth2.mtls.googleapis.com/token" + + // Help on default credentials + adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" +) + +var ( + // for testing + allowOnGCECheck = true +) + +// OnGCE reports whether this process is running in Google Cloud. +func OnGCE() bool { + // TODO(codyoss): once all libs use this auth lib move metadata check here + return allowOnGCECheck && metadata.OnGCE() +} + +// DetectDefault searches for "Application Default Credentials" and returns +// a credential based on the [DetectOptions] provided. +// +// It looks for credentials in the following places, preferring the first +// location found: +// +// - A JSON file whose path is specified by the GOOGLE_APPLICATION_CREDENTIALS +// environment variable. For workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation +// on how to generate the JSON configuration file for on-prem/non-Google +// cloud platforms. +// - A JSON file in a location known to the gcloud command-line tool. On +// Windows, this is %APPDATA%/gcloud/application_default_credentials.json. On +// other systems, $HOME/.config/gcloud/application_default_credentials.json. +// - On Google Compute Engine, Google App Engine standard second generation +// runtimes, and Google App Engine flexible environment, it fetches +// credentials from the metadata server. +func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { + if err := opts.validate(); err != nil { + return nil, err + } + if len(opts.CredentialsJSON) > 0 { + return readCredentialsFileJSON(opts.CredentialsJSON, opts) + } + if opts.CredentialsFile != "" { + return readCredentialsFile(opts.CredentialsFile, opts) + } + if filename := os.Getenv(credsfile.GoogleAppCredsEnvVar); filename != "" { + creds, err := readCredentialsFile(filename, opts) + if err != nil { + return nil, err + } + return creds, nil + } + + fileName := credsfile.GetWellKnownFileName() + if b, err := os.ReadFile(fileName); err == nil { + return readCredentialsFileJSON(b, opts) + } + + if OnGCE() { + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: computeTokenProvider(opts), + ProjectIDProvider: auth.CredentialsPropertyFunc(func(context.Context) (string, error) { + return metadata.ProjectID() + }), + UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{}, + }), nil + } + + return nil, fmt.Errorf("credentials: could not find default credentials. See %v for more information", adcSetupURL) +} + +// DetectOptions provides configuration for [DetectDefault]. +type DetectOptions struct { + // Scopes that credentials tokens should have. Example: + // https://www.googleapis.com/auth/cloud-platform. Required if Audience is + // not provided. + Scopes []string + // Audience that credentials tokens should have. Only applicable for 2LO + // flows with service accounts. If specified, scopes should not be provided. + Audience string + // Subject is the user email used for [domain wide delegation](https://developers.google.com/identity/protocols/oauth2/service-account#delegatingauthority). + // Optional. + Subject string + // EarlyTokenRefresh configures how early before a token expires that it + // should be refreshed. 
Once the token’s time until expiration has entered
+	// this refresh window, the token is considered valid but stale. If unset,
+	// the default value is 3 minutes and 45 seconds. Optional.
+	EarlyTokenRefresh time.Duration
+	// DisableAsyncRefresh configures a synchronous workflow that refreshes
+	// stale tokens while blocking. The default is false. Optional.
+	DisableAsyncRefresh bool
+	// AuthHandlerOptions configures an authorization handler and other options
+	// for 3LO flows. It is required, and only used, for client credential
+	// flows.
+	AuthHandlerOptions *auth.AuthorizationHandlerOptions
+	// TokenURL allows setting the token endpoint for user credential flows. If
+	// unset, the default value is https://oauth2.googleapis.com/token.
+	// Optional.
+	TokenURL string
+	// STSAudience is the audience sent when retrieving an STS token. It is
+	// currently only used for the GDCH auth flow, for which it is required.
+	STSAudience string
+	// CredentialsFile overrides detection logic and sources a credential file
+	// from the provided filepath. If provided, CredentialsJSON must not be.
+	// Optional.
+	CredentialsFile string
+	// CredentialsJSON overrides detection logic and uses the JSON bytes as the
+	// source for the credential. If provided, CredentialsFile must not be.
+	// Optional.
+	CredentialsJSON []byte
+	// UseSelfSignedJWT directs service account based credentials to create a
+	// self-signed JWT with the private key found in the file, skipping any
+	// network requests that would normally be made. Optional.
+	UseSelfSignedJWT bool
+	// Client configures the underlying client used to make network requests
+	// when fetching tokens. Optional.
+	Client *http.Client
+	// UniverseDomain is the default service domain for a given Cloud universe.
+	// The default value is "googleapis.com". This option is ignored for
+	// authentication flows that do not support universe domain. Optional.
+	UniverseDomain string
+}
+
+func (o *DetectOptions) validate() error {
+	if o == nil {
+		return errors.New("credentials: options must be provided")
+	}
+	if len(o.Scopes) > 0 && o.Audience != "" {
+		return errors.New("credentials: both scopes and audience were provided")
+	}
+	if len(o.CredentialsJSON) > 0 && o.CredentialsFile != "" {
+		return errors.New("credentials: both credentials file and JSON were provided")
+	}
+	return nil
+}
+
+func (o *DetectOptions) tokenURL() string {
+	if o.TokenURL != "" {
+		return o.TokenURL
+	}
+	return googleTokenURL
+}
+
+func (o *DetectOptions) scopes() []string {
+	scopes := make([]string, len(o.Scopes))
+	copy(scopes, o.Scopes)
+	return scopes
+}
+
+func (o *DetectOptions) client() *http.Client {
+	if o.Client != nil {
+		return o.Client
+	}
+	return internal.CloneDefaultClient()
+}
+
+func readCredentialsFile(filename string, opts *DetectOptions) (*auth.Credentials, error) {
+	b, err := os.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	return readCredentialsFileJSON(b, opts)
+}
+
+func readCredentialsFileJSON(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
+	// attempt to parse jsonData as a Google Developers Console client_credentials.json.
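+	// If it is one, a 3LO token provider is built from it; otherwise the bytes
+	// are handed to fileCredentials, which dispatches on the detected keyfile
+	// type.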
+ config := clientCredConfigFromJSON(b, opts) + if config != nil { + if config.AuthHandlerOpts == nil { + return nil, errors.New("credentials: auth handler must be specified for this credential filetype") + } + tp, err := auth.New3LOTokenProvider(config) + if err != nil { + return nil, err + } + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: tp, + JSON: b, + }), nil + } + return fileCredentials(b, opts) +} + +func clientCredConfigFromJSON(b []byte, opts *DetectOptions) *auth.Options3LO { + var creds credsfile.ClientCredentialsFile + var c *credsfile.Config3LO + if err := json.Unmarshal(b, &creds); err != nil { + return nil + } + switch { + case creds.Web != nil: + c = creds.Web + case creds.Installed != nil: + c = creds.Installed + default: + return nil + } + if len(c.RedirectURIs) < 1 { + return nil + } + var handleOpts *auth.AuthorizationHandlerOptions + if opts.AuthHandlerOptions != nil { + handleOpts = &auth.AuthorizationHandlerOptions{ + Handler: opts.AuthHandlerOptions.Handler, + State: opts.AuthHandlerOptions.State, + PKCEOpts: opts.AuthHandlerOptions.PKCEOpts, + } + } + return &auth.Options3LO{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + RedirectURL: c.RedirectURIs[0], + Scopes: opts.scopes(), + AuthURL: c.AuthURI, + TokenURL: c.TokenURI, + Client: opts.client(), + EarlyTokenExpiry: opts.EarlyTokenRefresh, + AuthHandlerOpts: handleOpts, + // TODO(codyoss): refactor this out. We need to add in auto-detection + // for this use case. + AuthStyle: auth.StyleInParams, + } +} diff --git a/vendor/cloud.google.com/go/auth/credentials/doc.go b/vendor/cloud.google.com/go/auth/credentials/doc.go new file mode 100644 index 00000000..1dbb2866 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/doc.go @@ -0,0 +1,45 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package credentials provides support for making OAuth2 authorized and +// authenticated HTTP requests to Google APIs. It supports the Web server flow, +// client-side credentials, service accounts, Google Compute Engine service +// accounts, Google App Engine service accounts and workload identity federation +// from non-Google cloud platforms. +// +// A brief overview of the package follows. For more information, please read +// https://developers.google.com/accounts/docs/OAuth2 +// and +// https://developers.google.com/accounts/docs/application-default-credentials. +// For more information on using workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation. +// +// # Credentials +// +// The [cloud.google.com/go/auth.Credentials] type represents Google +// credentials, including Application Default Credentials. +// +// Use [DetectDefault] to obtain Application Default Credentials. 
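+//
+// As a minimal illustrative sketch (not part of the upstream documentation;
+// the scope value and ctx are assumptions), detection and token retrieval
+// look like:
+//
+//	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
+//		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	tok, err := creds.Token(ctx)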
+// +// Application Default Credentials support workload identity federation to +// access Google Cloud resources from non-Google Cloud platforms including Amazon +// Web Services (AWS), Microsoft Azure or any identity provider that supports +// OpenID Connect (OIDC). Workload identity federation is recommended for +// non-Google Cloud environments as it avoids the need to download, manage, and +// store service account private keys locally. +// +// # Workforce Identity Federation +// +// For more information on this feature see [cloud.google.com/go/auth/credentials/externalaccount]. +package credentials diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go new file mode 100644 index 00000000..fe935573 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go @@ -0,0 +1,221 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package credentials + +import ( + "errors" + "fmt" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials/internal/externalaccount" + "cloud.google.com/go/auth/credentials/internal/externalaccountuser" + "cloud.google.com/go/auth/credentials/internal/gdch" + "cloud.google.com/go/auth/credentials/internal/impersonate" + internalauth "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" +) + +func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { + fileType, err := credsfile.ParseFileType(b) + if err != nil { + return nil, err + } + + var projectID, quotaProjectID, universeDomain string + var tp auth.TokenProvider + switch fileType { + case credsfile.ServiceAccountKey: + f, err := credsfile.ParseServiceAccount(b) + if err != nil { + return nil, err + } + tp, err = handleServiceAccount(f, opts) + if err != nil { + return nil, err + } + projectID = f.ProjectID + universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) + case credsfile.UserCredentialsKey: + f, err := credsfile.ParseUserCredentials(b) + if err != nil { + return nil, err + } + tp, err = handleUserCredential(f, opts) + if err != nil { + return nil, err + } + quotaProjectID = f.QuotaProjectID + universeDomain = f.UniverseDomain + case credsfile.ExternalAccountKey: + f, err := credsfile.ParseExternalAccount(b) + if err != nil { + return nil, err + } + tp, err = handleExternalAccount(f, opts) + if err != nil { + return nil, err + } + quotaProjectID = f.QuotaProjectID + universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) + case credsfile.ExternalAccountAuthorizedUserKey: + f, err := credsfile.ParseExternalAccountAuthorizedUser(b) + if err != nil { + return nil, err + } + tp, err = handleExternalAccountAuthorizedUser(f, opts) + if err != nil { + return nil, err + } + quotaProjectID = f.QuotaProjectID + universeDomain = f.UniverseDomain + case credsfile.ImpersonatedServiceAccountKey: + f, err := credsfile.ParseImpersonatedServiceAccount(b) + if err != nil { + return 
nil, err + } + tp, err = handleImpersonatedServiceAccount(f, opts) + if err != nil { + return nil, err + } + universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) + case credsfile.GDCHServiceAccountKey: + f, err := credsfile.ParseGDCHServiceAccount(b) + if err != nil { + return nil, err + } + tp, err = handleGDCHServiceAccount(f, opts) + if err != nil { + return nil, err + } + projectID = f.Project + universeDomain = f.UniverseDomain + default: + return nil, fmt.Errorf("credentials: unsupported filetype %q", fileType) + } + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{ + ExpireEarly: opts.EarlyTokenRefresh, + }), + JSON: b, + ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID), + QuotaProjectIDProvider: internalauth.StaticCredentialsProperty(quotaProjectID), + UniverseDomainProvider: internalauth.StaticCredentialsProperty(universeDomain), + }), nil +} + +// resolveUniverseDomain returns optsUniverseDomain if non-empty, in order to +// support configuring universe-specific credentials in code. Auth flows +// unsupported for universe domain should not use this func, but should instead +// simply set the file universe domain on the credentials. +func resolveUniverseDomain(optsUniverseDomain, fileUniverseDomain string) string { + if optsUniverseDomain != "" { + return optsUniverseDomain + } + return fileUniverseDomain +} + +func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + if opts.UseSelfSignedJWT { + return configureSelfSignedJWT(f, opts) + } + opts2LO := &auth.Options2LO{ + Email: f.ClientEmail, + PrivateKey: []byte(f.PrivateKey), + PrivateKeyID: f.PrivateKeyID, + Scopes: opts.scopes(), + TokenURL: f.TokenURL, + Subject: opts.Subject, + Client: opts.client(), + } + if opts2LO.TokenURL == "" { + opts2LO.TokenURL = jwtTokenURL + } + return auth.New2LOTokenProvider(opts2LO) +} + +func handleUserCredential(f *credsfile.UserCredentialsFile, opts *DetectOptions) (auth.TokenProvider, error) { + opts3LO := &auth.Options3LO{ + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + Scopes: opts.scopes(), + AuthURL: googleAuthURL, + TokenURL: opts.tokenURL(), + AuthStyle: auth.StyleInParams, + EarlyTokenExpiry: opts.EarlyTokenRefresh, + RefreshToken: f.RefreshToken, + Client: opts.client(), + } + return auth.New3LOTokenProvider(opts3LO) +} + +func handleExternalAccount(f *credsfile.ExternalAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + externalOpts := &externalaccount.Options{ + Audience: f.Audience, + SubjectTokenType: f.SubjectTokenType, + TokenURL: f.TokenURL, + TokenInfoURL: f.TokenInfoURL, + ServiceAccountImpersonationURL: f.ServiceAccountImpersonationURL, + ClientSecret: f.ClientSecret, + ClientID: f.ClientID, + CredentialSource: f.CredentialSource, + QuotaProjectID: f.QuotaProjectID, + Scopes: opts.scopes(), + WorkforcePoolUserProject: f.WorkforcePoolUserProject, + Client: opts.client(), + } + if f.ServiceAccountImpersonation != nil { + externalOpts.ServiceAccountImpersonationLifetimeSeconds = f.ServiceAccountImpersonation.TokenLifetimeSeconds + } + return externalaccount.NewTokenProvider(externalOpts) +} + +func handleExternalAccountAuthorizedUser(f *credsfile.ExternalAccountAuthorizedUserFile, opts *DetectOptions) (auth.TokenProvider, error) { + externalOpts := &externalaccountuser.Options{ + Audience: f.Audience, + RefreshToken: f.RefreshToken, + TokenURL: f.TokenURL, + TokenInfoURL: 
f.TokenInfoURL, + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + Scopes: opts.scopes(), + Client: opts.client(), + } + return externalaccountuser.NewTokenProvider(externalOpts) +} + +func handleImpersonatedServiceAccount(f *credsfile.ImpersonatedServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + if f.ServiceAccountImpersonationURL == "" || f.CredSource == nil { + return nil, errors.New("missing 'source_credentials' field or 'service_account_impersonation_url' in credentials") + } + + tp, err := fileCredentials(f.CredSource, opts) + if err != nil { + return nil, err + } + return impersonate.NewTokenProvider(&impersonate.Options{ + URL: f.ServiceAccountImpersonationURL, + Scopes: opts.scopes(), + Tp: tp, + Delegates: f.Delegates, + Client: opts.client(), + }) +} + +func handleGDCHServiceAccount(f *credsfile.GDCHServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + return gdch.NewTokenProvider(f, &gdch.Options{ + STSAudience: opts.STSAudience, + Client: opts.client(), + }) +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/BUILD.bazel b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/BUILD.bazel new file mode 100644 index 00000000..d4a57526 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/BUILD.bazel @@ -0,0 +1,24 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "externalaccount", + srcs = [ + "aws_provider.go", + "executable_provider.go", + "externalaccount.go", + "file_provider.go", + "info.go", + "programmatic_provider.go", + "url_provider.go", + ], + importmap = "peridot.resf.org/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount", + importpath = "cloud.google.com/go/auth/credentials/internal/externalaccount", + visibility = ["//vendor/cloud.google.com/go/auth/credentials:__subpackages__"], + deps = [ + "//vendor/cloud.google.com/go/auth", + "//vendor/cloud.google.com/go/auth/credentials/internal/impersonate", + "//vendor/cloud.google.com/go/auth/credentials/internal/stsexchange", + "//vendor/cloud.google.com/go/auth/internal", + "//vendor/cloud.google.com/go/auth/internal/credsfile", + ], +) diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go new file mode 100644 index 00000000..a34f6b06 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go @@ -0,0 +1,522 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package externalaccount + +import ( + "bytes" + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "path" + "sort" + "strings" + "time" + + "cloud.google.com/go/auth/internal" +) + +var ( + // getenv aliases os.Getenv for testing + getenv = os.Getenv +) + +const ( + // AWS Signature Version 4 signing algorithm identifier. + awsAlgorithm = "AWS4-HMAC-SHA256" + + // The termination string for the AWS credential scope value as defined in + // https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html + awsRequestType = "aws4_request" + + // The AWS authorization header name for the security session token if available. + awsSecurityTokenHeader = "x-amz-security-token" + + // The name of the header containing the session token for metadata endpoint calls + awsIMDSv2SessionTokenHeader = "X-aws-ec2-metadata-token" + + awsIMDSv2SessionTTLHeader = "X-aws-ec2-metadata-token-ttl-seconds" + + awsIMDSv2SessionTTL = "300" + + // The AWS authorization header name for the auto-generated date. + awsDateHeader = "x-amz-date" + + defaultRegionalCredentialVerificationURL = "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15" + + // Supported AWS configuration environment variables. + awsAccessKeyIDEnvVar = "AWS_ACCESS_KEY_ID" + awsDefaultRegionEnvVar = "AWS_DEFAULT_REGION" + awsRegionEnvVar = "AWS_REGION" + awsSecretAccessKeyEnvVar = "AWS_SECRET_ACCESS_KEY" + awsSessionTokenEnvVar = "AWS_SESSION_TOKEN" + + awsTimeFormatLong = "20060102T150405Z" + awsTimeFormatShort = "20060102" + awsProviderType = "aws" +) + +type awsSubjectProvider struct { + EnvironmentID string + RegionURL string + RegionalCredVerificationURL string + CredVerificationURL string + IMDSv2SessionTokenURL string + TargetResource string + requestSigner *awsRequestSigner + region string + securityCredentialsProvider AwsSecurityCredentialsProvider + reqOpts *RequestOptions + + Client *http.Client +} + +func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) { + // Set Defaults + if sp.RegionalCredVerificationURL == "" { + sp.RegionalCredVerificationURL = defaultRegionalCredentialVerificationURL + } + if sp.requestSigner == nil { + headers := make(map[string]string) + if sp.shouldUseMetadataServer() { + awsSessionToken, err := sp.getAWSSessionToken(ctx) + if err != nil { + return "", err + } + + if awsSessionToken != "" { + headers[awsIMDSv2SessionTokenHeader] = awsSessionToken + } + } + + awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers) + if err != nil { + return "", err + } + if sp.region, err = sp.getRegion(ctx, headers); err != nil { + return "", err + } + sp.requestSigner = &awsRequestSigner{ + RegionName: sp.region, + AwsSecurityCredentials: awsSecurityCredentials, + } + } + + // Generate the signed request to AWS STS GetCallerIdentity API. + // Use the required regional endpoint. Otherwise, the request will fail. + req, err := http.NewRequestWithContext(ctx, "POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil) + if err != nil { + return "", err + } + // The full, canonical resource name of the workload identity pool + // provider, with or without the HTTPS prefix. + // Including this header as part of the signature is recommended to + // ensure data integrity. 
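+	// (The header set below is x-goog-cloud-target-resource.)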
+	if sp.TargetResource != "" {
+		req.Header.Set("x-goog-cloud-target-resource", sp.TargetResource)
+	}
+	if err := sp.requestSigner.signRequest(req); err != nil {
+		return "", err
+	}
+
+	// The GCP STS endpoint expects the headers to be formatted as:
+	//
+	//	[
+	//		{key: 'x-amz-date', value: '...'},
+	//		{key: 'Authorization', value: '...'},
+	//		...
+	//	]
+	//
+	// and then serialized as:
+	//
+	//	quote(json.dumps({
+	//		url: '...',
+	//		method: 'POST',
+	//		headers: [{key: 'x-amz-date', value: '...'}, ...]
+	//	}))
+	awsSignedReq := awsRequest{
+		URL:    req.URL.String(),
+		Method: "POST",
+	}
+	for headerKey, headerList := range req.Header {
+		for _, headerValue := range headerList {
+			awsSignedReq.Headers = append(awsSignedReq.Headers, awsRequestHeader{
+				Key:   headerKey,
+				Value: headerValue,
+			})
+		}
+	}
+	sort.Slice(awsSignedReq.Headers, func(i, j int) bool {
+		headerCompare := strings.Compare(awsSignedReq.Headers[i].Key, awsSignedReq.Headers[j].Key)
+		if headerCompare == 0 {
+			return strings.Compare(awsSignedReq.Headers[i].Value, awsSignedReq.Headers[j].Value) < 0
+		}
+		return headerCompare < 0
+	})
+
+	result, err := json.Marshal(awsSignedReq)
+	if err != nil {
+		return "", err
+	}
+	return url.QueryEscape(string(result)), nil
+}
+
+func (sp *awsSubjectProvider) providerType() string {
+	if sp.securityCredentialsProvider != nil {
+		return programmaticProviderType
+	}
+	return awsProviderType
+}
+
+func (sp *awsSubjectProvider) getAWSSessionToken(ctx context.Context) (string, error) {
+	if sp.IMDSv2SessionTokenURL == "" {
+		return "", nil
+	}
+	req, err := http.NewRequestWithContext(ctx, "PUT", sp.IMDSv2SessionTokenURL, nil)
+	if err != nil {
+		return "", err
+	}
+	req.Header.Set(awsIMDSv2SessionTTLHeader, awsIMDSv2SessionTTL)
+
+	resp, body, err := internal.DoRequest(sp.Client, req)
+	if err != nil {
+		return "", err
+	}
+	if resp.StatusCode != http.StatusOK {
+		return "", fmt.Errorf("credentials: unable to retrieve AWS session token: %s", body)
+	}
+	return string(body), nil
+}
+
+func (sp *awsSubjectProvider) getRegion(ctx context.Context, headers map[string]string) (string, error) {
+	if sp.securityCredentialsProvider != nil {
+		return sp.securityCredentialsProvider.AwsRegion(ctx, sp.reqOpts)
+	}
+	if canRetrieveRegionFromEnvironment() {
+		if envAwsRegion := getenv(awsRegionEnvVar); envAwsRegion != "" {
+			return envAwsRegion, nil
+		}
+		return getenv(awsDefaultRegionEnvVar), nil
+	}
+
+	if sp.RegionURL == "" {
+		return "", errors.New("credentials: unable to determine AWS region")
+	}
+
+	req, err := http.NewRequestWithContext(ctx, "GET", sp.RegionURL, nil)
+	if err != nil {
+		return "", err
+	}
+
+	for name, value := range headers {
+		req.Header.Add(name, value)
+	}
+	resp, body, err := internal.DoRequest(sp.Client, req)
+	if err != nil {
+		return "", err
+	}
+	if resp.StatusCode != http.StatusOK {
+		return "", fmt.Errorf("credentials: unable to retrieve AWS region - %s", body)
+	}
+
+	// This endpoint will return the region in the format us-east-2b.
+	// Only the us-east-2 part should be used.
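+	// Trimming the final byte below drops the availability-zone letter,
+	// leaving only the region identifier.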
+	bodyLen := len(body)
+	if bodyLen == 0 {
+		return "", nil
+	}
+	return string(body[:bodyLen-1]), nil
+}
+
+func (sp *awsSubjectProvider) getSecurityCredentials(ctx context.Context, headers map[string]string) (result *AwsSecurityCredentials, err error) {
+	if sp.securityCredentialsProvider != nil {
+		return sp.securityCredentialsProvider.AwsSecurityCredentials(ctx, sp.reqOpts)
+	}
+	if canRetrieveSecurityCredentialFromEnvironment() {
+		return &AwsSecurityCredentials{
+			AccessKeyID:     getenv(awsAccessKeyIDEnvVar),
+			SecretAccessKey: getenv(awsSecretAccessKeyEnvVar),
+			SessionToken:    getenv(awsSessionTokenEnvVar),
+		}, nil
+	}
+
+	roleName, err := sp.getMetadataRoleName(ctx, headers)
+	if err != nil {
+		return
+	}
+	credentials, err := sp.getMetadataSecurityCredentials(ctx, roleName, headers)
+	if err != nil {
+		return
+	}
+
+	if credentials.AccessKeyID == "" {
+		return result, errors.New("credentials: missing AccessKeyId credential")
+	}
+	if credentials.SecretAccessKey == "" {
+		return result, errors.New("credentials: missing SecretAccessKey credential")
+	}
+
+	return credentials, nil
+}
+
+func (sp *awsSubjectProvider) getMetadataSecurityCredentials(ctx context.Context, roleName string, headers map[string]string) (*AwsSecurityCredentials, error) {
+	var result *AwsSecurityCredentials
+
+	req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/%s", sp.CredVerificationURL, roleName), nil)
+	if err != nil {
+		return result, err
+	}
+	for name, value := range headers {
+		req.Header.Add(name, value)
+	}
+	resp, body, err := internal.DoRequest(sp.Client, req)
+	if err != nil {
+		return result, err
+	}
+	if resp.StatusCode != http.StatusOK {
+		return result, fmt.Errorf("credentials: unable to retrieve AWS security credentials - %s", body)
+	}
+	if err := json.Unmarshal(body, &result); err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+func (sp *awsSubjectProvider) getMetadataRoleName(ctx context.Context, headers map[string]string) (string, error) {
+	if sp.CredVerificationURL == "" {
+		return "", errors.New("credentials: unable to determine the AWS metadata server security credentials endpoint")
+	}
+	req, err := http.NewRequestWithContext(ctx, "GET", sp.CredVerificationURL, nil)
+	if err != nil {
+		return "", err
+	}
+	for name, value := range headers {
+		req.Header.Add(name, value)
+	}
+
+	resp, body, err := internal.DoRequest(sp.Client, req)
+	if err != nil {
+		return "", err
+	}
+	if resp.StatusCode != http.StatusOK {
+		return "", fmt.Errorf("credentials: unable to retrieve AWS role name - %s", body)
+	}
+	return string(body), nil
+}
+
+// awsRequestSigner is a utility to sign http requests using an AWS V4 signature.
+type awsRequestSigner struct {
+	RegionName             string
+	AwsSecurityCredentials *AwsSecurityCredentials
+}
+
+// signRequest adds the appropriate headers to an http.Request
+// or returns an error if signing fails.
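+// The signature is computed over a clone of the request; the host,
+// x-amz-date and, when a session token is present, x-amz-security-token
+// headers are covered, and the resulting headers (including Authorization)
+// are copied back onto req.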
+func (rs *awsRequestSigner) signRequest(req *http.Request) error { + // req is assumed non-nil + signedRequest := cloneRequest(req) + timestamp := Now() + signedRequest.Header.Set("host", requestHost(req)) + if rs.AwsSecurityCredentials.SessionToken != "" { + signedRequest.Header.Set(awsSecurityTokenHeader, rs.AwsSecurityCredentials.SessionToken) + } + if signedRequest.Header.Get("date") == "" { + signedRequest.Header.Set(awsDateHeader, timestamp.Format(awsTimeFormatLong)) + } + authorizationCode, err := rs.generateAuthentication(signedRequest, timestamp) + if err != nil { + return err + } + signedRequest.Header.Set("Authorization", authorizationCode) + req.Header = signedRequest.Header + return nil +} + +func (rs *awsRequestSigner) generateAuthentication(req *http.Request, timestamp time.Time) (string, error) { + canonicalHeaderColumns, canonicalHeaderData := canonicalHeaders(req) + dateStamp := timestamp.Format(awsTimeFormatShort) + serviceName := "" + + if splitHost := strings.Split(requestHost(req), "."); len(splitHost) > 0 { + serviceName = splitHost[0] + } + credentialScope := strings.Join([]string{dateStamp, rs.RegionName, serviceName, awsRequestType}, "/") + requestString, err := canonicalRequest(req, canonicalHeaderColumns, canonicalHeaderData) + if err != nil { + return "", err + } + requestHash, err := getSha256([]byte(requestString)) + if err != nil { + return "", err + } + + stringToSign := strings.Join([]string{awsAlgorithm, timestamp.Format(awsTimeFormatLong), credentialScope, requestHash}, "\n") + signingKey := []byte("AWS4" + rs.AwsSecurityCredentials.SecretAccessKey) + for _, signingInput := range []string{ + dateStamp, rs.RegionName, serviceName, awsRequestType, stringToSign, + } { + signingKey, err = getHmacSha256(signingKey, []byte(signingInput)) + if err != nil { + return "", err + } + } + + return fmt.Sprintf("%s Credential=%s/%s, SignedHeaders=%s, Signature=%s", awsAlgorithm, rs.AwsSecurityCredentials.AccessKeyID, credentialScope, canonicalHeaderColumns, hex.EncodeToString(signingKey)), nil +} + +func getSha256(input []byte) (string, error) { + hash := sha256.New() + if _, err := hash.Write(input); err != nil { + return "", err + } + return hex.EncodeToString(hash.Sum(nil)), nil +} + +func getHmacSha256(key, input []byte) ([]byte, error) { + hash := hmac.New(sha256.New, key) + if _, err := hash.Write(input); err != nil { + return nil, err + } + return hash.Sum(nil), nil +} + +func cloneRequest(r *http.Request) *http.Request { + r2 := new(http.Request) + *r2 = *r + if r.Header != nil { + r2.Header = make(http.Header, len(r.Header)) + + // Find total number of values. + headerCount := 0 + for _, headerValues := range r.Header { + headerCount += len(headerValues) + } + copiedHeaders := make([]string, headerCount) // shared backing array for headers' values + + for headerKey, headerValues := range r.Header { + headerCount = copy(copiedHeaders, headerValues) + r2.Header[headerKey] = copiedHeaders[:headerCount:headerCount] + copiedHeaders = copiedHeaders[headerCount:] + } + } + return r2 +} + +func canonicalPath(req *http.Request) string { + result := req.URL.EscapedPath() + if result == "" { + return "/" + } + return path.Clean(result) +} + +func canonicalQuery(req *http.Request) string { + queryValues := req.URL.Query() + for queryKey := range queryValues { + sort.Strings(queryValues[queryKey]) + } + return queryValues.Encode() +} + +func canonicalHeaders(req *http.Request) (string, string) { + // Header keys need to be sorted alphabetically. 
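+	// Keys are lower-cased and multiple values for the same key are joined
+	// with commas, per the SigV4 canonical-headers format.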
+ var headers []string + lowerCaseHeaders := make(http.Header) + for k, v := range req.Header { + k := strings.ToLower(k) + if _, ok := lowerCaseHeaders[k]; ok { + // include additional values + lowerCaseHeaders[k] = append(lowerCaseHeaders[k], v...) + } else { + headers = append(headers, k) + lowerCaseHeaders[k] = v + } + } + sort.Strings(headers) + + var fullHeaders bytes.Buffer + for _, header := range headers { + headerValue := strings.Join(lowerCaseHeaders[header], ",") + fullHeaders.WriteString(header) + fullHeaders.WriteRune(':') + fullHeaders.WriteString(headerValue) + fullHeaders.WriteRune('\n') + } + + return strings.Join(headers, ";"), fullHeaders.String() +} + +func requestDataHash(req *http.Request) (string, error) { + var requestData []byte + if req.Body != nil { + requestBody, err := req.GetBody() + if err != nil { + return "", err + } + defer requestBody.Close() + + requestData, err = internal.ReadAll(requestBody) + if err != nil { + return "", err + } + } + + return getSha256(requestData) +} + +func requestHost(req *http.Request) string { + if req.Host != "" { + return req.Host + } + return req.URL.Host +} + +func canonicalRequest(req *http.Request, canonicalHeaderColumns, canonicalHeaderData string) (string, error) { + dataHash, err := requestDataHash(req) + if err != nil { + return "", err + } + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", req.Method, canonicalPath(req), canonicalQuery(req), canonicalHeaderData, canonicalHeaderColumns, dataHash), nil +} + +type awsRequestHeader struct { + Key string `json:"key"` + Value string `json:"value"` +} + +type awsRequest struct { + URL string `json:"url"` + Method string `json:"method"` + Headers []awsRequestHeader `json:"headers"` +} + +// The AWS region can be provided through AWS_REGION or AWS_DEFAULT_REGION. Only one is +// required. +func canRetrieveRegionFromEnvironment() bool { + return getenv(awsRegionEnvVar) != "" || getenv(awsDefaultRegionEnvVar) != "" +} + +// Check if both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are available. +func canRetrieveSecurityCredentialFromEnvironment() bool { + return getenv(awsAccessKeyIDEnvVar) != "" && getenv(awsSecretAccessKeyEnvVar) != "" +} + +func (sp *awsSubjectProvider) shouldUseMetadataServer() bool { + return sp.securityCredentialsProvider == nil && (!canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment()) +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go new file mode 100644 index 00000000..d5765c47 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go @@ -0,0 +1,284 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package externalaccount + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "os/exec" + "regexp" + "strings" + "time" + + "cloud.google.com/go/auth/internal" +) + +const ( + executableSupportedMaxVersion = 1 + executableDefaultTimeout = 30 * time.Second + executableSource = "response" + executableProviderType = "executable" + outputFileSource = "output file" + + allowExecutablesEnvVar = "GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES" + + jwtTokenType = "urn:ietf:params:oauth:token-type:jwt" + idTokenType = "urn:ietf:params:oauth:token-type:id_token" + saml2TokenType = "urn:ietf:params:oauth:token-type:saml2" +) + +var ( + serviceAccountImpersonationRE = regexp.MustCompile(`https://iamcredentials..+/v1/projects/-/serviceAccounts/(.*@.*):generateAccessToken`) +) + +type nonCacheableError struct { + message string +} + +func (nce nonCacheableError) Error() string { + return nce.message +} + +// environment is a contract for testing +type environment interface { + existingEnv() []string + getenv(string) string + run(ctx context.Context, command string, env []string) ([]byte, error) + now() time.Time +} + +type runtimeEnvironment struct{} + +func (r runtimeEnvironment) existingEnv() []string { + return os.Environ() +} +func (r runtimeEnvironment) getenv(key string) string { + return os.Getenv(key) +} +func (r runtimeEnvironment) now() time.Time { + return time.Now().UTC() +} + +func (r runtimeEnvironment) run(ctx context.Context, command string, env []string) ([]byte, error) { + splitCommand := strings.Fields(command) + cmd := exec.CommandContext(ctx, splitCommand[0], splitCommand[1:]...) + cmd.Env = env + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + if ctx.Err() == context.DeadlineExceeded { + return nil, context.DeadlineExceeded + } + if exitError, ok := err.(*exec.ExitError); ok { + return nil, exitCodeError(exitError) + } + return nil, executableError(err) + } + + bytesStdout := bytes.TrimSpace(stdout.Bytes()) + if len(bytesStdout) > 0 { + return bytesStdout, nil + } + return bytes.TrimSpace(stderr.Bytes()), nil +} + +type executableSubjectProvider struct { + Command string + Timeout time.Duration + OutputFile string + client *http.Client + opts *Options + env environment +} + +type executableResponse struct { + Version int `json:"version,omitempty"` + Success *bool `json:"success,omitempty"` + TokenType string `json:"token_type,omitempty"` + ExpirationTime int64 `json:"expiration_time,omitempty"` + IDToken string `json:"id_token,omitempty"` + SamlResponse string `json:"saml_response,omitempty"` + Code string `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (sp *executableSubjectProvider) parseSubjectTokenFromSource(response []byte, source string, now int64) (string, error) { + var result executableResponse + if err := json.Unmarshal(response, &result); err != nil { + return "", jsonParsingError(source, string(response)) + } + // Validate + if result.Version == 0 { + return "", missingFieldError(source, "version") + } + if result.Success == nil { + return "", missingFieldError(source, "success") + } + if !*result.Success { + if result.Code == "" || result.Message == "" { + return "", malformedFailureError() + } + return "", userDefinedError(result.Code, result.Message) + } + if result.Version > executableSupportedMaxVersion || result.Version < 0 { + return "", unsupportedVersionError(source, result.Version) + } + if result.ExpirationTime == 0 && 
sp.OutputFile != "" { + return "", missingFieldError(source, "expiration_time") + } + if result.TokenType == "" { + return "", missingFieldError(source, "token_type") + } + if result.ExpirationTime != 0 && result.ExpirationTime < now { + return "", tokenExpiredError() + } + + switch result.TokenType { + case jwtTokenType, idTokenType: + if result.IDToken == "" { + return "", missingFieldError(source, "id_token") + } + return result.IDToken, nil + case saml2TokenType: + if result.SamlResponse == "" { + return "", missingFieldError(source, "saml_response") + } + return result.SamlResponse, nil + default: + return "", tokenTypeError(source) + } +} + +func (sp *executableSubjectProvider) subjectToken(ctx context.Context) (string, error) { + if token, err := sp.getTokenFromOutputFile(); token != "" || err != nil { + return token, err + } + return sp.getTokenFromExecutableCommand(ctx) +} + +func (sp *executableSubjectProvider) providerType() string { + return executableProviderType +} + +func (sp *executableSubjectProvider) getTokenFromOutputFile() (token string, err error) { + if sp.OutputFile == "" { + // This ExecutableCredentialSource doesn't use an OutputFile. + return "", nil + } + + file, err := os.Open(sp.OutputFile) + if err != nil { + // No OutputFile found. Hasn't been created yet, so skip it. + return "", nil + } + defer file.Close() + + data, err := internal.ReadAll(file) + if err != nil || len(data) == 0 { + // Cachefile exists, but no data found. Get new credential. + return "", nil + } + + token, err = sp.parseSubjectTokenFromSource(data, outputFileSource, sp.env.now().Unix()) + if err != nil { + if _, ok := err.(nonCacheableError); ok { + // If the cached token is expired we need a new token, + // and if the cache contains a failure, we need to try again. + return "", nil + } + + // There was an error in the cached token, and the developer should be aware of it. + return "", err + } + // Token parsing succeeded. Use found token. + return token, nil +} + +func (sp *executableSubjectProvider) executableEnvironment() []string { + result := sp.env.existingEnv() + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_AUDIENCE=%v", sp.opts.Audience)) + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_TOKEN_TYPE=%v", sp.opts.SubjectTokenType)) + result = append(result, "GOOGLE_EXTERNAL_ACCOUNT_INTERACTIVE=0") + if sp.opts.ServiceAccountImpersonationURL != "" { + matches := serviceAccountImpersonationRE.FindStringSubmatch(sp.opts.ServiceAccountImpersonationURL) + if matches != nil { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_IMPERSONATED_EMAIL=%v", matches[1])) + } + } + if sp.OutputFile != "" { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_OUTPUT_FILE=%v", sp.OutputFile)) + } + return result +} + +func (sp *executableSubjectProvider) getTokenFromExecutableCommand(ctx context.Context) (string, error) { + // For security reasons, we need our consumers to set this environment variable to allow executables to be run. 
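+	// The variable is GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES and it must be
+	// set to exactly "1".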
+	if sp.env.getenv(allowExecutablesEnvVar) != "1" {
+		return "", errors.New("credentials: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run")
+	}
+
+	ctx, cancel := context.WithDeadline(ctx, sp.env.now().Add(sp.Timeout))
+	defer cancel()
+
+	output, err := sp.env.run(ctx, sp.Command, sp.executableEnvironment())
+	if err != nil {
+		return "", err
+	}
+	return sp.parseSubjectTokenFromSource(output, executableSource, sp.env.now().Unix())
+}
+
+func missingFieldError(source, field string) error {
+	return fmt.Errorf("credentials: %q missing %q field", source, field)
+}
+
+func jsonParsingError(source, data string) error {
+	return fmt.Errorf("credentials: unable to parse %q: %v", source, data)
+}
+
+func malformedFailureError() error {
+	return nonCacheableError{"credentials: response must include `code` and `message` fields when unsuccessful"}
+}
+
+func userDefinedError(code, message string) error {
+	return nonCacheableError{fmt.Sprintf("credentials: response contains unsuccessful response: (%v) %v", code, message)}
+}
+
+func unsupportedVersionError(source string, version int) error {
+	return fmt.Errorf("credentials: %v contains unsupported version: %v", source, version)
+}
+
+func tokenExpiredError() error {
+	return nonCacheableError{"credentials: the token returned by the executable is expired"}
+}
+
+func tokenTypeError(source string) error {
+	return fmt.Errorf("credentials: %v contains unsupported token type", source)
+}
+
+func exitCodeError(err *exec.ExitError) error {
+	return fmt.Errorf("credentials: executable command failed with exit code %v: %w", err.ExitCode(), err)
+}
+
+func executableError(err error) error {
+	return fmt.Errorf("credentials: executable command failed: %w", err)
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go
new file mode 100644
index 00000000..b19c6ede
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go
@@ -0,0 +1,367 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package externalaccount
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"cloud.google.com/go/auth"
+	"cloud.google.com/go/auth/credentials/internal/impersonate"
+	"cloud.google.com/go/auth/credentials/internal/stsexchange"
+	"cloud.google.com/go/auth/internal/credsfile"
+)
+
+const (
+	timeoutMinimum = 5 * time.Second
+	timeoutMaximum = 120 * time.Second
+
+	universeDomainPlaceholder = "UNIVERSE_DOMAIN"
+	defaultTokenURL           = "https://sts.UNIVERSE_DOMAIN/v1/token"
+	defaultUniverseDomain     = "googleapis.com"
+)
+
+var (
+	// Now aliases time.Now for testing
+	Now = func() time.Time {
+		return time.Now().UTC()
+	}
+	validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`)
+)
+
+// Options stores the configuration for fetching tokens with external credentials.
+type Options struct {
+	// Audience is the Secure Token Service (STS) audience which contains the resource name for the workload
+	// identity pool or the workforce pool and the provider identifier in that pool.
+	Audience string
+	// SubjectTokenType is the STS token type based on the OAuth 2.0 token exchange spec,
+	// e.g. `urn:ietf:params:oauth:token-type:jwt`.
+	SubjectTokenType string
+	// TokenURL is the STS token exchange endpoint.
+	TokenURL string
+	// TokenInfoURL is the token_info endpoint used to retrieve account-related
+	// information (user attributes such as the account identifier, e.g. email,
+	// username, uid, etc.). This is needed for gCloud session account identification.
+	TokenInfoURL string
+	// ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only
+	// required for workload identity pools when APIs to be accessed have not integrated with UberMint.
+	ServiceAccountImpersonationURL string
+	// ServiceAccountImpersonationLifetimeSeconds is the number of seconds the service account impersonation
+	// token will be valid for.
+	ServiceAccountImpersonationLifetimeSeconds int
+	// ClientSecret is currently only required if the token_info endpoint also
+	// needs to be called with the generated GCP access token. When provided, STS will be
+	// called with additional basic authentication using client_id as username and client_secret as password.
+	ClientSecret string
+	// ClientID is only required in conjunction with ClientSecret, as described above.
+	ClientID string
+	// CredentialSource contains the necessary information to retrieve the token itself, as well
+	// as some environmental information.
+	CredentialSource *credsfile.CredentialSource
+	// QuotaProjectID is injected by gCloud. If the value is non-empty, the Auth libraries
+	// will set the x-goog-user-project header, which overrides the project associated with the credentials.
+	QuotaProjectID string
+	// Scopes contains the desired scopes for the returned access token.
+	Scopes []string
+	// WorkforcePoolUserProject should be set when it is a workforce pool and
+	// not a workload identity pool. The underlying principal must still have
+	// serviceusage.services.use IAM permission to use the project for
+	// billing/quota. Optional.
+	WorkforcePoolUserProject string
+	// UniverseDomain is the default service domain for a given Cloud universe.
+	// This value will be used in the default STS token URL. The default value
+	// is "googleapis.com". It will not be used if TokenURL is set. Optional.
+	UniverseDomain string
+	// SubjectTokenProvider is an optional token provider for OIDC/SAML
+	// credentials.
One of SubjectTokenProvider,
+	// AwsSecurityCredentialsProvider or CredentialSource must be provided. Optional.
+	SubjectTokenProvider SubjectTokenProvider
+	// AwsSecurityCredentialsProvider is an AWS security credential provider
+	// for AWS credentials. One of SubjectTokenProvider,
+	// AwsSecurityCredentialsProvider or CredentialSource must be provided. Optional.
+	AwsSecurityCredentialsProvider AwsSecurityCredentialsProvider
+	// Client for token request.
+	Client *http.Client
+}
+
+// SubjectTokenProvider can be used to supply a subject token to exchange for a
+// GCP access token.
+type SubjectTokenProvider interface {
+	// SubjectToken should return a valid subject token or an error.
+	// The external account token provider does not cache the returned subject
+	// token, so caching logic should be implemented in the provider to prevent
+	// multiple requests for the same subject token.
+	SubjectToken(ctx context.Context, opts *RequestOptions) (string, error)
+}
+
+// RequestOptions contains information about the requested subject token or AWS
+// security credentials from the Google external account credential.
+type RequestOptions struct {
+	// Audience is the requested audience for the external account credential.
+	Audience string
+	// SubjectTokenType is the requested subject token type for the external
+	// account credential. Expected values include:
+	// “urn:ietf:params:oauth:token-type:jwt”
+	// “urn:ietf:params:oauth:token-type:id-token”
+	// “urn:ietf:params:oauth:token-type:saml2”
+	// “urn:ietf:params:aws:token-type:aws4_request”
+	SubjectTokenType string
+}
+
+// AwsSecurityCredentialsProvider can be used to supply AwsSecurityCredentials
+// and an AWS Region to exchange for a GCP access token.
+type AwsSecurityCredentialsProvider interface {
+	// AwsRegion should return the AWS region or an error.
+	AwsRegion(ctx context.Context, opts *RequestOptions) (string, error)
+	// AwsSecurityCredentials should return a valid set of
+	// AwsSecurityCredentials or an error. The external account token provider
+	// does not cache the returned security credentials, so caching logic should
+	// be implemented in the provider to prevent multiple requests for the
+	// same security credentials.
+	AwsSecurityCredentials(ctx context.Context, opts *RequestOptions) (*AwsSecurityCredentials, error)
+}
+
+// AwsSecurityCredentials models AWS security credentials.
+type AwsSecurityCredentials struct {
+	// AccessKeyID is the AWS Access Key ID - Required.
+	AccessKeyID string `json:"AccessKeyID"`
+	// SecretAccessKey is the AWS Secret Access Key - Required.
+	SecretAccessKey string `json:"SecretAccessKey"`
+	// SessionToken is the AWS Session token. This should be provided for
+	// temporary AWS security credentials - Optional.
+ SessionToken string `json:"Token"` +} + +func (o *Options) validate() error { + if o.Audience == "" { + return fmt.Errorf("externalaccount: Audience must be set") + } + if o.SubjectTokenType == "" { + return fmt.Errorf("externalaccount: Subject token type must be set") + } + if o.WorkforcePoolUserProject != "" { + if valid := validWorkforceAudiencePattern.MatchString(o.Audience); !valid { + return fmt.Errorf("externalaccount: workforce_pool_user_project should not be set for non-workforce pool credentials") + } + } + count := 0 + if o.CredentialSource != nil { + count++ + } + if o.SubjectTokenProvider != nil { + count++ + } + if o.AwsSecurityCredentialsProvider != nil { + count++ + } + if count == 0 { + return fmt.Errorf("externalaccount: one of CredentialSource, SubjectTokenProvider, or AwsSecurityCredentialsProvider must be set") + } + if count > 1 { + return fmt.Errorf("externalaccount: only one of CredentialSource, SubjectTokenProvider, or AwsSecurityCredentialsProvider must be set") + } + return nil +} + +// resolveTokenURL sets the default STS token endpoint with the configured +// universe domain. +func (o *Options) resolveTokenURL() { + if o.TokenURL != "" { + return + } else if o.UniverseDomain != "" { + o.TokenURL = strings.Replace(defaultTokenURL, universeDomainPlaceholder, o.UniverseDomain, 1) + } else { + o.TokenURL = strings.Replace(defaultTokenURL, universeDomainPlaceholder, defaultUniverseDomain, 1) + } +} + +// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] +// configured with the provided options. +func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { + if err := opts.validate(); err != nil { + return nil, err + } + opts.resolveTokenURL() + stp, err := newSubjectTokenProvider(opts) + if err != nil { + return nil, err + } + tp := &tokenProvider{ + client: opts.Client, + opts: opts, + stp: stp, + } + if opts.ServiceAccountImpersonationURL == "" { + return auth.NewCachedTokenProvider(tp, nil), nil + } + + scopes := make([]string, len(opts.Scopes)) + copy(scopes, opts.Scopes) + // needed for impersonation + tp.opts.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"} + imp, err := impersonate.NewTokenProvider(&impersonate.Options{ + Client: opts.Client, + URL: opts.ServiceAccountImpersonationURL, + Scopes: scopes, + Tp: auth.NewCachedTokenProvider(tp, nil), + TokenLifetimeSeconds: opts.ServiceAccountImpersonationLifetimeSeconds, + }) + if err != nil { + return nil, err + } + return auth.NewCachedTokenProvider(imp, nil), nil +} + +type subjectTokenProvider interface { + subjectToken(ctx context.Context) (string, error) + providerType() string +} + +// tokenProvider is the provider that handles external credentials. It is used to retrieve Tokens. 
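+// It exchanges a subject token for a GCP access token at the configured STS
+// endpoint; when impersonation is configured, NewTokenProvider wraps it with
+// an impersonation provider as well.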
+type tokenProvider struct { + client *http.Client + opts *Options + stp subjectTokenProvider +} + +func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) { + subjectToken, err := tp.stp.subjectToken(ctx) + if err != nil { + return nil, err + } + + stsRequest := &stsexchange.TokenRequest{ + GrantType: stsexchange.GrantType, + Audience: tp.opts.Audience, + Scope: tp.opts.Scopes, + RequestedTokenType: stsexchange.TokenType, + SubjectToken: subjectToken, + SubjectTokenType: tp.opts.SubjectTokenType, + } + header := make(http.Header) + header.Set("Content-Type", "application/x-www-form-urlencoded") + header.Add("x-goog-api-client", getGoogHeaderValue(tp.opts, tp.stp)) + clientAuth := stsexchange.ClientAuthentication{ + AuthStyle: auth.StyleInHeader, + ClientID: tp.opts.ClientID, + ClientSecret: tp.opts.ClientSecret, + } + var options map[string]interface{} + // Do not pass workforce_pool_user_project when client authentication is used. + // The client ID is sufficient for determining the user project. + if tp.opts.WorkforcePoolUserProject != "" && tp.opts.ClientID == "" { + options = map[string]interface{}{ + "userProject": tp.opts.WorkforcePoolUserProject, + } + } + stsResp, err := stsexchange.ExchangeToken(ctx, &stsexchange.Options{ + Client: tp.client, + Endpoint: tp.opts.TokenURL, + Request: stsRequest, + Authentication: clientAuth, + Headers: header, + ExtraOpts: options, + }) + if err != nil { + return nil, err + } + + tok := &auth.Token{ + Value: stsResp.AccessToken, + Type: stsResp.TokenType, + } + // RFC 8693 does not define behavior for an explicit 0 in the "expires_in" field. + if stsResp.ExpiresIn <= 0 { + return nil, fmt.Errorf("credentials: got invalid expiry from security token service") + } + tok.Expiry = Now().Add(time.Duration(stsResp.ExpiresIn) * time.Second) + return tok, nil +} + +// newSubjectTokenProvider determines the type of credsfile.CredentialSource needed to create a +// subjectTokenProvider. +func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { + reqOpts := &RequestOptions{Audience: o.Audience, SubjectTokenType: o.SubjectTokenType} + if o.AwsSecurityCredentialsProvider != nil { + return &awsSubjectProvider{ + securityCredentialsProvider: o.AwsSecurityCredentialsProvider, + TargetResource: o.Audience, + reqOpts: reqOpts, + }, nil + } else if o.SubjectTokenProvider != nil { + return &programmaticProvider{stp: o.SubjectTokenProvider, opts: reqOpts}, nil + } else if len(o.CredentialSource.EnvironmentID) > 3 && o.CredentialSource.EnvironmentID[:3] == "aws" { + if awsVersion, err := strconv.Atoi(o.CredentialSource.EnvironmentID[3:]); err == nil { + if awsVersion != 1 { + return nil, fmt.Errorf("credentials: aws version '%d' is not supported in the current build", awsVersion) + } + + awsProvider := &awsSubjectProvider{ + EnvironmentID: o.CredentialSource.EnvironmentID, + RegionURL: o.CredentialSource.RegionURL, + RegionalCredVerificationURL: o.CredentialSource.RegionalCredVerificationURL, + CredVerificationURL: o.CredentialSource.URL, + TargetResource: o.Audience, + Client: o.Client, + } + if o.CredentialSource.IMDSv2SessionTokenURL != "" { + awsProvider.IMDSv2SessionTokenURL = o.CredentialSource.IMDSv2SessionTokenURL + } + + return awsProvider, nil + } + } else if o.CredentialSource.File != "" { + return &fileSubjectProvider{File: o.CredentialSource.File, Format: o.CredentialSource.Format}, nil + } else if o.CredentialSource.URL != "" { + return &urlSubjectProvider{URL: o.CredentialSource.URL, Headers: o.CredentialSource.Headers, Format:
o.CredentialSource.Format, Client: o.Client}, nil + } else if o.CredentialSource.Executable != nil { + ec := o.CredentialSource.Executable + if ec.Command == "" { + return nil, errors.New("credentials: missing `command` field — executable command must be provided") + } + + execProvider := &executableSubjectProvider{} + execProvider.Command = ec.Command + if ec.TimeoutMillis == 0 { + execProvider.Timeout = executableDefaultTimeout + } else { + execProvider.Timeout = time.Duration(ec.TimeoutMillis) * time.Millisecond + if execProvider.Timeout < timeoutMinimum || execProvider.Timeout > timeoutMaximum { + return nil, fmt.Errorf("credentials: invalid `timeout_millis` field — executable timeout must be between %v and %v seconds", timeoutMinimum.Seconds(), timeoutMaximum.Seconds()) + } + } + execProvider.OutputFile = ec.OutputFile + execProvider.client = o.Client + execProvider.opts = o + execProvider.env = runtimeEnvironment{} + return execProvider, nil + } + return nil, errors.New("credentials: unable to parse credential source") +} + +func getGoogHeaderValue(conf *Options, p subjectTokenProvider) string { + return fmt.Sprintf("gl-go/%s auth/%s google-byoid-sdk source/%s sa-impersonation/%t config-lifetime/%t", + goVersion(), + "unknown", + p.providerType(), + conf.ServiceAccountImpersonationURL != "", + conf.ServiceAccountImpersonationLifetimeSeconds != 0) +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go new file mode 100644 index 00000000..8186939f --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go @@ -0,0 +1,78 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
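Before the file provider implementation: the dispatch in newSubjectTokenProvider above maps the shape of the credential_source onto a concrete provider. A rough in-package sketch of configurations and the provider each would select; all values are invented, and the credsfile field and type names (including ExecutableConfig) are assumed from their usage above:

```go
// Each CredentialSource below would be routed by newSubjectTokenProvider to
// the provider named in the trailing comment. Illustrative values only.
func exampleCredentialSources() []*credsfile.CredentialSource {
	return []*credsfile.CredentialSource{
		{ // EnvironmentID "aws" + version 1 -> awsSubjectProvider
			EnvironmentID:               "aws1",
			RegionURL:                   "http://169.254.169.254/latest/meta-data/placement/availability-zone",
			RegionalCredVerificationURL: "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15",
		},
		{ // File set -> fileSubjectProvider
			File:   "/var/run/secrets/token",
			Format: &credsfile.Format{Type: fileTypeText},
		},
		{ // URL set -> urlSubjectProvider
			URL:    "https://metadata.example/token",
			Format: &credsfile.Format{Type: fileTypeJSON, SubjectTokenFieldName: "access_token"},
		},
		{ // Executable set -> executableSubjectProvider
			Executable: &credsfile.ExecutableConfig{Command: "/usr/local/bin/get-token", TimeoutMillis: 5000},
		},
	}
}
```

Note the order matters: the programmatic providers win over any CredentialSource, and the AWS branch is checked before file, URL, and executable sources.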
+ +package externalaccount + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" +) + +const ( + fileProviderType = "file" +) + +type fileSubjectProvider struct { + File string + Format *credsfile.Format +} + +func (sp *fileSubjectProvider) subjectToken(context.Context) (string, error) { + tokenFile, err := os.Open(sp.File) + if err != nil { + return "", fmt.Errorf("credentials: failed to open credential file %q: %w", sp.File, err) + } + defer tokenFile.Close() + tokenBytes, err := internal.ReadAll(tokenFile) + if err != nil { + return "", fmt.Errorf("credentials: failed to read credential file: %w", err) + } + tokenBytes = bytes.TrimSpace(tokenBytes) + + if sp.Format == nil { + return string(tokenBytes), nil + } + switch sp.Format.Type { + case fileTypeJSON: + jsonData := make(map[string]interface{}) + err = json.Unmarshal(tokenBytes, &jsonData) + if err != nil { + return "", fmt.Errorf("credentials: failed to unmarshal subject token file: %w", err) + } + val, ok := jsonData[sp.Format.SubjectTokenFieldName] + if !ok { + return "", errors.New("credentials: provided subject_token_field_name not found in credentials") + } + token, ok := val.(string) + if !ok { + return "", errors.New("credentials: improperly formatted subject token") + } + return token, nil + case fileTypeText: + return string(tokenBytes), nil + default: + return "", errors.New("credentials: invalid credential_source file format type: " + sp.Format.Type) + } +} + +func (sp *fileSubjectProvider) providerType() string { + return fileProviderType +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go new file mode 100644 index 00000000..8e4b4379 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go @@ -0,0 +1,74 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package externalaccount + +import ( + "runtime" + "strings" + "unicode" +) + +var ( + // version is a package internal global variable for testing purposes. + version = runtime.Version +) + +// versionUnknown is only used when the runtime version cannot be determined. +const versionUnknown = "UNKNOWN" + +// goVersion returns a Go runtime version derived from the runtime environment +// that is modified to be suitable for reporting in a header, meaning it has no +// whitespace. If it is unable to determine the Go runtime version, it returns +// versionUnknown. 
+func goVersion() string { + const develPrefix = "devel +" + + s := version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + // Some release candidates already have a dash in them. + if !strings.HasPrefix(prerelease, "-") { + prerelease = "-" + prerelease + } + s += prerelease + } + return s + } + return versionUnknown +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go similarity index 59% rename from vendor/google.golang.org/genproto/googleapis/api/tidyfix.go rename to vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go index 1d3f1b5b..be3c8735 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,12 +12,19 @@ // See the License for the specific language governing permissions and // limitations under the License. -// This file, and the {{.RootMod}} import, won't actually become part of -// the resultant binary. -//go:build modhack -// +build modhack +package externalaccount -package api +import "context" -// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "google.golang.org/genproto/internal" +type programmaticProvider struct { + opts *RequestOptions + stp SubjectTokenProvider +} + +func (pp *programmaticProvider) providerType() string { + return programmaticProviderType +} + +func (pp *programmaticProvider) subjectToken(ctx context.Context) (string, error) { + return pp.stp.SubjectToken(ctx, pp.opts) +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go new file mode 100644 index 00000000..e33d35a2 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go @@ -0,0 +1,87 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
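The normalization in goVersion above is easiest to follow with concrete inputs. An illustrative in-package sketch (it assumes an fmt import, and the expected outputs are our reading of the code, not documented guarantees):

```go
func goVersionExamples() {
	// version is the package-level test hook declared in info.go.
	for _, v := range []string{"go1.22.4", "go1.22", "go1.21rc2", "devel +abc123 linux/amd64"} {
		version = func() string { return v }
		fmt.Printf("%-26s -> %s\n", v, goVersion())
	}
	// Our reading of the code:
	//   go1.22.4                  -> 1.22.4
	//   go1.22                    -> 1.22.0      (".0" appended)
	//   go1.21rc2                 -> 1.21.0-rc2  (prerelease split off and re-joined)
	//   devel +abc123 linux/amd64 -> abc123      (devel prefix stripped)
}
```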
+ +package externalaccount + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" +) + +const ( + fileTypeText = "text" + fileTypeJSON = "json" + urlProviderType = "url" + programmaticProviderType = "programmatic" +) + +type urlSubjectProvider struct { + URL string + Headers map[string]string + Format *credsfile.Format + Client *http.Client +} + +func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) { + req, err := http.NewRequestWithContext(ctx, "GET", sp.URL, nil) + if err != nil { + return "", fmt.Errorf("credentials: HTTP request for URL-sourced credential failed: %w", err) + } + + for key, val := range sp.Headers { + req.Header.Add(key, val) + } + resp, body, err := internal.DoRequest(sp.Client, req) + if err != nil { + return "", fmt.Errorf("credentials: invalid response when retrieving subject token: %w", err) + } + if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { + return "", fmt.Errorf("credentials: status code %d: %s", c, body) + } + + if sp.Format == nil { + return string(body), nil + } + switch sp.Format.Type { + case "json": + jsonData := make(map[string]interface{}) + err = json.Unmarshal(body, &jsonData) + if err != nil { + return "", fmt.Errorf("credentials: failed to unmarshal subject token file: %w", err) + } + val, ok := jsonData[sp.Format.SubjectTokenFieldName] + if !ok { + return "", errors.New("credentials: provided subject_token_field_name not found in credentials") + } + token, ok := val.(string) + if !ok { + return "", errors.New("credentials: improperly formatted subject token") + } + return token, nil + case fileTypeText: + return string(body), nil + default: + return "", errors.New("credentials: invalid credential_source file format type: " + sp.Format.Type) + } +} + +func (sp *urlSubjectProvider) providerType() string { + return urlProviderType +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/BUILD.bazel b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/BUILD.bazel new file mode 100644 index 00000000..ab60c1ff --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/BUILD.bazel @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "externalaccountuser", + srcs = ["externalaccountuser.go"], + importmap = "peridot.resf.org/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser", + importpath = "cloud.google.com/go/auth/credentials/internal/externalaccountuser", + visibility = ["//vendor/cloud.google.com/go/auth/credentials:__subpackages__"], + deps = [ + "//vendor/cloud.google.com/go/auth", + "//vendor/cloud.google.com/go/auth/credentials/internal/stsexchange", + "//vendor/cloud.google.com/go/auth/internal", + ], +) diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go new file mode 100644 index 00000000..0d788547 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go @@ -0,0 +1,110 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package externalaccountuser + +import ( + "context" + "errors" + "net/http" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials/internal/stsexchange" + "cloud.google.com/go/auth/internal" +) + +// Options stores the configuration for fetching tokens with external authorized +// user credentials. +type Options struct { + // Audience is the Secure Token Service (STS) audience which contains the + // resource name for the workforce pool and the provider identifier in that + // pool. + Audience string + // RefreshToken is the OAuth 2.0 refresh token. + RefreshToken string + // TokenURL is the STS token exchange endpoint for refresh. + TokenURL string + // TokenInfoURL is the STS endpoint URL for token introspection. Optional. + TokenInfoURL string + // ClientID is only required in conjunction with ClientSecret, as described + // below. + ClientID string + // ClientSecret is currently only required if the token_info endpoint also needs + // to be called with the generated cloud access token. When provided, STS + // will be called with additional basic authentication using client_id as + // username and client_secret as password. + ClientSecret string + // Scopes contains the desired scopes for the returned access token. + Scopes []string + + // Client for token request. + Client *http.Client +} + +func (c *Options) validate() bool { + return c.ClientID != "" && c.ClientSecret != "" && c.RefreshToken != "" && c.TokenURL != "" +} + +// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] +// configured with the provided options.
+func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { + if !opts.validate() { + return nil, errors.New("credentials: invalid external_account_authorized_user configuration") + } + + tp := &tokenProvider{ + o: opts, + } + return auth.NewCachedTokenProvider(tp, nil), nil +} + +type tokenProvider struct { + o *Options +} + +func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) { + opts := tp.o + + clientAuth := stsexchange.ClientAuthentication{ + AuthStyle: auth.StyleInHeader, + ClientID: opts.ClientID, + ClientSecret: opts.ClientSecret, + } + headers := make(http.Header) + headers.Set("Content-Type", "application/x-www-form-urlencoded") + stsResponse, err := stsexchange.RefreshAccessToken(ctx, &stsexchange.Options{ + Client: opts.Client, + Endpoint: opts.TokenURL, + RefreshToken: opts.RefreshToken, + Authentication: clientAuth, + Headers: headers, + }) + if err != nil { + return nil, err + } + if stsResponse.ExpiresIn < 0 { + return nil, errors.New("credentials: invalid expiry from security token service") + } + + // guarded by the wrapping with CachedTokenProvider + if stsResponse.RefreshToken != "" { + opts.RefreshToken = stsResponse.RefreshToken + } + return &auth.Token{ + Value: stsResponse.AccessToken, + Expiry: time.Now().UTC().Add(time.Duration(stsResponse.ExpiresIn) * time.Second), + Type: internal.TokenTypeBearer, + }, nil +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/BUILD.bazel b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/BUILD.bazel new file mode 100644 index 00000000..c9172636 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/BUILD.bazel @@ -0,0 +1,15 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "gdch", + srcs = ["gdch.go"], + importmap = "peridot.resf.org/vendor/cloud.google.com/go/auth/credentials/internal/gdch", + importpath = "cloud.google.com/go/auth/credentials/internal/gdch", + visibility = ["//vendor/cloud.google.com/go/auth/credentials:__subpackages__"], + deps = [ + "//vendor/cloud.google.com/go/auth", + "//vendor/cloud.google.com/go/auth/internal", + "//vendor/cloud.google.com/go/auth/internal/credsfile", + "//vendor/cloud.google.com/go/auth/internal/jwt", + ], +) diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go new file mode 100644 index 00000000..720045d3 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go @@ -0,0 +1,184 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
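Before moving into the GDCH provider: the refresh-token flow just above can be exercised like this. A minimal sketch of a hypothetical caller inside cloud.google.com/go/auth (the package is internal, so it is not reachable from user code); every value is invented:

```go
func externalAccountUserExample(ctx context.Context) (*auth.Token, error) {
	// validate() above requires ClientID, ClientSecret, RefreshToken, and
	// TokenURL to all be non-empty.
	tp, err := externalaccountuser.NewTokenProvider(&externalaccountuser.Options{
		Audience:     "//iam.googleapis.com/locations/global/workforcePools/pool/providers/provider",
		ClientID:     "client-id",
		ClientSecret: "client-secret",
		RefreshToken: "initial-refresh-token",
		TokenURL:     "https://sts.googleapis.com/v1/oauthtoken",
		Client:       &http.Client{Timeout: 30 * time.Second},
	})
	if err != nil {
		return nil, err
	}
	// The provider is wrapped in a cache; a refresh happens only once the
	// cached token expires, and any rotated refresh token is stored back
	// onto Options at that point.
	return tp.Token(ctx)
}
```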
+ +package gdch + +import ( + "context" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "strings" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" + "cloud.google.com/go/auth/internal/jwt" +) + +const ( + // GrantType is the grant type for the token request. + GrantType = "urn:ietf:params:oauth:token-type:token-exchange" + requestTokenType = "urn:ietf:params:oauth:token-type:access_token" + subjectTokenType = "urn:k8s:params:oauth:token-type:serviceaccount" +) + +var ( + gdchSupportFormatVersions map[string]bool = map[string]bool{ + "1": true, + } +) + +// Options for [NewTokenProvider]. +type Options struct { + STSAudience string + Client *http.Client +} + +// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] from a +// GDCH cred file. +func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.TokenProvider, error) { + if !gdchSupportFormatVersions[f.FormatVersion] { + return nil, fmt.Errorf("credentials: unsupported gdch_service_account format %q", f.FormatVersion) + } + if o.STSAudience == "" { + return nil, errors.New("credentials: STSAudience must be set for the GDCH auth flows") + } + pk, err := internal.ParseKey([]byte(f.PrivateKey)) + if err != nil { + return nil, err + } + certPool, err := loadCertPool(f.CertPath) + if err != nil { + return nil, err + } + + tp := gdchProvider{ + serviceIdentity: fmt.Sprintf("system:serviceaccount:%s:%s", f.Project, f.Name), + tokenURL: f.TokenURL, + aud: o.STSAudience, + pk: pk, + pkID: f.PrivateKeyID, + certPool: certPool, + client: o.Client, + } + return tp, nil +} + +func loadCertPool(path string) (*x509.CertPool, error) { + pool := x509.NewCertPool() + pem, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read certificate: %w", err) + } + pool.AppendCertsFromPEM(pem) + return pool, nil +} + +type gdchProvider struct { + serviceIdentity string + tokenURL string + aud string + pk *rsa.PrivateKey + pkID string + certPool *x509.CertPool + + client *http.Client +} + +func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { + addCertToTransport(g.client, g.certPool) + iat := time.Now() + exp := iat.Add(time.Hour) + claims := jwt.Claims{ + Iss: g.serviceIdentity, + Sub: g.serviceIdentity, + Aud: g.tokenURL, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + h := jwt.Header{ + Algorithm: jwt.HeaderAlgRSA256, + Type: jwt.HeaderType, + KeyID: string(g.pkID), + } + payload, err := jwt.EncodeJWS(&h, &claims, g.pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", GrantType) + v.Set("audience", g.aud) + v.Set("requested_token_type", requestTokenType) + v.Set("subject_token", payload) + v.Set("subject_token_type", subjectTokenType) + + req, err := http.NewRequestWithContext(ctx, "POST", g.tokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + resp, body, err := internal.DoRequest(g.client, req) + if err != nil { + return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) + } + if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices { + return nil, &auth.Error{ + Response: resp, + Body: body, + } + } + + var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + ExpiresIn int64 `json:"expires_in"` // relative seconds from now 
+ } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) + } + token := &auth.Token{ + Value: tokenRes.AccessToken, + Type: tokenRes.TokenType, + } + raw := make(map[string]interface{}) + json.Unmarshal(body, &raw) // no error checks for optional fields + token.Metadata = raw + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + return token, nil +} + +// addCertToTransport makes a best effort attempt at adding in the cert info to +// the client. It tries to keep all configured transport settings if the +// underlying transport is an http.Transport; otherwise it overwrites the +// transport with defaults, adding in the certs. +func addCertToTransport(hc *http.Client, certPool *x509.CertPool) { + trans, ok := hc.Transport.(*http.Transport) + if !ok { + trans = http.DefaultTransport.(*http.Transport).Clone() + } + trans.TLSClientConfig = &tls.Config{ + RootCAs: certPool, + } +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/BUILD.bazel b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/BUILD.bazel new file mode 100644 index 00000000..9c89a09b --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "impersonate", + srcs = ["impersonate.go"], + importmap = "peridot.resf.org/vendor/cloud.google.com/go/auth/credentials/internal/impersonate", + importpath = "cloud.google.com/go/auth/credentials/internal/impersonate", + visibility = ["//vendor/cloud.google.com/go/auth/credentials:__subpackages__"], + deps = [ + "//vendor/cloud.google.com/go/auth", + "//vendor/cloud.google.com/go/auth/internal", + ], +) diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go new file mode 100644 index 00000000..ed53afa5 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go @@ -0,0 +1,146 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package impersonate + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" +) + +const ( + defaultTokenLifetime = "3600s" + authHeaderKey = "Authorization" +) + +// generateAccessTokenReq is used for service account impersonation. +type generateAccessTokenReq struct { + Delegates []string `json:"delegates,omitempty"` + Lifetime string `json:"lifetime,omitempty"` + Scope []string `json:"scope,omitempty"` +} + +type impersonateTokenResponse struct { + AccessToken string `json:"accessToken"` + ExpireTime string `json:"expireTime"` +} + +// NewTokenProvider uses a source credential, stored in Tp, to request an access token to the provided URL.
+// Scopes can be defined when the access token is requested. +func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { + if err := opts.validate(); err != nil { + return nil, err + } + return opts, nil +} + +// Options for [NewTokenProvider]. +type Options struct { + // Tp is the source credential used to generate a token on the + // impersonated service account. Required. + Tp auth.TokenProvider + + // URL is the endpoint to call to generate a token + // on behalf of the service account. Required. + URL string + // Scopes that the impersonated credential should have. Required. + Scopes []string + // Delegates are the service account email addresses in a delegation chain. + // Each service account must be granted roles/iam.serviceAccountTokenCreator + // on the next service account in the chain. Optional. + Delegates []string + // TokenLifetimeSeconds is the number of seconds the impersonation token will + // be valid for. Defaults to 1 hour if unset. Optional. + TokenLifetimeSeconds int + // Client configures the underlying client used to make network requests + // when fetching tokens. Required. + Client *http.Client +} + +func (o *Options) validate() error { + if o.Tp == nil { + return errors.New("credentials: missing required 'source_credentials' field in impersonated credentials") + } + if o.URL == "" { + return errors.New("credentials: missing required 'service_account_impersonation_url' field in impersonated credentials") + } + return nil +} + +// Token performs the exchange to get a temporary service account token to allow access to GCP. +func (o *Options) Token(ctx context.Context) (*auth.Token, error) { + lifetime := defaultTokenLifetime + if o.TokenLifetimeSeconds != 0 { + lifetime = fmt.Sprintf("%ds", o.TokenLifetimeSeconds) + } + reqBody := generateAccessTokenReq{ + Lifetime: lifetime, + Scope: o.Scopes, + Delegates: o.Delegates, + } + b, err := json.Marshal(reqBody) + if err != nil { + return nil, fmt.Errorf("credentials: unable to marshal request: %w", err) + } + req, err := http.NewRequestWithContext(ctx, "POST", o.URL, bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("credentials: unable to create impersonation request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + if err := setAuthHeader(ctx, o.Tp, req); err != nil { + return nil, err + } + resp, body, err := internal.DoRequest(o.Client, req) + if err != nil { + return nil, fmt.Errorf("credentials: unable to generate access token: %w", err) + } + if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { + return nil, fmt.Errorf("credentials: status code %d: %s", c, body) + } + + var accessTokenResp impersonateTokenResponse + if err := json.Unmarshal(body, &accessTokenResp); err != nil { + return nil, fmt.Errorf("credentials: unable to parse response: %w", err) + } + expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime) + if err != nil { + return nil, fmt.Errorf("credentials: unable to parse expiry: %w", err) + } + return &auth.Token{ + Value: accessTokenResp.AccessToken, + Expiry: expiry, + Type: internal.TokenTypeBearer, + }, nil +} + +func setAuthHeader(ctx context.Context, tp auth.TokenProvider, r *http.Request) error { + t, err := tp.Token(ctx) + if err != nil { + return err + } + typ := t.Type + if typ == "" { + typ = internal.TokenTypeBearer + } + r.Header.Set(authHeaderKey, typ+" "+t.Value) + return nil +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/BUILD.bazel 
b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/BUILD.bazel new file mode 100644 index 00000000..816b9392 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "stsexchange", + srcs = ["sts_exchange.go"], + importmap = "peridot.resf.org/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange", + importpath = "cloud.google.com/go/auth/credentials/internal/stsexchange", + visibility = ["//vendor/cloud.google.com/go/auth/credentials:__subpackages__"], + deps = [ + "//vendor/cloud.google.com/go/auth", + "//vendor/cloud.google.com/go/auth/internal", + ], +) diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go new file mode 100644 index 00000000..768a9daf --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go @@ -0,0 +1,161 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stsexchange + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" +) + +const ( + // GrantType for an STS exchange. + GrantType = "urn:ietf:params:oauth:grant-type:token-exchange" + // TokenType for an STS exchange. + TokenType = "urn:ietf:params:oauth:token-type:access_token" + + jwtTokenType = "urn:ietf:params:oauth:token-type:jwt" +) + +// Options stores the configuration for making an STS exchange request. +type Options struct { + Client *http.Client + Endpoint string + Request *TokenRequest + Authentication ClientAuthentication + Headers http.Header + // ExtraOpts are optional fields marshalled into the `options` field of the + // request body. + ExtraOpts map[string]interface{} + RefreshToken string +} + +// RefreshAccessToken performs the token exchange using a refresh token flow. +func RefreshAccessToken(ctx context.Context, opts *Options) (*TokenResponse, error) { + data := url.Values{} + data.Set("grant_type", "refresh_token") + data.Set("refresh_token", opts.RefreshToken) + return doRequest(ctx, opts, data) +} + +// ExchangeToken performs an oauth2 token exchange with the provided endpoint.
+func ExchangeToken(ctx context.Context, opts *Options) (*TokenResponse, error) { + data := url.Values{} + data.Set("audience", opts.Request.Audience) + data.Set("grant_type", GrantType) + data.Set("requested_token_type", TokenType) + data.Set("subject_token_type", opts.Request.SubjectTokenType) + data.Set("subject_token", opts.Request.SubjectToken) + data.Set("scope", strings.Join(opts.Request.Scope, " ")) + if opts.ExtraOpts != nil { + opts, err := json.Marshal(opts.ExtraOpts) + if err != nil { + return nil, fmt.Errorf("credentials: failed to marshal additional options: %w", err) + } + data.Set("options", string(opts)) + } + return doRequest(ctx, opts, data) +} + +func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenResponse, error) { + opts.Authentication.InjectAuthentication(data, opts.Headers) + encodedData := data.Encode() + + req, err := http.NewRequestWithContext(ctx, "POST", opts.Endpoint, strings.NewReader(encodedData)) + if err != nil { + return nil, fmt.Errorf("credentials: failed to properly build http request: %w", err) + + } + for key, list := range opts.Headers { + for _, val := range list { + req.Header.Add(key, val) + } + } + req.Header.Set("Content-Length", strconv.Itoa(len(encodedData))) + + resp, body, err := internal.DoRequest(opts.Client, req) + if err != nil { + return nil, fmt.Errorf("credentials: invalid response from Secure Token Server: %w", err) + } + if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices { + return nil, fmt.Errorf("credentials: status code %d: %s", c, body) + } + var stsResp TokenResponse + if err := json.Unmarshal(body, &stsResp); err != nil { + return nil, fmt.Errorf("credentials: failed to unmarshal response body from Secure Token Server: %w", err) + } + + return &stsResp, nil +} + +// TokenRequest contains fields necessary to make an oauth2 token +// exchange. +type TokenRequest struct { + ActingParty struct { + ActorToken string + ActorTokenType string + } + GrantType string + Resource string + Audience string + Scope []string + RequestedTokenType string + SubjectToken string + SubjectTokenType string +} + +// TokenResponse is used to decode the remote server response during +// an oauth2 token exchange. +type TokenResponse struct { + AccessToken string `json:"access_token"` + IssuedTokenType string `json:"issued_token_type"` + TokenType string `json:"token_type"` + ExpiresIn int `json:"expires_in"` + Scope string `json:"scope"` + RefreshToken string `json:"refresh_token"` +} + +// ClientAuthentication represents an OAuth client ID and secret and the +// mechanism for passing these credentials as stated in rfc6749#2.3.1. +type ClientAuthentication struct { + AuthStyle auth.Style + ClientID string + ClientSecret string +} + +// InjectAuthentication is used to add authentication to a Secure Token Service +// exchange request. It modifies either the passed url.Values or http.Header +// depending on the desired authentication format. 
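Before InjectAuthentication's implementation, here is a sketch of driving the two entry points above from inside the package. Endpoints and secrets are invented, and the Content-Type header is supplied by the caller because doRequest does not set one:

```go
func stsExamples(ctx context.Context, client *http.Client) error {
	headers := make(http.Header)
	headers.Set("Content-Type", "application/x-www-form-urlencoded")

	// Refresh-token flow: grant_type and refresh_token are set by
	// RefreshAccessToken itself.
	if _, err := RefreshAccessToken(ctx, &Options{
		Client:       client,
		Endpoint:     "https://sts.googleapis.com/v1/oauthtoken", // invented
		RefreshToken: "refresh-token",
		Authentication: ClientAuthentication{
			AuthStyle:    auth.StyleInHeader,
			ClientID:     "client-id",
			ClientSecret: "client-secret",
		},
		Headers: headers,
	}); err != nil {
		return err
	}

	// Exchange flow: ExchangeToken fills grant_type and requested_token_type
	// from the package constants; the caller supplies the subject token.
	_, err := ExchangeToken(ctx, &Options{
		Client:   client,
		Endpoint: "https://sts.googleapis.com/v1/token", // invented
		Request: &TokenRequest{
			Audience:         "//iam.googleapis.com/projects/123/locations/global/workloadIdentityPools/pool/providers/oidc",
			SubjectToken:     "eyJhbGciOi...", // invented token
			SubjectTokenType: jwtTokenType,
			Scope:            []string{"https://www.googleapis.com/auth/cloud-platform"},
		},
		Headers: headers,
	})
	return err
}
```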
+func (c *ClientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { + if c.ClientID == "" || c.ClientSecret == "" || values == nil || headers == nil { + return + } + switch c.AuthStyle { + case auth.StyleInHeader: + plainHeader := c.ClientID + ":" + c.ClientSecret + headers.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(plainHeader))) + default: + values.Set("client_id", c.ClientID) + values.Set("client_secret", c.ClientSecret) + } +} diff --git a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go new file mode 100644 index 00000000..b62a8ae4 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go @@ -0,0 +1,81 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package credentials + +import ( + "context" + "crypto/rsa" + "fmt" + "strings" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" + "cloud.google.com/go/auth/internal/jwt" +) + +var ( + // for testing + now func() time.Time = time.Now +) + +// configureSelfSignedJWT uses the private key in the service account to create +// a JWT without making a network call. 
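Looping back to InjectAuthentication above before the self-signed JWT flow: a short sketch of its two injection styles. The credentials are invented, and auth.StyleInParams is assumed to be one of the auth package's Style constants alongside StyleInHeader:

```go
func injectExample() {
	ca := ClientAuthentication{AuthStyle: auth.StyleInHeader, ClientID: "id", ClientSecret: "secret"}
	vals, hdrs := url.Values{}, make(http.Header)

	ca.InjectAuthentication(vals, hdrs)
	// hdrs now carries: Authorization: Basic aWQ6c2VjcmV0 ("id:secret" base64-encoded);
	// vals is untouched.

	ca.AuthStyle = auth.StyleInParams // any non-header style falls through to the body
	ca.InjectAuthentication(vals, hdrs)
	// vals now carries client_id=id and client_secret=secret.
}
```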
+func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + pk, err := internal.ParseKey([]byte(f.PrivateKey)) + if err != nil { + return nil, fmt.Errorf("credentials: could not parse key: %w", err) + } + return &selfSignedTokenProvider{ + email: f.ClientEmail, + audience: opts.Audience, + scopes: opts.scopes(), + pk: pk, + pkID: f.PrivateKeyID, + }, nil +} + +type selfSignedTokenProvider struct { + email string + audience string + scopes []string + pk *rsa.PrivateKey + pkID string +} + +func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) { + iat := now() + exp := iat.Add(time.Hour) + scope := strings.Join(tp.scopes, " ") + c := &jwt.Claims{ + Iss: tp.email, + Sub: tp.email, + Aud: tp.audience, + Scope: scope, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + h := &jwt.Header{ + Algorithm: jwt.HeaderAlgRSA256, + Type: jwt.HeaderType, + KeyID: string(tp.pkID), + } + msg, err := jwt.EncodeJWS(h, c, tp.pk) + if err != nil { + return nil, fmt.Errorf("credentials: could not encode JWT: %w", err) + } + return &auth.Token{Value: msg, Type: internal.TokenTypeBearer, Expiry: exp}, nil +} diff --git a/vendor/cloud.google.com/go/auth/grpctransport/BUILD.bazel b/vendor/cloud.google.com/go/auth/grpctransport/BUILD.bazel new file mode 100644 index 00000000..8f6b7cee --- /dev/null +++ b/vendor/cloud.google.com/go/auth/grpctransport/BUILD.bazel @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "grpctransport", + srcs = [ + "dial_socketopt.go", + "directpath.go", + "grpctransport.go", + "pool.go", + ], + importmap = "peridot.resf.org/vendor/cloud.google.com/go/auth/grpctransport", + importpath = "cloud.google.com/go/auth/grpctransport", + visibility = ["//visibility:public"], + deps = [ + "//vendor/cloud.google.com/go/auth", + "//vendor/cloud.google.com/go/auth/credentials", + "//vendor/cloud.google.com/go/auth/internal", + "//vendor/cloud.google.com/go/auth/internal/transport", + "//vendor/cloud.google.com/go/compute/metadata", + "//vendor/go.opencensus.io/plugin/ocgrpc", + "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//credentials", + "@org_golang_google_grpc//credentials/google", + "@org_golang_google_grpc//credentials/insecure", + ], +) diff --git a/vendor/cloud.google.com/go/auth/grpctransport/dial_socketopt.go b/vendor/cloud.google.com/go/auth/grpctransport/dial_socketopt.go new file mode 100644 index 00000000..e6136080 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/grpctransport/dial_socketopt.go @@ -0,0 +1,62 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux +// +build linux + +package grpctransport + +import ( + "context" + "net" + "syscall" + + "google.golang.org/grpc" +) + +const ( + // tcpUserTimeoutMilliseconds is the default TCP_USER_TIMEOUT socket option + // value: 20 seconds. + tcpUserTimeoutMilliseconds = 20000 + + // Copied from golang.org/x/sys/unix.TCP_USER_TIMEOUT.
+ tcpUserTimeoutOp = 0x12 +) + +func init() { + // timeoutDialerOption is a grpc.DialOption that contains dialer with + // socket option TCP_USER_TIMEOUT. This dialer requires go versions 1.11+. + timeoutDialerOption = grpc.WithContextDialer(dialTCPUserTimeout) +} + +func dialTCPUserTimeout(ctx context.Context, addr string) (net.Conn, error) { + control := func(network, address string, c syscall.RawConn) error { + var syscallErr error + controlErr := c.Control(func(fd uintptr) { + syscallErr = syscall.SetsockoptInt( + int(fd), syscall.IPPROTO_TCP, tcpUserTimeoutOp, tcpUserTimeoutMilliseconds) + }) + if syscallErr != nil { + return syscallErr + } + if controlErr != nil { + return controlErr + } + return nil + } + d := &net.Dialer{ + Control: control, + } + return d.DialContext(ctx, "tcp", addr) +} diff --git a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go new file mode 100644 index 00000000..8dbfa7ef --- /dev/null +++ b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go @@ -0,0 +1,123 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package grpctransport + +import ( + "context" + "net" + "os" + "strconv" + "strings" + + "cloud.google.com/go/auth" + "cloud.google.com/go/compute/metadata" + "google.golang.org/grpc" + grpcgoogle "google.golang.org/grpc/credentials/google" +) + +func isDirectPathEnabled(endpoint string, opts *Options) bool { + if opts.InternalOptions != nil && !opts.InternalOptions.EnableDirectPath { + return false + } + if !checkDirectPathEndPoint(endpoint) { + return false + } + if b, _ := strconv.ParseBool(os.Getenv(disableDirectPathEnvVar)); b { + return false + } + return true +} + +func checkDirectPathEndPoint(endpoint string) bool { + // Only [dns:///]host[:port] is supported, not other schemes (e.g., "tcp://" or "unix://"). + // Also don't try direct path if the user has chosen an alternate name resolver + // (i.e., via ":///" prefix). 
+ if strings.Contains(endpoint, "://") && !strings.HasPrefix(endpoint, "dns:///") { + return false + } + + if endpoint == "" { + return false + } + + return true +} + +func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, _ *Options) bool { + if tp == nil { + return false + } + tok, err := tp.Token(context.Background()) + if err != nil { + return false + } + if tok == nil { + return false + } + if source, _ := tok.Metadata["auth.google.tokenSource"].(string); source != "compute-metadata" { + return false + } + if acct, _ := tok.Metadata["auth.google.serviceAccount"].(string); acct != "default" { + return false + } + return true +} + +func isDirectPathXdsUsed(o *Options) bool { + // Method 1: Enable DirectPath xDS by env; + if b, _ := strconv.ParseBool(os.Getenv(enableDirectPathXdsEnvVar)); b { + return true + } + // Method 2: Enable DirectPath xDS by option; + if o.InternalOptions != nil && o.InternalOptions.EnableDirectPathXds { + return true + } + return false +} + +// configureDirectPath returns some dial options and an endpoint to use if the +// configuration allows the use of direct path. If it does not the provided +// grpcOpts and endpoint are returned. +func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint string, creds *auth.Credentials) ([]grpc.DialOption, string) { + if isDirectPathEnabled(endpoint, opts) && metadata.OnGCE() && isTokenProviderDirectPathCompatible(creds, opts) { + // Overwrite all of the previously specified DialOptions; DirectPath uses its own set of credentials and certificates. + grpcOpts = []grpc.DialOption{ + grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{PerRPCCreds: &grpcCredentialsProvider{creds: creds}}))} + if timeoutDialerOption != nil { + grpcOpts = append(grpcOpts, timeoutDialerOption) + } + // Check if google-c2p resolver is enabled for DirectPath + if isDirectPathXdsUsed(opts) { + // google-c2p resolver target must not have a port number + if addr, _, err := net.SplitHostPort(endpoint); err == nil { + endpoint = "google-c2p:///" + addr + } else { + endpoint = "google-c2p:///" + endpoint + } + } else { + if !strings.HasPrefix(endpoint, "dns:///") { + endpoint = "dns:///" + endpoint + } + grpcOpts = append(grpcOpts, + // For now all DirectPath go clients will be using the following lb config, but in the + // future, when different services need different configs, we should change this to a + // per-service config. + grpc.WithDisableServiceConfig(), + grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`)) + } + // TODO: add support for system parameters (quota project, request reason) via chained interceptor. + } + return grpcOpts, endpoint +} diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go new file mode 100644 index 00000000..5c3bc66f --- /dev/null +++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go @@ -0,0 +1,384 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package grpctransport + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net/http" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/transport" + "go.opencensus.io/plugin/ocgrpc" + "google.golang.org/grpc" + grpccreds "google.golang.org/grpc/credentials" + grpcinsecure "google.golang.org/grpc/credentials/insecure" +) + +const ( + // Check env to disable DirectPath traffic. + disableDirectPathEnvVar = "GOOGLE_CLOUD_DISABLE_DIRECT_PATH" + + // Check env to decide if using google-c2p resolver for DirectPath traffic. + enableDirectPathXdsEnvVar = "GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS" + + quotaProjectHeaderKey = "X-Goog-User-Project" +) + +var ( + // Set at init time by dial_socketopt.go. If nil, socketopt is not supported. + timeoutDialerOption grpc.DialOption +) + +// ClientCertProvider is a function that returns a TLS client certificate to be +// used when opening TLS connections. It follows the same semantics as +// [crypto/tls.Config.GetClientCertificate]. +type ClientCertProvider = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + +// Options used to configure a [GRPCClientConnPool] from [Dial]. +type Options struct { + // DisableTelemetry disables default telemetry (OpenCensus). An example + // reason to do so would be to bind custom telemetry that overrides the + // defaults. + DisableTelemetry bool + // DisableAuthentication specifies that no authentication should be used. It + // is suitable only for testing and for accessing public resources, like + // public Google Cloud Storage buckets. + DisableAuthentication bool + // Endpoint overrides the default endpoint to be used for a service. + Endpoint string + // Metadata is extra gRPC metadata that will be appended to every outgoing + // request. + Metadata map[string]string + // GRPCDialOpts are dial options that will be passed to `grpc.Dial` when + // establishing a `grpc.ClientConn`. + GRPCDialOpts []grpc.DialOption + // PoolSize specifies how many connections to balance between when making + // requests. If unset or less than 1, the value defaults to 1. + PoolSize int + // Credentials used to add Authorization metadata to all requests. If set, + // DetectOpts are ignored. + Credentials *auth.Credentials + // ClientCertProvider is a function that returns a TLS client certificate to + // be used when opening TLS connections. It follows the same semantics as + // crypto/tls.Config.GetClientCertificate. + ClientCertProvider ClientCertProvider + // DetectOpts configures settings for detect Application Default + // Credentials. + DetectOpts *credentials.DetectOptions + // UniverseDomain is the default service domain for a given Cloud universe. + // The default value is "googleapis.com". This is the universe domain + // configured for the client, which will be compared to the universe domain + // that is separately configured for the credentials. + UniverseDomain string + // APIKey specifies an API key to be used as the basis for authentication. + // If set, DetectOpts are ignored. + APIKey string + + // InternalOptions are NOT meant to be set directly by consumers of this + // package, they should only be set by generated client code. + InternalOptions *InternalOptions +} + +// client returns the client the user set in the detect options, or nil if +// none was set. +func (o *Options) client() *http.Client { + if o.DetectOpts != nil && o.DetectOpts.Client != nil { + return o.DetectOpts.Client + } + return nil +} + +func (o *Options) validate() error { + if o == nil { + return errors.New("grpctransport: opts required to be non-nil") + } + if o.InternalOptions != nil && o.InternalOptions.SkipValidation { + return nil + } + hasCreds := o.APIKey != "" || + o.Credentials != nil || + (o.DetectOpts != nil && len(o.DetectOpts.CredentialsJSON) > 0) || + (o.DetectOpts != nil && o.DetectOpts.CredentialsFile != "") + if o.DisableAuthentication && hasCreds { + return errors.New("grpctransport: DisableAuthentication is incompatible with options that set or detect credentials") + } + return nil +} + +func (o *Options) resolveDetectOptions() *credentials.DetectOptions { + io := o.InternalOptions + // soft-clone these so we are not updating a ref the user holds and may reuse + do := transport.CloneDetectOptions(o.DetectOpts) + + // If scoped JWTs are enabled or the user provided an aud, allow self-signed JWT. + if (io != nil && io.EnableJWTWithScope) || do.Audience != "" { + do.UseSelfSignedJWT = true + } + // Only default scopes if user did not also set an audience. + if len(do.Scopes) == 0 && do.Audience == "" && io != nil && len(io.DefaultScopes) > 0 { + do.Scopes = make([]string, len(io.DefaultScopes)) + copy(do.Scopes, io.DefaultScopes) + } + if len(do.Scopes) == 0 && do.Audience == "" && io != nil { + do.Audience = o.InternalOptions.DefaultAudience + } + if o.ClientCertProvider != nil { + tlsConfig := &tls.Config{ + GetClientCertificate: o.ClientCertProvider, + } + do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig) + do.TokenURL = credentials.GoogleMTLSTokenURL + } + return do +} + +// InternalOptions are only meant to be set by generated client code. These are +// not meant to be set directly by consumers of this package. Configuration in +// this type is considered EXPERIMENTAL and may be removed at any time in the +// future without warning. +type InternalOptions struct { + // EnableNonDefaultSAForDirectPath overrides the default requirement for + // using the default service account for DirectPath. + EnableNonDefaultSAForDirectPath bool + // EnableDirectPath overrides the default attempt to use DirectPath. + EnableDirectPath bool + // EnableDirectPathXds overrides the default DirectPath type. It is only + // valid when DirectPath is enabled. + EnableDirectPathXds bool + // EnableJWTWithScope specifies if scope can be used with self-signed JWT. + EnableJWTWithScope bool + // DefaultAudience specifies a default audience to be used as the audience + // field ("aud") for the JWT token authentication. + DefaultAudience string + // DefaultEndpointTemplate combined with UniverseDomain specifies + // the default endpoint. + DefaultEndpointTemplate string + // DefaultMTLSEndpoint specifies the default mTLS endpoint. + DefaultMTLSEndpoint string + // DefaultScopes specifies the default OAuth2 scopes to be used for a + // service. + DefaultScopes []string + // SkipValidation bypasses validation on Options. It should only be used + // internally for clients that need more control over their transport. + SkipValidation bool +} + +// Dial returns a GRPCClientConnPool that can be used to communicate with a +// Google cloud service, configured with the provided [Options]. It +// automatically appends Authorization metadata to all outgoing requests. +func Dial(ctx context.Context, secure bool, opts *Options) (GRPCClientConnPool, error) { + if err := opts.validate(); err != nil { + return nil, err + } + if opts.PoolSize <= 1 { + conn, err := dial(ctx, secure, opts) + if err != nil { + return nil, err + } + return &singleConnPool{conn}, nil + } + pool := &roundRobinConnPool{} + for i := 0; i < opts.PoolSize; i++ { + conn, err := dial(ctx, secure, opts) + if err != nil { + // ignore close error, if any + defer pool.Close() + return nil, err + } + pool.conns = append(pool.conns, conn) + } + return pool, nil +} + +// dial creates a single *grpc.ClientConn; Dial above wraps one (PoolSize <= 1) +// or a round-robin pool of them (PoolSize > 1). +func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, error) { + tOpts := &transport.Options{ + Endpoint: opts.Endpoint, + ClientCertProvider: opts.ClientCertProvider, + Client: opts.client(), + UniverseDomain: opts.UniverseDomain, + } + if io := opts.InternalOptions; io != nil { + tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate + tOpts.DefaultMTLSEndpoint = io.DefaultMTLSEndpoint + tOpts.EnableDirectPath = io.EnableDirectPath + tOpts.EnableDirectPathXds = io.EnableDirectPathXds + } + transportCreds, endpoint, err := transport.GetGRPCTransportCredsAndEndpoint(tOpts) + if err != nil { + return nil, err + } + + if !secure { + transportCreds = grpcinsecure.NewCredentials() + } + + // Initialize gRPC dial options with transport-level security options. + grpcOpts := []grpc.DialOption{ + grpc.WithTransportCredentials(transportCreds), + } + + // Ensure the token exchange HTTP transport uses the same ClientCertProvider as the GRPC API transport. + opts.ClientCertProvider, err = transport.GetClientCertificateProvider(tOpts) + if err != nil { + return nil, err + } + + if opts.APIKey != "" { + grpcOpts = append(grpcOpts, + grpc.WithPerRPCCredentials(&grpcKeyProvider{ + apiKey: opts.APIKey, + metadata: opts.Metadata, + secure: secure, + }), + ) + } else if !opts.DisableAuthentication { + metadata := opts.Metadata + + var creds *auth.Credentials + if opts.Credentials != nil { + creds = opts.Credentials + } else { + var err error + creds, err = credentials.DetectDefault(opts.resolveDetectOptions()) + if err != nil { + return nil, err + } + } + + qp, err := creds.QuotaProjectID(ctx) + if err != nil { + return nil, err + } + if qp != "" { + if metadata == nil { + metadata = make(map[string]string, 1) + } + metadata[quotaProjectHeaderKey] = qp + } + grpcOpts = append(grpcOpts, + grpc.WithPerRPCCredentials(&grpcCredentialsProvider{ + creds: creds, + metadata: metadata, + clientUniverseDomain: opts.UniverseDomain, + }), + ) + + // Attempt Direct Path + grpcOpts, endpoint = configureDirectPath(grpcOpts, opts, endpoint, creds) + } + + // Add tracing, but before the other options, so that clients can override the + // gRPC stats handler. + // This assumes that gRPC options are processed in order, left to right. + grpcOpts = addOCStatsHandler(grpcOpts, opts) + grpcOpts = append(grpcOpts, opts.GRPCDialOpts...) + + return grpc.DialContext(ctx, endpoint, grpcOpts...) +} + +// grpcKeyProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials.
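Before the per-RPC credential types: a sketch of how a caller might drive Dial above. The endpoint and scope are invented, and real clients normally supply InternalOptions from generated code rather than by hand:

```go
func dialExample(ctx context.Context) (GRPCClientConnPool, error) {
	return Dial(ctx, true /* secure */, &Options{
		Endpoint: "storage.googleapis.com:443", // invented endpoint
		PoolSize: 4,                            // > 1 selects the round-robin pool path above
		DetectOpts: &credentials.DetectOptions{
			Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
		},
	})
}
```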
+type grpcKeyProvider struct {
+	apiKey   string
+	metadata map[string]string
+	secure   bool
+}
+
+func (g *grpcKeyProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+	metadata := make(map[string]string, len(g.metadata)+1)
+	metadata["X-goog-api-key"] = g.apiKey
+	for k, v := range g.metadata {
+		metadata[k] = v
+	}
+	return metadata, nil
+}
+
+func (g *grpcKeyProvider) RequireTransportSecurity() bool {
+	return g.secure
+}
+
+// grpcCredentialsProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials.
+type grpcCredentialsProvider struct {
+	creds *auth.Credentials
+
+	secure bool
+
+	// Additional metadata attached as headers.
+	metadata             map[string]string
+	clientUniverseDomain string
+}
+
+// getClientUniverseDomain returns the universe domain configured for the
+// client, which will be compared to the universe domain that is separately
+// configured for the credentials. The default value is "googleapis.com".
+func (c *grpcCredentialsProvider) getClientUniverseDomain() string {
+	if c.clientUniverseDomain == "" {
+		return internal.DefaultUniverseDomain
+	}
+	return c.clientUniverseDomain
+}
+
+func (c *grpcCredentialsProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+	credentialsUniverseDomain, err := c.creds.UniverseDomain(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if err := transport.ValidateUniverseDomain(c.getClientUniverseDomain(), credentialsUniverseDomain); err != nil {
+		return nil, err
+	}
+	token, err := c.creds.Token(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if c.secure {
+		ri, _ := grpccreds.RequestInfoFromContext(ctx)
+		if err = grpccreds.CheckSecurityLevel(ri.AuthInfo, grpccreds.PrivacyAndIntegrity); err != nil {
+			return nil, fmt.Errorf("unable to transfer credentials PerRPCCredentials: %v", err)
+		}
+	}
+	metadata := make(map[string]string, len(c.metadata)+1)
+	setAuthMetadata(token, metadata)
+	for k, v := range c.metadata {
+		metadata[k] = v
+	}
+	return metadata, nil
+}
+
+// setAuthMetadata uses the provided token to set the Authorization metadata.
+// If the token.Type is empty, the type is assumed to be Bearer.
+func setAuthMetadata(token *auth.Token, m map[string]string) {
+	typ := token.Type
+	if typ == "" {
+		typ = internal.TokenTypeBearer
+	}
+	m["authorization"] = typ + " " + token.Value
+}
+
+func (c *grpcCredentialsProvider) RequireTransportSecurity() bool {
+	return c.secure
+}
+
+func addOCStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption {
+	if opts.DisableTelemetry {
+		return dialOpts
+	}
+	return append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
+}
diff --git a/vendor/cloud.google.com/go/auth/grpctransport/pool.go b/vendor/cloud.google.com/go/auth/grpctransport/pool.go
new file mode 100644
index 00000000..642679f9
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/grpctransport/pool.go
@@ -0,0 +1,119 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpctransport
+
+import (
+	"context"
+	"fmt"
+	"sync/atomic"
+
+	"google.golang.org/grpc"
+)
+
+// GRPCClientConnPool is an interface that satisfies
+// [google.golang.org/grpc.ClientConnInterface] and has some utility functions
+// that are needed for connection lifecycle when used in a client library. It
+// may be a pool or a single connection. This interface is not intended to, and
+// can't be, implemented by others.
+type GRPCClientConnPool interface {
+	// Connection returns a [google.golang.org/grpc.ClientConn] from the pool.
+	//
+	// ClientConns aren't returned to the pool and should not be closed directly.
+	Connection() *grpc.ClientConn
+
+	// Len returns the number of connections in the pool. It will always return
+	// the same value.
+	Len() int
+
+	// Close closes every ClientConn in the pool. The error returned by Close
+	// may be a single error or multiple errors.
+	Close() error
+
+	grpc.ClientConnInterface
+
+	// private ensures others outside this package can't implement this type
+	private()
+}
+
+// singleConnPool is a special case for a single connection.
+type singleConnPool struct {
+	*grpc.ClientConn
+}
+
+func (p *singleConnPool) Connection() *grpc.ClientConn { return p.ClientConn }
+func (p *singleConnPool) Len() int                     { return 1 }
+func (p *singleConnPool) private()                     {}
+
+type roundRobinConnPool struct {
+	conns []*grpc.ClientConn
+
+	idx uint32 // access via sync/atomic
+}
+
+func (p *roundRobinConnPool) Len() int {
+	return len(p.conns)
+}
+
+func (p *roundRobinConnPool) Connection() *grpc.ClientConn {
+	i := atomic.AddUint32(&p.idx, 1)
+	return p.conns[i%uint32(len(p.conns))]
+}
+
+func (p *roundRobinConnPool) Close() error {
+	var errs multiError
+	for _, conn := range p.conns {
+		if err := conn.Close(); err != nil {
+			errs = append(errs, err)
+		}
+	}
+	if len(errs) == 0 {
+		return nil
+	}
+	return errs
+}
+
+func (p *roundRobinConnPool) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...grpc.CallOption) error {
+	return p.Connection().Invoke(ctx, method, args, reply, opts...)
+}
+
+func (p *roundRobinConnPool) NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+	return p.Connection().NewStream(ctx, desc, method, opts...)
+}
+
+func (p *roundRobinConnPool) private() {}
+
+// multiError represents errors from multiple conns in the group.
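+//
+// For example, if three Close calls fail, Error() renders as
+// "first error message (and 2 other errors)".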
+type multiError []error
+
+func (m multiError) Error() string {
+	s, n := "", 0
+	for _, e := range m {
+		if e != nil {
+			if n == 0 {
+				s = e.Error()
+			}
+			n++
+		}
+	}
+	switch n {
+	case 0:
+		return "(0 errors)"
+	case 1:
+		return s
+	case 2:
+		return s + " (and 1 other error)"
+	}
+	return fmt.Sprintf("%s (and %d other errors)", s, n-1)
+}
diff --git a/vendor/cloud.google.com/go/auth/httptransport/BUILD.bazel b/vendor/cloud.google.com/go/auth/httptransport/BUILD.bazel
new file mode 100644
index 00000000..d5dc75e8
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/httptransport/BUILD.bazel
@@ -0,0 +1,24 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "httptransport",
+    srcs = [
+        "httptransport.go",
+        "trace.go",
+        "transport.go",
+    ],
+    importmap = "peridot.resf.org/vendor/cloud.google.com/go/auth/httptransport",
+    importpath = "cloud.google.com/go/auth/httptransport",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//vendor/cloud.google.com/go/auth",
+        "//vendor/cloud.google.com/go/auth/credentials",
+        "//vendor/cloud.google.com/go/auth/internal",
+        "//vendor/cloud.google.com/go/auth/internal/transport",
+        "//vendor/cloud.google.com/go/auth/internal/transport/cert",
+        "//vendor/go.opencensus.io/plugin/ochttp",
+        "//vendor/go.opencensus.io/trace",
+        "//vendor/go.opencensus.io/trace/propagation",
+        "@org_golang_x_net//http2",
+    ],
+)
diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
new file mode 100644
index 00000000..969c8d4d
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
@@ -0,0 +1,224 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httptransport
+
+import (
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"net/http"
+
+	"cloud.google.com/go/auth"
+	detect "cloud.google.com/go/auth/credentials"
+	"cloud.google.com/go/auth/internal"
+	"cloud.google.com/go/auth/internal/transport"
+)
+
+// ClientCertProvider is a function that returns a TLS client certificate to be
+// used when opening TLS connections. It follows the same semantics as
+// [crypto/tls.Config.GetClientCertificate].
+type ClientCertProvider = func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
+
+// Options used to configure a [net/http.Client] from [NewClient].
+type Options struct {
+	// DisableTelemetry disables default telemetry (OpenCensus). An example
+	// reason to do so would be to bind custom telemetry that overrides the
+	// defaults.
+	DisableTelemetry bool
+	// DisableAuthentication specifies that no authentication should be used. It
+	// is suitable only for testing and for accessing public resources, like
+	// public Google Cloud Storage buckets.
+	DisableAuthentication bool
+	// Headers are extra HTTP headers that will be appended to every outgoing
+	// request.
+	Headers http.Header
+	// BaseRoundTripper overrides the base transport used for serving requests.
+	// If specified, ClientCertProvider is ignored.
+	BaseRoundTripper http.RoundTripper
+	// Endpoint overrides the default endpoint to be used for a service.
+	Endpoint string
+	// APIKey specifies an API key to be used as the basis for authentication.
+	// If set, DetectOpts are ignored.
+	APIKey string
+	// Credentials used to add Authorization header to all requests. If set,
+	// DetectOpts are ignored.
+	Credentials *auth.Credentials
+	// ClientCertProvider is a function that returns a TLS client certificate to
+	// be used when opening TLS connections. It follows the same semantics as
+	// crypto/tls.Config.GetClientCertificate.
+	ClientCertProvider ClientCertProvider
+	// DetectOpts configures settings for detecting Application Default
+	// Credentials.
+	DetectOpts *detect.DetectOptions
+	// UniverseDomain is the default service domain for a given Cloud universe.
+	// The default value is "googleapis.com". This is the universe domain
+	// configured for the client, which will be compared to the universe domain
+	// that is separately configured for the credentials.
+	UniverseDomain string
+
+	// InternalOptions are NOT meant to be set directly by consumers of this
+	// package, they should only be set by generated client code.
+	InternalOptions *InternalOptions
+}
+
+func (o *Options) validate() error {
+	if o == nil {
+		return errors.New("httptransport: opts required to be non-nil")
+	}
+	if o.InternalOptions != nil && o.InternalOptions.SkipValidation {
+		return nil
+	}
+	hasCreds := o.APIKey != "" ||
+		o.Credentials != nil ||
+		(o.DetectOpts != nil && len(o.DetectOpts.CredentialsJSON) > 0) ||
+		(o.DetectOpts != nil && o.DetectOpts.CredentialsFile != "")
+	if o.DisableAuthentication && hasCreds {
+		return errors.New("httptransport: DisableAuthentication is incompatible with options that set or detect credentials")
+	}
+	return nil
+}
+
+// client returns the client a user set for the detect options or nil if one was
+// not set.
+func (o *Options) client() *http.Client {
+	if o.DetectOpts != nil && o.DetectOpts.Client != nil {
+		return o.DetectOpts.Client
+	}
+	return nil
+}
+
+func (o *Options) resolveDetectOptions() *detect.DetectOptions {
+	io := o.InternalOptions
+	// soft-clone these so we are not updating a ref the user holds and may reuse
+	do := transport.CloneDetectOptions(o.DetectOpts)
+
+	// If scoped JWTs are enabled or the user provided an aud, allow self-signed JWT.
+	if (io != nil && io.EnableJWTWithScope) || do.Audience != "" {
+		do.UseSelfSignedJWT = true
+	}
+	// Only default scopes if the user did not also set an audience.
+	if len(do.Scopes) == 0 && do.Audience == "" && io != nil && len(io.DefaultScopes) > 0 {
+		do.Scopes = make([]string, len(io.DefaultScopes))
+		copy(do.Scopes, io.DefaultScopes)
+	}
+	if len(do.Scopes) == 0 && do.Audience == "" && io != nil {
+		do.Audience = o.InternalOptions.DefaultAudience
+	}
+	if o.ClientCertProvider != nil {
+		tlsConfig := &tls.Config{
+			GetClientCertificate: o.ClientCertProvider,
+		}
+		do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig)
+		do.TokenURL = detect.GoogleMTLSTokenURL
+	}
+	return do
+}
+
+// InternalOptions are only meant to be set by generated client code. These are
+// not meant to be set directly by consumers of this package. Configuration in
+// this type is considered EXPERIMENTAL and may be removed at any time in the
+// future without warning.
+type InternalOptions struct {
+	// EnableJWTWithScope specifies if scope can be used with self-signed JWT.
+	EnableJWTWithScope bool
+	// DefaultAudience specifies a default audience to be used as the audience
+	// field ("aud") for the JWT token authentication.
+	DefaultAudience string
+	// DefaultEndpointTemplate combined with UniverseDomain specifies the
+	// default endpoint.
+	DefaultEndpointTemplate string
+	// DefaultMTLSEndpoint specifies the default mTLS endpoint.
+	DefaultMTLSEndpoint string
+	// DefaultScopes specifies the default OAuth2 scopes to be used for a
+	// service.
+	DefaultScopes []string
+	// SkipValidation bypasses validation on Options. It should only be used
+	// internally for clients that need more control over their transport.
+	SkipValidation bool
+}
+
+// AddAuthorizationMiddleware adds a middleware to the provided client's
+// transport that sets the Authorization header with the value produced by the
+// provided [cloud.google.com/go/auth.Credentials]. An error is returned only
+// if client or creds is nil.
+func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) error {
+	if client == nil || creds == nil {
+		return fmt.Errorf("httptransport: client and creds must not be nil")
+	}
+	base := client.Transport
+	if base == nil {
+		if dt, ok := http.DefaultTransport.(*http.Transport); ok {
+			base = dt.Clone()
+		} else {
+			// Directly reuse the DefaultTransport if the application has
+			// replaced it with an implementation of RoundTripper other than
+			// http.Transport.
+			base = http.DefaultTransport
+		}
+	}
+	client.Transport = &authTransport{
+		creds: creds,
+		base:  base,
+		// TODO(quartzmo): Somehow set clientUniverseDomain from impersonate calls.
+	}
+	return nil
+}
+
+// NewClient returns a [net/http.Client] that can be used to communicate with a
+// Google cloud service, configured with the provided [Options]. It
+// automatically appends Authorization headers to all outgoing requests.
+func NewClient(opts *Options) (*http.Client, error) {
+	if err := opts.validate(); err != nil {
+		return nil, err
+	}
+
+	tOpts := &transport.Options{
+		Endpoint:           opts.Endpoint,
+		ClientCertProvider: opts.ClientCertProvider,
+		Client:             opts.client(),
+		UniverseDomain:     opts.UniverseDomain,
+	}
+	if io := opts.InternalOptions; io != nil {
+		tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate
+		tOpts.DefaultMTLSEndpoint = io.DefaultMTLSEndpoint
+	}
+	clientCertProvider, dialTLSContext, err := transport.GetHTTPTransportConfig(tOpts)
+	if err != nil {
+		return nil, err
+	}
+	baseRoundTripper := opts.BaseRoundTripper
+	if baseRoundTripper == nil {
+		baseRoundTripper = defaultBaseTransport(clientCertProvider, dialTLSContext)
+	}
+	// Ensure the token exchange transport uses the same ClientCertProvider as the API transport.
+	opts.ClientCertProvider = clientCertProvider
+	trans, err := newTransport(baseRoundTripper, opts)
+	if err != nil {
+		return nil, err
+	}
+	return &http.Client{
+		Transport: trans,
+	}, nil
+}
+
+// SetAuthHeader uses the provided token to set the Authorization header on a
+// request. If the token.Type is empty, the type is assumed to be Bearer.
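+// For example, a token with Type "Bearer" and Value "ya29.abc" results in the
+// header "Authorization: Bearer ya29.abc".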
+func SetAuthHeader(token *auth.Token, req *http.Request) {
+	typ := token.Type
+	if typ == "" {
+		typ = internal.TokenTypeBearer
+	}
+	req.Header.Set("Authorization", typ+" "+token.Value)
+}
diff --git a/vendor/cloud.google.com/go/auth/httptransport/trace.go b/vendor/cloud.google.com/go/auth/httptransport/trace.go
new file mode 100644
index 00000000..467c477c
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/httptransport/trace.go
@@ -0,0 +1,93 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httptransport
+
+import (
+	"encoding/binary"
+	"encoding/hex"
+	"fmt"
+	"net/http"
+	"strconv"
+	"strings"
+
+	"go.opencensus.io/trace"
+	"go.opencensus.io/trace/propagation"
+)
+
+const (
+	httpHeaderMaxSize = 200
+	cloudTraceHeader  = `X-Cloud-Trace-Context`
+)
+
+// asserts that httpFormat fulfills this foreign interface
+var _ propagation.HTTPFormat = (*httpFormat)(nil)
+
+// httpFormat implements propagation.HTTPFormat to propagate
+// traces in HTTP headers for Google Cloud Platform and Cloud Trace.
+type httpFormat struct{}
+
+// SpanContextFromRequest extracts a Cloud Trace span context from incoming requests.
+func (f *httpFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
+	h := req.Header.Get(cloudTraceHeader)
+	// See https://cloud.google.com/trace/docs/faq for the header format.
+	// Return if the header is empty or missing, or if the header is unreasonably
+	// large, to avoid making unnecessary copies of a large string.
+	if h == "" || len(h) > httpHeaderMaxSize {
+		return trace.SpanContext{}, false
+	}
+
+	// Parse the trace id field.
+	slash := strings.Index(h, `/`)
+	if slash == -1 {
+		return trace.SpanContext{}, false
+	}
+	tid, h := h[:slash], h[slash+1:]
+
+	buf, err := hex.DecodeString(tid)
+	if err != nil {
+		return trace.SpanContext{}, false
+	}
+	copy(sc.TraceID[:], buf)
+
+	// Parse the span id field.
+	spanstr := h
+	semicolon := strings.Index(h, `;`)
+	if semicolon != -1 {
+		spanstr, h = h[:semicolon], h[semicolon+1:]
+	}
+	sid, err := strconv.ParseUint(spanstr, 10, 64)
+	if err != nil {
+		return trace.SpanContext{}, false
+	}
+	binary.BigEndian.PutUint64(sc.SpanID[:], sid)
+
+	// Parse the options field; it is optional.
+	if !strings.HasPrefix(h, "o=") {
+		return sc, true
+	}
+	o, err := strconv.ParseUint(h[2:], 10, 32)
+	if err != nil {
+		return trace.SpanContext{}, false
+	}
+	sc.TraceOptions = trace.TraceOptions(o)
+	return sc, true
+}
+
+// SpanContextToRequest modifies the given request to include a Cloud Trace header.
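+// The header value has the form "TRACE_ID/SPAN_ID;o=OPTIONS", for example
+// "105445aa7843bc8bf206b12000100000/1;o=1".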
+func (f *httpFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
+	sid := binary.BigEndian.Uint64(sc.SpanID[:])
+	header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions))
+	req.Header.Set(cloudTraceHeader, header)
+}
diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go
new file mode 100644
index 00000000..94caeb00
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go
@@ -0,0 +1,211 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httptransport
+
+import (
+	"context"
+	"crypto/tls"
+	"net"
+	"net/http"
+	"time"
+
+	"cloud.google.com/go/auth"
+	"cloud.google.com/go/auth/credentials"
+	"cloud.google.com/go/auth/internal"
+	"cloud.google.com/go/auth/internal/transport"
+	"cloud.google.com/go/auth/internal/transport/cert"
+	"go.opencensus.io/plugin/ochttp"
+	"golang.org/x/net/http2"
+)
+
+const (
+	quotaProjectHeaderKey = "X-Goog-User-Project"
+)
+
+func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, error) {
+	var headers = opts.Headers
+	ht := &headerTransport{
+		base:    base,
+		headers: headers,
+	}
+	var trans http.RoundTripper = ht
+	trans = addOCTransport(trans, opts)
+	switch {
+	case opts.DisableAuthentication:
+		// Do nothing.
+	case opts.APIKey != "":
+		qp := internal.GetQuotaProject(nil, opts.Headers.Get(quotaProjectHeaderKey))
+		if qp != "" {
+			if headers == nil {
+				headers = make(map[string][]string, 1)
+			}
+			headers.Set(quotaProjectHeaderKey, qp)
+		}
+		trans = &apiKeyTransport{
+			Transport: trans,
+			Key:       opts.APIKey,
+		}
+	default:
+		var creds *auth.Credentials
+		if opts.Credentials != nil {
+			creds = opts.Credentials
+		} else {
+			var err error
+			creds, err = credentials.DetectDefault(opts.resolveDetectOptions())
+			if err != nil {
+				return nil, err
+			}
+		}
+		qp, err := creds.QuotaProjectID(context.Background())
+		if err != nil {
+			return nil, err
+		}
+		if qp != "" {
+			if headers == nil {
+				headers = make(map[string][]string, 1)
+			}
+			headers.Set(quotaProjectHeaderKey, qp)
+		}
+		creds.TokenProvider = auth.NewCachedTokenProvider(creds.TokenProvider, nil)
+		trans = &authTransport{
+			base:                 trans,
+			creds:                creds,
+			clientUniverseDomain: opts.UniverseDomain,
+		}
+	}
+	return trans, nil
+}
+
+// defaultBaseTransport returns the base HTTP transport. It uses a default
+// transport, taking most defaults from http.DefaultTransport. If a client
+// certificate source is available, TLSClientConfig is set as well.
+func defaultBaseTransport(clientCertSource cert.Provider, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper {
+	trans := http.DefaultTransport.(*http.Transport).Clone()
+	trans.MaxIdleConnsPerHost = 100
+
+	if clientCertSource != nil {
+		trans.TLSClientConfig = &tls.Config{
+			GetClientCertificate: clientCertSource,
+		}
+	}
+	if dialTLSContext != nil {
+		// If DialTLSContext is set, TLSClientConfig will be ignored
+		trans.DialTLSContext = dialTLSContext
+	}
+
+	// Configures the ReadIdleTimeout HTTP/2 option for the
+	// transport. This allows broken idle connections to be pruned more quickly,
+	// preventing the client from attempting to re-use connections that will no
+	// longer work.
+	http2Trans, err := http2.ConfigureTransports(trans)
+	if err == nil {
+		http2Trans.ReadIdleTimeout = time.Second * 31
+	}
+
+	return trans
+}
+
+type apiKeyTransport struct {
+	// Key is the API Key to set on requests.
+	Key string
+	// Transport is the underlying HTTP transport.
+	// If nil, http.DefaultTransport is used.
+	Transport http.RoundTripper
+}
+
+func (t *apiKeyTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	newReq := *req
+	args := newReq.URL.Query()
+	args.Set("key", t.Key)
+	newReq.URL.RawQuery = args.Encode()
+	return t.Transport.RoundTrip(&newReq)
+}
+
+type headerTransport struct {
+	headers http.Header
+	base    http.RoundTripper
+}
+
+func (t *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	rt := t.base
+	newReq := *req
+	newReq.Header = make(http.Header)
+	for k, vv := range req.Header {
+		newReq.Header[k] = vv
+	}
+
+	for k, v := range t.headers {
+		newReq.Header[k] = v
+	}
+
+	return rt.RoundTrip(&newReq)
+}
+
+func addOCTransport(trans http.RoundTripper, opts *Options) http.RoundTripper {
+	if opts.DisableTelemetry {
+		return trans
+	}
+	return &ochttp.Transport{
+		Base:        trans,
+		Propagation: &httpFormat{},
+	}
+}
+
+type authTransport struct {
+	creds                *auth.Credentials
+	base                 http.RoundTripper
+	clientUniverseDomain string
+}
+
+// getClientUniverseDomain returns the universe domain configured for the client.
+// The default value is "googleapis.com".
+func (t *authTransport) getClientUniverseDomain() string {
+	if t.clientUniverseDomain == "" {
+		return internal.DefaultUniverseDomain
+	}
+	return t.clientUniverseDomain
+}
+
+// RoundTrip authorizes and authenticates the request with an
+// access token from Transport's Source. Per the RoundTripper contract we must
+// not modify the initial request, so we clone it, and we must close the body
+// on any errors that happen during our token logic.
+func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	reqBodyClosed := false
+	if req.Body != nil {
+		defer func() {
+			if !reqBodyClosed {
+				req.Body.Close()
+			}
+		}()
+	}
+	credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context())
+	if err != nil {
+		return nil, err
+	}
+	if err := transport.ValidateUniverseDomain(t.getClientUniverseDomain(), credentialsUniverseDomain); err != nil {
+		return nil, err
+	}
+	token, err := t.creds.Token(req.Context())
+	if err != nil {
+		return nil, err
+	}
+	req2 := req.Clone(req.Context())
+	SetAuthHeader(token, req2)
+	reqBodyClosed = true
+	return t.base.RoundTrip(req2)
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/BUILD.bazel b/vendor/cloud.google.com/go/auth/internal/BUILD.bazel
new file mode 100644
index 00000000..5e5a3412
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/BUILD.bazel
@@ -0,0 +1,10 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "internal",
+    srcs = ["internal.go"],
+    importmap = "peridot.resf.org/vendor/cloud.google.com/go/auth/internal",
+    importpath = "cloud.google.com/go/auth/internal",
+    visibility = ["//vendor/cloud.google.com/go/auth:__subpackages__"],
+    deps = ["//vendor/cloud.google.com/go/compute/metadata"],
+)
diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/BUILD.bazel b/vendor/cloud.google.com/go/auth/internal/credsfile/BUILD.bazel
new file mode 100644
index 00000000..8a5d1987
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/credsfile/BUILD.bazel
@@ -0,0 +1,13 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "credsfile",
+    srcs = [
+        "credsfile.go",
+        "filetype.go",
+        "parse.go",
+    ],
+    importmap = "peridot.resf.org/vendor/cloud.google.com/go/auth/internal/credsfile",
+    importpath = "cloud.google.com/go/auth/internal/credsfile",
+    visibility = ["//vendor/cloud.google.com/go/auth:__subpackages__"],
+)
diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go b/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go
new file mode 100644
index 00000000..9cd4bed6
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go
@@ -0,0 +1,107 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package credsfile is meant to hide implementation details from the public
+// surface of the detect package. It should not import any other packages in
+// this module. It is located under the main internal package so other
+// sub-packages can use these parsed types as well.
+package credsfile
+
+import (
+	"os"
+	"os/user"
+	"path/filepath"
+	"runtime"
+)
+
+const (
+	// GoogleAppCredsEnvVar is the environment variable for setting the
+	// application default credentials.
+	GoogleAppCredsEnvVar = "GOOGLE_APPLICATION_CREDENTIALS"
+	userCredsFilename    = "application_default_credentials.json"
+)
+
+// CredentialType represents the different filetypes Google credentials can be.
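+// For example, a credentials JSON file whose "type" field is
+// "service_account" is classified as ServiceAccountKey.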
+type CredentialType int
+
+const (
+	// UnknownCredType is an unidentified file type.
+	UnknownCredType CredentialType = iota
+	// UserCredentialsKey represents a user creds file type.
+	UserCredentialsKey
+	// ServiceAccountKey represents a service account file type.
+	ServiceAccountKey
+	// ImpersonatedServiceAccountKey represents an impersonated service account
+	// file type.
+	ImpersonatedServiceAccountKey
+	// ExternalAccountKey represents an external account file type.
+	ExternalAccountKey
+	// GDCHServiceAccountKey represents a GDCH file type.
+	GDCHServiceAccountKey
+	// ExternalAccountAuthorizedUserKey represents an external account authorized
+	// user file type.
+	ExternalAccountAuthorizedUserKey
+)
+
+// parseCredentialType returns the associated filetype based on the parsed
+// typeString provided.
+func parseCredentialType(typeString string) CredentialType {
+	switch typeString {
+	case "service_account":
+		return ServiceAccountKey
+	case "authorized_user":
+		return UserCredentialsKey
+	case "impersonated_service_account":
+		return ImpersonatedServiceAccountKey
+	case "external_account":
+		return ExternalAccountKey
+	case "external_account_authorized_user":
+		return ExternalAccountAuthorizedUserKey
+	case "gdch_service_account":
+		return GDCHServiceAccountKey
+	default:
+		return UnknownCredType
+	}
+}
+
+// GetFileNameFromEnv returns the override if provided or detects a filename
+// from the environment.
+func GetFileNameFromEnv(override string) string {
+	if override != "" {
+		return override
+	}
+	return os.Getenv(GoogleAppCredsEnvVar)
+}
+
+// GetWellKnownFileName tries to locate the filepath for the user credential
+// file based on the environment.
+func GetWellKnownFileName() string {
+	if runtime.GOOS == "windows" {
+		return filepath.Join(os.Getenv("APPDATA"), "gcloud", userCredsFilename)
+	}
+	return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", userCredsFilename)
+}
+
+// guessUnixHomeDir defaults to checking for HOME, but not all unix systems
+// have this set, so we fall back to the current user's home directory.
+func guessUnixHomeDir() string {
+	if v := os.Getenv("HOME"); v != "" {
+		return v
+	}
+	if u, err := user.Current(); err == nil {
+		return u.HomeDir
+	}
+	return ""
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go
new file mode 100644
index 00000000..69e30779
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go
@@ -0,0 +1,149 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package credsfile
+
+import (
+	"encoding/json"
+)
+
+// Config3LO is the internals of a client creds file.
+type Config3LO struct {
+	ClientID     string   `json:"client_id"`
+	ClientSecret string   `json:"client_secret"`
+	RedirectURIs []string `json:"redirect_uris"`
+	AuthURI      string   `json:"auth_uri"`
+	TokenURI     string   `json:"token_uri"`
+}
+
+// ClientCredentialsFile representation.
+type ClientCredentialsFile struct {
+	Web            *Config3LO `json:"web"`
+	Installed      *Config3LO `json:"installed"`
+	UniverseDomain string     `json:"universe_domain"`
+}
+
+// ServiceAccountFile representation.
+type ServiceAccountFile struct {
+	Type           string `json:"type"`
+	ProjectID      string `json:"project_id"`
+	PrivateKeyID   string `json:"private_key_id"`
+	PrivateKey     string `json:"private_key"`
+	ClientEmail    string `json:"client_email"`
+	ClientID       string `json:"client_id"`
+	AuthURL        string `json:"auth_uri"`
+	TokenURL       string `json:"token_uri"`
+	UniverseDomain string `json:"universe_domain"`
+}
+
+// UserCredentialsFile representation.
+type UserCredentialsFile struct {
+	Type           string `json:"type"`
+	ClientID       string `json:"client_id"`
+	ClientSecret   string `json:"client_secret"`
+	QuotaProjectID string `json:"quota_project_id"`
+	RefreshToken   string `json:"refresh_token"`
+	UniverseDomain string `json:"universe_domain"`
+}
+
+// ExternalAccountFile representation.
+type ExternalAccountFile struct {
+	Type                           string                           `json:"type"`
+	ClientID                       string                           `json:"client_id"`
+	ClientSecret                   string                           `json:"client_secret"`
+	Audience                       string                           `json:"audience"`
+	SubjectTokenType               string                           `json:"subject_token_type"`
+	ServiceAccountImpersonationURL string                           `json:"service_account_impersonation_url"`
+	TokenURL                       string                           `json:"token_url"`
+	CredentialSource               *CredentialSource                `json:"credential_source,omitempty"`
+	TokenInfoURL                   string                           `json:"token_info_url"`
+	ServiceAccountImpersonation    *ServiceAccountImpersonationInfo `json:"service_account_impersonation,omitempty"`
+	QuotaProjectID                 string                           `json:"quota_project_id"`
+	WorkforcePoolUserProject       string                           `json:"workforce_pool_user_project"`
+	UniverseDomain                 string                           `json:"universe_domain"`
+}
+
+// ExternalAccountAuthorizedUserFile representation.
+type ExternalAccountAuthorizedUserFile struct {
+	Type           string `json:"type"`
+	Audience       string `json:"audience"`
+	ClientID       string `json:"client_id"`
+	ClientSecret   string `json:"client_secret"`
+	RefreshToken   string `json:"refresh_token"`
+	TokenURL       string `json:"token_url"`
+	TokenInfoURL   string `json:"token_info_url"`
+	RevokeURL      string `json:"revoke_url"`
+	QuotaProjectID string `json:"quota_project_id"`
+	UniverseDomain string `json:"universe_domain"`
+}
+
+// CredentialSource stores the information necessary to retrieve the credentials for the STS exchange.
+//
+// One field amongst File, URL, and Executable should be filled, depending on the kind of credential in question.
+// The EnvironmentID should start with AWS if being used for an AWS credential.
+type CredentialSource struct {
+	File                        string            `json:"file"`
+	URL                         string            `json:"url"`
+	Headers                     map[string]string `json:"headers"`
+	Executable                  *ExecutableConfig `json:"executable,omitempty"`
+	EnvironmentID               string            `json:"environment_id"`
+	RegionURL                   string            `json:"region_url"`
+	RegionalCredVerificationURL string            `json:"regional_cred_verification_url"`
+	CredVerificationURL         string            `json:"cred_verification_url"`
+	IMDSv2SessionTokenURL       string            `json:"imdsv2_session_token_url"`
+	Format                      *Format           `json:"format,omitempty"`
+}
+
+// Format describes the format of a [CredentialSource].
+type Format struct {
+	// Type is either "text" or "json". When not provided, "text" type is assumed.
+	Type string `json:"type"`
+	// SubjectTokenFieldName is only required for JSON format. This would be "access_token" for Azure.
+	SubjectTokenFieldName string `json:"subject_token_field_name"`
+}
+
+// ExecutableConfig represents the command to run for an executable
+// [CredentialSource].
+type ExecutableConfig struct {
+	Command       string `json:"command"`
+	TimeoutMillis int    `json:"timeout_millis"`
+	OutputFile    string `json:"output_file"`
+}
+
+// ServiceAccountImpersonationInfo has impersonation configuration.
+type ServiceAccountImpersonationInfo struct {
+	TokenLifetimeSeconds int `json:"token_lifetime_seconds"`
+}
+
+// ImpersonatedServiceAccountFile representation.
+type ImpersonatedServiceAccountFile struct {
+	Type                           string          `json:"type"`
+	ServiceAccountImpersonationURL string          `json:"service_account_impersonation_url"`
+	Delegates                      []string        `json:"delegates"`
+	CredSource                     json.RawMessage `json:"source_credentials"`
+	UniverseDomain                 string          `json:"universe_domain"`
+}
+
+// GDCHServiceAccountFile represents the Google Distributed Cloud Hosted (GDCH) service identity file.
+type GDCHServiceAccountFile struct {
+	Type           string `json:"type"`
+	FormatVersion  string `json:"format_version"`
+	Project        string `json:"project"`
+	Name           string `json:"name"`
+	CertPath       string `json:"ca_cert_path"`
+	PrivateKeyID   string `json:"private_key_id"`
+	PrivateKey     string `json:"private_key"`
+	TokenURL       string `json:"token_uri"`
+	UniverseDomain string `json:"universe_domain"`
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go b/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go
new file mode 100644
index 00000000..a02b9f5d
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go
@@ -0,0 +1,98 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package credsfile
+
+import (
+	"encoding/json"
+)
+
+// ParseServiceAccount parses bytes into a [ServiceAccountFile].
+func ParseServiceAccount(b []byte) (*ServiceAccountFile, error) {
+	var f *ServiceAccountFile
+	if err := json.Unmarshal(b, &f); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// ParseClientCredentials parses bytes into a [ClientCredentialsFile].
+func ParseClientCredentials(b []byte) (*ClientCredentialsFile, error) {
+	var f *ClientCredentialsFile
+	if err := json.Unmarshal(b, &f); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// ParseUserCredentials parses bytes into a [UserCredentialsFile].
+func ParseUserCredentials(b []byte) (*UserCredentialsFile, error) {
+	var f *UserCredentialsFile
+	if err := json.Unmarshal(b, &f); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// ParseExternalAccount parses bytes into an [ExternalAccountFile].
+func ParseExternalAccount(b []byte) (*ExternalAccountFile, error) {
+	var f *ExternalAccountFile
+	if err := json.Unmarshal(b, &f); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// ParseExternalAccountAuthorizedUser parses bytes into an
+// [ExternalAccountAuthorizedUserFile].
+func ParseExternalAccountAuthorizedUser(b []byte) (*ExternalAccountAuthorizedUserFile, error) {
+	var f *ExternalAccountAuthorizedUserFile
+	if err := json.Unmarshal(b, &f); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// ParseImpersonatedServiceAccount parses bytes into an
+// [ImpersonatedServiceAccountFile].
+func ParseImpersonatedServiceAccount(b []byte) (*ImpersonatedServiceAccountFile, error) {
+	var f *ImpersonatedServiceAccountFile
+	if err := json.Unmarshal(b, &f); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// ParseGDCHServiceAccount parses bytes into a [GDCHServiceAccountFile].
+func ParseGDCHServiceAccount(b []byte) (*GDCHServiceAccountFile, error) {
+	var f *GDCHServiceAccountFile
+	if err := json.Unmarshal(b, &f); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+type fileTypeChecker struct {
+	Type string `json:"type"`
+}
+
+// ParseFileType determines the [CredentialType] based on bytes provided.
+func ParseFileType(b []byte) (CredentialType, error) {
+	var f fileTypeChecker
+	if err := json.Unmarshal(b, &f); err != nil {
+		return 0, err
+	}
+	return parseCredentialType(f.Type), nil
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go
new file mode 100644
index 00000000..8c328e2f
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/internal.go
@@ -0,0 +1,198 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+	"context"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"sync"
+	"time"
+
+	"cloud.google.com/go/compute/metadata"
+)
+
+const (
+	// TokenTypeBearer is the auth header prefix for bearer tokens.
+	TokenTypeBearer = "Bearer"
+
+	// QuotaProjectEnvVar is the environment variable for setting the quota
+	// project.
+	QuotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT"
+	projectEnvVar      = "GOOGLE_CLOUD_PROJECT"
+	maxBodySize        = 1 << 20
+
+	// DefaultUniverseDomain is the default value for universe domain.
+	// Universe domain is the default service domain for a given Cloud universe.
+	DefaultUniverseDomain = "googleapis.com"
+)
+
+// CloneDefaultClient returns a [http.Client] with some good defaults.
+func CloneDefaultClient() *http.Client {
+	return &http.Client{
+		Transport: http.DefaultTransport.(*http.Transport).Clone(),
+		Timeout:   30 * time.Second,
+	}
+}
+
+// ParseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from the PEM container before conversion. It only supports PEM
+// containers with no passphrase.
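+//
+// A typical (illustrative) call site feeds it the "private_key" field of a
+// parsed service account file:
+//
+//	pk, err := internal.ParseKey([]byte(f.PrivateKey))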
+func ParseKey(key []byte) (*rsa.PrivateKey, error) {
+	block, _ := pem.Decode(key)
+	if block != nil {
+		key = block.Bytes
+	}
+	parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+	if err != nil {
+		parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+		if err != nil {
+			return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8: %w", err)
+		}
+	}
+	parsed, ok := parsedKey.(*rsa.PrivateKey)
+	if !ok {
+		return nil, errors.New("private key is invalid")
+	}
+	return parsed, nil
+}
+
+// GetQuotaProject retrieves quota project with precedence being: override,
+// environment variable, creds json file.
+func GetQuotaProject(b []byte, override string) string {
+	if override != "" {
+		return override
+	}
+	if env := os.Getenv(QuotaProjectEnvVar); env != "" {
+		return env
+	}
+	if b == nil {
+		return ""
+	}
+	var v struct {
+		QuotaProject string `json:"quota_project_id"`
+	}
+	if err := json.Unmarshal(b, &v); err != nil {
+		return ""
+	}
+	return v.QuotaProject
+}
+
+// GetProjectID retrieves project with precedence being: override,
+// environment variable, creds json file.
+func GetProjectID(b []byte, override string) string {
+	if override != "" {
+		return override
+	}
+	if env := os.Getenv(projectEnvVar); env != "" {
+		return env
+	}
+	if b == nil {
+		return ""
+	}
+	var v struct {
+		ProjectID string `json:"project_id"` // standard service account key
+		Project   string `json:"project"`    // gdch key
+	}
+	if err := json.Unmarshal(b, &v); err != nil {
+		return ""
+	}
+	if v.ProjectID != "" {
+		return v.ProjectID
+	}
+	return v.Project
+}
+
+// DoRequest executes the provided req with the client. It reads and closes the
+// response body and returns it along with the response.
+func DoRequest(client *http.Client, req *http.Request) (*http.Response, []byte, error) {
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+	body, err := ReadAll(io.LimitReader(resp.Body, maxBodySize))
+	if err != nil {
+		return nil, nil, err
+	}
+	return resp, body, nil
+}
+
+// ReadAll consumes the whole reader and safely reads its content with some
+// overflow protection.
+func ReadAll(r io.Reader) ([]byte, error) {
+	return io.ReadAll(io.LimitReader(r, maxBodySize))
+}
+
+// StaticCredentialsProperty is a helper for creating static credentials
+// properties.
+func StaticCredentialsProperty(s string) StaticProperty {
+	return StaticProperty(s)
+}
+
+// StaticProperty always returns the value of the underlying string.
+type StaticProperty string
+
+// GetProperty returns the property value for the given context.
+func (p StaticProperty) GetProperty(context.Context) (string, error) {
+	return string(p), nil
+}
+
+// ComputeUniverseDomainProvider fetches the credentials universe domain from
+// the google cloud metadata service.
+type ComputeUniverseDomainProvider struct {
+	universeDomainOnce sync.Once
+	universeDomain     string
+	universeDomainErr  error
+}
+
+// GetProperty fetches the credentials universe domain from the google cloud
+// metadata service.
+func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string, error) {
+	c.universeDomainOnce.Do(func() {
+		c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx)
+	})
+	if c.universeDomainErr != nil {
+		return "", c.universeDomainErr
+	}
+	return c.universeDomain, nil
+}
+
+// httpGetMetadataUniverseDomain is a package var for unit test substitution.
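+// Tests can stub it, e.g.:
+//
+//	httpGetMetadataUniverseDomain = func(context.Context) (string, error) {
+//		return "example.com", nil
+//	}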
+var httpGetMetadataUniverseDomain = func(ctx context.Context) (string, error) {
+	client := metadata.NewClient(&http.Client{Timeout: time.Second})
+	return client.GetWithContext(ctx, "universe/universe_domain")
+}
+
+func getMetadataUniverseDomain(ctx context.Context) (string, error) {
+	universeDomain, err := httpGetMetadataUniverseDomain(ctx)
+	if err == nil {
+		return universeDomain, nil
+	}
+	if _, ok := err.(metadata.NotDefinedError); ok {
+		// http.StatusNotFound (404)
+		return DefaultUniverseDomain, nil
+	}
+	return "", err
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/jwt/BUILD.bazel b/vendor/cloud.google.com/go/auth/internal/jwt/BUILD.bazel
new file mode 100644
index 00000000..8419c5ee
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/jwt/BUILD.bazel
@@ -0,0 +1,9 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "jwt",
+    srcs = ["jwt.go"],
+    importmap = "peridot.resf.org/vendor/cloud.google.com/go/auth/internal/jwt",
+    importpath = "cloud.google.com/go/auth/internal/jwt",
+    visibility = ["//vendor/cloud.google.com/go/auth:__subpackages__"],
+)
diff --git a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go
new file mode 100644
index 00000000..dc28b3c3
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go
@@ -0,0 +1,171 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jwt
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+)
+
+const (
+	// HeaderAlgRSA256 is the RS256 [Header.Algorithm].
+	HeaderAlgRSA256 = "RS256"
+	// HeaderAlgES256 is the ES256 [Header.Algorithm].
+	HeaderAlgES256 = "ES256"
+	// HeaderType is the standard [Header.Type].
+	HeaderType = "JWT"
+)
+
+// Header represents a JWT header.
+type Header struct {
+	Algorithm string `json:"alg"`
+	Type      string `json:"typ"`
+	KeyID     string `json:"kid"`
+}
+
+func (h *Header) encode() (string, error) {
+	b, err := json.Marshal(h)
+	if err != nil {
+		return "", err
+	}
+	return base64.RawURLEncoding.EncodeToString(b), nil
+}
+
+// Claims represents the claims set of a JWT.
+type Claims struct {
+	// Iss is the issuer JWT claim.
+	Iss string `json:"iss"`
+	// Scope is the scope JWT claim.
+	Scope string `json:"scope,omitempty"`
+	// Exp is the expiry JWT claim. If unset, default is in one hour from now.
+	Exp int64 `json:"exp"`
+	// Iat is the issued-at JWT claim. If unset, default is now.
+	Iat int64 `json:"iat"`
+	// Aud is the audience JWT claim. Optional.
+	Aud string `json:"aud"`
+	// Sub is the subject JWT claim. Optional.
+	Sub string `json:"sub,omitempty"`
+	// AdditionalClaims contains any additional non-standard JWT claims. Optional.
+	AdditionalClaims map[string]interface{} `json:"-"`
+}
+
+func (c *Claims) encode() (string, error) {
+	// Compensate for skew
+	now := time.Now().Add(-10 * time.Second)
+	if c.Iat == 0 {
+		c.Iat = now.Unix()
+	}
+	if c.Exp == 0 {
+		c.Exp = now.Add(time.Hour).Unix()
+	}
+	if c.Exp < c.Iat {
+		return "", fmt.Errorf("jwt: invalid Exp = %d; must be later than Iat = %d", c.Exp, c.Iat)
+	}
+
+	b, err := json.Marshal(c)
+	if err != nil {
+		return "", err
+	}
+
+	if len(c.AdditionalClaims) == 0 {
+		return base64.RawURLEncoding.EncodeToString(b), nil
+	}
+
+	// Marshal private claim set and then append it to b.
+	prv, err := json.Marshal(c.AdditionalClaims)
+	if err != nil {
+		return "", fmt.Errorf("invalid map of additional claims %v: %w", c.AdditionalClaims, err)
+	}
+
+	// Concatenate public and private claim JSON objects.
+	if !bytes.HasSuffix(b, []byte{'}'}) {
+		return "", fmt.Errorf("invalid JSON %s", b)
+	}
+	if !bytes.HasPrefix(prv, []byte{'{'}) {
+		return "", fmt.Errorf("invalid JSON %s", prv)
+	}
+	b[len(b)-1] = ','         // Replace closing curly brace with a comma.
+	b = append(b, prv[1:]...) // Append private claims.
+	return base64.RawURLEncoding.EncodeToString(b), nil
+}
+
+// EncodeJWS encodes the data using the provided key as a JSON web signature.
+func EncodeJWS(header *Header, c *Claims, key *rsa.PrivateKey) (string, error) {
+	head, err := header.encode()
+	if err != nil {
+		return "", err
+	}
+	claims, err := c.encode()
+	if err != nil {
+		return "", err
+	}
+	ss := fmt.Sprintf("%s.%s", head, claims)
+	h := sha256.New()
+	h.Write([]byte(ss))
+	sig, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil))
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil
+}
+
+// DecodeJWS decodes a claim set from a JWS payload.
+func DecodeJWS(payload string) (*Claims, error) {
+	// decode returned id token to get expiry
+	s := strings.Split(payload, ".")
+	if len(s) < 2 {
+		return nil, errors.New("invalid token received")
+	}
+	decoded, err := base64.RawURLEncoding.DecodeString(s[1])
+	if err != nil {
+		return nil, err
+	}
+	c := &Claims{}
+	if err := json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c); err != nil {
+		return nil, err
+	}
+	if err := json.NewDecoder(bytes.NewBuffer(decoded)).Decode(&c.AdditionalClaims); err != nil {
+		return nil, err
+	}
+	return c, nil
+}
+
+// VerifyJWS tests whether the provided JWT token's signature was produced by
+// the private key associated with the provided public key.
+func VerifyJWS(token string, key *rsa.PublicKey) error {
+	parts := strings.Split(token, ".")
+	if len(parts) != 3 {
+		return errors.New("jwt: invalid token received, token must have 3 parts")
+	}
+
+	signedContent := parts[0] + "." + parts[1]
+	signatureString, err := base64.RawURLEncoding.DecodeString(parts[2])
+	if err != nil {
+		return err
+	}
+
+	h := sha256.New()
+	h.Write([]byte(signedContent))
+	return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), signatureString)
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/BUILD.bazel b/vendor/cloud.google.com/go/auth/internal/transport/BUILD.bazel
new file mode 100644
index 00000000..8ca9fd02
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/transport/BUILD.bazel
@@ -0,0 +1,22 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "transport",
+    srcs = [
+        "cba.go",
+        "s2a.go",
+        "transport.go",
+    ],
+    importmap = "peridot.resf.org/vendor/cloud.google.com/go/auth/internal/transport",
+    importpath = "cloud.google.com/go/auth/internal/transport",
+    visibility = ["//vendor/cloud.google.com/go/auth:__subpackages__"],
+    deps = [
+        "//vendor/cloud.google.com/go/auth/credentials",
+        "//vendor/cloud.google.com/go/auth/internal",
+        "//vendor/cloud.google.com/go/auth/internal/transport/cert",
+        "//vendor/cloud.google.com/go/compute/metadata",
+        "//vendor/github.com/google/s2a-go",
+        "//vendor/github.com/google/s2a-go/fallback",
+        "@org_golang_google_grpc//credentials",
+    ],
+)
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go
new file mode 100644
index 00000000..d94e0af0
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go
@@ -0,0 +1,298 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"context"
+	"crypto/tls"
+	"errors"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+
+	"cloud.google.com/go/auth/internal"
+	"cloud.google.com/go/auth/internal/transport/cert"
+	"github.com/google/s2a-go"
+	"github.com/google/s2a-go/fallback"
+	"google.golang.org/grpc/credentials"
+)
+
+const (
+	mTLSModeAlways = "always"
+	mTLSModeNever  = "never"
+	mTLSModeAuto   = "auto"
+
+	// Experimental: if true, the code will try MTLS with S2A as the default for transport security. Default value is false.
+	googleAPIUseS2AEnv     = "EXPERIMENTAL_GOOGLE_API_USE_S2A"
+	googleAPIUseCertSource = "GOOGLE_API_USE_CLIENT_CERTIFICATE"
+	googleAPIUseMTLS       = "GOOGLE_API_USE_MTLS_ENDPOINT"
+	googleAPIUseMTLSOld    = "GOOGLE_API_USE_MTLS"
+
+	universeDomainPlaceholder = "UNIVERSE_DOMAIN"
+)
+
+var (
+	mdsMTLSAutoConfigSource     mtlsConfigSource
+	errUniverseNotSupportedMTLS = errors.New("mTLS is not supported in any universe other than googleapis.com")
+)
+
+// Options duplicates information from the individual transport packages in
+// order to avoid cyclic deps. It correlates 1:1 with fields on
+// httptransport.Options and grpctransport.Options.
+type Options struct {
+	Endpoint                string
+	DefaultMTLSEndpoint     string
+	DefaultEndpointTemplate string
+	ClientCertProvider      cert.Provider
+	Client                  *http.Client
+	UniverseDomain          string
+	EnableDirectPath        bool
+	EnableDirectPathXds     bool
+}
+
+// getUniverseDomain returns the default service domain for a given Cloud
+// universe.
+func (o *Options) getUniverseDomain() string {
+	if o.UniverseDomain == "" {
+		return internal.DefaultUniverseDomain
+	}
+	return o.UniverseDomain
+}
+
+// isUniverseDomainGDU returns true if the universe domain is the default Google
+// universe.
+func (o *Options) isUniverseDomainGDU() bool {
+	return o.getUniverseDomain() == internal.DefaultUniverseDomain
+}
+
+// defaultEndpoint returns the DefaultEndpointTemplate merged with the
+// universe domain if the DefaultEndpointTemplate is set, otherwise returns an
+// empty string.
+func (o *Options) defaultEndpoint() string {
+	if o.DefaultEndpointTemplate == "" {
+		return ""
+	}
+	return strings.Replace(o.DefaultEndpointTemplate, universeDomainPlaceholder, o.getUniverseDomain(), 1)
+}
+
+// mergedEndpoint merges a user-provided Endpoint of format host[:port] with the
+// default endpoint.
+func (o *Options) mergedEndpoint() (string, error) {
+	defaultEndpoint := o.defaultEndpoint()
+	u, err := url.Parse(fixScheme(defaultEndpoint))
+	if err != nil {
+		return "", err
+	}
+	return strings.Replace(defaultEndpoint, u.Host, o.Endpoint, 1), nil
+}
+
+func fixScheme(baseURL string) string {
+	if !strings.Contains(baseURL, "://") {
+		baseURL = "https://" + baseURL
+	}
+	return baseURL
+}
+
+// GetGRPCTransportCredsAndEndpoint returns an instance of
+// [google.golang.org/grpc/credentials.TransportCredentials], and the
+// corresponding endpoint to use for the GRPC client.
+func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCredentials, string, error) {
+	config, err := getTransportConfig(opts)
+	if err != nil {
+		return nil, "", err
+	}
+
+	defaultTransportCreds := credentials.NewTLS(&tls.Config{
+		GetClientCertificate: config.clientCertSource,
+	})
+	if config.s2aAddress == "" {
+		return defaultTransportCreds, config.endpoint, nil
+	}
+
+	var fallbackOpts *s2a.FallbackOptions
+	// In case of S2A failure, fall back to the endpoint that would've been used without S2A.
+	if fallbackHandshake, err := fallback.DefaultFallbackClientHandshakeFunc(config.endpoint); err == nil {
+		fallbackOpts = &s2a.FallbackOptions{
+			FallbackClientHandshakeFunc: fallbackHandshake,
+		}
+	}
+
+	s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{
+		S2AAddress:   config.s2aAddress,
+		FallbackOpts: fallbackOpts,
+	})
+	if err != nil {
+		// Use default if we cannot initialize S2A client transport credentials.
+		return defaultTransportCreds, config.endpoint, nil
+	}
+	return s2aTransportCreds, config.s2aMTLSEndpoint, nil
+}
+
+// GetHTTPTransportConfig returns a client certificate source and a function for
+// dialing MTLS with S2A.
+func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, string, string) (net.Conn, error), error) {
+	config, err := getTransportConfig(opts)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if config.s2aAddress == "" {
+		return config.clientCertSource, nil, nil
+	}
+
+	var fallbackOpts *s2a.FallbackOptions
+	// In case of S2A failure, fall back to the endpoint that would've been used without S2A.
+ if fallbackURL, err := url.Parse(config.endpoint); err == nil { + if fallbackDialer, fallbackServerAddr, err := fallback.DefaultFallbackDialerAndAddress(fallbackURL.Hostname()); err == nil { + fallbackOpts = &s2a.FallbackOptions{ + FallbackDialer: &s2a.FallbackDialer{ + Dialer: fallbackDialer, + ServerAddr: fallbackServerAddr, + }, + } + } + } + + dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{ + S2AAddress: config.s2aAddress, + FallbackOpts: fallbackOpts, + }) + return nil, dialTLSContextFunc, nil +} + +func getTransportConfig(opts *Options) (*transportConfig, error) { + clientCertSource, err := GetClientCertificateProvider(opts) + if err != nil { + return nil, err + } + endpoint, err := getEndpoint(opts, clientCertSource) + if err != nil { + return nil, err + } + defaultTransportConfig := transportConfig{ + clientCertSource: clientCertSource, + endpoint: endpoint, + } + + if !shouldUseS2A(clientCertSource, opts) { + return &defaultTransportConfig, nil + } + if !opts.isUniverseDomainGDU() { + return nil, errUniverseNotSupportedMTLS + } + + s2aMTLSEndpoint := opts.DefaultMTLSEndpoint + + s2aAddress := GetS2AAddress() + if s2aAddress == "" { + return &defaultTransportConfig, nil + } + return &transportConfig{ + clientCertSource: clientCertSource, + endpoint: endpoint, + s2aAddress: s2aAddress, + s2aMTLSEndpoint: s2aMTLSEndpoint, + }, nil +} + +// GetClientCertificateProvider returns a default client certificate source, if +// not provided by the user. +// +// A nil default source can be returned if the source does not exist. Any exceptions +// encountered while initializing the default source will be reported as client +// error (ex. corrupt metadata file). +func GetClientCertificateProvider(opts *Options) (cert.Provider, error) { + if !isClientCertificateEnabled(opts) { + return nil, nil + } else if opts.ClientCertProvider != nil { + return opts.ClientCertProvider, nil + } + return cert.DefaultProvider() + +} + +// isClientCertificateEnabled returns true by default for all GDU universe domain, unless explicitly overridden by env var +func isClientCertificateEnabled(opts *Options) bool { + if value, ok := os.LookupEnv(googleAPIUseCertSource); ok { + // error as false is OK + b, _ := strconv.ParseBool(value) + return b + } + return opts.isUniverseDomainGDU() +} + +type transportConfig struct { + // The client certificate source. + clientCertSource cert.Provider + // The corresponding endpoint to use based on client certificate source. + endpoint string + // The S2A address if it can be used, otherwise an empty string. + s2aAddress string + // The MTLS endpoint to use with S2A. + s2aMTLSEndpoint string +} + +// getEndpoint returns the endpoint for the service, taking into account the +// user-provided endpoint override "settings.Endpoint". +// +// If no endpoint override is specified, we will either return the default endpoint or +// the default mTLS endpoint if a client certificate is available. +// +// You can override the default endpoint choice (mtls vs. regular) by setting the +// GOOGLE_API_USE_MTLS_ENDPOINT environment variable. +// +// If the endpoint override is an address (host:port) rather than full base +// URL (ex. https://...), then the user-provided address will be merged into +// the default endpoint. 
For example, WithEndpoint("myhost:8000") and +// DefaultEndpointTemplate("https://UNIVERSE_DOMAIN/bar/baz") will return "https://myhost:8000/bar/baz" +func getEndpoint(opts *Options, clientCertSource cert.Provider) (string, error) { + if opts.Endpoint == "" { + mtlsMode := getMTLSMode() + if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { + if !opts.isUniverseDomainGDU() { + return "", errUniverseNotSupportedMTLS + } + return opts.DefaultMTLSEndpoint, nil + } + return opts.defaultEndpoint(), nil + } + if strings.Contains(opts.Endpoint, "://") { + // User passed in a full URL path, use it verbatim. + return opts.Endpoint, nil + } + if opts.defaultEndpoint() == "" { + // If DefaultEndpointTemplate is not configured, + // use the user provided endpoint verbatim. This allows a naked + // "host[:port]" URL to be used with GRPC Direct Path. + return opts.Endpoint, nil + } + + // Assume user-provided endpoint is host[:port], merge it with the default endpoint. + return opts.mergedEndpoint() +} + +func getMTLSMode() string { + mode := os.Getenv(googleAPIUseMTLS) + if mode == "" { + mode = os.Getenv(googleAPIUseMTLSOld) // Deprecated. + } + if mode == "" { + return mTLSModeAuto + } + return strings.ToLower(mode) +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/BUILD.bazel b/vendor/cloud.google.com/go/auth/internal/transport/cert/BUILD.bazel new file mode 100644 index 00000000..05cad72a --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/BUILD.bazel @@ -0,0 +1,18 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "cert", + srcs = [ + "default_cert.go", + "enterprise_cert.go", + "secureconnect_cert.go", + "workload_cert.go", + ], + importmap = "peridot.resf.org/vendor/cloud.google.com/go/auth/internal/transport/cert", + importpath = "cloud.google.com/go/auth/internal/transport/cert", + visibility = ["//vendor/cloud.google.com/go/auth:__subpackages__"], + deps = [ + "//vendor/github.com/googleapis/enterprise-certificate-proxy/client", + "//vendor/github.com/googleapis/enterprise-certificate-proxy/client/util", + ], +) diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go new file mode 100644 index 00000000..5cedc50f --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go @@ -0,0 +1,65 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cert + +import ( + "crypto/tls" + "errors" + "sync" +) + +// defaultCertData holds all the variables pertaining to +// the default certificate provider created by [DefaultProvider]. +// +// A singleton model is used to allow the provider to be reused +// by the transport layer. As mentioned in [DefaultProvider], (nil, nil) +// may be returned to indicate that a default provider could not be found, which +// will skip extra tls config in the transport layer.
+type defaultCertData struct { + once sync.Once + provider Provider + err error +} + +var ( + defaultCert defaultCertData +) + +// Provider is a function that can be passed into crypto/tls.Config.GetClientCertificate. +type Provider func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + +// errSourceUnavailable is a sentinel error to indicate certificate source is unavailable. +var errSourceUnavailable = errors.New("certificate source is unavailable") + +// DefaultProvider returns a certificate source, trying the workload identity +// source first, then the EnterpriseCertificateProxySource, and finally the +// legacy SecureConnectSource. +// +// If no source is available (due to missing configurations), a nil Provider and a nil error are +// returned to indicate that a default certificate source is unavailable. +func DefaultProvider() (Provider, error) { + defaultCert.once.Do(func() { + defaultCert.provider, defaultCert.err = NewWorkloadX509CertProvider("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.provider, defaultCert.err = NewEnterpriseCertificateProxyProvider("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.provider, defaultCert.err = NewSecureConnectProvider("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.provider, defaultCert.err = nil, nil + } + } + } + }) + return defaultCert.provider, defaultCert.err +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go new file mode 100644 index 00000000..36651591 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go @@ -0,0 +1,56 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cert + +import ( + "crypto/tls" + "errors" + + "github.com/googleapis/enterprise-certificate-proxy/client" +) + +type ecpSource struct { + key *client.Key +} + +// NewEnterpriseCertificateProxyProvider creates a certificate source +// using the Enterprise Certificate Proxy client, which delegates +// certificate related operations to an OS-specific "signer binary" +// that communicates with the native keystore (ex. keychain on MacOS). +// +// The configFilePath points to a config file containing relevant parameters +// such as the certificate issuer and the location of the signer binary. +// If configFilePath is empty, the client will attempt to load the config from +// a well-known gcloud location.
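DefaultProvider above memoizes a chain of constructors behind a `sync.Once`, distinguishing "this source is simply not configured" (the sentinel `errSourceUnavailable`, swallowed so the next source gets a turn) from real initialization failures (surfaced to the caller). The same pattern reduced to a toy, with `newPrimary` and `newFallback` as hypothetical stand-ins for the real constructors:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// Sentinel meaning "not configured" rather than "broken".
var errUnavailable = errors.New("source unavailable")

type provider func() string

var (
	once sync.Once
	p    provider
	pErr error
)

// Hypothetical sources: the first is unconfigured, the second works.
func newPrimary() (provider, error)  { return nil, errUnavailable }
func newFallback() (provider, error) { return func() string { return "fallback" }, nil }

func defaultProvider() (provider, error) {
	once.Do(func() {
		p, pErr = newPrimary()
		if errors.Is(pErr, errUnavailable) {
			p, pErr = newFallback()
			if errors.Is(pErr, errUnavailable) {
				// Nothing configured at all: (nil, nil) tells the caller
				// to skip the extra TLS setup entirely.
				p, pErr = nil, nil
			}
		}
	})
	return p, pErr
}

func main() {
	prov, err := defaultProvider()
	fmt.Println(prov(), err) // "fallback" <nil>
}
```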
+func NewEnterpriseCertificateProxyProvider(configFilePath string) (Provider, error) { + key, err := client.Cred(configFilePath) + if err != nil { + if errors.Is(err, client.ErrCredUnavailable) { + return nil, errSourceUnavailable + } + return nil, err + } + + return (&ecpSource{ + key: key, + }).getClientCertificate, nil +} + +func (s *ecpSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + var cert tls.Certificate + cert.PrivateKey = s.key + cert.Certificate = s.key.CertificateChain() + return &cert, nil +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go new file mode 100644 index 00000000..3227aba2 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go @@ -0,0 +1,124 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cert + +import ( + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "os/user" + "path/filepath" + "sync" + "time" +) + +const ( + metadataPath = ".secureConnect" + metadataFile = "context_aware_metadata.json" +) + +type secureConnectSource struct { + metadata secureConnectMetadata + + // Cache the cert to avoid executing helper command repeatedly. + cachedCertMutex sync.Mutex + cachedCert *tls.Certificate +} + +type secureConnectMetadata struct { + Cmd []string `json:"cert_provider_command"` +} + +// NewSecureConnectProvider creates a certificate source using +// the Secure Connect Helper and its associated metadata file. +// +// The configFilePath points to the location of the context aware metadata file. +// If configFilePath is empty, use the default context aware metadata location. +func NewSecureConnectProvider(configFilePath string) (Provider, error) { + if configFilePath == "" { + user, err := user.Current() + if err != nil { + // Error locating the default config means Secure Connect is not supported. + return nil, errSourceUnavailable + } + configFilePath = filepath.Join(user.HomeDir, metadataPath, metadataFile) + } + + file, err := os.ReadFile(configFilePath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + // Config file missing means Secure Connect is not supported. 
+ return nil, errSourceUnavailable + } + return nil, err + } + + var metadata secureConnectMetadata + if err := json.Unmarshal(file, &metadata); err != nil { + return nil, fmt.Errorf("cert: could not parse JSON in %q: %w", configFilePath, err) + } + if err := validateMetadata(metadata); err != nil { + return nil, fmt.Errorf("cert: invalid config in %q: %w", configFilePath, err) + } + return (&secureConnectSource{ + metadata: metadata, + }).getClientCertificate, nil +} + +func validateMetadata(metadata secureConnectMetadata) error { + if len(metadata.Cmd) == 0 { + return errors.New("empty cert_provider_command") + } + return nil +} + +func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + s.cachedCertMutex.Lock() + defer s.cachedCertMutex.Unlock() + if s.cachedCert != nil && !isCertificateExpired(s.cachedCert) { + return s.cachedCert, nil + } + // Expand OS environment variables in the cert provider command such as "$HOME". + for i := 0; i < len(s.metadata.Cmd); i++ { + s.metadata.Cmd[i] = os.ExpandEnv(s.metadata.Cmd[i]) + } + command := s.metadata.Cmd + data, err := exec.Command(command[0], command[1:]...).Output() + if err != nil { + return nil, err + } + cert, err := tls.X509KeyPair(data, data) + if err != nil { + return nil, err + } + s.cachedCert = &cert + return &cert, nil +} + +// isCertificateExpired returns true if the given cert is expired or invalid. +func isCertificateExpired(cert *tls.Certificate) bool { + if len(cert.Certificate) == 0 { + return true + } + parsed, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return true + } + return time.Now().After(parsed.NotAfter) +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go new file mode 100644 index 00000000..e8675bf8 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go @@ -0,0 +1,117 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cert + +import ( + "crypto/tls" + "encoding/json" + "errors" + "io" + "os" + + "github.com/googleapis/enterprise-certificate-proxy/client/util" +) + +type certConfigs struct { + Workload *workloadSource `json:"workload"` +} + +type workloadSource struct { + CertPath string `json:"cert_path"` + KeyPath string `json:"key_path"` +} + +type certificateConfig struct { + CertConfigs certConfigs `json:"cert_configs"` +} + +// NewWorkloadX509CertProvider creates a certificate source +// that reads a certificate and private key file from the local file system. +// This is intended to be used for workload identity federation. +// +// The configFilePath points to a config file containing relevant parameters +// such as the certificate and key file paths. +// If configFilePath is empty, the client will attempt to load the config from +// a well-known gcloud location. 
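The JSON schema the workload source below expects is small: a `cert_configs.workload` object with two paths. A hedged sketch of how such a file decodes through the same struct shapes as the vendored code (the file contents and paths here are hypothetical, not the authoritative gcloud-managed format):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the vendored structs: cert_configs.workload.{cert_path,key_path}.
type workloadSource struct {
	CertPath string `json:"cert_path"`
	KeyPath  string `json:"key_path"`
}

type certificateConfig struct {
	CertConfigs struct {
		Workload *workloadSource `json:"workload"`
	} `json:"cert_configs"`
}

func main() {
	// Hypothetical file contents; real paths come from gcloud tooling.
	raw := []byte(`{
	  "cert_configs": {
	    "workload": {
	      "cert_path": "/tmp/workload-cert.pem",
	      "key_path": "/tmp/workload-key.pem"
	    }
	  }
	}`)

	var cfg certificateConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.CertConfigs.Workload.CertPath, cfg.CertConfigs.Workload.KeyPath)
}
```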
+func NewWorkloadX509CertProvider(configFilePath string) (Provider, error) { + if configFilePath == "" { + envFilePath := util.GetConfigFilePathFromEnv() + if envFilePath != "" { + configFilePath = envFilePath + } else { + configFilePath = util.GetDefaultConfigFilePath() + } + } + + certFile, keyFile, err := getCertAndKeyFiles(configFilePath) + if err != nil { + return nil, err + } + + source := &workloadSource{ + CertPath: certFile, + KeyPath: keyFile, + } + return source.getClientCertificate, nil +} + +// getClientCertificate attempts to load the certificate and key from the files specified in the +// certificate config. +func (s *workloadSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + cert, err := tls.LoadX509KeyPair(s.CertPath, s.KeyPath) + if err != nil { + return nil, err + } + return &cert, nil +} + +// getCertAndKeyFiles attempts to read the provided config file and return the certificate and private +// key file paths. +func getCertAndKeyFiles(configFilePath string) (string, string, error) { + jsonFile, err := os.Open(configFilePath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return "", "", errSourceUnavailable + } + return "", "", err + } + + byteValue, err := io.ReadAll(jsonFile) + if err != nil { + return "", "", err + } + + var config certificateConfig + if err := json.Unmarshal(byteValue, &config); err != nil { + return "", "", err + } + + if config.CertConfigs.Workload == nil { + return "", "", errSourceUnavailable + } + + certFile := config.CertConfigs.Workload.CertPath + keyFile := config.CertConfigs.Workload.KeyPath + + if certFile == "" { + return "", "", errors.New("certificate configuration is missing the certificate file location") + } + + if keyFile == "" { + return "", "", errors.New("certificate configuration is missing the key file location") + } + + return certFile, keyFile, nil +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go new file mode 100644 index 00000000..2ed532de --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go @@ -0,0 +1,180 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "encoding/json" + "log" + "os" + "strconv" + "sync" + "time" + + "cloud.google.com/go/auth/internal/transport/cert" + "cloud.google.com/go/compute/metadata" +) + +const ( + configEndpointSuffix = "instance/platform-security/auto-mtls-configuration" +) + +var ( + // The period an MTLS config can be reused before needing refresh. + configExpiry = time.Hour + + // mdsMTLSAutoConfigSource is an instance of reuseMTLSConfigSource, with metadataMTLSAutoConfig as its config source. + mtlsOnce sync.Once +) + +// GetS2AAddress returns the S2A address to be reached via plaintext connection. +// Returns empty string if not set or invalid. 
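Everything in the cert package above ultimately produces a function with the `crypto/tls` `GetClientCertificate` signature, so plugging any of these providers into an mTLS-capable HTTP client is one field assignment. A sketch with a hypothetical provider that loads a fixed key pair (in the real flow, `cert.DefaultProvider()` would supply this function):

```go
package main

import (
	"crypto/tls"
	"net/http"
)

// provider has the cert.Provider shape: it is only consulted when the
// server actually requests a client certificate during the handshake.
func provider(_ *tls.CertificateRequestInfo) (*tls.Certificate, error) {
	// Hypothetical paths for illustration only.
	cert, err := tls.LoadX509KeyPair("/tmp/client-cert.pem", "/tmp/client-key.pem")
	if err != nil {
		return nil, err
	}
	return &cert, nil
}

func main() {
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{GetClientCertificate: provider},
		},
	}
	_ = client // client.Get("https://...") would now present the cert on request
}
```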
+func GetS2AAddress() string { + c, err := getMetadataMTLSAutoConfig().Config() + if err != nil { + return "" + } + if !c.Valid() { + return "" + } + return c.S2A.PlaintextAddress +} + +type mtlsConfigSource interface { + Config() (*mtlsConfig, error) +} + +// mtlsConfig contains the configuration for establishing MTLS connections with Google APIs. +type mtlsConfig struct { + S2A *s2aAddresses `json:"s2a"` + Expiry time.Time +} + +func (c *mtlsConfig) Valid() bool { + return c != nil && c.S2A != nil && !c.expired() +} +func (c *mtlsConfig) expired() bool { + return c.Expiry.Before(time.Now()) +} + +// s2aAddresses contains the plaintext and/or MTLS S2A addresses. +type s2aAddresses struct { + // PlaintextAddress is the plaintext address to reach S2A + PlaintextAddress string `json:"plaintext_address"` + // MTLSAddress is the MTLS address to reach S2A + MTLSAddress string `json:"mtls_address"` +} + +// getMetadataMTLSAutoConfig returns mdsMTLSAutoConfigSource, which is backed by config from MDS with auto-refresh. +func getMetadataMTLSAutoConfig() mtlsConfigSource { + mtlsOnce.Do(func() { + mdsMTLSAutoConfigSource = &reuseMTLSConfigSource{ + src: &metadataMTLSAutoConfig{}, + } + }) + return mdsMTLSAutoConfigSource +} + +// reuseMTLSConfigSource caches a valid version of mtlsConfig, and uses `src` to refresh upon config expiry. +// It implements the mtlsConfigSource interface, so calling Config() on it returns an mtlsConfig. +type reuseMTLSConfigSource struct { + src mtlsConfigSource // src.Config() is called when config is expired + mu sync.Mutex // mutex guards config + config *mtlsConfig // cached config +} + +func (cs *reuseMTLSConfigSource) Config() (*mtlsConfig, error) { + cs.mu.Lock() + defer cs.mu.Unlock() + + if cs.config.Valid() { + return cs.config, nil + } + c, err := cs.src.Config() + if err != nil { + return nil, err + } + cs.config = c + return c, nil +} + +// metadataMTLSAutoConfig is an implementation of the interface mtlsConfigSource +// It has the logic to query MDS and return an mtlsConfig +type metadataMTLSAutoConfig struct{} + +var httpGetMetadataMTLSConfig = func() (string, error) { + return metadata.Get(configEndpointSuffix) +} + +func (cs *metadataMTLSAutoConfig) Config() (*mtlsConfig, error) { + resp, err := httpGetMetadataMTLSConfig() + if err != nil { + log.Printf("querying MTLS config from MDS endpoint failed: %v", err) + return defaultMTLSConfig(), nil + } + var config mtlsConfig + err = json.Unmarshal([]byte(resp), &config) + if err != nil { + log.Printf("unmarshalling MTLS config from MDS endpoint failed: %v", err) + return defaultMTLSConfig(), nil + } + + if config.S2A == nil { + log.Printf("returned MTLS config from MDS endpoint is invalid: %v", config) + return defaultMTLSConfig(), nil + } + + // set new expiry + config.Expiry = time.Now().Add(configExpiry) + return &config, nil +} + +func defaultMTLSConfig() *mtlsConfig { + return &mtlsConfig{ + S2A: &s2aAddresses{ + PlaintextAddress: "", + MTLSAddress: "", + }, + Expiry: time.Now().Add(configExpiry), + } +} + +func shouldUseS2A(clientCertSource cert.Provider, opts *Options) bool { + // If client cert is found, use that over S2A. + if clientCertSource != nil { + return false + } + // If EXPERIMENTAL_GOOGLE_API_USE_S2A is not set to true, skip S2A. + if !isGoogleS2AEnabled() { + return false + } + // If DefaultMTLSEndpoint is not set or has endpoint override, skip S2A. + if opts.DefaultMTLSEndpoint == "" || opts.Endpoint != "" { + return false + } + // If custom HTTP client is provided, skip S2A. 
+ if opts.Client != nil { + return false + } + // If directPath is enabled, skip S2A. + return !opts.EnableDirectPath && !opts.EnableDirectPathXds +} + +func isGoogleS2AEnabled() bool { + b, err := strconv.ParseBool(os.Getenv(googleAPIUseS2AEnv)) + if err != nil { + return false + } + return b +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/vendor/cloud.google.com/go/auth/internal/transport/transport.go new file mode 100644 index 00000000..718a6b17 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/transport.go @@ -0,0 +1,103 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transport provides internal helpers for the two transport packages +// (grpctransport and httptransport). +package transport + +import ( + "crypto/tls" + "fmt" + "net" + "net/http" + "time" + + "cloud.google.com/go/auth/credentials" +) + +// CloneDetectOptions clones a user-set detect option into some new memory that +// we can internally manipulate before sending it on to the detect package. +func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOptions { + if oldDo == nil { + // It is valid for users not to set this, but we will need to default + // some options for them in this case so return some initialized memory + // to work with. + return &credentials.DetectOptions{} + } + newDo := &credentials.DetectOptions{ + // Simple types + Audience: oldDo.Audience, + Subject: oldDo.Subject, + EarlyTokenRefresh: oldDo.EarlyTokenRefresh, + TokenURL: oldDo.TokenURL, + STSAudience: oldDo.STSAudience, + CredentialsFile: oldDo.CredentialsFile, + UseSelfSignedJWT: oldDo.UseSelfSignedJWT, + UniverseDomain: oldDo.UniverseDomain, + + // These fields are pointer types that we just want to use exactly + // as the user set them; copy the ref + Client: oldDo.Client, + AuthHandlerOptions: oldDo.AuthHandlerOptions, + } + + // Smartly size this memory and copy below. + if len(oldDo.CredentialsJSON) > 0 { + newDo.CredentialsJSON = make([]byte, len(oldDo.CredentialsJSON)) + copy(newDo.CredentialsJSON, oldDo.CredentialsJSON) + } + if len(oldDo.Scopes) > 0 { + newDo.Scopes = make([]string, len(oldDo.Scopes)) + copy(newDo.Scopes, oldDo.Scopes) + } + + return newDo +} + +// ValidateUniverseDomain verifies that the universe domain configured for the +// client matches the universe domain configured for the credentials. +func ValidateUniverseDomain(clientUniverseDomain, credentialsUniverseDomain string) error { + if clientUniverseDomain != credentialsUniverseDomain { + return fmt.Errorf( + "the configured universe domain (%q) does not match the universe "+ + "domain found in the credentials (%q). If you haven't configured "+ + "the universe domain explicitly, \"googleapis.com\" is the default", + clientUniverseDomain, + credentialsUniverseDomain) + } + return nil +} + +// DefaultHTTPClientWithTLS constructs an HTTPClient using the provided tlsConfig, to support mTLS.
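CloneDetectOptions above is careful to `copy` the byte and string slices while sharing pointer fields by reference; assigning a struct in Go copies slice headers, not backing arrays, so without the explicit copy a later mutation through the clone would leak into the caller's options. A small demonstration of the difference:

```go
package main

import "fmt"

type opts struct {
	Scopes []string
}

func main() {
	orig := opts{Scopes: []string{"scope-a", "scope-b"}}

	shallow := orig // slice header copied; backing array shared
	deep := opts{Scopes: make([]string, len(orig.Scopes))}
	copy(deep.Scopes, orig.Scopes)

	shallow.Scopes[0] = "mutated"
	fmt.Println(orig.Scopes[0]) // "mutated" — the shallow copy aliased the array
	deep.Scopes[1] = "isolated"
	fmt.Println(orig.Scopes[1]) // "scope-b" — the deep copy did not
}
```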
+func DefaultHTTPClientWithTLS(tlsConfig *tls.Config) *http.Client { + trans := baseTransport() + trans.TLSClientConfig = tlsConfig + return &http.Client{Transport: trans} +} + +func baseTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + MaxIdleConnsPerHost: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/BUILD.bazel b/vendor/cloud.google.com/go/auth/oauth2adapt/BUILD.bazel new file mode 100644 index 00000000..d472150a --- /dev/null +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/BUILD.bazel @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "oauth2adapt", + srcs = ["oauth2adapt.go"], + importmap = "peridot.resf.org/vendor/cloud.google.com/go/auth/oauth2adapt", + importpath = "cloud.google.com/go/auth/oauth2adapt", + visibility = ["//visibility:public"], + deps = [ + "//vendor/cloud.google.com/go/auth", + "@org_golang_x_oauth2//:oauth2", + "@org_golang_x_oauth2//google", + ], +) diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md new file mode 100644 index 00000000..f75c8e20 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md @@ -0,0 +1,47 @@ +# Changelog + +## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.2...auth/oauth2adapt/v0.2.3) (2024-07-10) + + +### Bug Fixes + +* **auth/oauth2adapt:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) + +## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.1...auth/oauth2adapt/v0.2.2) (2024-04-23) + + +### Bug Fixes + +* **auth/oauth2adapt:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4)) + +## [0.2.1](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.0...auth/oauth2adapt/v0.2.1) (2024-04-18) + + +### Bug Fixes + +* **auth/oauth2adapt:** Adapt Token Types to be translated ([#9801](https://github.com/googleapis/google-cloud-go/issues/9801)) ([70f4115](https://github.com/googleapis/google-cloud-go/commit/70f411555ebbf2b71e6d425cc8d2030644c6b438)), refs [#9800](https://github.com/googleapis/google-cloud-go/issues/9800) + +## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.1.0...auth/oauth2adapt/v0.2.0) (2024-04-16) + + +### Features + +* **auth/oauth2adapt:** Add helpers for working with credentials types ([#9694](https://github.com/googleapis/google-cloud-go/issues/9694)) ([cf33b55](https://github.com/googleapis/google-cloud-go/commit/cf33b5514423a2ac5c2a323a1cd99aac34fd4233)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a)) + +## 0.1.0 (2023-10-19) + + +### Features + +* **auth/oauth2adapt:** Adds a new module to translate types ([#8595](https://github.com/googleapis/google-cloud-go/issues/8595)) ([6933c5a](https://github.com/googleapis/google-cloud-go/commit/6933c5a0c1fc8e58cbfff8bbca439d671b94672f)) +* **auth/oauth2adapt:** Fixup deps for release 
([#8747](https://github.com/googleapis/google-cloud-go/issues/8747)) ([749d243](https://github.com/googleapis/google-cloud-go/commit/749d243862b025a6487a4d2d339219889b4cfe70)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE similarity index 100% rename from vendor/google.golang.org/appengine/LICENSE rename to vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go new file mode 100644 index 00000000..9835ac57 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go @@ -0,0 +1,164 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package oauth2adapt helps convert types used in [cloud.google.com/go/auth] +// and [golang.org/x/oauth2]. +package oauth2adapt + +import ( + "context" + "encoding/json" + "errors" + + "cloud.google.com/go/auth" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" +) + +// TokenProviderFromTokenSource converts any [golang.org/x/oauth2.TokenSource] +// into a [cloud.google.com/go/auth.TokenProvider]. +func TokenProviderFromTokenSource(ts oauth2.TokenSource) auth.TokenProvider { + return &tokenProviderAdapter{ts: ts} +} + +type tokenProviderAdapter struct { + ts oauth2.TokenSource +} + +// Token fulfills the [cloud.google.com/go/auth.TokenProvider] interface. It +// is a light wrapper around the underlying TokenSource. +func (tp *tokenProviderAdapter) Token(context.Context) (*auth.Token, error) { + tok, err := tp.ts.Token() + if err != nil { + var err2 *oauth2.RetrieveError + if ok := errors.As(err, &err2); ok { + return nil, AuthErrorFromRetrieveError(err2) + } + return nil, err + } + return &auth.Token{ + Value: tok.AccessToken, + Type: tok.Type(), + Expiry: tok.Expiry, + }, nil +} + +// TokenSourceFromTokenProvider converts any +// [cloud.google.com/go/auth.TokenProvider] into a +// [golang.org/x/oauth2.TokenSource]. +func TokenSourceFromTokenProvider(tp auth.TokenProvider) oauth2.TokenSource { + return &tokenSourceAdapter{tp: tp} +} + +type tokenSourceAdapter struct { + tp auth.TokenProvider +} + +// Token fulfills the [golang.org/x/oauth2.TokenSource] interface. It +// is a light wrapper around the underlying TokenProvider.
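The two adapters are symmetric, so bridging the legacy and new credential worlds is a one-liner in each direction. A sketch against the API as vendored here, with `oauth2.StaticTokenSource` and a fake token standing in for a real Google credential:

```go
package main

import (
	"context"
	"fmt"

	"cloud.google.com/go/auth/oauth2adapt"
	"golang.org/x/oauth2"
)

func main() {
	// Legacy world -> new world.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "fake-token"})
	tp := oauth2adapt.TokenProviderFromTokenSource(ts)

	tok, err := tp.Token(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(tok.Value) // "fake-token"

	// And back again: new world -> legacy world.
	ts2 := oauth2adapt.TokenSourceFromTokenProvider(tp)
	tok2, _ := ts2.Token()
	fmt.Println(tok2.AccessToken)
}
```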
+func (ts *tokenSourceAdapter) Token() (*oauth2.Token, error) { + tok, err := ts.tp.Token(context.Background()) + if err != nil { + var err2 *auth.Error + if ok := errors.As(err, &err2); ok { + return nil, AddRetrieveErrorToAuthError(err2) + } + return nil, err + } + return &oauth2.Token{ + AccessToken: tok.Value, + TokenType: tok.Type, + Expiry: tok.Expiry, + }, nil +} + +// AuthCredentialsFromOauth2Credentials converts a [golang.org/x/oauth2/google.Credentials] +// to a [cloud.google.com/go/auth.Credentials]. +func AuthCredentialsFromOauth2Credentials(creds *google.Credentials) *auth.Credentials { + if creds == nil { + return nil + } + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: TokenProviderFromTokenSource(creds.TokenSource), + JSON: creds.JSON, + ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return creds.ProjectID, nil + }), + UniverseDomainProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return creds.GetUniverseDomain() + }), + }) +} + +// Oauth2CredentialsFromAuthCredentials converts a [cloud.google.com/go/auth.Credentials] +// to a [golang.org/x/oauth2/google.Credentials]. +func Oauth2CredentialsFromAuthCredentials(creds *auth.Credentials) *google.Credentials { + if creds == nil { + return nil + } + // Throw away errors as old credentials are not request aware. Also, no + // network requests are currently happening for this use case. + projectID, _ := creds.ProjectID(context.Background()) + + return &google.Credentials{ + TokenSource: TokenSourceFromTokenProvider(creds.TokenProvider), + ProjectID: projectID, + JSON: creds.JSON(), + UniverseDomainProvider: func() (string, error) { + return creds.UniverseDomain(context.Background()) + }, + } +} + +type oauth2Error struct { + ErrorCode string `json:"error"` + ErrorDescription string `json:"error_description"` + ErrorURI string `json:"error_uri"` +} + +// AddRetrieveErrorToAuthError returns the same error provided and adds a +// [golang.org/x/oauth2.RetrieveError] to the error chain by setting the `Err` field on the +// [cloud.google.com/go/auth.Error]. +func AddRetrieveErrorToAuthError(err *auth.Error) *auth.Error { + if err == nil { + return nil + } + e := &oauth2.RetrieveError{ + Response: err.Response, + Body: err.Body, + } + err.Err = e + if len(err.Body) > 0 { + var oErr oauth2Error + // ignore the error as it only fills in extra details + json.Unmarshal(err.Body, &oErr) + e.ErrorCode = oErr.ErrorCode + e.ErrorDescription = oErr.ErrorDescription + e.ErrorURI = oErr.ErrorURI + } + return err +} + +// AuthErrorFromRetrieveError returns an [cloud.google.com/go/auth.Error] that +// wraps the provided [golang.org/x/oauth2.RetrieveError]. +func AuthErrorFromRetrieveError(err *oauth2.RetrieveError) *auth.Error { + if err == nil { + return nil + } + return &auth.Error{ + Response: err.Response, + Body: err.Body, + Err: err, + } +} diff --git a/vendor/cloud.google.com/go/auth/threelegged.go b/vendor/cloud.google.com/go/auth/threelegged.go new file mode 100644 index 00000000..a8ce6cd8 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/threelegged.go @@ -0,0 +1,368 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package auth + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "mime" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "cloud.google.com/go/auth/internal" +) + +// AuthorizationHandler is a 3-legged-OAuth helper that prompts the user for +// OAuth consent at the specified auth code URL and returns an auth code and +// state upon approval. +type AuthorizationHandler func(authCodeURL string) (code string, state string, err error) + +// Options3LO are the options for doing a 3-legged OAuth2 flow. +type Options3LO struct { + // ClientID is the application's ID. + ClientID string + // ClientSecret is the application's secret. Not required if AuthHandlerOpts + // is set. + ClientSecret string + // AuthURL is the URL for authenticating. + AuthURL string + // TokenURL is the URL for retrieving a token. + TokenURL string + // AuthStyle is used to describe how to send the client info in the token request. + AuthStyle Style + // RefreshToken is the token used to refresh the credential. Not required + // if AuthHandlerOpts is set. + RefreshToken string + // RedirectURL is the URL to redirect users to. Optional. + RedirectURL string + // Scopes specifies requested permissions for the Token. Optional. + Scopes []string + + // URLParams are the set of values to apply to the token exchange. Optional. + URLParams url.Values + // Client is the client to be used to make the underlying token requests. + // Optional. + Client *http.Client + // EarlyTokenExpiry is the time before the token expires that it should be + // refreshed. If not set the default value is 3 minutes and 45 seconds. + // Optional. + EarlyTokenExpiry time.Duration + + // AuthHandlerOpts provides a set of options for doing a + // 3-legged OAuth2 flow with a custom [AuthorizationHandler]. Optional. + AuthHandlerOpts *AuthorizationHandlerOptions +} + +func (o *Options3LO) validate() error { + if o == nil { + return errors.New("auth: options must be provided") + } + if o.ClientID == "" { + return errors.New("auth: client ID must be provided") + } + if o.AuthHandlerOpts == nil && o.ClientSecret == "" { + return errors.New("auth: client secret must be provided") + } + if o.AuthURL == "" { + return errors.New("auth: auth URL must be provided") + } + if o.TokenURL == "" { + return errors.New("auth: token URL must be provided") + } + if o.AuthStyle == StyleUnknown { + return errors.New("auth: auth style must be provided") + } + if o.AuthHandlerOpts == nil && o.RefreshToken == "" { + return errors.New("auth: refresh token must be provided") + } + return nil +} + +// PKCEOptions holds parameters to support PKCE. +type PKCEOptions struct { + // Challenge is the un-padded, base64-url-encoded string of the encrypted code verifier. + Challenge string + // ChallengeMethod is the encryption method (ex. S256). + ChallengeMethod string + // Verifier is the original, non-encrypted secret. + Verifier string
+} + +type tokenJSON struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int `json:"expires_in"` + // error fields + ErrorCode string `json:"error"` + ErrorDescription string `json:"error_description"` + ErrorURI string `json:"error_uri"` +} + +func (e *tokenJSON) expiry() (t time.Time) { + if v := e.ExpiresIn; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + return +} + +func (o *Options3LO) client() *http.Client { + if o.Client != nil { + return o.Client + } + return internal.CloneDefaultClient() +} + +// authCodeURL returns a URL that points to an OAuth2 consent page. +func (o *Options3LO) authCodeURL(state string, values url.Values) string { + var buf bytes.Buffer + buf.WriteString(o.AuthURL) + v := url.Values{ + "response_type": {"code"}, + "client_id": {o.ClientID}, + } + if o.RedirectURL != "" { + v.Set("redirect_uri", o.RedirectURL) + } + if len(o.Scopes) > 0 { + v.Set("scope", strings.Join(o.Scopes, " ")) + } + if state != "" { + v.Set("state", state) + } + if o.AuthHandlerOpts != nil { + if o.AuthHandlerOpts.PKCEOpts != nil && + o.AuthHandlerOpts.PKCEOpts.Challenge != "" { + v.Set(codeChallengeKey, o.AuthHandlerOpts.PKCEOpts.Challenge) + } + if o.AuthHandlerOpts.PKCEOpts != nil && + o.AuthHandlerOpts.PKCEOpts.ChallengeMethod != "" { + v.Set(codeChallengeMethodKey, o.AuthHandlerOpts.PKCEOpts.ChallengeMethod) + } + } + // Copy caller-supplied params into the query. + for k := range values { + v.Set(k, values.Get(k)) + } + if strings.Contains(o.AuthURL, "?") { + buf.WriteByte('&') + } else { + buf.WriteByte('?') + } + buf.WriteString(v.Encode()) + return buf.String() +} + +// New3LOTokenProvider returns a [TokenProvider] based on the 3-legged OAuth2 +// configuration. The TokenProvider caches and auto-refreshes tokens by +// default. +func New3LOTokenProvider(opts *Options3LO) (TokenProvider, error) { + if err := opts.validate(); err != nil { + return nil, err + } + if opts.AuthHandlerOpts != nil { + return new3LOTokenProviderWithAuthHandler(opts), nil + } + return NewCachedTokenProvider(&tokenProvider3LO{opts: opts, refreshToken: opts.RefreshToken, client: opts.client()}, &CachedTokenProviderOptions{ + ExpireEarly: opts.EarlyTokenExpiry, + }), nil +} + +// AuthorizationHandlerOptions provides a set of options to specify for doing a +// 3-legged OAuth2 flow with a custom [AuthorizationHandler]. +type AuthorizationHandlerOptions struct { + // Handler specifies the handler used for the authorization + // part of the flow. + Handler AuthorizationHandler + // State is used to verify that the "state" is identical in the request and + // response before exchanging the auth code for an OAuth2 token. + State string + // PKCEOpts allows setting configurations for PKCE. Optional. + PKCEOpts *PKCEOptions +} + +func new3LOTokenProviderWithAuthHandler(opts *Options3LO) TokenProvider { + return NewCachedTokenProvider(&tokenProviderWithHandler{opts: opts, state: opts.AuthHandlerOpts.State}, &CachedTokenProviderOptions{ + ExpireEarly: opts.EarlyTokenExpiry, + }) +} + +// exchange handles the final exchange portion of the 3lo flow. Returns a Token, +// refreshToken, and error.
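For a refresh-token-only flow (no browser handler), the options above reduce to the client credentials, the two endpoint URLs, an auth style, and the refresh token. A hedged sketch against the API as vendored here, with Google's well-known endpoints filled in as plausible values and obviously fake credentials:

```go
package main

import (
	"context"
	"fmt"

	"cloud.google.com/go/auth"
)

func main() {
	tp, err := auth.New3LOTokenProvider(&auth.Options3LO{
		ClientID:     "my-client-id.example", // fake
		ClientSecret: "my-client-secret",     // fake
		AuthURL:      "https://accounts.google.com/o/oauth2/auth",
		TokenURL:     "https://oauth2.googleapis.com/token",
		AuthStyle:    auth.StyleInParams,
		RefreshToken: "1//fake-refresh-token",
	})
	if err != nil {
		panic(err)
	}
	// The provider comes back wrapped in a cache, so repeated calls only
	// hit the token endpoint when the cached token is near expiry.
	tok, err := tp.Token(context.Background())
	fmt.Println(tok, err) // fails here, of course: the credentials are fake
}
```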
+func (o *Options3LO) exchange(ctx context.Context, code string) (*Token, string, error) { + // Build request + v := url.Values{ + "grant_type": {"authorization_code"}, + "code": {code}, + } + if o.RedirectURL != "" { + v.Set("redirect_uri", o.RedirectURL) + } + if o.AuthHandlerOpts != nil && + o.AuthHandlerOpts.PKCEOpts != nil && + o.AuthHandlerOpts.PKCEOpts.Verifier != "" { + v.Set(codeVerifierKey, o.AuthHandlerOpts.PKCEOpts.Verifier) + } + for k := range o.URLParams { + v.Set(k, o.URLParams.Get(k)) + } + return fetchToken(ctx, o, v) +} + +// This struct is not safe for concurrent access alone, but the way it is used +// in this package by wrapping it with a cachedTokenProvider makes it so. +type tokenProvider3LO struct { + opts *Options3LO + client *http.Client + refreshToken string +} + +func (tp *tokenProvider3LO) Token(ctx context.Context) (*Token, error) { + if tp.refreshToken == "" { + return nil, errors.New("auth: token expired and refresh token is not set") + } + v := url.Values{ + "grant_type": {"refresh_token"}, + "refresh_token": {tp.refreshToken}, + } + for k := range tp.opts.URLParams { + v.Set(k, tp.opts.URLParams.Get(k)) + } + + tk, rt, err := fetchToken(ctx, tp.opts, v) + if err != nil { + return nil, err + } + if tp.refreshToken != rt && rt != "" { + tp.refreshToken = rt + } + return tk, err +} + +type tokenProviderWithHandler struct { + opts *Options3LO + state string +} + +func (tp tokenProviderWithHandler) Token(ctx context.Context) (*Token, error) { + url := tp.opts.authCodeURL(tp.state, nil) + code, state, err := tp.opts.AuthHandlerOpts.Handler(url) + if err != nil { + return nil, err + } + if state != tp.state { + return nil, errors.New("auth: state mismatch in 3-legged-OAuth flow") + } + tok, _, err := tp.opts.exchange(ctx, code) + return tok, err +} + +// fetchToken returns a Token, refresh token, and/or an error. 
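On the wire, fetchToken below issues the same POST for both the code exchange and the refresh path: a form-encoded body against TokenURL, with the client credentials either in the form (StyleInParams) or as basic auth (StyleInHeader). A self-contained sketch of the refresh leg against a stub token endpoint; `httptest` stands in for the real authorization server, and all values are fabricated:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"net/url"
	"strings"
)

func main() {
	// Stub token endpoint that echoes a minimal RFC 6749 JSON response.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		r.ParseForm()
		fmt.Println("grant_type =", r.PostForm.Get("grant_type"))
		w.Header().Set("Content-Type", "application/json")
		io.WriteString(w, `{"access_token":"at-123","token_type":"Bearer","expires_in":3600}`)
	}))
	defer srv.Close()

	v := url.Values{
		"grant_type":    {"refresh_token"},
		"refresh_token": {"rt-abc"},
		"client_id":     {"my-client"}, // StyleInParams puts credentials in the form
	}
	req, _ := http.NewRequest("POST", srv.URL, strings.NewReader(v.Encode()))
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```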
+func fetchToken(ctx context.Context, o *Options3LO, v url.Values) (*Token, string, error) { + var refreshToken string + if o.AuthStyle == StyleInParams { + if o.ClientID != "" { + v.Set("client_id", o.ClientID) + } + if o.ClientSecret != "" { + v.Set("client_secret", o.ClientSecret) + } + } + req, err := http.NewRequestWithContext(ctx, "POST", o.TokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, refreshToken, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + if o.AuthStyle == StyleInHeader { + req.SetBasicAuth(url.QueryEscape(o.ClientID), url.QueryEscape(o.ClientSecret)) + } + + // Make request + resp, body, err := internal.DoRequest(o.client(), req) + if err != nil { + return nil, refreshToken, err + } + failureStatus := resp.StatusCode < 200 || resp.StatusCode > 299 + tokError := &Error{ + Response: resp, + Body: body, + } + + var token *Token + // errors ignored because of default switch on content + content, _, _ := mime.ParseMediaType(resp.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + // some endpoints return a query string + vals, err := url.ParseQuery(string(body)) + if err != nil { + if failureStatus { + return nil, refreshToken, tokError + } + return nil, refreshToken, fmt.Errorf("auth: cannot parse response: %w", err) + } + tokError.code = vals.Get("error") + tokError.description = vals.Get("error_description") + tokError.uri = vals.Get("error_uri") + token = &Token{ + Value: vals.Get("access_token"), + Type: vals.Get("token_type"), + Metadata: make(map[string]interface{}, len(vals)), + } + for k, v := range vals { + token.Metadata[k] = v + } + refreshToken = vals.Get("refresh_token") + e := vals.Get("expires_in") + expires, _ := strconv.Atoi(e) + if expires != 0 { + token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) + } + default: + var tj tokenJSON + if err = json.Unmarshal(body, &tj); err != nil { + if failureStatus { + return nil, refreshToken, tokError + } + return nil, refreshToken, fmt.Errorf("auth: cannot parse json: %w", err) + } + tokError.code = tj.ErrorCode + tokError.description = tj.ErrorDescription + tokError.uri = tj.ErrorURI + token = &Token{ + Value: tj.AccessToken, + Type: tj.TokenType, + Expiry: tj.expiry(), + Metadata: make(map[string]interface{}), + } + json.Unmarshal(body, &token.Metadata) // optional field, skip err check + refreshToken = tj.RefreshToken + } + // according to spec, servers should respond status 400 in error case + // https://www.rfc-editor.org/rfc/rfc6749#section-5.2 + // but some unorthodox servers respond 200 in error case + if failureStatus || tokError.code != "" { + return nil, refreshToken, tokError + } + if token.Value == "" { + return nil, refreshToken, errors.New("auth: server response missing access_token") + } + return token, refreshToken, nil +} diff --git a/vendor/cloud.google.com/go/compute/internal/BUILD.bazel b/vendor/cloud.google.com/go/compute/internal/BUILD.bazel deleted file mode 100644 index 4f1344ab..00000000 --- a/vendor/cloud.google.com/go/compute/internal/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "internal", - srcs = ["version.go"], - importmap = "peridot.resf.org/vendor/cloud.google.com/go/compute/internal", - importpath = "cloud.google.com/go/compute/internal", - visibility = ["//vendor/cloud.google.com/go/compute:__subpackages__"], -) diff --git a/vendor/cloud.google.com/go/compute/metadata/BUILD.bazel 
b/vendor/cloud.google.com/go/compute/metadata/BUILD.bazel index 90f0e9d1..8f7e341f 100644 --- a/vendor/cloud.google.com/go/compute/metadata/BUILD.bazel +++ b/vendor/cloud.google.com/go/compute/metadata/BUILD.bazel @@ -6,8 +6,17 @@ go_library( "metadata.go", "retry.go", "retry_linux.go", + "syscheck.go", + "syscheck_linux.go", + "syscheck_windows.go", ], importmap = "peridot.resf.org/vendor/cloud.google.com/go/compute/metadata", importpath = "cloud.google.com/go/compute/metadata", visibility = ["//visibility:public"], + deps = select({ + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/golang.org/x/sys/windows/registry", + ], + "//conditions:default": [], + }), ) diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 06b95734..9594e1e2 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,31 @@ # Changes +## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.4.0...compute/metadata/v0.5.0) (2024-07-10) + + +### Features + +* **compute/metadata:** Add sys check for windows OnGCE ([#10521](https://github.com/googleapis/google-cloud-go/issues/10521)) ([3b9a830](https://github.com/googleapis/google-cloud-go/commit/3b9a83063960d2a2ac20beb47cc15818a68bd302)) + +## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.3.0...compute/metadata/v0.4.0) (2024-07-01) + + +### Features + +* **compute/metadata:** Add context for all functions/methods ([#10370](https://github.com/googleapis/google-cloud-go/issues/10370)) ([66b8efe](https://github.com/googleapis/google-cloud-go/commit/66b8efe7ad877e052b2987bb4475477e38c67bb3)) + + +### Documentation + +* **compute/metadata:** Update OnGCE description ([#10408](https://github.com/googleapis/google-cloud-go/issues/10408)) ([6a46dca](https://github.com/googleapis/google-cloud-go/commit/6a46dca4eae4f88ec6f88822e01e5bf8aeca787f)) + +## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.3...compute/metadata/v0.3.0) (2024-04-15) + + +### Features + +* **compute/metadata:** Add context aware functions ([#9733](https://github.com/googleapis/google-cloud-go/issues/9733)) ([e4eb5b4](https://github.com/googleapis/google-cloud-go/commit/e4eb5b46ee2aec9d2fc18300bfd66015e25a0510)) + ## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15) diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index c17faa14..345080b7 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -23,12 +23,11 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" "os" - "runtime" "strings" "sync" "time" @@ -88,16 +87,16 @@ func (suffix NotDefinedError) Error() string { return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) } -func (c *cachedValue) get(cl *Client) (v string, err error) { +func (c *cachedValue) get(ctx context.Context, cl *Client) (v string, err error) { defer c.mu.Unlock() c.mu.Lock() if c.v != "" { return c.v, nil } if c.trim { - v, err = cl.getTrimmed(c.k) + v, err = cl.getTrimmed(ctx, c.k) } else { - v, err = cl.Get(c.k) + v, err = cl.GetWithContext(ctx, c.k) } if err == nil { c.v = v @@ -110,7 +109,9 @@ var ( onGCE bool ) -// OnGCE reports 
whether this process is running on Google Compute Engine. +// OnGCE reports whether this process is running on Google Compute Platforms. +// NOTE: True returned from `OnGCE` does not guarantee that the metadata server +// is accessible from this process and have all the metadata defined. func OnGCE() bool { onGCEOnce.Do(initOnGCE) return onGCE @@ -188,78 +189,213 @@ func testOnGCE() bool { return <-resc } -// systemInfoSuggestsGCE reports whether the local system (without -// doing network requests) suggests that we're running on GCE. If this -// returns true, testOnGCE tries a bit harder to reach its metadata -// server. -func systemInfoSuggestsGCE() bool { - if runtime.GOOS != "linux" { - // We don't have any non-Linux clues available, at least yet. - return false - } - slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") - name := strings.TrimSpace(string(slurp)) - return name == "Google" || name == "Google Compute Engine" -} - -// Subscribe calls Client.Subscribe on the default client. +// Subscribe calls Client.SubscribeWithContext on the default client. +// +// Deprecated: Please use the context aware variant [SubscribeWithContext]. func Subscribe(suffix string, fn func(v string, ok bool) error) error { - return defaultClient.Subscribe(suffix, fn) + return defaultClient.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) }) } -// Get calls Client.Get on the default client. -func Get(suffix string) (string, error) { return defaultClient.Get(suffix) } +// SubscribeWithContext calls Client.SubscribeWithContext on the default client. +func SubscribeWithContext(ctx context.Context, suffix string, fn func(ctx context.Context, v string, ok bool) error) error { + return defaultClient.SubscribeWithContext(ctx, suffix, fn) +} + +// Get calls Client.GetWithContext on the default client. +// +// Deprecated: Please use the context aware variant [GetWithContext]. +func Get(suffix string) (string, error) { + return defaultClient.GetWithContext(context.Background(), suffix) +} + +// GetWithContext calls Client.GetWithContext on the default client. +func GetWithContext(ctx context.Context, suffix string) (string, error) { + return defaultClient.GetWithContext(ctx, suffix) +} // ProjectID returns the current instance's project ID string. -func ProjectID() (string, error) { return defaultClient.ProjectID() } +// +// Deprecated: Please use the context aware variant [ProjectIDWithContext]. +func ProjectID() (string, error) { + return defaultClient.ProjectIDWithContext(context.Background()) +} + +// ProjectIDWithContext returns the current instance's project ID string. +func ProjectIDWithContext(ctx context.Context) (string, error) { + return defaultClient.ProjectIDWithContext(ctx) +} // NumericProjectID returns the current instance's numeric project ID. -func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } +// +// Deprecated: Please use the context aware variant [NumericProjectIDWithContext]. +func NumericProjectID() (string, error) { + return defaultClient.NumericProjectIDWithContext(context.Background()) +} + +// NumericProjectIDWithContext returns the current instance's numeric project ID. +func NumericProjectIDWithContext(ctx context.Context) (string, error) { + return defaultClient.NumericProjectIDWithContext(ctx) +} // InternalIP returns the instance's primary internal IP address. 
-func InternalIP() (string, error) { return defaultClient.InternalIP() } +// +// Deprecated: Please use the context aware variant [InternalIPWithContext]. +func InternalIP() (string, error) { + return defaultClient.InternalIPWithContext(context.Background()) +} + +// InternalIPWithContext returns the instance's primary internal IP address. +func InternalIPWithContext(ctx context.Context) (string, error) { + return defaultClient.InternalIPWithContext(ctx) +} // ExternalIP returns the instance's primary external (public) IP address. -func ExternalIP() (string, error) { return defaultClient.ExternalIP() } +// +// Deprecated: Please use the context aware variant [ExternalIPWithContext]. +func ExternalIP() (string, error) { + return defaultClient.ExternalIPWithContext(context.Background()) +} -// Email calls Client.Email on the default client. -func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) } +// ExternalIPWithContext returns the instance's primary external (public) IP address. +func ExternalIPWithContext(ctx context.Context) (string, error) { + return defaultClient.ExternalIPWithContext(ctx) +} + +// Email calls Client.EmailWithContext on the default client. +// +// Deprecated: Please use the context aware variant [EmailWithContext]. +func Email(serviceAccount string) (string, error) { + return defaultClient.EmailWithContext(context.Background(), serviceAccount) +} + +// EmailWithContext calls Client.EmailWithContext on the default client. +func EmailWithContext(ctx context.Context, serviceAccount string) (string, error) { + return defaultClient.EmailWithContext(ctx, serviceAccount) +} // Hostname returns the instance's hostname. This will be of the form // "<instanceID>.c.<projID>.internal". -func Hostname() (string, error) { return defaultClient.Hostname() } +// +// Deprecated: Please use the context aware variant [HostnameWithContext]. +func Hostname() (string, error) { + return defaultClient.HostnameWithContext(context.Background()) +} + +// HostnameWithContext returns the instance's hostname. This will be of the form +// "<instanceID>.c.<projID>.internal". +func HostnameWithContext(ctx context.Context) (string, error) { + return defaultClient.HostnameWithContext(ctx) +} // InstanceTags returns the list of user-defined instance tags, // assigned when initially creating a GCE instance. -func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } +// +// Deprecated: Please use the context aware variant [InstanceTagsWithContext]. +func InstanceTags() ([]string, error) { + return defaultClient.InstanceTagsWithContext(context.Background()) +} + +// InstanceTagsWithContext returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func InstanceTagsWithContext(ctx context.Context) ([]string, error) { + return defaultClient.InstanceTagsWithContext(ctx) +} // InstanceID returns the current VM's numeric instance ID. -func InstanceID() (string, error) { return defaultClient.InstanceID() } +// +// Deprecated: Please use the context aware variant [InstanceIDWithContext]. +func InstanceID() (string, error) { + return defaultClient.InstanceIDWithContext(context.Background()) +} + +// InstanceIDWithContext returns the current VM's numeric instance ID. +func InstanceIDWithContext(ctx context.Context) (string, error) { + return defaultClient.InstanceIDWithContext(ctx) +} // InstanceName returns the current VM's instance ID string.
-func InstanceName() (string, error) { return defaultClient.InstanceName() } +// +// Deprecated: Please use the context aware variant [InstanceNameWithContext]. +func InstanceName() (string, error) { + return defaultClient.InstanceNameWithContext(context.Background()) +} + +// InstanceNameWithContext returns the current VM's instance ID string. +func InstanceNameWithContext(ctx context.Context) (string, error) { + return defaultClient.InstanceNameWithContext(ctx) +} // Zone returns the current VM's zone, such as "us-central1-b". -func Zone() (string, error) { return defaultClient.Zone() } +// +// Deprecated: Please use the context aware variant [ZoneWithContext]. +func Zone() (string, error) { + return defaultClient.ZoneWithContext(context.Background()) +} -// InstanceAttributes calls Client.InstanceAttributes on the default client. -func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } +// ZoneWithContext returns the current VM's zone, such as "us-central1-b". +func ZoneWithContext(ctx context.Context) (string, error) { + return defaultClient.ZoneWithContext(ctx) +} -// ProjectAttributes calls Client.ProjectAttributes on the default client. -func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } +// InstanceAttributes calls Client.InstanceAttributesWithContext on the default client. +// +// Deprecated: Please use the context aware variant [InstanceAttributesWithContext]. +func InstanceAttributes() ([]string, error) { + return defaultClient.InstanceAttributesWithContext(context.Background()) +} -// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. +// InstanceAttributesWithContext calls Client.InstanceAttributesWithContext on the default client. +func InstanceAttributesWithContext(ctx context.Context) ([]string, error) { + return defaultClient.InstanceAttributesWithContext(ctx) +} + +// ProjectAttributes calls Client.ProjectAttributesWithContext on the default client. +// +// Deprecated: Please use the context aware variant [ProjectAttributesWithContext]. +func ProjectAttributes() ([]string, error) { + return defaultClient.ProjectAttributesWithContext(context.Background()) +} + +// ProjectAttributesWithContext calls Client.ProjectAttributesWithContext on the default client. +func ProjectAttributesWithContext(ctx context.Context) ([]string, error) { + return defaultClient.ProjectAttributesWithContext(ctx) +} + +// InstanceAttributeValue calls Client.InstanceAttributeValueWithContext on the default client. +// +// Deprecated: Please use the context aware variant [InstanceAttributeValueWithContext]. func InstanceAttributeValue(attr string) (string, error) { - return defaultClient.InstanceAttributeValue(attr) + return defaultClient.InstanceAttributeValueWithContext(context.Background(), attr) } -// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. +// InstanceAttributeValueWithContext calls Client.InstanceAttributeValueWithContext on the default client. +func InstanceAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return defaultClient.InstanceAttributeValueWithContext(ctx, attr) +} + +// ProjectAttributeValue calls Client.ProjectAttributeValueWithContext on the default client. +// +// Deprecated: Please use the context aware variant [ProjectAttributeValueWithContext].
func ProjectAttributeValue(attr string) (string, error) { - return defaultClient.ProjectAttributeValue(attr) + return defaultClient.ProjectAttributeValueWithContext(context.Background(), attr) } -// Scopes calls Client.Scopes on the default client. -func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } +// ProjectAttributeValueWithContext calls Client.ProjectAttributeValueWithContext on the default client. +func ProjectAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return defaultClient.ProjectAttributeValueWithContext(ctx, attr) +} + +// Scopes calls Client.ScopesWithContext on the default client. +// +// Deprecated: Please use the context aware variant [ScopesWithContext]. +func Scopes(serviceAccount string) ([]string, error) { + return defaultClient.ScopesWithContext(context.Background(), serviceAccount) +} + +// ScopesWithContext calls Client.ScopesWithContext on the default client. +func ScopesWithContext(ctx context.Context, serviceAccount string) ([]string, error) { + return defaultClient.ScopesWithContext(ctx, serviceAccount) +} func strsContains(ss []string, s string) bool { for _, v := range ss { @@ -282,14 +418,12 @@ func NewClient(c *http.Client) *Client { if c == nil { return defaultClient } - return &Client{hc: c} } // getETag returns a value from the metadata service as well as the associated ETag. // This func is otherwise equivalent to Get. -func (c *Client) getETag(suffix string) (value, etag string, err error) { - ctx := context.TODO() +func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string, err error) { // Using a fixed IP makes it very difficult to spoof the metadata service in // a container, which is an important use-case for local testing of cloud // deployments. To enable spoofing of the metadata service, the environment @@ -306,7 +440,7 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) { } suffix = strings.TrimLeft(suffix, "/") u := "http://" + host + "/computeMetadata/v1/" + suffix - req, err := http.NewRequest("GET", u, nil) + req, err := http.NewRequestWithContext(ctx, "GET", u, nil) if err != nil { return "", "", err } @@ -336,7 +470,7 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) { if res.StatusCode == http.StatusNotFound { return "", "", NotDefinedError(suffix) } - all, err := ioutil.ReadAll(res.Body) + all, err := io.ReadAll(res.Body) if err != nil { return "", "", err } @@ -354,19 +488,37 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) { // // If the requested metadata is not defined, the returned error will // be of type NotDefinedError. +// +// Deprecated: Please use the context aware variant [Client.GetWithContext]. func (c *Client) Get(suffix string) (string, error) { - val, _, err := c.getETag(suffix) + return c.GetWithContext(context.Background(), suffix) +} + +// GetWithContext returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +// +// NOTE: Without an extra deadline in the context this call can take, in the +// worst case with internal backoff retries, up to 15 seconds (e.g. when the +// server is responding slowly). Pass a context with an additional timeout when +// needed.
+func (c *Client) GetWithContext(ctx context.Context, suffix string) (string, error) { + val, _, err := c.getETag(ctx, suffix) return val, err } -func (c *Client) getTrimmed(suffix string) (s string, err error) { - s, err = c.Get(suffix) +func (c *Client) getTrimmed(ctx context.Context, suffix string) (s string, err error) { + s, err = c.GetWithContext(ctx, suffix) s = strings.TrimSpace(s) return } -func (c *Client) lines(suffix string) ([]string, error) { - j, err := c.Get(suffix) +func (c *Client) lines(ctx context.Context, suffix string) ([]string, error) { + j, err := c.GetWithContext(ctx, suffix) if err != nil { return nil, err } @@ -378,45 +530,104 @@ func (c *Client) lines(suffix string) ([]string, error) { } // ProjectID returns the current instance's project ID string. -func (c *Client) ProjectID() (string, error) { return projID.get(c) } +// +// Deprecated: Please use the context aware variant [Client.ProjectIDWithContext]. +func (c *Client) ProjectID() (string, error) { return c.ProjectIDWithContext(context.Background()) } + +// ProjectIDWithContext returns the current instance's project ID string. +func (c *Client) ProjectIDWithContext(ctx context.Context) (string, error) { return projID.get(ctx, c) } // NumericProjectID returns the current instance's numeric project ID. -func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } +// +// Deprecated: Please use the context aware variant [Client.NumericProjectIDWithContext]. +func (c *Client) NumericProjectID() (string, error) { + return c.NumericProjectIDWithContext(context.Background()) +} + +// NumericProjectIDWithContext returns the current instance's numeric project ID. +func (c *Client) NumericProjectIDWithContext(ctx context.Context) (string, error) { + return projNum.get(ctx, c) +} // InstanceID returns the current VM's numeric instance ID. -func (c *Client) InstanceID() (string, error) { return instID.get(c) } +// +// Deprecated: Please use the context aware variant [Client.InstanceIDWithContext]. +func (c *Client) InstanceID() (string, error) { + return c.InstanceIDWithContext(context.Background()) +} + +// InstanceIDWithContext returns the current VM's numeric instance ID. +func (c *Client) InstanceIDWithContext(ctx context.Context) (string, error) { + return instID.get(ctx, c) +} // InternalIP returns the instance's primary internal IP address. +// +// Deprecated: Please use the context aware variant [Client.InternalIPWithContext]. func (c *Client) InternalIP() (string, error) { - return c.getTrimmed("instance/network-interfaces/0/ip") + return c.InternalIPWithContext(context.Background()) +} + +// InternalIPWithContext returns the instance's primary internal IP address. +func (c *Client) InternalIPWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/network-interfaces/0/ip") } // Email returns the email address associated with the service account. -// The account may be empty or the string "default" to use the instance's -// main account. +// +// Deprecated: Please use the context aware variant [Client.EmailWithContext]. func (c *Client) Email(serviceAccount string) (string, error) { + return c.EmailWithContext(context.Background(), serviceAccount) +} + +// EmailWithContext returns the email address associated with the service account. +// The serviceAccount parameter may be empty or the string "default" to use the +// instance's main account.
+func (c *Client) EmailWithContext(ctx context.Context, serviceAccount string) (string, error) { if serviceAccount == "" { serviceAccount = "default" } - return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email") + return c.getTrimmed(ctx, "instance/service-accounts/"+serviceAccount+"/email") } // ExternalIP returns the instance's primary external (public) IP address. +// +// Deprecated: Please use the context aware variant [Client.ExternalIPWithContext]. func (c *Client) ExternalIP() (string, error) { - return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") + return c.ExternalIPWithContext(context.Background()) +} + +// ExternalIPWithContext returns the instance's primary external (public) IP address. +func (c *Client) ExternalIPWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/network-interfaces/0/access-configs/0/external-ip") } // Hostname returns the instance's hostname. This will be of the form // "<instanceID>.c.<projID>.internal". +// +// Deprecated: Please use the context aware variant [Client.HostnameWithContext]. func (c *Client) Hostname() (string, error) { - return c.getTrimmed("instance/hostname") + return c.HostnameWithContext(context.Background()) } -// InstanceTags returns the list of user-defined instance tags, -// assigned when initially creating a GCE instance. +// HostnameWithContext returns the instance's hostname. This will be of the form +// "<instanceID>.c.<projID>.internal". +func (c *Client) HostnameWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/hostname") +} + +// InstanceTags returns the list of user-defined instance tags. +// +// Deprecated: Please use the context aware variant [Client.InstanceTagsWithContext]. func (c *Client) InstanceTags() ([]string, error) { + return c.InstanceTagsWithContext(context.Background()) +} + +// InstanceTagsWithContext returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func (c *Client) InstanceTagsWithContext(ctx context.Context) ([]string, error) { var s []string - j, err := c.Get("instance/tags") + j, err := c.GetWithContext(ctx, "instance/tags") if err != nil { return nil, err } @@ -427,13 +638,27 @@ func (c *Client) InstanceTags() ([]string, error) { } // InstanceName returns the current VM's instance ID string. +// +// Deprecated: Please use the context aware variant [Client.InstanceNameWithContext]. func (c *Client) InstanceName() (string, error) { - return c.getTrimmed("instance/name") + return c.InstanceNameWithContext(context.Background()) +} + +// InstanceNameWithContext returns the current VM's instance ID string. +func (c *Client) InstanceNameWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/name") } // Zone returns the current VM's zone, such as "us-central1-b". +// +// Deprecated: Please use the context aware variant [Client.ZoneWithContext]. func (c *Client) Zone() (string, error) { - zone, err := c.getTrimmed("instance/zone") + return c.ZoneWithContext(context.Background()) +} + +// ZoneWithContext returns the current VM's zone, such as "us-central1-b". +func (c *Client) ZoneWithContext(ctx context.Context) (string, error) { + zone, err := c.getTrimmed(ctx, "instance/zone") // zone is of the form "projects/<projNum>/zones/<zoneName>". if err != nil { return "", err @@ -444,12 +669,34 @@ func (c *Client) Zone() (string, error) { // InstanceAttributes returns the list of user-defined attributes, // assigned when initially creating a GCE VM instance.
The value of an // attribute can be obtained with InstanceAttributeValue. -func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } +// +// Deprecated: Please use the context aware variant [Client.InstanceAttributesWithContext]. +func (c *Client) InstanceAttributes() ([]string, error) { + return c.InstanceAttributesWithContext(context.Background()) +} + +// InstanceAttributesWithContext returns the list of user-defined attributes, +// assigned when initially creating a GCE VM instance. The value of an +// attribute can be obtained with InstanceAttributeValue. +func (c *Client) InstanceAttributesWithContext(ctx context.Context) ([]string, error) { + return c.lines(ctx, "instance/attributes/") +} // ProjectAttributes returns the list of user-defined attributes // applying to the project as a whole, not just this VM. The value of // an attribute can be obtained with ProjectAttributeValue. -func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } +// +// Deprecated: Please use the context aware variant [Client.ProjectAttributesWithContext]. +func (c *Client) ProjectAttributes() ([]string, error) { + return c.ProjectAttributesWithContext(context.Background()) +} + +// ProjectAttributesWithContext returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func (c *Client) ProjectAttributesWithContext(ctx context.Context) ([]string, error) { + return c.lines(ctx, "project/attributes/") +} // InstanceAttributeValue returns the value of the provided VM // instance attribute. @@ -459,8 +706,22 @@ func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project // // InstanceAttributeValue may return ("", nil) if the attribute was // defined to be the empty string. +// +// Deprecated: Please use the context aware variant [Client.InstanceAttributeValueWithContext]. func (c *Client) InstanceAttributeValue(attr string) (string, error) { - return c.Get("instance/attributes/" + attr) + return c.InstanceAttributeValueWithContext(context.Background(), attr) +} + +// InstanceAttributeValueWithContext returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) InstanceAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return c.GetWithContext(ctx, "instance/attributes/"+attr) } // ProjectAttributeValue returns the value of the provided @@ -471,39 +732,71 @@ func (c *Client) InstanceAttributeValue(attr string) (string, error) { // // ProjectAttributeValue may return ("", nil) if the attribute was // defined to be the empty string. +// +// Deprecated: Please use the context aware variant [Client.ProjectAttributeValueWithContext]. func (c *Client) ProjectAttributeValue(attr string) (string, error) { - return c.Get("project/attributes/" + attr) + return c.ProjectAttributeValueWithContext(context.Background(), attr) +} + +// ProjectAttributeValueWithContext returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. 
+func (c *Client) ProjectAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return c.GetWithContext(ctx, "project/attributes/"+attr) } // Scopes returns the service account scopes for the given account. // The account may be empty or the string "default" to use the instance's // main account. +// +// Deprecated: Please use the context aware variant [Client.ScopesWithContext]. func (c *Client) Scopes(serviceAccount string) ([]string, error) { + return c.ScopesWithContext(context.Background(), serviceAccount) +} + +// ScopesWithContext returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. +func (c *Client) ScopesWithContext(ctx context.Context, serviceAccount string) ([]string, error) { if serviceAccount == "" { serviceAccount = "default" } - return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") + return c.lines(ctx, "instance/service-accounts/"+serviceAccount+"/scopes") } // Subscribe subscribes to a value from the metadata service. // The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". // The suffix may contain query parameters. // -// Subscribe calls fn with the latest metadata value indicated by the provided -// suffix. If the metadata value is deleted, fn is called with the empty string -// and ok false. Subscribe blocks until fn returns a non-nil error or the value -// is deleted. Subscribe returns the error value returned from the last call to -// fn, which may be nil when ok == false. +// Deprecated: Please use the context aware variant [Client.SubscribeWithContext]. func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error { + return c.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) }) +} + +// SubscribeWithContext subscribes to a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// The suffix may contain query parameters. +// +// SubscribeWithContext calls fn with the latest metadata value indicated by the +// provided suffix. If the metadata value is deleted, fn is called with the +// empty string and ok false. SubscribeWithContext blocks until fn returns a +// non-nil error or the value is deleted. It returns the error value returned +// from the last call to fn, which may be nil when ok == false. +func (c *Client) SubscribeWithContext(ctx context.Context, suffix string, fn func(ctx context.Context, v string, ok bool) error) error { const failedSubscribeSleep = time.Second * 5 // First check to see if the metadata value exists at all.
- val, lastETag, err := c.getETag(suffix) + val, lastETag, err := c.getETag(ctx, suffix) if err != nil { return err } - if err := fn(val, true); err != nil { + if err := fn(ctx, val, true); err != nil { return err } @@ -514,7 +807,7 @@ func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) erro suffix += "?wait_for_change=true&last_etag=" } for { - val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag)) + val, etag, err := c.getETag(ctx, suffix+url.QueryEscape(lastETag)) if err != nil { if _, deleted := err.(NotDefinedError); !deleted { time.Sleep(failedSubscribeSleep) @@ -524,7 +817,7 @@ func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) erro } lastETag = etag - if err := fn(val, ok); err != nil || !ok { + if err := fn(ctx, val, ok); err != nil || !ok { return err } } diff --git a/vendor/cloud.google.com/go/compute/metadata/retry.go b/vendor/cloud.google.com/go/compute/metadata/retry.go index 0f18f3cd..3d4bc75d 100644 --- a/vendor/cloud.google.com/go/compute/metadata/retry.go +++ b/vendor/cloud.google.com/go/compute/metadata/retry.go @@ -27,7 +27,7 @@ const ( ) var ( - syscallRetryable = func(err error) bool { return false } + syscallRetryable = func(error) bool { return false } ) // defaultBackoff is basically equivalent to gax.Backoff without the need for diff --git a/vendor/cloud.google.com/go/compute/metadata/tidyfix.go b/vendor/cloud.google.com/go/compute/metadata/syscheck.go similarity index 60% rename from vendor/cloud.google.com/go/compute/metadata/tidyfix.go rename to vendor/cloud.google.com/go/compute/metadata/syscheck.go index 4cef4850..e0704fa6 100644 --- a/vendor/cloud.google.com/go/compute/metadata/tidyfix.go +++ b/vendor/cloud.google.com/go/compute/metadata/syscheck.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,12 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -// This file, and the {{.RootMod}} import, won't actually become part of -// the resultant binary. -//go:build modhack -// +build modhack +//go:build !windows && !linux package metadata -// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "cloud.google.com/go/compute/internal" +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. +func systemInfoSuggestsGCE() bool { + // We don't currently have checks for other GOOS + return false +} diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go similarity index 66% rename from vendor/cloud.google.com/go/compute/internal/version.go rename to vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go index 291a237f..74689acb 100644 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ b/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -12,7 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -package internal +//go:build linux -// Version is the current tagged release of the library. -const Version = "1.24.0" +package metadata + +import ( + "os" + "strings" +) + +func systemInfoSuggestsGCE() bool { + b, _ := os.ReadFile("/sys/class/dmi/id/product_name") + name := strings.TrimSpace(string(b)) + return name == "Google" || name == "Google Compute Engine" +} diff --git a/vendor/google.golang.org/genproto/internal/doc.go b/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go similarity index 53% rename from vendor/google.golang.org/genproto/internal/doc.go rename to vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go index 90e89b4a..c0ce6278 100644 --- a/vendor/google.golang.org/genproto/internal/doc.go +++ b/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,6 +12,27 @@ // See the License for the specific language governing permissions and // limitations under the License. -// This file makes internal an importable go package -// for use with backreferences from submodules. -package internal +//go:build windows + +package metadata + +import ( + "strings" + + "golang.org/x/sys/windows/registry" +) + +func systemInfoSuggestsGCE() bool { + k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SYSTEM\HardwareConfig\Current`, registry.QUERY_VALUE) + if err != nil { + return false + } + defer k.Close() + + s, _, err := k.GetStringValue("SystemProductName") + if err != nil { + return false + } + s = strings.TrimSpace(s) + return strings.HasPrefix(s, "Google") +} diff --git a/vendor/cloud.google.com/go/iam/BUILD.bazel b/vendor/cloud.google.com/go/iam/BUILD.bazel index 8ab18dcc..f5a9f3a2 100644 --- a/vendor/cloud.google.com/go/iam/BUILD.bazel +++ b/vendor/cloud.google.com/go/iam/BUILD.bazel @@ -9,7 +9,7 @@ go_library( deps = [ "//vendor/cloud.google.com/go/iam/apiv1/iampb", "@com_github_googleapis_gax_go_v2//:gax-go", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//metadata", ], diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md index 43a17938..2331bfc1 100644 --- a/vendor/cloud.google.com/go/iam/CHANGES.md +++ b/vendor/cloud.google.com/go/iam/CHANGES.md @@ -1,6 +1,41 @@ # Changes +## [1.1.11](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.10...iam/v1.1.11) (2024-07-10) + + +### Bug Fixes + +* **iam:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5)) + +## [1.1.10](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.9...iam/v1.1.10) (2024-07-01) + + +### Bug Fixes + +* **iam:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) + +## [1.1.9](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.8...iam/v1.1.9) (2024-06-26) + + +### Bug Fixes + +* **iam:** Enable new auth lib ([b95805f](https://github.com/googleapis/google-cloud-go/commit/b95805f4c87d3e8d10ea23bd7a2d68d7a4157568)) + +## 
[1.1.8](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.7...iam/v1.1.8) (2024-05-01) + + +### Bug Fixes + +* **iam:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4)) + +## [1.1.7](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.6...iam/v1.1.7) (2024-03-14) + + +### Bug Fixes + +* **iam:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a)) + ## [1.1.6](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.5...iam/v1.1.6) (2024-01-30) diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/BUILD.bazel b/vendor/cloud.google.com/go/iam/apiv1/iampb/BUILD.bazel index c3691dcf..be567fab 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/BUILD.bazel +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/BUILD.bazel @@ -13,7 +13,7 @@ go_library( deps = [ "@org_golang_google_genproto//googleapis/type/expr", "@org_golang_google_genproto_googleapis_api//annotations", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//status", "@org_golang_google_protobuf//reflect/protoreflect", diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go index b5243e61..619b4c4f 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v4.23.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/iam/v1/iam_policy.proto package iampb @@ -388,7 +388,7 @@ func file_google_iam_v1_iam_policy_proto_rawDescGZIP() []byte { } var file_google_iam_v1_iam_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_google_iam_v1_iam_policy_proto_goTypes = []interface{}{ +var file_google_iam_v1_iam_policy_proto_goTypes = []any{ (*SetIamPolicyRequest)(nil), // 0: google.iam.v1.SetIamPolicyRequest (*GetIamPolicyRequest)(nil), // 1: google.iam.v1.GetIamPolicyRequest (*TestIamPermissionsRequest)(nil), // 2: google.iam.v1.TestIamPermissionsRequest @@ -422,7 +422,7 @@ func file_google_iam_v1_iam_policy_proto_init() { file_google_iam_v1_options_proto_init() file_google_iam_v1_policy_proto_init() if !protoimpl.UnsafeEnabled { - file_google_iam_v1_iam_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_iam_policy_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicyRequest); i { case 0: return &v.state @@ -434,7 +434,7 @@ func file_google_iam_v1_iam_policy_proto_init() { return nil } } - file_google_iam_v1_iam_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_iam_policy_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicyRequest); i { case 0: return &v.state @@ -446,7 +446,7 @@ func file_google_iam_v1_iam_policy_proto_init() { return nil } } - file_google_iam_v1_iam_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_iam_policy_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsRequest); i { case 0: return &v.state @@ -458,7 +458,7 @@ func file_google_iam_v1_iam_policy_proto_init() { return nil } } - file_google_iam_v1_iam_policy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_iam_policy_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsResponse); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go index 3f854fe4..f1c1c084 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v4.23.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/iam/v1/options.proto package iampb @@ -136,7 +136,7 @@ func file_google_iam_v1_options_proto_rawDescGZIP() []byte { } var file_google_iam_v1_options_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_iam_v1_options_proto_goTypes = []interface{}{ +var file_google_iam_v1_options_proto_goTypes = []any{ (*GetPolicyOptions)(nil), // 0: google.iam.v1.GetPolicyOptions } var file_google_iam_v1_options_proto_depIdxs = []int32{ @@ -153,7 +153,7 @@ func file_google_iam_v1_options_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_iam_v1_options_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_options_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*GetPolicyOptions); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go index dfc60661..4dda5d6d 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.23.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/iam/v1/policy.proto package iampb @@ -1036,7 +1036,7 @@ func file_google_iam_v1_policy_proto_rawDescGZIP() []byte { var file_google_iam_v1_policy_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_google_iam_v1_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 7) -var file_google_iam_v1_policy_proto_goTypes = []interface{}{ +var file_google_iam_v1_policy_proto_goTypes = []any{ (AuditLogConfig_LogType)(0), // 0: google.iam.v1.AuditLogConfig.LogType (BindingDelta_Action)(0), // 1: google.iam.v1.BindingDelta.Action (AuditConfigDelta_Action)(0), // 2: google.iam.v1.AuditConfigDelta.Action @@ -1073,7 +1073,7 @@ func file_google_iam_v1_policy_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_iam_v1_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Policy); i { case 0: return &v.state @@ -1085,7 +1085,7 @@ func file_google_iam_v1_policy_proto_init() { return nil } } - file_google_iam_v1_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Binding); i { case 0: return &v.state @@ -1097,7 +1097,7 @@ func file_google_iam_v1_policy_proto_init() { return nil } } - file_google_iam_v1_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*AuditConfig); i { case 0: return &v.state @@ -1109,7 +1109,7 @@ func file_google_iam_v1_policy_proto_init() { return nil } } - file_google_iam_v1_policy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*AuditLogConfig); i { case 0: return &v.state @@ -1121,7 +1121,7 @@ func 
file_google_iam_v1_policy_proto_init() { return nil } } - file_google_iam_v1_policy_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*PolicyDelta); i { case 0: return &v.state @@ -1133,7 +1133,7 @@ func file_google_iam_v1_policy_proto_init() { return nil } } - file_google_iam_v1_policy_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*BindingDelta); i { case 0: return &v.state @@ -1145,7 +1145,7 @@ func file_google_iam_v1_policy_proto_init() { return nil } } - file_google_iam_v1_policy_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_iam_v1_policy_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*AuditConfigDelta); i { case 0: return &v.state diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index ae8a1fc1..a3e99df2 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -199,6 +199,36 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/apphub/apiv1": { + "api_shortname": "apphub", + "distribution_name": "cloud.google.com/go/apphub/apiv1", + "description": "App Hub API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apphub/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/apps/events/subscriptions/apiv1": { + "api_shortname": "workspaceevents", + "distribution_name": "cloud.google.com/go/apps/events/subscriptions/apiv1", + "description": "Google Workspace Events API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apps/latest/events/subscriptions/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/apps/meet/apiv2": { + "api_shortname": "meet", + "distribution_name": "cloud.google.com/go/apps/meet/apiv2", + "description": "Google Meet API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apps/latest/meet/apiv2", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/apps/meet/apiv2beta": { "api_shortname": "meet", "distribution_name": "cloud.google.com/go/apps/meet/apiv2beta", @@ -309,6 +339,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/backupdr/apiv1": { + "api_shortname": "backupdr", + "distribution_name": "cloud.google.com/go/backupdr/apiv1", + "description": "Backup and DR Service API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/backupdr/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/baremetalsolution/apiv2": { "api_shortname": "baremetalsolution", "distribution_name": "cloud.google.com/go/baremetalsolution/apiv2", @@ -619,6 +659,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/chat/apiv1": { + "api_shortname": "chat", + 
"distribution_name": "cloud.google.com/go/chat/apiv1", + "description": "Google Chat API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/chat/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/cloudbuild/apiv1/v2": { "api_shortname": "cloudbuild", "distribution_name": "cloud.google.com/go/cloudbuild/apiv1/v2", @@ -639,6 +689,26 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/cloudcontrolspartner/apiv1": { + "api_shortname": "cloudcontrolspartner", + "distribution_name": "cloud.google.com/go/cloudcontrolspartner/apiv1", + "description": "Cloud Controls Partner API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudcontrolspartner/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/cloudcontrolspartner/apiv1beta": { + "api_shortname": "cloudcontrolspartner", + "distribution_name": "cloud.google.com/go/cloudcontrolspartner/apiv1beta", + "description": "Cloud Controls Partner API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudcontrolspartner/latest/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/clouddms/apiv1": { "api_shortname": "datamigration", "distribution_name": "cloud.google.com/go/clouddms/apiv1", @@ -706,7 +776,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/commerce/latest/consumer/procurement/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/compute/apiv1": { @@ -756,7 +826,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/config/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/contactcenterinsights/apiv1": { @@ -959,6 +1029,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/developerconnect/apiv1": { + "api_shortname": "developerconnect", + "distribution_name": "cloud.google.com/go/developerconnect/apiv1", + "description": "Developer Connect API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/developerconnect/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/dialogflow/apiv2": { "api_shortname": "dialogflow", "distribution_name": "cloud.google.com/go/dialogflow/apiv2", @@ -1032,7 +1112,7 @@ "cloud.google.com/go/dlp/apiv2": { "api_shortname": "dlp", "distribution_name": "cloud.google.com/go/dlp/apiv2", - "description": "Cloud Data Loss Prevention (DLP)", + "description": "Sensitive Data Protection (DLP)", "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dlp/latest/apiv2", @@ -1319,6 +1399,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/identitytoolkit/apiv2": { + "api_shortname": "identitytoolkit", + "distribution_name": 
"cloud.google.com/go/identitytoolkit/apiv2", + "description": "Identity Toolkit API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/identitytoolkit/latest/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/ids/apiv1": { "api_shortname": "ids", "distribution_name": "cloud.google.com/go/ids/apiv1", @@ -1439,6 +1529,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/managedkafka/apiv1": { + "api_shortname": "managedkafka", + "distribution_name": "cloud.google.com/go/managedkafka/apiv1", + "description": "Apache Kafka for BigQuery API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/managedkafka/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/maps/addressvalidation/apiv1": { "api_shortname": "addressvalidation", "distribution_name": "cloud.google.com/go/maps/addressvalidation/apiv1", @@ -1469,16 +1569,6 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha": { - "api_shortname": "mapsplatformdatasets", - "distribution_name": "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha", - "description": "Maps Platform Datasets API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/mapsplatformdatasets/apiv1alpha", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, "cloud.google.com/go/maps/places/apiv1": { "api_shortname": "places", "distribution_name": "cloud.google.com/go/maps/places/apiv1", @@ -1486,7 +1576,17 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/places/apiv1", - "release_level": "stable", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/maps/routeoptimization/apiv1": { + "api_shortname": "routeoptimization", + "distribution_name": "cloud.google.com/go/maps/routeoptimization/apiv1", + "description": "Route Optimization API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/routeoptimization/apiv1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/maps/routing/apiv2": { @@ -1499,6 +1599,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/maps/solar/apiv1": { + "api_shortname": "solar", + "distribution_name": "cloud.google.com/go/maps/solar/apiv1", + "description": "Solar API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/solar/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/mediatranslation/apiv1beta1": { "api_shortname": "mediatranslation", "distribution_name": "cloud.google.com/go/mediatranslation/apiv1beta1", @@ -1566,7 +1676,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/migrationcenter/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": 
"GAPIC_AUTO" }, "cloud.google.com/go/monitoring/apiv3/v2": { @@ -1606,7 +1716,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/netapp/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/networkconnectivity/apiv1": { @@ -1649,6 +1759,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/networkservices/apiv1": { + "api_shortname": "networkservices", + "distribution_name": "cloud.google.com/go/networkservices/apiv1", + "description": "Network Services API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkservices/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/notebooks/apiv1": { "api_shortname": "notebooks", "distribution_name": "cloud.google.com/go/notebooks/apiv1", @@ -1779,6 +1899,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/parallelstore/apiv1beta": { + "api_shortname": "parallelstore", + "distribution_name": "cloud.google.com/go/parallelstore/apiv1beta", + "description": "Parallelstore API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/parallelstore/latest/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/phishingprotection/apiv1beta1": { "api_shortname": "phishingprotection", "distribution_name": "cloud.google.com/go/phishingprotection/apiv1beta1", @@ -1966,7 +2096,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/cluster/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/resourcemanager/apiv2": { @@ -2002,7 +2132,7 @@ "cloud.google.com/go/retail/apiv2": { "api_shortname": "retail", "distribution_name": "cloud.google.com/go/retail/apiv2", - "description": "Retail API", + "description": "Vertex AI Search for Retail API", "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2", @@ -2012,7 +2142,7 @@ "cloud.google.com/go/retail/apiv2alpha": { "api_shortname": "retail", "distribution_name": "cloud.google.com/go/retail/apiv2alpha", - "description": "Retail API", + "description": "Vertex AI Search for Retail API", "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2alpha", @@ -2022,7 +2152,7 @@ "cloud.google.com/go/retail/apiv2beta": { "api_shortname": "retail", "distribution_name": "cloud.google.com/go/retail/apiv2beta", - "description": "Retail API", + "description": "Vertex AI Search for Retail API", "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2beta", @@ -2079,6 +2209,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/secretmanager/apiv1beta2": { + "api_shortname": "secretmanager", + "distribution_name": "cloud.google.com/go/secretmanager/apiv1beta2", + "description": "Secret Manager API", + 
"language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/secretmanager/latest/apiv1beta2", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/securesourcemanager/apiv1": { "api_shortname": "securesourcemanager", "distribution_name": "cloud.google.com/go/securesourcemanager/apiv1", @@ -2099,6 +2239,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/security/publicca/apiv1": { + "api_shortname": "publicca", + "distribution_name": "cloud.google.com/go/security/publicca/apiv1", + "description": "Public Certificate Authority API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/publicca/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/security/publicca/apiv1beta1": { "api_shortname": "publicca", "distribution_name": "cloud.google.com/go/security/publicca/apiv1beta1", @@ -2139,6 +2289,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/securitycenter/apiv2": { + "api_shortname": "securitycenter", + "distribution_name": "cloud.google.com/go/securitycenter/apiv2", + "description": "Security Command Center API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv2", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/securitycenter/settings/apiv1beta1": { "api_shortname": "securitycenter", "distribution_name": "cloud.google.com/go/securitycenter/settings/apiv1beta1", @@ -2159,6 +2319,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/securityposture/apiv1": { + "api_shortname": "securityposture", + "distribution_name": "cloud.google.com/go/securityposture/apiv1", + "description": "Security Posture API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securityposture/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/servicecontrol/apiv1": { "api_shortname": "servicecontrol", "distribution_name": "cloud.google.com/go/servicecontrol/apiv1", @@ -2189,6 +2359,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/servicehealth/apiv1": { + "api_shortname": "servicehealth", + "distribution_name": "cloud.google.com/go/servicehealth/apiv1", + "description": "Service Health API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicehealth/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/servicemanagement/apiv1": { "api_shortname": "servicemanagement", "distribution_name": "cloud.google.com/go/servicemanagement/apiv1", @@ -2229,6 +2409,36 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/shopping/merchant/accounts/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/accounts/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/accounts/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/shopping/merchant/conversions/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/conversions/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/conversions/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/shopping/merchant/datasources/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/datasources/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/datasources/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/shopping/merchant/inventories/apiv1beta": { "api_shortname": "merchantapi", "distribution_name": "cloud.google.com/go/shopping/merchant/inventories/apiv1beta", @@ -2239,6 +2449,66 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/shopping/merchant/lfp/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/lfp/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/lfp/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/shopping/merchant/notifications/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/notifications/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/notifications/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/shopping/merchant/products/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/products/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/products/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/shopping/merchant/promotions/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/promotions/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/promotions/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/shopping/merchant/quota/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/quota/apiv1beta", + "description": "Merchant API", + "language": "go", + 
"client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/quota/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/shopping/merchant/reports/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/reports/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/reports/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/spanner": { "api_shortname": "spanner", "distribution_name": "cloud.google.com/go/spanner", @@ -2329,6 +2599,16 @@ "release_level": "stable", "library_type": "GAPIC_MANUAL" }, + "cloud.google.com/go/storage/control/apiv2": { + "api_shortname": "storage", + "distribution_name": "cloud.google.com/go/storage/control/apiv2", + "description": "Storage Control API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest/control/apiv2", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/storage/internal/apiv2": { "api_shortname": "storage", "distribution_name": "cloud.google.com/go/storage/internal/apiv2", @@ -2359,6 +2639,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/streetview/publish/apiv1": { + "api_shortname": "streetviewpublish", + "distribution_name": "cloud.google.com/go/streetview/publish/apiv1", + "description": "Street View Publish API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/streetview/latest/publish/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/support/apiv2": { "api_shortname": "cloudsupport", "distribution_name": "cloud.google.com/go/support/apiv2", @@ -2529,6 +2819,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/visionai/apiv1": { + "api_shortname": "visionai", + "distribution_name": "cloud.google.com/go/visionai/apiv1", + "description": "Vision AI API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/visionai/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/vmmigration/apiv1": { "api_shortname": "vmmigration", "distribution_name": "cloud.google.com/go/vmmigration/apiv1", @@ -2636,7 +2936,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workstations/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/workstations/apiv1beta": { diff --git a/vendor/cloud.google.com/go/internal/gen_info.sh b/vendor/cloud.google.com/go/internal/gen_info.sh new file mode 100644 index 00000000..59c19065 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/gen_info.sh @@ -0,0 +1,46 @@ +#!/bin/sh + +# Script to generate info.go files with methods for all clients. 
+
+if [ "$#" -ne 2 ]; then
+  echo >&2 "usage: $0 DIR PACKAGE"
+  exit 1
+fi
+
+outfile=info.go
+
+cd "$1" || exit 1
+
+cat <<'EOF' > $outfile
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// SetGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Also passes any
+// provided key-value pairs. Intended for use by Google-written clients.
+//
+// Internal use only.
+
+EOF
+
+printf 'package %s\n\n' "$2" >> $outfile
+
+
+awk '/^func \(c \*[A-Z].*\) setGoogleClientInfo/ {
+  printf("func (c %s SetGoogleClientInfo(keyval ...string) {\n", $3);
+  printf("  c.setGoogleClientInfo(keyval...)\n");
+  printf("}\n\n");
+}' *_client.go >> $outfile
+
+gofmt -w $outfile
diff --git a/vendor/cloud.google.com/go/internal/trace/trace.go b/vendor/cloud.google.com/go/internal/trace/trace.go
index eabed000..e8daf800 100644
--- a/vendor/cloud.google.com/go/internal/trace/trace.go
+++ b/vendor/cloud.google.com/go/internal/trace/trace.go
@@ -20,6 +20,7 @@ import (
 	"fmt"
 	"os"
 	"strings"
+	"sync"
 
 	"go.opencensus.io/trace"
 	"go.opentelemetry.io/otel"
@@ -32,17 +33,22 @@ import (
 )
 
 const (
+	// Deprecated: The default experimental tracing support for OpenCensus is
+	// now deprecated in the Google Cloud client libraries for Go.
 	// TelemetryPlatformTracingOpenCensus is the value to which the environment
 	// variable GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING should be
 	// set to enable OpenCensus tracing.
 	TelemetryPlatformTracingOpenCensus = "opencensus"
-	// TelemetryPlatformTracingOpenCensus is the value to which the environment
+	// TelemetryPlatformTracingOpenTelemetry is the value to which the environment
 	// variable GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING should be
 	// set to enable OpenTelemetry tracing.
 	TelemetryPlatformTracingOpenTelemetry = "opentelemetry"
-	// TelemetryPlatformTracingOpenCensus is the name of the environment
-	// variable that can be set to change the default tracing from OpenCensus
-	// to OpenTelemetry.
+	// TelemetryPlatformTracingVar is the name of the environment
+	// variable that can be set to change the default tracing from OpenTelemetry
+	// to OpenCensus.
+	//
+	// The default experimental tracing support for OpenCensus is now deprecated
+	// in the Google Cloud client libraries for Go.
 	TelemetryPlatformTracingVar = "GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING"
 	// OpenTelemetryTracerName is the name given to the OpenTelemetry Tracer
 	// when it is obtained from the OpenTelemetry TracerProvider.
@@ -50,39 +56,58 @@ const (
 )
 
 var (
-	// OpenTelemetryTracingEnabled is true if the environment variable
+	// openCensusTracingEnabledMu guards access to openCensusTracingEnabled field
+	openCensusTracingEnabledMu = sync.RWMutex{}
+	// openCensusTracingEnabled is true if the environment variable
 	// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the
-	// case-insensitive value "opentelemetry".
-	//
-	// Do not access directly. Use instead IsOpenTelemetryTracingEnabled or
-	// IsOpenCensusTracingEnabled. Intended for use only in unit tests. Restore
-	// original value after each test.
-	OpenTelemetryTracingEnabled bool = strings.EqualFold(strings.TrimSpace(
-		os.Getenv(TelemetryPlatformTracingVar)), TelemetryPlatformTracingOpenTelemetry)
+	// case-insensitive value "opencensus".
+	openCensusTracingEnabled bool = strings.EqualFold(strings.TrimSpace(
+		os.Getenv(TelemetryPlatformTracingVar)), TelemetryPlatformTracingOpenCensus)
 )
 
+// SetOpenTelemetryTracingEnabledField programmatically sets the value provided
+// by GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING for the purpose of
+// unit testing. Do not invoke it directly. Intended for use only in unit tests.
+// Restore original value after each test.
+//
+// The default experimental tracing support for OpenCensus is now deprecated in
+// the Google Cloud client libraries for Go.
+func SetOpenTelemetryTracingEnabledField(enabled bool) {
+	openCensusTracingEnabledMu.Lock()
+	defer openCensusTracingEnabledMu.Unlock()
+	openCensusTracingEnabled = !enabled
+}
+
+// Deprecated: The default experimental tracing support for OpenCensus is now
+// deprecated in the Google Cloud client libraries for Go.
+//
 // IsOpenCensusTracingEnabled returns true if the environment variable
-// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is NOT set to the
-// case-insensitive value "opentelemetry".
+// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the
+// case-insensitive value "opencensus".
 func IsOpenCensusTracingEnabled() bool {
-	return !IsOpenTelemetryTracingEnabled()
+	openCensusTracingEnabledMu.RLock()
+	defer openCensusTracingEnabledMu.RUnlock()
+	return openCensusTracingEnabled
 }
 
 // IsOpenTelemetryTracingEnabled returns true if the environment variable
-// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the
-// case-insensitive value "opentelemetry".
+// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is NOT set to the
+// case-insensitive value "opencensus".
 func IsOpenTelemetryTracingEnabled() bool {
-	return OpenTelemetryTracingEnabled
+	return !IsOpenCensusTracingEnabled()
 }
 
 // StartSpan adds a span to the trace with the given name. If IsOpenCensusTracingEnabled
 // returns true, the span will be an OpenCensus span. If IsOpenTelemetryTracingEnabled
 // returns true, the span will be an OpenTelemetry span. Set the environment variable
 // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive
-// value "opentelemetry" before loading the package to use OpenTelemetry tracing.
-// The default will remain OpenCensus until May 29, 2024, at which time the default will
-// switch to "opentelemetry" and explicitly setting the environment variable to
-// "opencensus" will be required to continue using OpenCensus tracing.
+// value "opencensus" before loading the package to use OpenCensus tracing.
+// The default was OpenCensus until May 29, 2024, at which time the default was
+// changed to OpenTelemetry. Explicitly setting the environment variable to
+// "opencensus" is required to continue using OpenCensus tracing.
+//
+// The default experimental tracing support for OpenCensus is now deprecated in
+// the Google Cloud client libraries for Go.
 func StartSpan(ctx context.Context, name string) context.Context {
 	if IsOpenTelemetryTracingEnabled() {
 		ctx, _ = otel.GetTracerProvider().Tracer(OpenTelemetryTracerName).Start(ctx, name)
@@ -96,10 +121,13 @@ func StartSpan(ctx context.Context, name string) context.Context {
 // returns true, the span will be an OpenCensus span. If IsOpenTelemetryTracingEnabled
 // returns true, the span will be an OpenTelemetry span. Set the environment variable
 // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive
-// value "opentelemetry" before loading the package to use OpenTelemetry tracing.
-// The default will remain OpenCensus until May 29, 2024, at which time the default will
-// switch to "opentelemetry" and explicitly setting the environment variable to
-// "opencensus" will be required to continue using OpenCensus tracing.
+// value "opencensus" before loading the package to use OpenCensus tracing.
+// The default was OpenCensus until May 29, 2024, at which time the default was
+// changed to OpenTelemetry. Explicitly setting the environment variable to
+// "opencensus" is required to continue using OpenCensus tracing.
+//
+// The default experimental tracing support for OpenCensus is now deprecated in
+// the Google Cloud client libraries for Go.
 func EndSpan(ctx context.Context, err error) {
 	if IsOpenTelemetryTracingEnabled() {
 		span := ottrace.SpanFromContext(ctx)
@@ -182,10 +210,13 @@ func httpStatusCodeToOCCode(httpStatusCode int) int32 {
 // OpenCensus span. If IsOpenTelemetryTracingEnabled returns true, the expected
 // span must be an OpenTelemetry span. Set the environment variable
 // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive
-// value "opentelemetry" before loading the package to use OpenTelemetry tracing.
-// The default will remain OpenCensus until May 29, 2024, at which time the default will
-// switch to "opentelemetry" and explicitly setting the environment variable to
-// "opencensus" will be required to continue using OpenCensus tracing.
+// value "opencensus" before loading the package to use OpenCensus tracing.
+// The default was OpenCensus until May 29, 2024, at which time the default was
+// changed to OpenTelemetry. Explicitly setting the environment variable to
+// "opencensus" is required to continue using OpenCensus tracing.
+//
+// The default experimental tracing support for OpenCensus is now deprecated in
+// the Google Cloud client libraries for Go.
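+//
+// For example (illustrative, not part of the original file): set
+// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING=opencensus in the
+// process environment before startup to keep emitting OpenCensus spans.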
func TracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) { if IsOpenTelemetryTracingEnabled() { attrs := otAttrs(attrMap) diff --git a/vendor/cloud.google.com/go/storage/BUILD.bazel b/vendor/cloud.google.com/go/storage/BUILD.bazel index fd0d8b6c..ccf63cd3 100644 --- a/vendor/cloud.google.com/go/storage/BUILD.bazel +++ b/vendor/cloud.google.com/go/storage/BUILD.bazel @@ -47,10 +47,12 @@ go_library( "@org_golang_google_api//transport/http", "@org_golang_google_genproto//googleapis/type/date", "@org_golang_google_genproto//googleapis/type/expr", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//encoding", "@org_golang_google_grpc//metadata", "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protowire", "@org_golang_google_protobuf//proto", "@org_golang_google_protobuf//reflect/protoreflect", "@org_golang_google_protobuf//types/known/durationpb", diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md index 8b3fa6fc..e9fb5558 100644 --- a/vendor/cloud.google.com/go/storage/CHANGES.md +++ b/vendor/cloud.google.com/go/storage/CHANGES.md @@ -1,6 +1,135 @@ # Changes +## [1.43.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.42.0...storage/v1.43.0) (2024-07-03) + + +### Features + +* **storage/transfermanager:** Add DownloadDirectory ([#10430](https://github.com/googleapis/google-cloud-go/issues/10430)) ([0d0e5dd](https://github.com/googleapis/google-cloud-go/commit/0d0e5dd5214769cc2c197991c2ece1303bd600de)) +* **storage/transfermanager:** Automatically shard downloads ([#10379](https://github.com/googleapis/google-cloud-go/issues/10379)) ([05816f9](https://github.com/googleapis/google-cloud-go/commit/05816f9fafd3132c371da37f3a879bb9e8e7e604)) + + +### Bug Fixes + +* **storage/transfermanager:** WaitAndClose waits for Callbacks to finish ([#10504](https://github.com/googleapis/google-cloud-go/issues/10504)) ([0e81002](https://github.com/googleapis/google-cloud-go/commit/0e81002b3a5e560c874d814d28a35a102311d9ef)), refs [#10502](https://github.com/googleapis/google-cloud-go/issues/10502) +* **storage:** Allow empty soft delete on Create ([#10394](https://github.com/googleapis/google-cloud-go/issues/10394)) ([d8bd2c1](https://github.com/googleapis/google-cloud-go/commit/d8bd2c1ffc4f27503a74ded438d8bfbdd7707c63)), refs [#10380](https://github.com/googleapis/google-cloud-go/issues/10380) +* **storage:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) +* **storage:** Retry broken pipe error ([#10374](https://github.com/googleapis/google-cloud-go/issues/10374)) ([2f4daa1](https://github.com/googleapis/google-cloud-go/commit/2f4daa11acf9d3f260fa888333090359c4d9198e)), refs [#9178](https://github.com/googleapis/google-cloud-go/issues/9178) + + +### Documentation + +* **storage/control:** Remove allowlist note from Folders RPCs ([d6c543c](https://github.com/googleapis/google-cloud-go/commit/d6c543c3969016c63e158a862fc173dff60fb8d9)) + +## [1.42.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.41.0...storage/v1.42.0) (2024-06-10) + + +### Features + +* **storage:** Add new package transfermanager. This package is intended for parallel uploads and downloads, and is in preview. It is not stable, and is likely to change. 
([#10045](https://github.com/googleapis/google-cloud-go/issues/10045)) ([cde5cbb](https://github.com/googleapis/google-cloud-go/commit/cde5cbba3145d5a702683656a42158621234fe71)) +* **storage:** Add bucket HierarchicalNamespace ([#10315](https://github.com/googleapis/google-cloud-go/issues/10315)) ([b92406c](https://github.com/googleapis/google-cloud-go/commit/b92406ccfadfdcee379e86d6f78c901d772401a9)), refs [#10146](https://github.com/googleapis/google-cloud-go/issues/10146) +* **storage:** Add BucketName to BucketHandle ([#10127](https://github.com/googleapis/google-cloud-go/issues/10127)) ([203cc59](https://github.com/googleapis/google-cloud-go/commit/203cc599e5e2f2f821dc75b47c5a4c9073333f05)) + + +### Bug Fixes + +* **storage:** Set invocation headers on xml reads ([#10250](https://github.com/googleapis/google-cloud-go/issues/10250)) ([c87e1ab](https://github.com/googleapis/google-cloud-go/commit/c87e1ab6f9618b8b3f4d0005ac159abd87b0daaf)) + + +### Documentation + +* **storage:** Update autoclass doc ([#10135](https://github.com/googleapis/google-cloud-go/issues/10135)) ([e4b2737](https://github.com/googleapis/google-cloud-go/commit/e4b2737ddc16d3bf8139a6def7326ac905f62acd)) + +## [1.41.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.40.0...storage/v1.41.0) (2024-05-13) + + +### Features + +* **storage/control:** Make Managed Folders operations public ([264a6dc](https://github.com/googleapis/google-cloud-go/commit/264a6dcddbffaec987dce1dc00f6550c263d2df7)) +* **storage:** Support for soft delete policies and restore ([#9520](https://github.com/googleapis/google-cloud-go/issues/9520)) ([985deb2](https://github.com/googleapis/google-cloud-go/commit/985deb2bdd1c79944cdd960bd3fbfa38cbfa1c91)) + + +### Bug Fixes + +* **storage/control:** An existing resource pattern value `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder=**}` to resource definition `storage.googleapis.com/ManagedFolder` is removed ([3e25053](https://github.com/googleapis/google-cloud-go/commit/3e250530567ee81ed4f51a3856c5940dbec35289)) +* **storage:** Add internaloption.WithDefaultEndpointTemplate ([3b41408](https://github.com/googleapis/google-cloud-go/commit/3b414084450a5764a0248756e95e13383a645f90)) +* **storage:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4)) +* **storage:** Disable gax retries for gRPC ([#9747](https://github.com/googleapis/google-cloud-go/issues/9747)) ([bbfc0ac](https://github.com/googleapis/google-cloud-go/commit/bbfc0acc272f21bf1f558ea23648183d5a11cda5)) +* **storage:** More strongly match regex ([#9706](https://github.com/googleapis/google-cloud-go/issues/9706)) ([3cfc8eb](https://github.com/googleapis/google-cloud-go/commit/3cfc8eb418e064d734bf3d8708162062dbbe988f)), refs [#9705](https://github.com/googleapis/google-cloud-go/issues/9705) +* **storage:** Retry net.OpError on connection reset ([#10154](https://github.com/googleapis/google-cloud-go/issues/10154)) ([54fab10](https://github.com/googleapis/google-cloud-go/commit/54fab107f98b4f79c9df2959a05b981be0a613c1)), refs [#9478](https://github.com/googleapis/google-cloud-go/issues/9478) +* **storage:** Wrap error when MaxAttempts is hit ([#9767](https://github.com/googleapis/google-cloud-go/issues/9767)) ([9cb262b](https://github.com/googleapis/google-cloud-go/commit/9cb262bb65a162665bfb8bed0022615131bae1f2)), refs [#9720](https://github.com/googleapis/google-cloud-go/issues/9720) + + +### Documentation + +* **storage/control:** 
Update storage control documentation and add PHP for publishing ([1d757c6](https://github.com/googleapis/google-cloud-go/commit/1d757c66478963d6cbbef13fee939632c742759c)) + +## [1.40.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.39.1...storage/v1.40.0) (2024-03-29) + + +### Features + +* **storage:** Implement io.WriterTo in Reader ([#9659](https://github.com/googleapis/google-cloud-go/issues/9659)) ([8264a96](https://github.com/googleapis/google-cloud-go/commit/8264a962d1c21d52e8fca50af064c5535c3708d3)) +* **storage:** New storage control client ([#9631](https://github.com/googleapis/google-cloud-go/issues/9631)) ([1f4d279](https://github.com/googleapis/google-cloud-go/commit/1f4d27957743878976d6b4549cc02a5bb894d330)) + + +### Bug Fixes + +* **storage:** Retry errors from last recv on uploads ([#9616](https://github.com/googleapis/google-cloud-go/issues/9616)) ([b6574aa](https://github.com/googleapis/google-cloud-go/commit/b6574aa42ebad0532c2749b6ece879b932f95cb9)) +* **storage:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a)) + + +### Performance Improvements + +* **storage:** Remove protobuf's copy of data on unmarshalling ([#9526](https://github.com/googleapis/google-cloud-go/issues/9526)) ([81281c0](https://github.com/googleapis/google-cloud-go/commit/81281c04e503fd83301baf88cc352c77f5d476ca)) + +## [1.39.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.39.0...storage/v1.39.1) (2024-03-11) + + +### Bug Fixes + +* **storage:** Add object validation case and test ([#9521](https://github.com/googleapis/google-cloud-go/issues/9521)) ([386bef3](https://github.com/googleapis/google-cloud-go/commit/386bef319b4678beaa926ddfe4edef190f11b68d)) + +## [1.39.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.38.0...storage/v1.39.0) (2024-02-29) + + +### Features + +* **storage:** Make it possible to disable Content-Type sniffing ([#9431](https://github.com/googleapis/google-cloud-go/issues/9431)) ([0676670](https://github.com/googleapis/google-cloud-go/commit/067667058c06689b64401be11858d84441584039)) + +## [1.38.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.37.0...storage/v1.38.0) (2024-02-12) + + +### Features + +* **storage:** Support auto-detection of access ID for external_account creds ([#9208](https://github.com/googleapis/google-cloud-go/issues/9208)) ([b958d44](https://github.com/googleapis/google-cloud-go/commit/b958d44589f2b6b226ea3bef23829ac75a0aa6a6)) +* **storage:** Support custom hostname for VirtualHostedStyle SignedURLs ([#9348](https://github.com/googleapis/google-cloud-go/issues/9348)) ([7eec40e](https://github.com/googleapis/google-cloud-go/commit/7eec40e4cf82c53e5bf02bd2c14e0b25043da6d0)) +* **storage:** Support universe domains ([#9344](https://github.com/googleapis/google-cloud-go/issues/9344)) ([29a7498](https://github.com/googleapis/google-cloud-go/commit/29a7498b8eb0d00fdb5acd7ee8ce0e5a2a8c11ce)) + + +### Bug Fixes + +* **storage:** Fix v4 url signing for hosts that specify ports ([#9347](https://github.com/googleapis/google-cloud-go/issues/9347)) ([f127b46](https://github.com/googleapis/google-cloud-go/commit/f127b4648f861c1ba44f41a280a62652620c04c2)) + + +### Documentation + +* **storage:** Indicate that gRPC is incompatible with universe domains ([#9386](https://github.com/googleapis/google-cloud-go/issues/9386)) 
([e8bd85b](https://github.com/googleapis/google-cloud-go/commit/e8bd85bbce12d5f7ab87fa49d166a6a0d84bd12d)) + +## [1.37.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.36.0...storage/v1.37.0) (2024-01-24) + + +### Features + +* **storage:** Add maxAttempts RetryOption ([#9215](https://github.com/googleapis/google-cloud-go/issues/9215)) ([e348cc5](https://github.com/googleapis/google-cloud-go/commit/e348cc5340e127b530e8ee4664fd995e6f038b2c)) +* **storage:** Support IncludeFoldersAsPrefixes ([#9211](https://github.com/googleapis/google-cloud-go/issues/9211)) ([98c9d71](https://github.com/googleapis/google-cloud-go/commit/98c9d7157306de5134547a67c084c248484c9a51)) + + +### Bug Fixes + +* **storage:** Migrate deprecated proto dep ([#9232](https://github.com/googleapis/google-cloud-go/issues/9232)) ([ebbb610](https://github.com/googleapis/google-cloud-go/commit/ebbb610e0f58035fd01ad7893971382d8bbd092f)), refs [#9189](https://github.com/googleapis/google-cloud-go/issues/9189) + ## [1.36.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.35.1...storage/v1.36.0) (2023-12-14) diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go index 74799e55..560a5605 100644 --- a/vendor/cloud.google.com/go/storage/acl.go +++ b/vendor/cloud.google.com/go/storage/acl.go @@ -16,8 +16,6 @@ package storage import ( "context" - "net/http" - "reflect" "cloud.google.com/go/internal/trace" "cloud.google.com/go/storage/internal/apiv2/storagepb" @@ -162,15 +160,6 @@ func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error { return a.c.tc.DeleteObjectACL(ctx, a.bucket, a.object, entity, opts...) } -func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) { - vc := reflect.ValueOf(call) - vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)}) - if a.userProject != "" { - vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)}) - } - setClientHeader(call.Header()) -} - func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule { var rs []ACLRule for _, item := range items { diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go index 1059d4e8..d582a60d 100644 --- a/vendor/cloud.google.com/go/storage/bucket.go +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -116,6 +116,11 @@ func (b *BucketHandle) DefaultObjectACL() *ACLHandle { return &b.defaultObjectACL } +// BucketName returns the name of the bucket. +func (b *BucketHandle) BucketName() string { + return b.name +} + // Object returns an ObjectHandle, which provides operations on the named object. // This call does not perform any network operations such as fetching the object or verifying its existence. // Use methods on ObjectHandle to perform network operations. 
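The BucketName accessor added above is a pure getter over the handle's stored name and performs no RPC. A minimal usage sketch (illustrative only; the client setup and bucket name are assumptions, not part of this patch):

	bkt := client.Bucket("example-bucket")
	name := bkt.BucketName() // "example-bucket", no network call made
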
@@ -275,18 +280,24 @@ func (b *BucketHandle) detectDefaultGoogleAccessID() (string, error) {
 		err := json.Unmarshal(b.c.creds.JSON, &sa)
 		if err != nil {
 			returnErr = err
-		} else if sa.CredType == "impersonated_service_account" {
-			start, end := strings.LastIndex(sa.SAImpersonationURL, "/"), strings.LastIndex(sa.SAImpersonationURL, ":")
-
-			if end <= start {
-				returnErr = errors.New("error parsing impersonated service account credentials")
-			} else {
-				return sa.SAImpersonationURL[start+1 : end], nil
-			}
-		} else if sa.CredType == "service_account" && sa.ClientEmail != "" {
-			return sa.ClientEmail, nil
 		} else {
-			returnErr = errors.New("unable to parse credentials; only service_account and impersonated_service_account credentials are supported")
+			switch sa.CredType {
+			case "impersonated_service_account", "external_account":
+				start, end := strings.LastIndex(sa.SAImpersonationURL, "/"), strings.LastIndex(sa.SAImpersonationURL, ":")
+
+				if end <= start {
+					returnErr = errors.New("error parsing external or impersonated service account credentials")
+				} else {
+					return sa.SAImpersonationURL[start+1 : end], nil
+				}
+			case "service_account":
+				if sa.ClientEmail != "" {
+					return sa.ClientEmail, nil
+				}
+				returnErr = errors.New("empty service account client email")
+			default:
+				returnErr = errors.New("unable to parse credentials; only service_account, external_account and impersonated_service_account credentials are supported")
+			}
 		}
 	}
@@ -302,7 +313,7 @@ func (b *BucketHandle) detectDefaultGoogleAccessID() (string, error) {
 		}
 	}
 
-	return "", fmt.Errorf("storage: unable to detect default GoogleAccessID: %w. Please provide the GoogleAccessID or use a supported means for autodetecting it (see https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_[BucketHandle.SignedURL]_and_[BucketHandle.GenerateSignedPostPolicyV4])", returnErr)
+	return "", fmt.Errorf("storage: unable to detect default GoogleAccessID: %w. Please provide the GoogleAccessID or use a supported means for autodetecting it (see https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing)", returnErr)
 }
 
 func (b *BucketHandle) defaultSignBytesFunc(email string) func([]byte) ([]byte, error) {
@@ -473,6 +484,20 @@ type BucketAttrs struct {
 	// cannot be modified once the bucket is created.
 	// ObjectRetention cannot be configured or reported through the gRPC API.
 	ObjectRetentionMode string
+
+	// SoftDeletePolicy contains the bucket's soft delete policy, which defines
+	// the period of time that soft-deleted objects will be retained, and cannot
+	// be permanently deleted. By default, new buckets will be created with a
+	// 7 day retention duration. In order to fully disable soft delete, you need
+	// to set a policy with a RetentionDuration of 0.
+	SoftDeletePolicy *SoftDeletePolicy
+
+	// HierarchicalNamespace contains the bucket's hierarchical namespace
+	// configuration. Hierarchical namespace enabled buckets can contain
+	// [cloud.google.com/go/storage/control/apiv2/controlpb.Folder] resources.
+	// It cannot be modified after bucket creation time.
+	// UniformBucketLevelAccess must also be enabled on the bucket.
+	HierarchicalNamespace *HierarchicalNamespace
 }
 
 // BucketPolicyOnly is an alias for UniformBucketLevelAccess.
@@ -754,12 +779,35 @@ type Autoclass struct {
 	// TerminalStorageClass: The storage class that objects in the bucket
 	// eventually transition to if they are not read for a certain length of
 	// time. Valid values are NEARLINE and ARCHIVE.
+ // To modify TerminalStorageClass, Enabled must be set to true. TerminalStorageClass string // TerminalStorageClassUpdateTime represents the time of the most recent // update to "TerminalStorageClass". TerminalStorageClassUpdateTime time.Time } +// SoftDeletePolicy contains the bucket's soft delete policy, which defines the +// period of time that soft-deleted objects will be retained, and cannot be +// permanently deleted. +type SoftDeletePolicy struct { + // EffectiveTime indicates the time from which the policy, or one with a + // greater retention, was effective. This field is read-only. + EffectiveTime time.Time + + // RetentionDuration is the amount of time that soft-deleted objects in the + // bucket will be retained and cannot be permanently deleted. + RetentionDuration time.Duration +} + +// HierarchicalNamespace contains the bucket's hierarchical namespace +// configuration. Hierarchical namespace enabled buckets can contain +// [cloud.google.com/go/storage/control/apiv2/controlpb.Folder] resources. +type HierarchicalNamespace struct { + // Enabled indicates whether hierarchical namespace features are enabled on + // the bucket. This can only be set at bucket creation time currently. + Enabled bool +} + func newBucket(b *raw.Bucket) (*BucketAttrs, error) { if b == nil { return nil, nil @@ -797,6 +845,8 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) { RPO: toRPO(b), CustomPlacementConfig: customPlacementFromRaw(b.CustomPlacementConfig), Autoclass: toAutoclassFromRaw(b.Autoclass), + SoftDeletePolicy: toSoftDeletePolicyFromRaw(b.SoftDeletePolicy), + HierarchicalNamespace: toHierarchicalNamespaceFromRaw(b.HierarchicalNamespace), }, nil } @@ -830,6 +880,8 @@ func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs { CustomPlacementConfig: customPlacementFromProto(b.GetCustomPlacementConfig()), ProjectNumber: parseProjectNumber(b.GetProject()), // this can return 0 the project resource name is ID based Autoclass: toAutoclassFromProto(b.GetAutoclass()), + SoftDeletePolicy: toSoftDeletePolicyFromProto(b.SoftDeletePolicy), + HierarchicalNamespace: toHierarchicalNamespaceFromProto(b.HierarchicalNamespace), } } @@ -885,6 +937,8 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket { Rpo: b.RPO.String(), CustomPlacementConfig: b.CustomPlacementConfig.toRawCustomPlacement(), Autoclass: b.Autoclass.toRawAutoclass(), + SoftDeletePolicy: b.SoftDeletePolicy.toRawSoftDeletePolicy(), + HierarchicalNamespace: b.HierarchicalNamespace.toRawHierarchicalNamespace(), } } @@ -945,6 +999,8 @@ func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket { Rpo: b.RPO.String(), CustomPlacementConfig: b.CustomPlacementConfig.toProtoCustomPlacement(), Autoclass: b.Autoclass.toProtoAutoclass(), + SoftDeletePolicy: b.SoftDeletePolicy.toProtoSoftDeletePolicy(), + HierarchicalNamespace: b.HierarchicalNamespace.toProtoHierarchicalNamespace(), } } @@ -1026,6 +1082,7 @@ func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket { IamConfig: bktIAM, Rpo: ua.RPO.String(), Autoclass: ua.Autoclass.toProtoAutoclass(), + SoftDeletePolicy: ua.SoftDeletePolicy.toProtoSoftDeletePolicy(), Labels: ua.setLabels, } } @@ -1143,9 +1200,15 @@ type BucketAttrsToUpdate struct { RPO RPO // If set, updates the autoclass configuration of the bucket. + // To disable autoclass on the bucket, set to an empty &Autoclass{}. + // To update the configuration for Autoclass.TerminalStorageClass, + // Autoclass.Enabled must also be set to true. // See https://cloud.google.com/storage/docs/using-autoclass for more information. 
Autoclass *Autoclass + // If set, updates the soft delete policy of the bucket. + SoftDeletePolicy *SoftDeletePolicy + // acl is the list of access control rules on the bucket. // It is unexported and only used internally by the gRPC client. // Library users should use ACLHandle methods directly. @@ -1267,6 +1330,14 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { } rb.ForceSendFields = append(rb.ForceSendFields, "Autoclass") } + if ua.SoftDeletePolicy != nil { + if ua.SoftDeletePolicy.RetentionDuration == 0 { + rb.NullFields = append(rb.NullFields, "SoftDeletePolicy") + rb.SoftDeletePolicy = nil + } else { + rb.SoftDeletePolicy = ua.SoftDeletePolicy.toRawSoftDeletePolicy() + } + } if ua.PredefinedACL != "" { // Clear ACL or the call will fail. rb.Acl = nil @@ -2047,6 +2118,92 @@ func toAutoclassFromProto(a *storagepb.Bucket_Autoclass) *Autoclass { } } +func (p *SoftDeletePolicy) toRawSoftDeletePolicy() *raw.BucketSoftDeletePolicy { + if p == nil { + return nil + } + // Excluding read only field EffectiveTime. + // ForceSendFields must be set to send a zero value for RetentionDuration and disable + // soft delete. + return &raw.BucketSoftDeletePolicy{ + RetentionDurationSeconds: int64(p.RetentionDuration.Seconds()), + ForceSendFields: []string{"RetentionDurationSeconds"}, + } +} + +func (p *SoftDeletePolicy) toProtoSoftDeletePolicy() *storagepb.Bucket_SoftDeletePolicy { + if p == nil { + return nil + } + // Excluding read only field EffectiveTime. + return &storagepb.Bucket_SoftDeletePolicy{ + RetentionDuration: durationpb.New(p.RetentionDuration), + } +} + +func toSoftDeletePolicyFromRaw(p *raw.BucketSoftDeletePolicy) *SoftDeletePolicy { + if p == nil { + return nil + } + + policy := &SoftDeletePolicy{ + RetentionDuration: time.Duration(p.RetentionDurationSeconds) * time.Second, + } + + // Return EffectiveTime only if parsed to a valid value. + if t, err := time.Parse(time.RFC3339, p.EffectiveTime); err == nil { + policy.EffectiveTime = t + } + + return policy +} + +func toSoftDeletePolicyFromProto(p *storagepb.Bucket_SoftDeletePolicy) *SoftDeletePolicy { + if p == nil { + return nil + } + return &SoftDeletePolicy{ + EffectiveTime: p.GetEffectiveTime().AsTime(), + RetentionDuration: p.GetRetentionDuration().AsDuration(), + } +} + +func (hns *HierarchicalNamespace) toProtoHierarchicalNamespace() *storagepb.Bucket_HierarchicalNamespace { + if hns == nil { + return nil + } + return &storagepb.Bucket_HierarchicalNamespace{ + Enabled: hns.Enabled, + } +} + +func (hns *HierarchicalNamespace) toRawHierarchicalNamespace() *raw.BucketHierarchicalNamespace { + if hns == nil { + return nil + } + return &raw.BucketHierarchicalNamespace{ + Enabled: hns.Enabled, + } +} + +func toHierarchicalNamespaceFromProto(p *storagepb.Bucket_HierarchicalNamespace) *HierarchicalNamespace { + if p == nil { + return nil + } + return &HierarchicalNamespace{ + Enabled: p.Enabled, + } +} + +func toHierarchicalNamespaceFromRaw(r *raw.BucketHierarchicalNamespace) *HierarchicalNamespace { + if r == nil { + return nil + } + return &HierarchicalNamespace{ + Enabled: r.Enabled, + } +} + // Objects returns an iterator over the objects in the bucket that match the // Query q. If q is nil, no filtering is done. Objects will be iterated over // lexicographically by name. 
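Note how the soft delete plumbing above uses ForceSendFields on create and NullFields on update so that a zero RetentionDuration is transmitted rather than omitted; that is what makes disabling soft delete possible. A minimal sketch, assuming an existing client and ctx, with placeholder bucket and project names:

	// Create a bucket with soft delete disabled entirely (0 retention).
	attrs := &storage.BucketAttrs{
		SoftDeletePolicy: &storage.SoftDeletePolicy{RetentionDuration: 0},
	}
	if err := client.Bucket("example-bucket").Create(ctx, "example-project", attrs); err != nil {
		// handle error
	}

	// Update an existing bucket to retain soft-deleted objects for 7 days.
	_, err := client.Bucket("example-bucket").Update(ctx, storage.BucketAttrsToUpdate{
		SoftDeletePolicy: &storage.SoftDeletePolicy{RetentionDuration: 7 * 24 * time.Hour},
	})
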
diff --git a/vendor/cloud.google.com/go/storage/client.go b/vendor/cloud.google.com/go/storage/client.go index 4906b1d1..bbe89276 100644 --- a/vendor/cloud.google.com/go/storage/client.go +++ b/vendor/cloud.google.com/go/storage/client.go @@ -59,8 +59,9 @@ type storageClient interface { // Object metadata methods. DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error - GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) + GetObject(ctx context.Context, params *getObjectParams, opts ...storageOption) (*ObjectAttrs, error) UpdateObject(ctx context.Context, params *updateObjectParams, opts ...storageOption) (*ObjectAttrs, error) + RestoreObject(ctx context.Context, params *restoreObjectParams, opts ...storageOption) (*ObjectAttrs, error) // Default Object ACL methods. @@ -182,16 +183,6 @@ type storageOption interface { Apply(s *settings) } -func withGAXOptions(opts ...gax.CallOption) storageOption { - return &gaxOption{opts} -} - -type gaxOption struct { - opts []gax.CallOption -} - -func (o *gaxOption) Apply(s *settings) { s.gax = o.opts } - func withRetryConfig(rc *retryConfig) storageOption { return &retryOption{rc} } @@ -254,6 +245,9 @@ type openWriterParams struct { // attrs - see `Writer.ObjectAttrs`. // Required. attrs *ObjectAttrs + // forceEmptyContentType - Disables auto-detect of Content-Type + // Optional. + forceEmptyContentType bool // conds - see `Writer.o.conds`. // Optional. conds *Conditions @@ -291,6 +285,14 @@ type newRangeReaderParams struct { readCompressed bool // Use accept-encoding: gzip. Only works for HTTP currently. } +type getObjectParams struct { + bucket, object string + gen int64 + encryptionKey []byte + conds *Conditions + softDeleted bool +} + type updateObjectParams struct { bucket, object string uattrs *ObjectAttrsToUpdate @@ -300,6 +302,14 @@ type updateObjectParams struct { overrideRetention *bool } +type restoreObjectParams struct { + bucket, object string + gen int64 + encryptionKey []byte + conds *Conditions + copySourceACL bool +} + type composeObjectRequest struct { dstBucket string dstObject destinationObject diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go index 22adb744..c274c762 100644 --- a/vendor/cloud.google.com/go/storage/doc.go +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -335,9 +335,10 @@ to add a [custom audit logging] header: This package includes support for the Cloud Storage gRPC API, which is currently in preview. This implementation uses gRPC rather than the current JSON & XML -APIs to make requests to Cloud Storage. If you would like to try the API, -please contact your GCP account rep for more information. The gRPC API is not -yet generally available, so it may be subject to breaking changes. +APIs to make requests to Cloud Storage. Kindly contact the Google Cloud Storage gRPC +team at gcs-grpc-contact@google.com with a list of GCS buckets you would like to +allowlist to access this API. The Go Storage gRPC library is not yet generally +available, so it may be subject to breaking changes. To create a client which will use gRPC, use the alternate constructor: @@ -349,7 +350,7 @@ To create a client which will use gRPC, use the alternate constructor: // Use client as usual. If the application is running within GCP, users may get better performance by -enabling DirectPath (enabling requests to skip some proxy steps). 
To enable, +enabling Direct Google Access (enabling requests to skip some proxy steps). To enable, set the environment variable `GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS=true` and add the following side-effect imports to your application: @@ -358,6 +359,13 @@ the following side-effect imports to your application: _ "google.golang.org/grpc/xds/googledirectpath" ) +# Storage Control API + +Certain control plane and long-running operations for Cloud Storage (including Folder +and Managed Folder operations) are supported via the autogenerated Storage Control +client, which is available as a subpackage in this module. See package docs at +[cloud.google.com/go/storage/control/apiv2] or reference the [Storage Control API] docs. + [Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam [XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object [Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy @@ -366,5 +374,6 @@ the following side-effect imports to your application: [impersonation enabled]: https://cloud.google.com/sdk/gcloud/reference#--impersonate-service-account [IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview [custom audit logging]: https://cloud.google.com/storage/docs/audit-logging#add-custom-metadata +[Storage Control API]: https://cloud.google.com/storage/docs/reference/rpc/google.storage.control.v2 */ package storage // import "cloud.google.com/go/storage" diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go index a51cf9c0..d81a17b6 100644 --- a/vendor/cloud.google.com/go/storage/grpc_client.go +++ b/vendor/cloud.google.com/go/storage/grpc_client.go @@ -19,6 +19,7 @@ import ( "encoding/base64" "errors" "fmt" + "hash/crc32" "io" "net/url" "os" @@ -34,8 +35,11 @@ import ( "google.golang.org/api/option/internaloption" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/encoding" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" ) @@ -112,6 +116,8 @@ type grpcStorageClient struct { func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) { s := initSettings(opts...) s.clientOption = append(defaultGRPCOptions(), s.clientOption...) + // Disable all gax-level retries in favor of retry logic in the veneer client. + s.gax = append(s.gax, gax.WithRetry(nil)) config := newStorageConfig(s.clientOption...) if config.readAPIWasSet { @@ -361,6 +367,9 @@ func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uat if uattrs.Autoclass != nil { fieldMask.Paths = append(fieldMask.Paths, "autoclass") } + if uattrs.SoftDeletePolicy != nil { + fieldMask.Paths = append(fieldMask.Paths, "soft_delete_policy") + } for label := range uattrs.setLabels { fieldMask.Paths = append(fieldMask.Paths, fmt.Sprintf("labels.%s", label)) @@ -373,6 +382,13 @@ func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uat req.UpdateMask = fieldMask + if len(fieldMask.Paths) < 1 { + // Nothing to update. Send a get request for current attrs instead. This + // maintains consistency with JSON bucket updates. + opts = append(opts, idempotent(true)) + return c.GetBucket(ctx, bucket, conds, opts...) 
+	}
+
 	var battrs *BucketAttrs
 	err := run(ctx, func(ctx context.Context) error {
 		res, err := c.raw.UpdateBucket(ctx, req, s.gax...)
@@ -415,11 +431,17 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
 		IncludeTrailingDelimiter: it.query.IncludeTrailingDelimiter,
 		MatchGlob:                it.query.MatchGlob,
 		ReadMask:                 q.toFieldMask(), // a nil Query still results in a "*" FieldMask
+		SoftDeleted:              it.query.SoftDeleted,
 	}
 	if s.userProject != "" {
 		ctx = setUserProjectMetadata(ctx, s.userProject)
 	}
 	fetch := func(pageSize int, pageToken string) (token string, err error) {
+		// IncludeFoldersAsPrefixes is not supported for gRPC
+		// TODO: remove this when support is added in the proto.
+		if it.query.IncludeFoldersAsPrefixes {
+			return "", status.Errorf(codes.Unimplemented, "storage: IncludeFoldersAsPrefixes is not supported in gRPC")
+		}
 		var objects []*storagepb.Object
 		var gitr *gapic.ObjectIterator
 		err = run(it.ctx, func(ctx context.Context) error {
@@ -479,22 +501,25 @@ func (c *grpcStorageClient) DeleteObject(ctx context.Context, bucket, object str
 	return err
 }
 
-func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) {
+func (c *grpcStorageClient) GetObject(ctx context.Context, params *getObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
 	s := callSettings(c.settings, opts...)
 	req := &storagepb.GetObjectRequest{
-		Bucket: bucketResourceName(globalProjectAlias, bucket),
-		Object: object,
+		Bucket: bucketResourceName(globalProjectAlias, params.bucket),
+		Object: params.object,
 		// ProjectionFull by default.
 		ReadMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}},
 	}
-	if err := applyCondsProto("grpcStorageClient.GetObject", gen, conds, req); err != nil {
+	if err := applyCondsProto("grpcStorageClient.GetObject", params.gen, params.conds, req); err != nil {
 		return nil, err
 	}
 	if s.userProject != "" {
 		ctx = setUserProjectMetadata(ctx, s.userProject)
 	}
-	if encryptionKey != nil {
-		req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(encryptionKey)
+	if params.encryptionKey != nil {
+		req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(params.encryptionKey)
+	}
+	if params.softDeleted {
+		req.SoftDeleted = &params.softDeleted
 	}
 
 	var attrs *ObjectAttrs
@@ -584,6 +609,17 @@ func (c *grpcStorageClient) UpdateObject(ctx context.Context, params *updateObje
 
 	req.UpdateMask = fieldMask
 
+	if len(fieldMask.Paths) < 1 {
+		// Nothing to update. To maintain consistency with JSON, we must still
+		// update the object because metageneration and other fields are
+		// updated even on an empty update.
+		// gRPC will fail if the fieldmask is empty, so instead we add an
+		// output-only field to the update mask. Output-only fields are (and must
+		// be - see AIP 161) ignored, but allow us to send an empty update because
+		// any mask that is valid for read (as this one is) must be valid for write.
+		fieldMask.Paths = append(fieldMask.Paths, "create_time")
+	}
+
 	var attrs *ObjectAttrs
 	err := run(ctx, func(ctx context.Context) error {
 		res, err := c.raw.UpdateObject(ctx, req, s.gax...)
@@ -597,6 +633,32 @@ func (c *grpcStorageClient) UpdateObje
 	return attrs, err
 }
 
+func (c *grpcStorageClient) RestoreObject(ctx context.Context, params *restoreObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
+	s := callSettings(c.settings, opts...)
+	req := &storagepb.RestoreObjectRequest{
+		Bucket:        bucketResourceName(globalProjectAlias, params.bucket),
+		Object:        params.object,
+		CopySourceAcl: &params.copySourceACL,
+	}
+	if err := applyCondsProto("grpcStorageClient.RestoreObject", params.gen, params.conds, req); err != nil {
+		return nil, err
+	}
+	if s.userProject != "" {
+		ctx = setUserProjectMetadata(ctx, s.userProject)
+	}
+
+	var attrs *ObjectAttrs
+	err := run(ctx, func(ctx context.Context) error {
+		res, err := c.raw.RestoreObject(ctx, req, s.gax...)
+		attrs = newObjectFromProto(res)
+		return err
+	}, s.retry, s.idempotent)
+	if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
+		return nil, ErrObjectNotExist
+	}
+	return attrs, err
+}
+
 // Default Object ACL methods.
 
 func (c *grpcStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error {
@@ -726,7 +788,7 @@ func (c *grpcStorageClient) UpdateBucketACL(ctx context.Context, bucket string,
 func (c *grpcStorageClient) DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error {
 	// There is no separate API for PATCH in gRPC.
 	// Make a GET call first to retrieve ObjectAttrs.
-	attrs, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...)
+	attrs, err := c.GetObject(ctx, &getObjectParams{bucket, object, defaultGen, nil, nil, false}, opts...)
 	if err != nil {
 		return err
 	}
@@ -759,7 +821,7 @@ func (c *grpcStorageClient) DeleteObjectACL(ctx context.Context, bucket, object
 // ListObjectACLs retrieves object ACL entries. By default, it operates on the latest generation of this object.
 // Selecting a specific generation of this object is not currently supported by the client.
 func (c *grpcStorageClient) ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) {
-	o, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...)
+	o, err := c.GetObject(ctx, &getObjectParams{bucket, object, defaultGen, nil, nil, false}, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -769,7 +831,7 @@ func (c *grpcStorageClient) ListObjectACLs(ctx context.Context, bucket, object s
 func (c *grpcStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error {
 	// There is no separate API for PATCH in gRPC.
 	// Make a GET call first to retrieve ObjectAttrs.
-	attrs, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...)
+	attrs, err := c.GetObject(ctx, &getObjectParams{bucket, object, defaultGen, nil, nil, false}, opts...)
 	if err != nil {
 		return err
 	}
@@ -897,12 +959,50 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
 	return r, nil
 }
 
+// bytesCodec is a grpc codec which permits receiving messages as either
+// protobuf messages, or as raw []bytes.
+type bytesCodec struct {
+	encoding.Codec
+}
+
+func (bytesCodec) Marshal(v any) ([]byte, error) {
+	vv, ok := v.(proto.Message)
+	if !ok {
+		return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
+	}
+	return proto.Marshal(vv)
+}
+
+func (bytesCodec) Unmarshal(data []byte, v any) error {
+	switch v := v.(type) {
+	case *[]byte:
+		// If gRPC could recycle the data []byte after unmarshaling (through
+		// buffer pools), we would need to make a copy here.
+ *v = data + return nil + case proto.Message: + return proto.Unmarshal(data, v) + default: + return fmt.Errorf("can not unmarshal type %T", v) + } +} + +func (bytesCodec) Name() string { + // If this isn't "", then gRPC sets the content-subtype of the call to this + // value and we get errors. + return "" +} + func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewRangeReader") defer func() { trace.EndSpan(ctx, err) }() s := callSettings(c.settings, opts...) + s.gax = append(s.gax, gax.WithGRPCOptions( + grpc.ForceCodec(bytesCodec{}), + )) + if s.userProject != "" { ctx = setUserProjectMetadata(ctx, s.userProject) } @@ -918,6 +1018,8 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange req.Generation = params.gen } + var databuf []byte + // Define a function that initiates a Read with offset and length, assuming // we have already read seen bytes. reopen := func(seen int64) (*readStreamResponse, context.CancelFunc, error) { @@ -952,12 +1054,23 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange return err } - msg, err = stream.Recv() + // Receive the message into databuf as a wire-encoded message so we can + // use a custom decoder to avoid an extra copy at the protobuf layer. + err := stream.RecvMsg(&databuf) // These types of errors show up on the Recv call, rather than the // initialization of the stream via ReadObject above. if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { return ErrObjectNotExist } + if err != nil { + return err + } + // Use a custom decoder that uses protobuf unmarshalling for all + // fields except the checksummed data. + // Subsequent receives in Read calls will skip all protobuf + // unmarshalling and directly read the content from the gRPC []byte + // response, since only the first call will contain other fields. + msg, err = readFullObjectResponse(databuf) return err }, s.retry, s.idempotent) @@ -983,6 +1096,16 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange // This is the size of the entire object, even if only a range was requested. size := obj.GetSize() + // Only support checksums when reading an entire object, not a range. + var ( + wantCRC uint32 + checkCRC bool + ) + if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && params.offset == 0 && params.length < 0 { + wantCRC = checksums.GetCrc32C() + checkCRC = true + } + r = &Reader{ Attrs: ReaderObjectAttrs{ Size: size, @@ -1003,7 +1126,11 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange leftovers: msg.GetChecksummedData().GetContent(), settings: s, zeroRange: params.length == 0, + databuf: databuf, + wantCRC: wantCRC, + checkCRC: checkCRC, }, + checkCRC: checkCRC, } cr := msg.GetContentRange() @@ -1021,12 +1148,6 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange r.reader.Close() } - // Only support checksums when reading an entire object, not a range. 
-	if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && params.offset == 0 && params.length < 0 {
-		r.wantCRC = checksums.GetCrc32C()
-		r.checkCRC = true
-	}
-
 	return r, nil
 }
 
@@ -1401,14 +1522,37 @@ type gRPCReader struct {
 	stream    storagepb.Storage_ReadObjectClient
 	reopen    func(seen int64) (*readStreamResponse, context.CancelFunc, error)
 	leftovers []byte
+	databuf   []byte
 	cancel    context.CancelFunc
 	settings  *settings
+	checkCRC  bool   // should we check the CRC?
+	wantCRC   uint32 // the CRC32c value the server sent in the header
+	gotCRC    uint32 // running crc
+}
+
+// Update the running CRC with the data in the slice, if CRC checking was enabled.
+func (r *gRPCReader) updateCRC(b []byte) {
+	if r.checkCRC {
+		r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, b)
+	}
+}
+
+// Checks whether the CRC matches at the conclusion of a read, if CRC checking was enabled.
+func (r *gRPCReader) runCRCCheck() error {
+	if r.checkCRC && r.gotCRC != r.wantCRC {
+		return fmt.Errorf("storage: bad CRC on read: got %d, want %d", r.gotCRC, r.wantCRC)
+	}
+	return nil
 }
 
 // Read reads bytes into the user's buffer from an open gRPC stream.
 func (r *gRPCReader) Read(p []byte) (int, error) {
-	// The entire object has been read by this reader, return EOF.
+	// The entire object has been read by this reader, check the checksum if
+	// necessary and return EOF.
 	if r.size == r.seen || r.zeroRange {
+		if err := r.runCRCCheck(); err != nil {
+			return 0, err
+		}
 		return 0, io.EOF
 	}
 
@@ -1417,7 +1561,7 @@ func (r *gRPCReader) Read(p []byte) (int, error) {
 	// using the same reader. One encounters an error and the stream is closed
 	// and then reopened while the other routine attempts to read from it.
 	if r.stream == nil {
-		return 0, fmt.Errorf("reader has been closed")
+		return 0, fmt.Errorf("storage: reader has been closed")
 	}
 
 	var n int
@@ -1426,12 +1570,13 @@ func (r *gRPCReader) Read(p []byte) (int, error) {
 	if len(r.leftovers) > 0 {
 		n = copy(p, r.leftovers)
 		r.seen += int64(n)
+		r.updateCRC(p[:n])
 		r.leftovers = r.leftovers[n:]
 		return n, nil
 	}
 
 	// Attempt to Recv the next message on the stream.
-	msg, err := r.recv()
+	content, err := r.recv()
 	if err != nil {
 		return 0, err
 	}
@@ -1443,7 +1588,6 @@ func (r *gRPCReader) Read(p []byte) (int, error) {
 	// present in the response here.
 	// TODO: Figure out if we need to support decompressive transcoding
 	// https://cloud.google.com/storage/docs/transcoding.
-	content := msg.GetChecksummedData().GetContent()
 	n = copy(p[n:], content)
 	leftover := len(content) - n
 	if leftover > 0 {
@@ -1452,10 +1596,78 @@ func (r *gRPCReader) Read(p []byte) (int, error) {
 		r.leftovers = content[n:]
 	}
 	r.seen += int64(n)
+	r.updateCRC(p[:n])
 
 	return n, nil
 }
 
+// WriteTo writes all the data requested by the Reader into w, implementing
+// io.WriterTo.
+func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) {
+	// The entire object has been read by this reader, check the checksum if
+	// necessary and return nil.
+	if r.size == r.seen || r.zeroRange {
+		if err := r.runCRCCheck(); err != nil {
+			return 0, err
+		}
+		return 0, nil
+	}
+
+	// No stream to read from, either never initialized or Close was called.
+	// Note: There is a potential concurrency issue if multiple routines are
+	// using the same reader. One encounters an error and the stream is closed
+	// and then reopened while the other routine attempts to read from it.
+	if r.stream == nil {
+		return 0, fmt.Errorf("storage: reader has been closed")
+	}
+
+	// Track bytes already written before this call.
+ var alreadySeen = r.seen + + // Write any leftovers to the stream. There will be some leftovers from the + // original NewRangeReader call. + if len(r.leftovers) > 0 { + // Write() will write the entire leftovers slice unless there is an error. + written, err := w.Write(r.leftovers) + r.seen += int64(written) + r.updateCRC(r.leftovers) + r.leftovers = nil + if err != nil { + return r.seen - alreadySeen, err + } + } + + // Loop and receive additional messages until the entire data is written. + for { + // Attempt to receive the next message on the stream. + // Will terminate with io.EOF once data has all come through. + // recv() handles stream reopening and retry logic so no need for retries here. + msg, err := r.recv() + if err != nil { + if err == io.EOF { + // We are done; check the checksum if necessary and return. + err = r.runCRCCheck() + } + return r.seen - alreadySeen, err + } + + // TODO: Determine if we need to capture incremental CRC32C for this + // chunk. The Object CRC32C checksum is captured when directed to read + // the entire Object. If directed to read a range, we may need to + // calculate the range's checksum for verification if the checksum is + // present in the response here. + // TODO: Figure out if we need to support decompressive transcoding + // https://cloud.google.com/storage/docs/transcoding. + written, err := w.Write(msg) + r.seen += int64(written) + r.updateCRC(msg) + if err != nil { + return r.seen - alreadySeen, err + } + } + +} + // Close cancels the read stream's context in order for it to be closed and // collected. func (r *gRPCReader) Close() error { @@ -1466,9 +1678,10 @@ func (r *gRPCReader) Close() error { return nil } -// recv attempts to Recv the next message on the stream. In the event -// that a retryable error is encountered, the stream will be closed, reopened, -// and Recv again. This will attempt to Recv until one of the following is true: +// recv attempts to Recv the next message on the stream and extract the object +// data that it contains. In the event that a retryable error is encountered, +// the stream will be closed, reopened, and RecvMsg again. +// This will attempt to Recv until one of the following is true: // // * Recv is successful // * A non-retryable error is encountered @@ -1476,8 +1689,9 @@ func (r *gRPCReader) Close() error { // // The last error received is the one that is returned, which could be from // an attempt to reopen the stream. -func (r *gRPCReader) recv() (*storagepb.ReadObjectResponse, error) { - msg, err := r.stream.Recv() +func (r *gRPCReader) recv() ([]byte, error) { + err := r.stream.RecvMsg(&r.databuf) + var shouldRetry = ShouldRetry if r.settings.retry != nil && r.settings.retry.shouldRetry != nil { shouldRetry = r.settings.retry.shouldRetry @@ -1487,10 +1701,195 @@ func (r *gRPCReader) recv() (*storagepb.ReadObjectResponse, error) { // reopen the stream, but will backoff if further attempts are necessary. // Reopening the stream Recvs the first message, so if retrying is // successful, the next logical chunk will be returned. - msg, err = r.reopenStream() + msg, err := r.reopenStream() + return msg.GetChecksummedData().GetContent(), err } - return msg, err + if err != nil { + return nil, err + } + + return readObjectResponseContent(r.databuf) +} + +// ReadObjectResponse field and subfield numbers. 
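+// These constants mirror the field numbers of the ReadObjectResponse message
+// and its nested ChecksummedData message in the google.storage.v2 protos;
+// the manual wire decoding below depends on them staying in sync.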
+const ( + checksummedDataField = protowire.Number(1) + checksummedDataContentField = protowire.Number(1) + checksummedDataCRC32CField = protowire.Number(2) + objectChecksumsField = protowire.Number(2) + contentRangeField = protowire.Number(3) + metadataField = protowire.Number(4) +) + +// readObjectResponseContent returns the checksummed_data.content field of a +// ReadObjectResponse message, or an error if the message is invalid. +// This can be used on recvs of objects after the first recv, since only the +// first message will contain non-data fields. +func readObjectResponseContent(b []byte) ([]byte, error) { + checksummedData, err := readProtoBytes(b, checksummedDataField) + if err != nil { + return b, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData: %v", err) + } + content, err := readProtoBytes(checksummedData, checksummedDataContentField) + if err != nil { + return content, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %v", err) + } + + return content, nil +} + +// readFullObjectResponse returns the ReadObjectResponse that is encoded in the +// wire-encoded message buffer b, or an error if the message is invalid. +// This must be used on the first recv of an object as it may contain all fields +// of ReadObjectResponse, and we use or pass on those fields to the user. +// This function is essentially identical to proto.Unmarshal, except it aliases +// the data in the input []byte. If the proto library adds a feature to +// Unmarshal that does that, this function can be dropped. +func readFullObjectResponse(b []byte) (*storagepb.ReadObjectResponse, error) { + msg := &storagepb.ReadObjectResponse{} + + // Loop over the entire message, extracting fields as we go. This does not + // handle field concatenation, in which the contents of a single field + // are split across multiple protobuf tags. + off := 0 + for off < len(b) { + // Consume the next tag. This will tell us which field is next in the + // buffer, its type, and how much space it takes up. + fieldNum, fieldType, fieldLength := protowire.ConsumeTag(b[off:]) + if fieldLength < 0 { + return nil, protowire.ParseError(fieldLength) + } + off += fieldLength + + // Unmarshal the field according to its type. Only fields that are not + // nil will be present. + switch { + case fieldNum == checksummedDataField && fieldType == protowire.BytesType: + // The ChecksummedData field was found. Initialize the struct. + msg.ChecksummedData = &storagepb.ChecksummedData{} + + // Get the bytes corresponding to the checksummed data. + fieldContent, n := protowire.ConsumeBytes(b[off:]) + if n < 0 { + return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData: %v", protowire.ParseError(n)) + } + off += n + + // Get the nested fields. We need to do this manually as it contains + // the object content bytes. + contentOff := 0 + for contentOff < len(fieldContent) { + gotNum, gotTyp, n := protowire.ConsumeTag(fieldContent[contentOff:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + contentOff += n + + switch { + case gotNum == checksummedDataContentField && gotTyp == protowire.BytesType: + // Get the content bytes. 
+ bytes, n := protowire.ConsumeBytes(fieldContent[contentOff:]) + if n < 0 { + return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %v", protowire.ParseError(n)) + } + msg.ChecksummedData.Content = bytes + contentOff += n + case gotNum == checksummedDataCRC32CField && gotTyp == protowire.Fixed32Type: + v, n := protowire.ConsumeFixed32(fieldContent[contentOff:]) + if n < 0 { + return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Crc32C: %v", protowire.ParseError(n)) + } + msg.ChecksummedData.Crc32C = &v + contentOff += n + default: + n = protowire.ConsumeFieldValue(gotNum, gotTyp, fieldContent[contentOff:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + contentOff += n + } + } + case fieldNum == objectChecksumsField && fieldType == protowire.BytesType: + // The field was found. Initialize the struct. + msg.ObjectChecksums = &storagepb.ObjectChecksums{} + + // Get the bytes corresponding to the checksums. + bytes, n := protowire.ConsumeBytes(b[off:]) + if n < 0 { + return nil, fmt.Errorf("invalid ReadObjectResponse.ObjectChecksums: %v", protowire.ParseError(n)) + } + off += n + + // Unmarshal. + if err := proto.Unmarshal(bytes, msg.ObjectChecksums); err != nil { + return nil, err + } + case fieldNum == contentRangeField && fieldType == protowire.BytesType: + msg.ContentRange = &storagepb.ContentRange{} + + bytes, n := protowire.ConsumeBytes(b[off:]) + if n < 0 { + return nil, fmt.Errorf("invalid ReadObjectResponse.ContentRange: %v", protowire.ParseError(n)) + } + off += n + + if err := proto.Unmarshal(bytes, msg.ContentRange); err != nil { + return nil, err + } + case fieldNum == metadataField && fieldType == protowire.BytesType: + msg.Metadata = &storagepb.Object{} + + bytes, n := protowire.ConsumeBytes(b[off:]) + if n < 0 { + return nil, fmt.Errorf("invalid ReadObjectResponse.Metadata: %v", protowire.ParseError(n)) + } + off += n + + if err := proto.Unmarshal(bytes, msg.Metadata); err != nil { + return nil, err + } + default: + fieldLength = protowire.ConsumeFieldValue(fieldNum, fieldType, b[off:]) + if fieldLength < 0 { + return nil, fmt.Errorf("default: %v", protowire.ParseError(fieldLength)) + } + off += fieldLength + } + } + + return msg, nil +} + +// readProtoBytes returns the contents of the protobuf field with number num +// and type bytes from a wire-encoded message. If the field cannot be found, +// the returned slice will be nil and no error will be returned. +// +// It does not handle field concatenation, in which the contents of a single field +// are split across multiple protobuf tags. Encoded data containing split fields +// of this form is technically permissible, but uncommon.
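// Aside: the manual parsing above walks the raw wire format with
// google.golang.org/protobuf/encoding/protowire so the object payload can be
// aliased rather than copied by a full proto.Unmarshal. A tiny runnable
// round-trip using the same Consume* calls on a hand-built message; the field
// number 1 here is arbitrary, not tied to the GCS schema:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Encode: field 1 as a length-delimited (bytes) field.
	var msg []byte
	msg = protowire.AppendTag(msg, 1, protowire.BytesType)
	msg = protowire.AppendBytes(msg, []byte("payload"))

	// Decode: scan tags until field 1 is found, then take its bytes.
	// The returned slice aliases msg; no copy is made.
	off := 0
	for off < len(msg) {
		num, typ, n := protowire.ConsumeTag(msg[off:])
		if n < 0 {
			panic(protowire.ParseError(n))
		}
		off += n
		if num == 1 && typ == protowire.BytesType {
			b, n := protowire.ConsumeBytes(msg[off:])
			if n < 0 {
				panic(protowire.ParseError(n))
			}
			fmt.Printf("field 1 = %q\n", b) // field 1 = "payload"
			return
		}
		// Skip any other field, whatever its wire type.
		n = protowire.ConsumeFieldValue(num, typ, msg[off:])
		if n < 0 {
			panic(protowire.ParseError(n))
		}
		off += n
	}
}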
+func readProtoBytes(b []byte, num protowire.Number) ([]byte, error) { + off := 0 + for off < len(b) { + gotNum, gotTyp, n := protowire.ConsumeTag(b[off:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + off += n + if gotNum == num && gotTyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b[off:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + return b, nil + } + n = protowire.ConsumeFieldValue(gotNum, gotTyp, b[off:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + off += n + } + return nil, nil } // reopenStream "closes" the existing stream and attempts to reopen a stream and @@ -1524,16 +1923,17 @@ func newGRPCWriter(c *grpcStorageClient, params *openWriterParams, r io.Reader) } return &gRPCWriter{ - buf: make([]byte, size), - c: c, - ctx: params.ctx, - reader: r, - bucket: params.bucket, - attrs: params.attrs, - conds: params.conds, - encryptionKey: params.encryptionKey, - sendCRC32C: params.sendCRC32C, - chunkSize: params.chunkSize, + buf: make([]byte, size), + c: c, + ctx: params.ctx, + reader: r, + bucket: params.bucket, + attrs: params.attrs, + conds: params.conds, + encryptionKey: params.encryptionKey, + sendCRC32C: params.sendCRC32C, + chunkSize: params.chunkSize, + forceEmptyContentType: params.forceEmptyContentType, } } @@ -1552,8 +1952,9 @@ type gRPCWriter struct { encryptionKey []byte settings *settings - sendCRC32C bool - chunkSize int + sendCRC32C bool + chunkSize int + forceEmptyContentType bool // The gRPC client-stream used for sending buffers. stream storagepb.Storage_BidiWriteObjectClient @@ -1623,6 +2024,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st // Send a request with as many bytes as possible. // Loop until all bytes are sent. +sendBytes: // label this loop so that we can use a continue statement from a nested block for { bytesNotYetSent := recvd - sent remainingDataFitsInSingleReq := bytesNotYetSent <= maxPerMessageWriteSize @@ -1700,10 +2102,6 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st // we retry. w.stream = nil - // Drop the stream reference as a new one will need to be created if - // we can retry the upload - w.stream = nil - // Retriable errors mean we should start over and attempt to // resend the entire buffer via a new stream. // If not retriable, falling through will return the error received. @@ -1717,7 +2115,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st // Continue sending requests, opening a new stream and resending // any bytes not yet persisted as per QueryWriteStatus - continue + continue sendBytes } } if err != nil { @@ -1732,7 +2130,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st // Not done sending data, do not attempt to commit it yet, loop around // and send more data. if recvd-sent > 0 { - continue + continue sendBytes } // The buffer has been uploaded and there is still more data to be @@ -1763,7 +2161,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st // Drop the stream reference as a new one will need to be created. w.stream = nil - continue + continue sendBytes } if err != nil { return nil, 0, err @@ -1773,7 +2171,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st // Retry if not all bytes were persisted. 
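// Aside: the sendBytes: label introduced above lets retry handling buried in
// nested blocks restart the whole send loop rather than an inner one. A
// minimal standalone example of Go's labeled continue, with illustrative
// names only:

package main

import "fmt"

func main() {
	attempts := 0
sendLoop: // labeled so the nested block can restart this loop
	for sent := 0; sent < 3; {
		for retry := 0; retry < 1; retry++ {
			if attempts == 0 {
				attempts++
				fmt.Println("transient failure, restarting outer loop")
				continue sendLoop // jumps to the next iteration of sendLoop
			}
		}
		sent++
		fmt.Println("sent chunk", sent)
	}
}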
writeOffset = resp.GetPersistedSize() sent = int(writeOffset) - int(start) - continue + continue sendBytes } } else { // If the object is done uploading, close the send stream to signal @@ -1793,6 +2191,15 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st var obj *storagepb.Object for obj == nil { resp, err := w.stream.Recv() + if shouldRetry(err) { + writeOffset, err = w.determineOffset(start) + if err != nil { + return nil, 0, err + } + sent = int(writeOffset) - int(start) + w.stream = nil + continue sendBytes + } if err != nil { return nil, 0, err } @@ -1852,9 +2259,9 @@ func (w *gRPCWriter) writeObjectSpec() (*storagepb.WriteObjectSpec, error) { // read copies the data in the reader to the given buffer and reports how much // data was read into the buffer and if there is no more data to read (EOF). // Furthermore, if the attrs.ContentType is unset, the first bytes of content -// will be sniffed for a matching content type. +// will be sniffed for a matching content type unless forceEmptyContentType is enabled. func (w *gRPCWriter) read() (int, bool, error) { - if w.attrs.ContentType == "" { + if w.attrs.ContentType == "" && !w.forceEmptyContentType { w.reader, w.attrs.ContentType = gax.DetermineContentType(w.reader) } // Set n to -1 to start the Read loop. diff --git a/vendor/cloud.google.com/go/storage/hmac.go b/vendor/cloud.google.com/go/storage/hmac.go index 1b9fbe9d..f7811a5d 100644 --- a/vendor/cloud.google.com/go/storage/hmac.go +++ b/vendor/cloud.google.com/go/storage/hmac.go @@ -272,7 +272,6 @@ func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, // TODO: Remove fetch method upon integration. This method is internalized into // httpStorageClient.ListHMACKeys() as it is the only caller. call := it.raw.List(it.projectID) - setClientHeader(call.Header()) if pageToken != "" { call = call.PageToken(pageToken) } diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go index 0e157e4b..0e213a66 100644 --- a/vendor/cloud.google.com/go/storage/http_client.go +++ b/vendor/cloud.google.com/go/storage/http_client.go @@ -19,6 +19,7 @@ import ( "encoding/base64" "errors" "fmt" + "hash/crc32" "io" "io/ioutil" "net/http" @@ -74,9 +75,10 @@ func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageCl // Prepend default options to avoid overriding options passed by the user. o = append([]option.ClientOption{option.WithScopes(ScopeFullControl, "https://www.googleapis.com/auth/cloud-platform"), option.WithUserAgent(userAgent)}, o...) - o = append(o, internaloption.WithDefaultEndpoint("https://storage.googleapis.com/storage/v1/")) - o = append(o, internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/")) - + o = append(o, internaloption.WithDefaultEndpointTemplate("https://storage.UNIVERSE_DOMAIN/storage/v1/"), + internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + ) // Don't error out here. The user may have passed in their own HTTP // client which does not auth with ADC or other common conventions. c, err := transport.Creds(ctx, o...) @@ -105,12 +107,12 @@ func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageCl // Append the emulator host as default endpoint for the user o = append([]option.ClientOption{option.WithoutAuthentication()}, o...) 
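// Aside: the forceEmptyContentType flag above suppresses content sniffing.
// Normally, when attrs.ContentType is empty, the first bytes of the upload
// are inspected to guess a type (the library routes this through
// gax.DetermineContentType). The standard library exposes the same idea; a
// small illustration using only net/http, not the library's own helper:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Sniffing looks at up to the first 512 bytes of content.
	fmt.Println(http.DetectContentType([]byte("<html><body>hi</body></html>")))
	// text/html; charset=utf-8
	fmt.Println(http.DetectContentType([]byte{0x89, 'P', 'N', 'G', '\r', '\n', 0x1a, '\n'}))
	// image/png
}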
- o = append(o, internaloption.WithDefaultEndpoint(endpoint)) + o = append(o, internaloption.WithDefaultEndpointTemplate(endpoint)) o = append(o, internaloption.WithDefaultMTLSEndpoint(endpoint)) } s.clientOption = o - // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint. + // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpointTemplate, and WithDefaultMTLSEndpoint. hc, ep, err := htransport.NewClient(ctx, s.clientOption...) if err != nil { return nil, fmt.Errorf("dialing: %w", err) @@ -174,7 +176,6 @@ func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket st bkt.Location = "US" } req := c.raw.Buckets.Insert(project, bkt) - setClientHeader(req.Header()) if attrs != nil && attrs.PredefinedACL != "" { req.PredefinedAcl(attrs.PredefinedACL) } @@ -205,7 +206,6 @@ func (c *httpStorageClient) ListBuckets(ctx context.Context, project string, opt fetch := func(pageSize int, pageToken string) (token string, err error) { req := c.raw.Buckets.List(it.projectID) - setClientHeader(req.Header()) req.Projection("full") req.Prefix(it.Prefix) req.PageToken(pageToken) @@ -243,7 +243,6 @@ func (c *httpStorageClient) ListBuckets(ctx context.Context, project string, opt func (c *httpStorageClient) DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error { s := callSettings(c.settings, opts...) req := c.raw.Buckets.Delete(bucket) - setClientHeader(req.Header()) if err := applyBucketConds("httpStorageClient.DeleteBucket", conds, req); err != nil { return err } @@ -257,7 +256,6 @@ func (c *httpStorageClient) DeleteBucket(ctx context.Context, bucket string, con func (c *httpStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { s := callSettings(c.settings, opts...) req := c.raw.Buckets.Get(bucket).Projection("full") - setClientHeader(req.Header()) err := applyBucketConds("httpStorageClient.GetBucket", conds, req) if err != nil { return nil, err @@ -285,7 +283,6 @@ func (c *httpStorageClient) UpdateBucket(ctx context.Context, bucket string, uat s := callSettings(c.settings, opts...) 
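// Aside: WithDefaultEndpointTemplate carries a template such as
// "https://storage.UNIVERSE_DOMAIN/storage/v1/" in which the UNIVERSE_DOMAIN
// token is later replaced with the configured universe, defaulting to
// "googleapis.com" via WithDefaultUniverseDomain. Conceptually the resolution
// is a plain string substitution; this is an illustrative stand-in, not the
// google-api-go-client implementation:

package main

import (
	"fmt"
	"strings"
)

// resolveEndpoint is a hypothetical helper showing what the transport layer
// effectively does with an endpoint template.
func resolveEndpoint(template, universeDomain string) string {
	return strings.ReplaceAll(template, "UNIVERSE_DOMAIN", universeDomain)
}

func main() {
	fmt.Println(resolveEndpoint("https://storage.UNIVERSE_DOMAIN/storage/v1/", "googleapis.com"))
	// https://storage.googleapis.com/storage/v1/
	fmt.Println(resolveEndpoint("storage.UNIVERSE_DOMAIN:443", "example.test"))
	// storage.example.test:443
}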
rb := uattrs.toRawBucket() req := c.raw.Buckets.Patch(bucket, rb).Projection("full") - setClientHeader(req.Header()) err := applyBucketConds("httpStorageClient.UpdateBucket", conds, req) if err != nil { return nil, err @@ -335,7 +332,9 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q } fetch := func(pageSize int, pageToken string) (string, error) { req := c.raw.Objects.List(bucket) - setClientHeader(req.Header()) + if it.query.SoftDeleted { + req.SoftDeleted(it.query.SoftDeleted) + } projection := it.query.Projection if projection == ProjectionDefault { projection = ProjectionFull @@ -348,6 +347,7 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q req.Versions(it.query.Versions) req.IncludeTrailingDelimiter(it.query.IncludeTrailingDelimiter) req.MatchGlob(it.query.MatchGlob) + req.IncludeFoldersAsPrefixes(it.query.IncludeFoldersAsPrefixes) if selection := it.query.toFieldSelection(); selection != "" { req.Fields("nextPageToken", googleapi.Field(selection)) } @@ -406,18 +406,22 @@ func (c *httpStorageClient) DeleteObject(ctx context.Context, bucket, object str return err } -func (c *httpStorageClient) GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { +func (c *httpStorageClient) GetObject(ctx context.Context, params *getObjectParams, opts ...storageOption) (*ObjectAttrs, error) { s := callSettings(c.settings, opts...) - req := c.raw.Objects.Get(bucket, object).Projection("full").Context(ctx) - if err := applyConds("Attrs", gen, conds, req); err != nil { + req := c.raw.Objects.Get(params.bucket, params.object).Projection("full").Context(ctx) + if err := applyConds("Attrs", params.gen, params.conds, req); err != nil { return nil, err } if s.userProject != "" { req.UserProject(s.userProject) } - if err := setEncryptionHeaders(req.Header(), encryptionKey, false); err != nil { + if err := setEncryptionHeaders(req.Header(), params.encryptionKey, false); err != nil { return nil, err } + if params.softDeleted { + req.SoftDeleted(params.softDeleted) + } + var obj *raw.Object var err error err = run(ctx, func(ctx context.Context) error { @@ -544,6 +548,33 @@ func (c *httpStorageClient) UpdateObject(ctx context.Context, params *updateObje return newObject(obj), nil } +func (c *httpStorageClient) RestoreObject(ctx context.Context, params *restoreObjectParams, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) + req := c.raw.Objects.Restore(params.bucket, params.object, params.gen).Context(ctx) + // Do not set the generation here since it's not an optional condition; it gets set above. + if err := applyConds("RestoreObject", defaultGen, params.conds, req); err != nil { + return nil, err + } + if s.userProject != "" { + req.UserProject(s.userProject) + } + if params.copySourceACL { + req.CopySourceAcl(params.copySourceACL) + } + if err := setEncryptionHeaders(req.Header(), params.encryptionKey, false); err != nil { + return nil, err + } + + var obj *raw.Object + var err error + err = run(ctx, func(ctx context.Context) error { obj, err = req.Context(ctx).Do(); return err }, s.retry, s.idempotent) + var e *googleapi.Error + if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + return newObject(obj), err +} + // Default Object ACL methods. 
func (c *httpStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { @@ -629,7 +660,7 @@ func (c *httpStorageClient) UpdateBucketACL(ctx context.Context, bucket string, }, s.retry, s.idempotent) } -// configureACLCall sets the context, user project and headers on the apiary library call. +// configureACLCall sets the context and user project on the apiary library call. // This will panic if the call does not have the correct methods. func configureACLCall(ctx context.Context, userProject string, call interface{ Header() http.Header }) { vc := reflect.ValueOf(call) @@ -637,7 +668,6 @@ func configureACLCall(ctx context.Context, userProject string, call interface{ H if userProject != "" { vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(userProject)}) } - setClientHeader(call.Header()) } // Object ACL methods. @@ -723,7 +753,6 @@ func (c *httpStorageClient) ComposeObject(ctx context.Context, req *composeObjec return nil, err } var obj *raw.Object - setClientHeader(call.Header()) var err error retryCall := func(ctx context.Context) error { obj, err = call.Context(ctx).Do(); return err } @@ -772,7 +801,6 @@ func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec var res *raw.RewriteResponse var err error - setClientHeader(call.Header()) retryCall := func(ctx context.Context) error { res, err = call.Context(ctx).Do(); return err } @@ -827,17 +855,18 @@ func (c *httpStorageClient) newRangeReaderXML(ctx context.Context, params *newRa return nil, err } - // Set custom headers passed in via the context. This is only required for XML; - // for gRPC & JSON this is handled in the GAPIC and Apiary layers respectively. - ctxHeaders := callctx.HeadersFromContext(ctx) - for k, vals := range ctxHeaders { - for _, v := range vals { - req.Header.Add(k, v) - } - } - reopen := readerReopen(ctx, req.Header, params, s, - func(ctx context.Context) (*http.Response, error) { return c.hc.Do(req.WithContext(ctx)) }, + func(ctx context.Context) (*http.Response, error) { + // Set custom headers passed in via the context. This is only required for XML; + // for gRPC & JSON this is handled in the GAPIC and Apiary layers respectively. 
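// Aside: the newRangeReaderXML hunk nearby moves header injection inside the
// reopen closure so per-call headers from the context are re-applied on every
// retried request, and switches Header.Add to Header.Set so a reopened
// request does not accumulate duplicate values. Callers attach such headers
// with the gax callctx helper, which the diff reads back via
// callctx.HeadersFromContext; a small example (the header name is a
// placeholder):

package main

import (
	"context"
	"fmt"

	"github.com/googleapis/gax-go/v2/callctx"
)

func main() {
	ctx := context.Background()
	// Attach a custom header to this call's context.
	ctx = callctx.SetHeaders(ctx, "x-goog-custom-audit-job", "job-123")

	// The transport later retrieves and applies these on the HTTP request.
	for k, v := range callctx.HeadersFromContext(ctx) {
		fmt.Println(k, v) // x-goog-custom-audit-job [job-123]
	}
}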
+ ctxHeaders := callctx.HeadersFromContext(ctx) + for k, vals := range ctxHeaders { + for _, v := range vals { + req.Header.Set(k, v) + } + } + return c.hc.Do(req.WithContext(ctx)) + }, func() error { return setConditionsHeaders(req.Header, params.conds) }, func() { req.URL.RawQuery = fmt.Sprintf("generation=%d", params.gen) }) @@ -851,7 +880,6 @@ func (c *httpStorageClient) newRangeReaderXML(ctx context.Context, params *newRa func (c *httpStorageClient) newRangeReaderJSON(ctx context.Context, params *newRangeReaderParams, s *settings) (r *Reader, err error) { call := c.raw.Objects.Get(params.bucket, params.object) - setClientHeader(call.Header()) call.Projection("full") if s.userProject != "" { @@ -883,7 +911,7 @@ func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storage mediaOpts := []googleapi.MediaOption{ googleapi.ChunkSize(params.chunkSize), } - if c := attrs.ContentType; c != "" { + if c := attrs.ContentType; c != "" || params.forceEmptyContentType { mediaOpts = append(mediaOpts, googleapi.ContentType(c)) } if params.chunkRetryDeadline != 0 { @@ -967,7 +995,6 @@ func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storage func (c *httpStorageClient) GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error) { s := callSettings(c.settings, opts...) call := c.raw.Buckets.GetIamPolicy(resource).OptionsRequestedPolicyVersion(int64(version)) - setClientHeader(call.Header()) if s.userProject != "" { call.UserProject(s.userProject) } @@ -988,7 +1015,6 @@ func (c *httpStorageClient) SetIamPolicy(ctx context.Context, resource string, p rp := iamToStoragePolicy(policy) call := c.raw.Buckets.SetIamPolicy(resource, rp) - setClientHeader(call.Header()) if s.userProject != "" { call.UserProject(s.userProject) } @@ -1002,7 +1028,6 @@ func (c *httpStorageClient) SetIamPolicy(ctx context.Context, resource string, p func (c *httpStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) { s := callSettings(c.settings, opts...) call := c.raw.Buckets.TestIamPermissions(resource, permissions) - setClientHeader(call.Header()) if s.userProject != "" { call.UserProject(s.userProject) } @@ -1051,7 +1076,6 @@ func (c *httpStorageClient) ListHMACKeys(ctx context.Context, project, serviceAc } fetch := func(pageSize int, pageToken string) (token string, err error) { call := c.raw.Projects.HmacKeys.List(project) - setClientHeader(call.Header()) if pageToken != "" { call = call.PageToken(pageToken) } @@ -1216,9 +1240,12 @@ func (c *httpStorageClient) DeleteNotification(ctx context.Context, bucket strin } type httpReader struct { - body io.ReadCloser - seen int64 - reopen func(seen int64) (*http.Response, error) + body io.ReadCloser + seen int64 + reopen func(seen int64) (*http.Response, error) + checkCRC bool // should we check the CRC? + wantCRC uint32 // the CRC32c value the server sent in the header + gotCRC uint32 // running crc } func (r *httpReader) Read(p []byte) (int, error) { @@ -1227,7 +1254,22 @@ func (r *httpReader) Read(p []byte) (int, error) { m, err := r.body.Read(p[n:]) n += m r.seen += int64(m) - if err == nil || err == io.EOF { + if r.checkCRC { + r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n]) + } + if err == nil { + return n, nil + } + if err == io.EOF { + // Check CRC here. It would be natural to check it in Close, but + // everybody defers Close on the assumption that it doesn't return + // anything worth looking at. 
+ if r.checkCRC { + if r.gotCRC != r.wantCRC { + return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d", + r.gotCRC, r.wantCRC) + } + } return n, err } // Read failed (likely due to connection issues), but we will try to reopen @@ -1433,11 +1475,12 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen Attrs: attrs, size: size, remain: remain, - wantCRC: crc, checkCRC: checkCRC, reader: &httpReader{ - reopen: reopen, - body: body, + reopen: reopen, + body: body, + wantCRC: crc, + checkCRC: checkCRC, }, }, nil } diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/BUILD.bazel b/vendor/cloud.google.com/go/storage/internal/apiv2/BUILD.bazel index f7dc1b08..fa7db64e 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/BUILD.bazel +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/BUILD.bazel @@ -20,7 +20,7 @@ go_library( "@org_golang_google_api//option", "@org_golang_google_api//option/internaloption", "@org_golang_google_api//transport/grpc", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_protobuf//proto", ], diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go index c6fd4b34..415b2b58 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go index 8159589e..5e2a8f0a 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,9 +18,11 @@ // Cloud Storage API. // // Stop. This folder is likely not what you are looking for. This folder -// contains protocol buffer definitions for an unreleased API for accessing -// Cloud Storage. Unless told otherwise by a Google Cloud representative, do -// not use any of the contents of this folder. If you would like to use Cloud +// contains protocol buffer definitions for an API only accessible to select +// customers. Customers not participating should not depend on this file. +// Please contact Google Cloud sales if you are interested. Unless told +// otherwise by a Google Cloud representative, do not use or otherwise rely +// on any of the contents of this folder. 
If you would like to use Cloud // Storage, please consult our official documentation (at // https://cloud.google.com/storage/docs/apis) for details on our XML and // JSON APIs, or else consider one of our client libraries (at diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go index 64819950..82ec5db9 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -78,7 +78,9 @@ type CallOptions struct { func defaultGRPCClientOptions() []option.ClientOption { return []option.ClientOption{ internaloption.WithDefaultEndpoint("storage.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("storage.UNIVERSE_DOMAIN:443"), internaloption.WithDefaultMTLSEndpoint("storage.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://storage.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), @@ -625,18 +627,16 @@ func (c *Client) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.L return c.internalClient.LockBucketRetentionPolicy(ctx, req, opts...) } -// GetIamPolicy gets the IAM policy for a specified bucket or object. +// GetIamPolicy gets the IAM policy for a specified bucket. // The resource field in the request should be -// projects/_/buckets/{bucket} for a bucket or -// projects/_/buckets/{bucket}/objects/{object} for an object. +// projects/_/buckets/{bucket}. func (c *Client) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { return c.internalClient.GetIamPolicy(ctx, req, opts...) } -// SetIamPolicy updates an IAM policy for the specified bucket or object. +// SetIamPolicy updates an IAM policy for the specified bucket. // The resource field in the request should be -// projects/_/buckets/{bucket} for a bucket or -// projects/_/buckets/{bucket}/objects/{object} for an object. +// projects/_/buckets/{bucket}. func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { return c.internalClient.SetIamPolicy(ctx, req, opts...) } @@ -962,7 +962,9 @@ func (c *gRPCClient) Connection() *grpc.ClientConn { func (c *gRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. 
The user should invoke this when @@ -1139,9 +1141,6 @@ func (c *gRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRe if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) } - if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) - } for headerName, headerValue := range routingHeadersMap { routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } @@ -1169,9 +1168,6 @@ func (c *gRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRe if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) } - if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) - } for headerName, headerValue := range routingHeadersMap { routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/BUILD.bazel b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/BUILD.bazel index 15cc4bf8..b6c9e5eb 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/BUILD.bazel +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/BUILD.bazel @@ -10,7 +10,7 @@ go_library( "//vendor/cloud.google.com/go/iam/apiv1/iampb", "@org_golang_google_genproto//googleapis/type/date", "@org_golang_google_genproto_googleapis_api//annotations", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//status", "@org_golang_google_protobuf//reflect/protoreflect", diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go index 3486fd15..aeb7512f 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.23.2 +// protoc-gen-go v1.34.2 +// protoc v4.25.3 // source: google/storage/v2/storage.proto package storagepb @@ -2016,6 +2016,7 @@ type WriteObjectRequest struct { // The first message of each stream should set one of the following.
// // Types that are assignable to FirstMessage: + // // *WriteObjectRequest_UploadId // *WriteObjectRequest_WriteObjectSpec FirstMessage isWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"` @@ -2036,6 +2037,7 @@ type WriteObjectRequest struct { // A portion of the data for the object. // // Types that are assignable to Data: + // // *WriteObjectRequest_ChecksummedData Data isWriteObjectRequest_Data `protobuf_oneof:"data"` // Checksums for the complete object. If the checksums computed by the service @@ -2190,6 +2192,7 @@ type WriteObjectResponse struct { // The response will set one of the following. // // Types that are assignable to WriteStatus: + // // *WriteObjectResponse_PersistedSize // *WriteObjectResponse_Resource WriteStatus isWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"` @@ -2277,6 +2280,7 @@ type BidiWriteObjectRequest struct { // The first message of each stream should set one of the following. // // Types that are assignable to FirstMessage: + // // *BidiWriteObjectRequest_UploadId // *BidiWriteObjectRequest_WriteObjectSpec FirstMessage isBidiWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"` @@ -2297,6 +2301,7 @@ type BidiWriteObjectRequest struct { // A portion of the data for the object. // // Types that are assignable to Data: + // // *BidiWriteObjectRequest_ChecksummedData Data isBidiWriteObjectRequest_Data `protobuf_oneof:"data"` // Checksums for the complete object. If the checksums computed by the service @@ -2310,12 +2315,15 @@ type BidiWriteObjectRequest struct { // covers all the bytes the server has persisted thus far and can be used to // decide what data is safe for the client to drop. Note that the object's // current size reported by the BidiWriteObjectResponse may lag behind the - // number of bytes written by the client. + // number of bytes written by the client. This field is ignored if + // `finish_write` is set to true. StateLookup bool `protobuf:"varint,7,opt,name=state_lookup,json=stateLookup,proto3" json:"state_lookup,omitempty"` // Persists data written on the stream, up to and including the current // message, to permanent storage. This option should be used sparingly as it // may reduce performance. Ongoing writes will periodically be persisted on - // the server even when `flush` is not set. + // the server even when `flush` is not set. This field is ignored if + // `finish_write` is set to true since there's no need to checkpoint or flush + // if this message completes the write. Flush bool `protobuf:"varint,8,opt,name=flush,proto3" json:"flush,omitempty"` // If `true`, this indicates that the write is complete. Sending any // `WriteObjectRequest`s subsequent to one in which `finish_write` is `true` @@ -2478,6 +2486,7 @@ type BidiWriteObjectResponse struct { // The response will set one of the following. // // Types that are assignable to WriteStatus: + // // *BidiWriteObjectResponse_PersistedSize // *BidiWriteObjectResponse_Resource WriteStatus isBidiWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"` @@ -2608,6 +2617,9 @@ type ListObjectsRequest struct { // Optional. If true, only list all soft-deleted versions of the object. // Soft delete policy is required to set this option. SoftDeleted bool `protobuf:"varint,12,opt,name=soft_deleted,json=softDeleted,proto3" json:"soft_deleted,omitempty"` + // Optional. If true, will also include folders and managed folders (besides + // objects) in the returned `prefixes`. Requires `delimiter` to be set to '/'. 
+ IncludeFoldersAsPrefixes bool `protobuf:"varint,13,opt,name=include_folders_as_prefixes,json=includeFoldersAsPrefixes,proto3" json:"include_folders_as_prefixes,omitempty"` // Optional. Filter results to objects and prefixes that match this glob // pattern. See [List Objects Using // Glob](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob) @@ -2724,6 +2736,13 @@ func (x *ListObjectsRequest) GetSoftDeleted() bool { return false } +func (x *ListObjectsRequest) GetIncludeFoldersAsPrefixes() bool { + if x != nil { + return x.IncludeFoldersAsPrefixes + } + return false +} + func (x *ListObjectsRequest) GetMatchGlob() string { if x != nil { return x.MatchGlob @@ -2799,6 +2818,7 @@ type QueryWriteStatusResponse struct { // The response will set one of the following. // // Types that are assignable to WriteStatus: + // // *QueryWriteStatusResponse_PersistedSize // *QueryWriteStatusResponse_Resource WriteStatus isQueryWriteStatusResponse_WriteStatus `protobuf_oneof:"write_status"` @@ -4253,6 +4273,10 @@ type Bucket struct { // The bucket's Autoclass configuration. If there is no configuration, the // Autoclass feature will be disabled and have no effect on the bucket. Autoclass *Bucket_Autoclass `protobuf:"bytes,28,opt,name=autoclass,proto3" json:"autoclass,omitempty"` + // Optional. The bucket's hierarchical namespace configuration. If there is no + // configuration, the hierarchical namespace feature will be disabled and have + // no effect on the bucket. + HierarchicalNamespace *Bucket_HierarchicalNamespace `protobuf:"bytes,32,opt,name=hierarchical_namespace,json=hierarchicalNamespace,proto3" json:"hierarchical_namespace,omitempty"` // Optional. The bucket's soft delete policy. The soft delete policy prevents // soft-deleted objects from being permanently deleted. SoftDeletePolicy *Bucket_SoftDeletePolicy `protobuf:"bytes,31,opt,name=soft_delete_policy,json=softDeletePolicy,proto3" json:"soft_delete_policy,omitempty"` @@ -4486,6 +4510,13 @@ func (x *Bucket) GetAutoclass() *Bucket_Autoclass { return nil } +func (x *Bucket) GetHierarchicalNamespace() *Bucket_HierarchicalNamespace { + if x != nil { + return x.HierarchicalNamespace + } + return nil +} + func (x *Bucket) GetSoftDeletePolicy() *Bucket_SoftDeletePolicy { if x != nil { return x.SoftDeletePolicy @@ -5168,6 +5199,16 @@ type Object struct { CustomerEncryption *CustomerEncryption `protobuf:"bytes,25,opt,name=customer_encryption,json=customerEncryption,proto3" json:"customer_encryption,omitempty"` // A user-specified timestamp set on an object. CustomTime *timestamppb.Timestamp `protobuf:"bytes,26,opt,name=custom_time,json=customTime,proto3" json:"custom_time,omitempty"` + // Output only. This is the time when the object became soft-deleted. + // + // Soft-deleted objects are only accessible if a soft_delete_policy is + // enabled. Also see hard_delete_time. + SoftDeleteTime *timestamppb.Timestamp `protobuf:"bytes,28,opt,name=soft_delete_time,json=softDeleteTime,proto3,oneof" json:"soft_delete_time,omitempty"` + // Output only. The time when the object will be permanently deleted. + // + // Only set when an object becomes soft-deleted with a soft_delete_policy. + // Otherwise, the object will not be accessible. 
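// Aside: the soft_deleted and include_folders_as_prefixes request fields
// above surface in the hand-written client as storage.Query options; the HTTP
// transport in this diff reads it.query.SoftDeleted and
// it.query.IncludeFoldersAsPrefixes. A hedged listing sketch; the bucket name
// is a placeholder, and IncludeFoldersAsPrefixes requires Delimiter set to "/":

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	it := client.Bucket("my-bucket").Objects(ctx, &storage.Query{
		Delimiter:                "/",
		IncludeFoldersAsPrefixes: true, // folders and managed folders appear as Prefix entries
		// SoftDeleted: true,          // alternatively, list only soft-deleted versions
	})
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		if attrs.Prefix != "" {
			fmt.Println("prefix:", attrs.Prefix)
		} else {
			fmt.Println("object:", attrs.Name)
		}
	}
}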
+ HardDeleteTime *timestamppb.Timestamp `protobuf:"bytes,29,opt,name=hard_delete_time,json=hardDeleteTime,proto3,oneof" json:"hard_delete_time,omitempty"` } func (x *Object) Reset() { @@ -5391,6 +5432,20 @@ func (x *Object) GetCustomTime() *timestamppb.Timestamp { return nil } +func (x *Object) GetSoftDeleteTime() *timestamppb.Timestamp { + if x != nil { + return x.SoftDeleteTime + } + return nil +} + +func (x *Object) GetHardDeleteTime() *timestamppb.Timestamp { + if x != nil { + return x.HardDeleteTime + } + return nil +} + // An access-control entry. type ObjectAccessControl struct { state protoimpl.MessageState @@ -5775,9 +5830,9 @@ type ContentRange struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The starting offset of the object data. + // The starting offset of the object data. This value is inclusive. Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` - // The ending offset of the object data. + // The ending offset of the object data. This value is exclusive. End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` // The complete length of the object data. CompleteLength int64 `protobuf:"varint,3,opt,name=complete_length,json=completeLength,proto3" json:"complete_length,omitempty"` @@ -6693,6 +6748,55 @@ func (x *Bucket_Autoclass) GetTerminalStorageClassUpdateTime() *timestamppb.Time return nil } +// Configuration for a bucket's hierarchical namespace feature. +type Bucket_HierarchicalNamespace struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. Enables the hierarchical namespace feature. + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` +} + +func (x *Bucket_HierarchicalNamespace) Reset() { + *x = Bucket_HierarchicalNamespace{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[71] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_HierarchicalNamespace) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_HierarchicalNamespace) ProtoMessage() {} + +func (x *Bucket_HierarchicalNamespace) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[71] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_HierarchicalNamespace.ProtoReflect.Descriptor instead. +func (*Bucket_HierarchicalNamespace) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 12} +} + +func (x *Bucket_HierarchicalNamespace) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + // Settings for Uniform Bucket level access. // See https://cloud.google.com/storage/docs/uniform-bucket-level-access. 
type Bucket_IamConfig_UniformBucketLevelAccess struct { @@ -6712,7 +6816,7 @@ type Bucket_IamConfig_UniformBucketLevelAccess struct { func (x *Bucket_IamConfig_UniformBucketLevelAccess) Reset() { *x = Bucket_IamConfig_UniformBucketLevelAccess{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[72] + mi := &file_google_storage_v2_storage_proto_msgTypes[73] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6725,7 +6829,7 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) String() string { func (*Bucket_IamConfig_UniformBucketLevelAccess) ProtoMessage() {} func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[72] + mi := &file_google_storage_v2_storage_proto_msgTypes[73] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6771,7 +6875,7 @@ type Bucket_Lifecycle_Rule struct { func (x *Bucket_Lifecycle_Rule) Reset() { *x = Bucket_Lifecycle_Rule{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[73] + mi := &file_google_storage_v2_storage_proto_msgTypes[74] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6784,7 +6888,7 @@ func (x *Bucket_Lifecycle_Rule) String() string { func (*Bucket_Lifecycle_Rule) ProtoMessage() {} func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[73] + mi := &file_google_storage_v2_storage_proto_msgTypes[74] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6831,7 +6935,7 @@ type Bucket_Lifecycle_Rule_Action struct { func (x *Bucket_Lifecycle_Rule_Action) Reset() { *x = Bucket_Lifecycle_Rule_Action{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[74] + mi := &file_google_storage_v2_storage_proto_msgTypes[75] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6844,7 +6948,7 @@ func (x *Bucket_Lifecycle_Rule_Action) String() string { func (*Bucket_Lifecycle_Rule_Action) ProtoMessage() {} func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[74] + mi := &file_google_storage_v2_storage_proto_msgTypes[75] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6929,7 +7033,7 @@ type Bucket_Lifecycle_Rule_Condition struct { func (x *Bucket_Lifecycle_Rule_Condition) Reset() { *x = Bucket_Lifecycle_Rule_Condition{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[75] + mi := &file_google_storage_v2_storage_proto_msgTypes[76] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6942,7 +7046,7 @@ func (x *Bucket_Lifecycle_Rule_Condition) String() string { func (*Bucket_Lifecycle_Rule_Condition) ProtoMessage() {} func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[75] + mi := &file_google_storage_v2_storage_proto_msgTypes[76] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7602,7 +7706,7 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x67, 0x6c, 0x65, 
0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x22, 0x9f, 0x04, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x22, 0xe3, 0x04, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, @@ -7633,1201 +7737,1221 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x64, 0x12, 0x22, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x18, - 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x47, 0x6c, 0x6f, 0x62, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, - 0x61, 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, - 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, - 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, + 0x64, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x66, 0x6f, 0x6c, + 0x64, 0x65, 0x72, 0x73, 0x5f, 0x61, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x18, 0x69, 0x6e, 0x63, + 0x6c, 0x75, 0x64, 0x65, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x41, 0x73, 0x50, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x67, + 0x6c, 0x6f, 0x62, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x47, 0x6c, 0x6f, 0x62, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, + 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 
[... several hundred lines of regenerated rawDesc descriptor bytes elided ...]

This hunk rewrites the wire-serialized FileDescriptorProto that protoc-gen-go embeds for the google.storage.v2 API. The ASCII payload of the bytes names the affected messages: QueryWriteStatusResponse, RewriteObjectRequest, RewriteResponse, StartResumableWriteRequest/Response, UpdateObjectRequest, GetServiceAccountRequest, the Create/Delete/Get/List/Update HmacKey request and response messages, CommonObjectRequestParams, ServiceConstants, and Bucket with its nested Billing, Cors, Encryption, IamConfig, and Lifecycle types. Nearly all of the -/+ churn is identical bytes re-wrapped at new line offsets by the code generator; the substantive edits visible in the bytes are small field renumberings (e.g. a common_object_request_params tag changing from 0x18, 0x02 to 0x18, 0x13, i.e. field 2 to field 19).
0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x25, - 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, - 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, - 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, - 0x79, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42, 0x15, - 0x0a, 0x13, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, - 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, - 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, - 0x54, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, - 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f, 0x67, - 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0xbb, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66, 0x66, - 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x65, - 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, - 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0xd3, 0x01, 0x0a, 0x10, 0x53, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4d, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, - 0x00, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x46, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, 0x63, - 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x01, 0x52, 0x0d, 0x65, - 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x42, - 0x15, 0x0a, 0x13, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x65, 0x66, 0x66, 0x65, 0x63, - 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, - 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67, 0x65, - 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, - 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a, 0x15, - 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, - 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xd6, 0x02, 0x0a, - 0x09, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67, 0x67, - 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x16, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, - 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, - 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x88, 0x01, - 0x01, 0x12, 0x70, 0x0a, 0x22, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48, 0x01, - 0x52, 0x1e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, - 0x88, 0x01, 0x01, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, - 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x42, 0x25, - 0x0a, 0x23, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 
0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x3a, 0x47, 0xea, 0x41, 0x44, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, - 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, - 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, - 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, - 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, - 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, - 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, - 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, - 0x65, 0x61, 0x6d, 0x22, 0x5a, 0x0a, 0x0f, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, - 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x05, 0xe0, 0x41, 0x01, 0x08, 0x01, 0x52, 0x07, - 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, - 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, - 0x63, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, - 0x54, 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, - 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 
0x88, 0x01, 0x01, 0x12, - 0x19, 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48, 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, - 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xfe, 0x02, 0x0a, 0x0f, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, - 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, - 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, - 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x69, 0x0a, 0x18, + 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, + 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x9e, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x03, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x40, - 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, + 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x81, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, + 0x4d, 
0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x73, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a, + 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, + 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x84, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x48, 0x6d, + 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, + 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x80, 0x02, + 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, + 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x68, 0x6f, 0x77, 0x5f, 0x64, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0f, 0x73, 0x68, 0x6f, 0x77, 
0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, + 0x22, 0x7f, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x09, 0x68, 0x6d, 0x61, 0x63, + 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, + 0x08, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, + 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x22, 0x97, 0x01, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, + 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x68, 0x6d, + 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x3b, + 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, - 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, - 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, - 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22, 0x85, 0x04, 0x0a, 0x12, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, - 0x63, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x68, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 
0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, - 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x12, 0x2c, 0x0a, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, - 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2a, - 0x0a, 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x70, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0x43, 0x0a, 0x15, 0x43, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, - 0x7d, 0xea, 0x41, 0x7a, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x4d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x22, 0x71, - 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, - 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x5f, 0x73, - 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, - 0x73, 0x22, 0xec, 0x0b, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1b, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 
0x03, 0xe0, 0x41, - 0x05, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, - 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, - 0x17, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, - 0x41, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, - 0x69, 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, - 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, - 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, - 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, - 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, - 0x61, 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, - 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x40, - 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, - 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, - 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, - 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, - 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, - 0x18, 
0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, - 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, - 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x07, - 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, - 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, - 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x5a, 0x0a, - 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, + 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xbf, 0x01, 0x0a, 0x19, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, + 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x30, 0x0a, 0x14, + 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x65, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3d, + 0x0a, 0x1b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x18, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, + 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xca, 0x05, + 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x74, 0x73, 0x22, 0xb5, 0x05, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, + 0x12, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x14, 0x4d, 0x41, 0x58, 0x5f, 0x52, 0x45, 0x41, + 0x44, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, + 0x80, 0x01, 0x12, 0x1c, 0x0a, 0x15, 0x4d, 0x41, 0x58, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, + 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, + 0x12, 0x19, 0x0a, 0x12, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x53, + 0x49, 0x5a, 0x45, 0x5f, 0x4d, 
0x42, 0x10, 0x80, 0x80, 0xc0, 0x02, 0x12, 0x29, 0x0a, 0x24, 0x4d, + 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, + 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x42, 0x59, + 0x54, 0x45, 0x53, 0x10, 0x80, 0x08, 0x12, 0x2a, 0x0a, 0x25, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, + 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, + 0x45, 0x4c, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, + 0x80, 0x20, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, + 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, + 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x40, 0x12, 0x2a, 0x0a, + 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x41, + 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, + 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0xa0, 0x01, 0x12, 0x27, 0x0a, 0x23, 0x4d, 0x41, 0x58, + 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, + 0x4e, 0x46, 0x49, 0x47, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, + 0x10, 0x64, 0x12, 0x22, 0x0a, 0x1e, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x49, 0x46, 0x45, 0x43, 0x59, + 0x43, 0x4c, 0x45, 0x5f, 0x52, 0x55, 0x4c, 0x45, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, + 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x26, 0x0a, 0x22, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, + 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, + 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x53, 0x10, 0x05, 0x12, 0x31, + 0x0a, 0x2c, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, + 0x55, 0x54, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, + 0x02, 0x12, 0x33, 0x0a, 0x2e, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, + 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, + 0x47, 0x54, 0x48, 0x10, 0x80, 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, + 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x49, 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x55, + 0x4e, 0x54, 0x10, 0x40, 0x12, 0x1f, 0x0a, 0x1b, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, + 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, + 0x47, 0x54, 0x48, 0x10, 0x3f, 0x12, 0x1f, 0x0a, 0x1a, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, + 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, + 0x54, 0x45, 0x53, 0x10, 0x80, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, + 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x49, 0x44, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c, + 0x45, 0x54, 0x45, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, 0x5f, 0x52, 0x45, 0x51, 0x55, + 0x45, 0x53, 0x54, 0x10, 0xe8, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x50, 0x4c, 0x49, 0x54, 0x5f, + 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, + 0x44, 0x41, 0x59, 0x53, 0x10, 0x0e, 0x1a, 0x02, 0x10, 
0x01, 0x22, 0xf5, 0x23, 0x0a, 0x06, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, + 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, + 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x65, 0x74, 0x61, 0x67, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, + 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x1f, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, + 0x12, 0x10, 0x0a, 0x03, 0x72, 0x70, 0x6f, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, + 0x70, 0x6f, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x54, 0x0a, 0x12, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, + 0x63, 0x6c, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, + 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x52, 0x09, 0x6c, 0x69, 0x66, 0x65, + 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 
0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, + 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, - 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, 0x6c, 0x64, - 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, - 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, - 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, - 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x48, - 0x00, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, - 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x18, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, - 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, 0x75, 0x73, - 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x63, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x3b, - 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, - 0x22, 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, - 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, - 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, - 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, 0x0a, 0x13, 0x4c, - 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, - 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, 0x0a, 0x0b, 0x50, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0d, 0x70, 
0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x74, 0x65, 0x61, 0x6d, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6d, 0x61, 0x69, 0x6c, - 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x3c, 0x0a, 0x05, - 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, - 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x0c, 0x43, 0x6f, - 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, - 0x6e, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6c, - 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, - 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0x98, 0x28, 0x0a, 0x07, - 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x6f, 0x0a, 0x09, 0x47, - 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xab, 0x01, 0x0a, - 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x22, 0x58, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 
0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, - 0x02, 0x38, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x1e, 0x0a, 0x0e, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x85, 0x01, 0x0a, 0x0b, 0x4c, - 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, - 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0xda, 0x41, 0x06, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, - 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x22, 0x26, 0xda, 0x41, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, - 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xab, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, - 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x22, 0x60, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, - 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0xb2, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, - 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 
0x6d, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x22, 0x67, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, - 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, - 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0xd7, 0x01, 0x0a, 0x12, - 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, - 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, - 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, - 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, + 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a, + 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, + 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x15, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, + 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, + 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 
0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2e, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x52, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, + 0x74, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67, + 0x69, 0x6e, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6c, 0x6f, + 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x13, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, 0xda, 0x41, 0x12, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, - 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, - 0x2a, 0x7d, 0x12, 0x9f, 0x01, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x37, 0xda, 0x41, 0x04, + 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x3b, 0x0a, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x42, 0x69, 0x6c, + 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, + 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x2e, 0x52, 0x65, 
0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x0a, 0x69, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x69, 0x61, + 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73, + 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x50, 0x7a, 0x73, 0x12, 0x67, 0x0a, 0x17, + 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, + 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, + 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, + 0x73, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x09, 0x61, + 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x6b, 0x0a, 0x16, 0x68, 0x69, 0x65, 0x72, + 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x2e, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15, + 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x5d, 0x0a, 0x12, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x1f, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x53, 0x6f, 0x66, + 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x30, 0x0a, 0x07, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, + 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x65, 0x72, 0x50, 0x61, 0x79, 0x73, 0x1a, 0x87, 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12, + 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 
0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, + 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, + 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, + 0x1a, 0x5c, 0x0a, 0x0a, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, + 0x0a, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, + 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0xb1, + 0x02, 0x0a, 0x09, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1b, + 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c, + 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, + 0x18, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x18, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, + 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63, + 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, + 0x6d, 0x65, 0x1a, 0xdb, 0x07, 0x0a, 0x09, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, + 0x12, 0x3c, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 
0x65, 0x63, 0x79, + 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x1a, 0x8f, + 0x07, 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, + 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x50, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, + 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f, + 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x1a, 0x41, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x1a, 0xa8, 0x05, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x08, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x07, 0x61, 0x67, 0x65, 0x44, 0x61, 0x79, 0x73, + 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, + 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x0d, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1c, 0x0a, + 0x07, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, + 0x52, 0x06, 0x69, 0x73, 0x4c, 0x69, 0x76, 0x65, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x6e, + 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, 0x02, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x4e, 0x65, + 0x77, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x32, + 0x0a, 0x15, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, + 0x73, 0x73, 0x12, 0x38, 0x0a, 0x16, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, + 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x05, 0x48, 0x03, 0x52, 0x13, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x43, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x12, + 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, + 0x72, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 
0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x10, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x40, 0x0a, + 0x1a, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x05, 0x48, 0x04, 0x52, 0x17, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x6f, + 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, + 0x47, 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, + 0x74, 0x65, 0x52, 0x14, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, + 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, + 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, + 0x78, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, + 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64, + 0x61, 0x79, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42, + 0x15, 0x0a, 0x13, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, + 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, + 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x1a, 0x54, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, + 0x6f, 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f, + 0x67, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0xbb, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66, + 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, + 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, + 0x09, 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65, + 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x04, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xd3, 0x01, 0x0a, 0x10, 0x53, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4d, 0x0a, 0x12, 0x72, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x48, 0x00, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x46, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x01, 0x52, 0x0d, + 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, + 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x65, 0x66, 0x66, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, + 0x10, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, + 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67, + 0x65, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66, + 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a, + 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, + 0x64, 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xd6, 0x02, + 0x0a, 0x09, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67, + 0x67, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x16, 0x74, 0x65, 0x72, 0x6d, 0x69, + 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 
0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x74, 0x65, 0x72, 0x6d, 0x69, + 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x88, + 0x01, 0x01, 0x12, 0x70, 0x0a, 0x22, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48, + 0x01, 0x52, 0x1e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x88, 0x01, 0x01, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, + 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x42, + 0x25, 0x0a, 0x23, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x36, 0x0a, 0x15, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, + 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x1d, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x39, + 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x47, 0xea, 0x41, 0x44, 0x0a, 0x1d, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x7d, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, + 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, + 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 
0x0a, 0x05, 0x65, + 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, + 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, + 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x5a, 0x0a, 0x0f, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, + 0x1f, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x42, 0x05, 0xe0, 0x41, 0x01, 0x08, 0x01, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x07, + 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, + 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x54, 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63, + 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, + 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f, + 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48, + 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xfe, + 0x02, 0x0a, 0x0f, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, + 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, + 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, + 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, + 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, + 0x72, 
0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, + 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22, + 0x85, 0x04, 0x0a, 0x12, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, + 0x61, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x1f, + 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, + 0x68, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, + 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2a, 0x0a, 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x1a, 0x43, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x7d, 0xea, 0x41, 0x7a, 0x0a, 0x29, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x22, 0x71, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, + 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, + 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, + 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x53, + 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xb6, 0x0d, 0x0a, 0x06, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, + 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, + 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, + 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, + 0x67, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, + 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, + 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, + 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 
0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, + 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, + 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x75, 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x75, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, + 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x5a, 0x0a, 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 
0x72, 0x79, 0x5f, + 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, + 0x6f, 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, + 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, + 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, + 0x6c, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, + 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, + 0x65, 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, + 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x5f, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x48, 0x01, 0x52, 0x0e, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x4e, 0x0a, 0x10, 0x68, 0x61, 0x72, 0x64, 0x5f, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1d, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x48, 0x02, 0x52, 0x0e, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x54, 
0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, + 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, 0x6f, + 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x13, + 0x0a, 0x11, 0x5f, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, + 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, + 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, + 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, + 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, + 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, + 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, + 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, + 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, + 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, + 0x0e, 0x70, 0x72, 0x6f, 0x6a, 
0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6d, + 0x61, 0x69, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, + 0x3c, 0x0a, 0x05, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, + 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, + 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, + 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0xaa, + 0x27, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x6f, + 0x0a, 0x09, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0xda, 0x41, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, + 0xab, 0x01, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 
0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x22, 0x58, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x8a, + 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, + 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x1e, 0x0a, + 0x0e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x85, 0x01, + 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0xda, 0x41, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x22, 0x26, 0xda, 0x41, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x8a, 0xd3, + 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x75, 0x0a, 0x0c, 0x47, + 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, + 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, + 0x2a, 0x7d, 0x12, 0x7c, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 
0x22, 0x31, 0xda, + 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, + 0x12, 0xd7, 0x01, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, + 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, + 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0xda, 0x41, + 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, + 0xda, 0x41, 0x12, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x9f, 0x01, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x22, 0x37, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, + 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0xa8, 0x01, 0x0a, 0x15, 0x47, 0x65, + 0x74, 
0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x37, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, - 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0xa8, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x37, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, - 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, - 0xb1, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3a, 0xda, 0x41, 0x1a, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, + 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0xb1, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 
0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3a, 0xda, 0x41, + 0x1a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xa8, 0x01, 0x0a, 0x17, 0x4c, 0x69, 0x73, + 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x73, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, - 0x2a, 0x2a, 0x7d, 0x12, 0xa8, 0x01, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, - 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x7e, - 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x0a, 0x12, 0x64, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98, - 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 
0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, - 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, - 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x8d, 0x01, 0x0a, 0x0d, 0x52, 0x65, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, - 0x38, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, - 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xba, 0x01, 0x0a, 0x14, 0x43, 0x61, - 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, - 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, - 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, - 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, - 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, - 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, - 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, + 0x2a, 0x2a, 0x7d, 0x12, 0x7e, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 
0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23, + 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xa5, - 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, - 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x3d, 0x2a, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x39, 0xda, 0x41, 0x12, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, - 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x3d, 0x2a, 
0x2a, 0x7d, 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x6e, 0x0a, 0x0f, 0x42, 0x69, 0x64, 0x69, 0x57, - 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, - 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x84, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98, - 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x8d, + 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, - 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x8a, - 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x13, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 
0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, - 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, - 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, 0x21, 0x77, 0x72, 0x69, 0x74, - 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x10, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x22, 0x38, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xba, + 0x01, 0x0a, 0x14, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, + 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, + 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, + 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x80, 0x01, 0x0a, 0x11, - 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 
0x76, 0x69, 0x63, 0x65, - 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, + 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x09, + 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, + 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x22, 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, - 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x95, - 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, - 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, - 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x31, 0xda, 0x41, 0x1d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2c, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, - 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x77, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x25, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, - 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x7d, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, - 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7c, - 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, + 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, + 
0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x2a, 0x2a, 0x7d, 0x12, 0xa5, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, + 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, + 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, + 0x39, 0xda, 0x41, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x6e, 0x0a, 0x0f, + 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 
0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x84, 0x01, 0x0a, + 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, - 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, - 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x9d, 0x01, 0x0a, - 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, + 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x3a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, + 0x01, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, + 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, + 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, + 0x21, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, + 0x65, 0x63, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 
0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, + 0xae, 0x01, 0x0a, 0x10, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, + 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, + 0x12, 0x80, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x12, 0x95, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, + 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x3f, 0xda, 0x41, 0x14, - 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, - 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12, 0x20, 0x0a, 0x10, 0x68, 0x6d, - 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, - 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x1a, 0xa7, 0x02, 0xca, - 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, 0x02, 0x68, 0x74, 0x74, 0x70, - 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 
0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, - 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, - 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, - 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, - 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x66, - 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2c, 0x68, 0x74, 0x74, 0x70, - 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, + 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0xda, 0x41, 0x1d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, + 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x77, 0x0a, 0x0d, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x25, 0xda, + 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7d, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, + 0x65, 0x79, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, + 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0xda, 0x41, + 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x12, 0x7c, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, + 0x65, 0x79, 0x73, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, + 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 
0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x12, 0x9d, 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, + 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, + 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x22, 0x3f, 0xda, 0x41, 0x14, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12, + 0x20, 0x0a, 0x10, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, + 0x7d, 0x1a, 0xa7, 0x02, 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, + 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, + 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, + 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, + 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, - 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xe2, 0x01, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, - 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, - 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, - 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, - 0x6b, 0x65, 0x79, 0x7d, 0x0a, 
0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0c, 0x53, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, - 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, - 0x62, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, + 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xe2, 0x01, 0xea, 0x41, + 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, + 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, + 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x42, 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x3e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -8843,8 +8967,8 @@ func file_google_storage_v2_storage_proto_rawDescGZIP() []byte { } var file_google_storage_v2_storage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 78) -var file_google_storage_v2_storage_proto_goTypes = []interface{}{ +var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 79) +var file_google_storage_v2_storage_proto_goTypes = []any{ (ServiceConstants_Values)(0), // 0: google.storage.v2.ServiceConstants.Values (*DeleteBucketRequest)(nil), // 1: google.storage.v2.DeleteBucketRequest (*GetBucketRequest)(nil), // 2: google.storage.v2.GetBucketRequest @@ -8917,31 +9041,32 @@ var file_google_storage_v2_storage_proto_goTypes = []interface{}{ (*Bucket_Website)(nil), // 69: google.storage.v2.Bucket.Website (*Bucket_CustomPlacementConfig)(nil), // 70: google.storage.v2.Bucket.CustomPlacementConfig (*Bucket_Autoclass)(nil), // 71: google.storage.v2.Bucket.Autoclass - nil, // 72: google.storage.v2.Bucket.LabelsEntry - 
(*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 73: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess - (*Bucket_Lifecycle_Rule)(nil), // 74: google.storage.v2.Bucket.Lifecycle.Rule - (*Bucket_Lifecycle_Rule_Action)(nil), // 75: google.storage.v2.Bucket.Lifecycle.Rule.Action - (*Bucket_Lifecycle_Rule_Condition)(nil), // 76: google.storage.v2.Bucket.Lifecycle.Rule.Condition - nil, // 77: google.storage.v2.NotificationConfig.CustomAttributesEntry - nil, // 78: google.storage.v2.Object.MetadataEntry - (*fieldmaskpb.FieldMask)(nil), // 79: google.protobuf.FieldMask - (*timestamppb.Timestamp)(nil), // 80: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 81: google.protobuf.Duration - (*date.Date)(nil), // 82: google.type.Date - (*iampb.GetIamPolicyRequest)(nil), // 83: google.iam.v1.GetIamPolicyRequest - (*iampb.SetIamPolicyRequest)(nil), // 84: google.iam.v1.SetIamPolicyRequest - (*iampb.TestIamPermissionsRequest)(nil), // 85: google.iam.v1.TestIamPermissionsRequest - (*emptypb.Empty)(nil), // 86: google.protobuf.Empty - (*iampb.Policy)(nil), // 87: google.iam.v1.Policy - (*iampb.TestIamPermissionsResponse)(nil), // 88: google.iam.v1.TestIamPermissionsResponse + (*Bucket_HierarchicalNamespace)(nil), // 72: google.storage.v2.Bucket.HierarchicalNamespace + nil, // 73: google.storage.v2.Bucket.LabelsEntry + (*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 74: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + (*Bucket_Lifecycle_Rule)(nil), // 75: google.storage.v2.Bucket.Lifecycle.Rule + (*Bucket_Lifecycle_Rule_Action)(nil), // 76: google.storage.v2.Bucket.Lifecycle.Rule.Action + (*Bucket_Lifecycle_Rule_Condition)(nil), // 77: google.storage.v2.Bucket.Lifecycle.Rule.Condition + nil, // 78: google.storage.v2.NotificationConfig.CustomAttributesEntry + nil, // 79: google.storage.v2.Object.MetadataEntry + (*fieldmaskpb.FieldMask)(nil), // 80: google.protobuf.FieldMask + (*timestamppb.Timestamp)(nil), // 81: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 82: google.protobuf.Duration + (*date.Date)(nil), // 83: google.type.Date + (*iampb.GetIamPolicyRequest)(nil), // 84: google.iam.v1.GetIamPolicyRequest + (*iampb.SetIamPolicyRequest)(nil), // 85: google.iam.v1.SetIamPolicyRequest + (*iampb.TestIamPermissionsRequest)(nil), // 86: google.iam.v1.TestIamPermissionsRequest + (*emptypb.Empty)(nil), // 87: google.protobuf.Empty + (*iampb.Policy)(nil), // 88: google.iam.v1.Policy + (*iampb.TestIamPermissionsResponse)(nil), // 89: google.iam.v1.TestIamPermissionsResponse } var file_google_storage_v2_storage_proto_depIdxs = []int32{ - 79, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask + 80, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask 44, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket - 79, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask + 80, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask 44, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket 44, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket - 79, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask + 80, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask 49, // 6: 
google.storage.v2.CreateNotificationConfigRequest.notification_config:type_name -> google.storage.v2.NotificationConfig 49, // 7: google.storage.v2.ListNotificationConfigsResponse.notification_configs:type_name -> google.storage.v2.NotificationConfig 51, // 8: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object @@ -8951,9 +9076,9 @@ var file_google_storage_v2_storage_proto_depIdxs = []int32{ 42, // 12: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams 42, // 13: google.storage.v2.RestoreObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams 42, // 14: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 79, // 15: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask + 80, // 15: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask 42, // 16: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 79, // 17: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask + 80, // 17: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask 46, // 18: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData 47, // 19: google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums 57, // 20: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange @@ -8969,7 +9094,7 @@ var file_google_storage_v2_storage_proto_depIdxs = []int32{ 47, // 30: google.storage.v2.BidiWriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums 42, // 31: google.storage.v2.BidiWriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams 51, // 32: google.storage.v2.BidiWriteObjectResponse.resource:type_name -> google.storage.v2.Object - 79, // 33: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask + 80, // 33: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask 42, // 34: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams 51, // 35: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object 51, // 36: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object @@ -8980,19 +9105,19 @@ var file_google_storage_v2_storage_proto_depIdxs = []int32{ 42, // 41: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams 47, // 42: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums 51, // 43: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object - 79, // 44: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask + 80, // 44: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask 42, // 45: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams 48, // 46: google.storage.v2.CreateHmacKeyResponse.metadata:type_name -> google.storage.v2.HmacKeyMetadata 48, // 
47: google.storage.v2.ListHmacKeysResponse.hmac_keys:type_name -> google.storage.v2.HmacKeyMetadata 48, // 48: google.storage.v2.UpdateHmacKeyRequest.hmac_key:type_name -> google.storage.v2.HmacKeyMetadata - 79, // 49: google.storage.v2.UpdateHmacKeyRequest.update_mask:type_name -> google.protobuf.FieldMask + 80, // 49: google.storage.v2.UpdateHmacKeyRequest.update_mask:type_name -> google.protobuf.FieldMask 45, // 50: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl 52, // 51: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl 64, // 52: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle - 80, // 53: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp + 81, // 53: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp 61, // 54: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors - 80, // 55: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp - 72, // 56: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry + 81, // 55: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp + 73, // 56: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry 69, // 57: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website 68, // 58: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning 65, // 59: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging @@ -9003,108 +9128,111 @@ var file_google_storage_v2_storage_proto_depIdxs = []int32{ 63, // 64: google.storage.v2.Bucket.iam_config:type_name -> google.storage.v2.Bucket.IamConfig 70, // 65: google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig 71, // 66: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass - 67, // 67: google.storage.v2.Bucket.soft_delete_policy:type_name -> google.storage.v2.Bucket.SoftDeletePolicy - 54, // 68: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam - 80, // 69: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp - 80, // 70: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp - 77, // 71: google.storage.v2.NotificationConfig.custom_attributes:type_name -> google.storage.v2.NotificationConfig.CustomAttributesEntry - 52, // 72: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl - 80, // 73: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp - 80, // 74: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp - 47, // 75: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums - 80, // 76: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp - 80, // 77: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp - 80, // 78: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp - 78, // 79: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry - 56, // 80: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner - 50, // 81: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption - 80, // 82: 
google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp - 54, // 83: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam - 51, // 84: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object - 59, // 85: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions - 73, // 86: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess - 74, // 87: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule - 80, // 88: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp - 81, // 89: google.storage.v2.Bucket.RetentionPolicy.retention_duration:type_name -> google.protobuf.Duration - 81, // 90: google.storage.v2.Bucket.SoftDeletePolicy.retention_duration:type_name -> google.protobuf.Duration - 80, // 91: google.storage.v2.Bucket.SoftDeletePolicy.effective_time:type_name -> google.protobuf.Timestamp - 80, // 92: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp - 80, // 93: google.storage.v2.Bucket.Autoclass.terminal_storage_class_update_time:type_name -> google.protobuf.Timestamp - 80, // 94: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp - 75, // 95: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action - 76, // 96: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition - 82, // 97: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date - 82, // 98: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date - 82, // 99: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date - 1, // 100: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest - 2, // 101: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest - 3, // 102: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest - 4, // 103: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest - 6, // 104: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest - 83, // 105: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest - 84, // 106: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest - 85, // 107: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest - 7, // 108: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest - 8, // 109: google.storage.v2.Storage.DeleteNotificationConfig:input_type -> google.storage.v2.DeleteNotificationConfigRequest - 9, // 110: google.storage.v2.Storage.GetNotificationConfig:input_type -> google.storage.v2.GetNotificationConfigRequest - 10, // 111: google.storage.v2.Storage.CreateNotificationConfig:input_type -> google.storage.v2.CreateNotificationConfigRequest - 11, // 112: google.storage.v2.Storage.ListNotificationConfigs:input_type -> google.storage.v2.ListNotificationConfigsRequest - 13, // 113: 
google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest - 14, // 114: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest - 15, // 115: google.storage.v2.Storage.RestoreObject:input_type -> google.storage.v2.RestoreObjectRequest - 16, // 116: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest - 19, // 117: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest - 18, // 118: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest - 33, // 119: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest - 22, // 120: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest - 24, // 121: google.storage.v2.Storage.BidiWriteObject:input_type -> google.storage.v2.BidiWriteObjectRequest - 26, // 122: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest - 29, // 123: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest - 31, // 124: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest - 27, // 125: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest - 34, // 126: google.storage.v2.Storage.GetServiceAccount:input_type -> google.storage.v2.GetServiceAccountRequest - 35, // 127: google.storage.v2.Storage.CreateHmacKey:input_type -> google.storage.v2.CreateHmacKeyRequest - 37, // 128: google.storage.v2.Storage.DeleteHmacKey:input_type -> google.storage.v2.DeleteHmacKeyRequest - 38, // 129: google.storage.v2.Storage.GetHmacKey:input_type -> google.storage.v2.GetHmacKeyRequest - 39, // 130: google.storage.v2.Storage.ListHmacKeys:input_type -> google.storage.v2.ListHmacKeysRequest - 41, // 131: google.storage.v2.Storage.UpdateHmacKey:input_type -> google.storage.v2.UpdateHmacKeyRequest - 86, // 132: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty - 44, // 133: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket - 44, // 134: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket - 5, // 135: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse - 44, // 136: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket - 87, // 137: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy - 87, // 138: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy - 88, // 139: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse - 44, // 140: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket - 86, // 141: google.storage.v2.Storage.DeleteNotificationConfig:output_type -> google.protobuf.Empty - 49, // 142: google.storage.v2.Storage.GetNotificationConfig:output_type -> google.storage.v2.NotificationConfig - 49, // 143: google.storage.v2.Storage.CreateNotificationConfig:output_type -> google.storage.v2.NotificationConfig - 12, // 144: google.storage.v2.Storage.ListNotificationConfigs:output_type -> google.storage.v2.ListNotificationConfigsResponse - 51, // 145: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object - 86, // 146: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty - 51, // 
147: google.storage.v2.Storage.RestoreObject:output_type -> google.storage.v2.Object - 17, // 148: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse - 51, // 149: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object - 20, // 150: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse - 51, // 151: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object - 23, // 152: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse - 25, // 153: google.storage.v2.Storage.BidiWriteObject:output_type -> google.storage.v2.BidiWriteObjectResponse - 53, // 154: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse - 30, // 155: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse - 32, // 156: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse - 28, // 157: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse - 55, // 158: google.storage.v2.Storage.GetServiceAccount:output_type -> google.storage.v2.ServiceAccount - 36, // 159: google.storage.v2.Storage.CreateHmacKey:output_type -> google.storage.v2.CreateHmacKeyResponse - 86, // 160: google.storage.v2.Storage.DeleteHmacKey:output_type -> google.protobuf.Empty - 48, // 161: google.storage.v2.Storage.GetHmacKey:output_type -> google.storage.v2.HmacKeyMetadata - 40, // 162: google.storage.v2.Storage.ListHmacKeys:output_type -> google.storage.v2.ListHmacKeysResponse - 48, // 163: google.storage.v2.Storage.UpdateHmacKey:output_type -> google.storage.v2.HmacKeyMetadata - 132, // [132:164] is the sub-list for method output_type - 100, // [100:132] is the sub-list for method input_type - 100, // [100:100] is the sub-list for extension type_name - 100, // [100:100] is the sub-list for extension extendee - 0, // [0:100] is the sub-list for field type_name + 72, // 67: google.storage.v2.Bucket.hierarchical_namespace:type_name -> google.storage.v2.Bucket.HierarchicalNamespace + 67, // 68: google.storage.v2.Bucket.soft_delete_policy:type_name -> google.storage.v2.Bucket.SoftDeletePolicy + 54, // 69: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam + 81, // 70: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp + 81, // 71: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp + 78, // 72: google.storage.v2.NotificationConfig.custom_attributes:type_name -> google.storage.v2.NotificationConfig.CustomAttributesEntry + 52, // 73: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl + 81, // 74: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp + 81, // 75: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp + 47, // 76: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums + 81, // 77: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp + 81, // 78: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp + 81, // 79: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp + 79, // 80: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry + 56, // 81: google.storage.v2.Object.owner:type_name -> 
google.storage.v2.Owner + 50, // 82: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption + 81, // 83: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp + 81, // 84: google.storage.v2.Object.soft_delete_time:type_name -> google.protobuf.Timestamp + 81, // 85: google.storage.v2.Object.hard_delete_time:type_name -> google.protobuf.Timestamp + 54, // 86: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam + 51, // 87: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object + 59, // 88: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + 74, // 89: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + 75, // 90: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule + 81, // 91: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp + 82, // 92: google.storage.v2.Bucket.RetentionPolicy.retention_duration:type_name -> google.protobuf.Duration + 82, // 93: google.storage.v2.Bucket.SoftDeletePolicy.retention_duration:type_name -> google.protobuf.Duration + 81, // 94: google.storage.v2.Bucket.SoftDeletePolicy.effective_time:type_name -> google.protobuf.Timestamp + 81, // 95: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp + 81, // 96: google.storage.v2.Bucket.Autoclass.terminal_storage_class_update_time:type_name -> google.protobuf.Timestamp + 81, // 97: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp + 76, // 98: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action + 77, // 99: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition + 83, // 100: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date + 83, // 101: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date + 83, // 102: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date + 1, // 103: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest + 2, // 104: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest + 3, // 105: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest + 4, // 106: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest + 6, // 107: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest + 84, // 108: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest + 85, // 109: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest + 86, // 110: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest + 7, // 111: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest + 8, // 112: google.storage.v2.Storage.DeleteNotificationConfig:input_type -> google.storage.v2.DeleteNotificationConfigRequest + 9, // 113: 
google.storage.v2.Storage.GetNotificationConfig:input_type -> google.storage.v2.GetNotificationConfigRequest + 10, // 114: google.storage.v2.Storage.CreateNotificationConfig:input_type -> google.storage.v2.CreateNotificationConfigRequest + 11, // 115: google.storage.v2.Storage.ListNotificationConfigs:input_type -> google.storage.v2.ListNotificationConfigsRequest + 13, // 116: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest + 14, // 117: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest + 15, // 118: google.storage.v2.Storage.RestoreObject:input_type -> google.storage.v2.RestoreObjectRequest + 16, // 119: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest + 19, // 120: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest + 18, // 121: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest + 33, // 122: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest + 22, // 123: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest + 24, // 124: google.storage.v2.Storage.BidiWriteObject:input_type -> google.storage.v2.BidiWriteObjectRequest + 26, // 125: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest + 29, // 126: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest + 31, // 127: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest + 27, // 128: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest + 34, // 129: google.storage.v2.Storage.GetServiceAccount:input_type -> google.storage.v2.GetServiceAccountRequest + 35, // 130: google.storage.v2.Storage.CreateHmacKey:input_type -> google.storage.v2.CreateHmacKeyRequest + 37, // 131: google.storage.v2.Storage.DeleteHmacKey:input_type -> google.storage.v2.DeleteHmacKeyRequest + 38, // 132: google.storage.v2.Storage.GetHmacKey:input_type -> google.storage.v2.GetHmacKeyRequest + 39, // 133: google.storage.v2.Storage.ListHmacKeys:input_type -> google.storage.v2.ListHmacKeysRequest + 41, // 134: google.storage.v2.Storage.UpdateHmacKey:input_type -> google.storage.v2.UpdateHmacKeyRequest + 87, // 135: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty + 44, // 136: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket + 44, // 137: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket + 5, // 138: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse + 44, // 139: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket + 88, // 140: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy + 88, // 141: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy + 89, // 142: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse + 44, // 143: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket + 87, // 144: google.storage.v2.Storage.DeleteNotificationConfig:output_type -> google.protobuf.Empty + 49, // 145: google.storage.v2.Storage.GetNotificationConfig:output_type -> google.storage.v2.NotificationConfig + 49, // 146: 
google.storage.v2.Storage.CreateNotificationConfig:output_type -> google.storage.v2.NotificationConfig + 12, // 147: google.storage.v2.Storage.ListNotificationConfigs:output_type -> google.storage.v2.ListNotificationConfigsResponse + 51, // 148: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object + 87, // 149: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty + 51, // 150: google.storage.v2.Storage.RestoreObject:output_type -> google.storage.v2.Object + 17, // 151: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse + 51, // 152: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object + 20, // 153: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse + 51, // 154: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object + 23, // 155: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse + 25, // 156: google.storage.v2.Storage.BidiWriteObject:output_type -> google.storage.v2.BidiWriteObjectResponse + 53, // 157: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse + 30, // 158: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse + 32, // 159: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse + 28, // 160: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse + 55, // 161: google.storage.v2.Storage.GetServiceAccount:output_type -> google.storage.v2.ServiceAccount + 36, // 162: google.storage.v2.Storage.CreateHmacKey:output_type -> google.storage.v2.CreateHmacKeyResponse + 87, // 163: google.storage.v2.Storage.DeleteHmacKey:output_type -> google.protobuf.Empty + 48, // 164: google.storage.v2.Storage.GetHmacKey:output_type -> google.storage.v2.HmacKeyMetadata + 40, // 165: google.storage.v2.Storage.ListHmacKeys:output_type -> google.storage.v2.ListHmacKeysResponse + 48, // 166: google.storage.v2.Storage.UpdateHmacKey:output_type -> google.storage.v2.HmacKeyMetadata + 135, // [135:167] is the sub-list for method output_type + 103, // [103:135] is the sub-list for method input_type + 103, // [103:103] is the sub-list for extension type_name + 103, // [103:103] is the sub-list for extension extendee + 0, // [0:103] is the sub-list for field type_name } func init() { file_google_storage_v2_storage_proto_init() } @@ -9113,7 +9241,7 @@ func file_google_storage_v2_storage_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_storage_v2_storage_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*DeleteBucketRequest); i { case 0: return &v.state @@ -9125,7 +9253,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*GetBucketRequest); i { case 0: return &v.state @@ -9137,7 +9265,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := 
v.(*CreateBucketRequest); i { case 0: return &v.state @@ -9149,7 +9277,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ListBucketsRequest); i { case 0: return &v.state @@ -9161,7 +9289,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*ListBucketsResponse); i { case 0: return &v.state @@ -9173,7 +9301,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*LockBucketRetentionPolicyRequest); i { case 0: return &v.state @@ -9185,7 +9313,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*UpdateBucketRequest); i { case 0: return &v.state @@ -9197,7 +9325,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*DeleteNotificationConfigRequest); i { case 0: return &v.state @@ -9209,7 +9337,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*GetNotificationConfigRequest); i { case 0: return &v.state @@ -9221,7 +9349,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*CreateNotificationConfigRequest); i { case 0: return &v.state @@ -9233,7 +9361,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*ListNotificationConfigsRequest); i { case 0: return &v.state @@ -9245,7 +9373,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*ListNotificationConfigsResponse); i { case 0: return &v.state @@ -9257,7 +9385,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*ComposeObjectRequest); i { case 0: 
return &v.state @@ -9269,7 +9397,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*DeleteObjectRequest); i { case 0: return &v.state @@ -9281,7 +9409,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*RestoreObjectRequest); i { case 0: return &v.state @@ -9293,7 +9421,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*CancelResumableWriteRequest); i { case 0: return &v.state @@ -9305,7 +9433,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*CancelResumableWriteResponse); i { case 0: return &v.state @@ -9317,7 +9445,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[17].Exporter = func(v any, i int) any { switch v := v.(*ReadObjectRequest); i { case 0: return &v.state @@ -9329,7 +9457,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[18].Exporter = func(v any, i int) any { switch v := v.(*GetObjectRequest); i { case 0: return &v.state @@ -9341,7 +9469,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[19].Exporter = func(v any, i int) any { switch v := v.(*ReadObjectResponse); i { case 0: return &v.state @@ -9353,7 +9481,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[20].Exporter = func(v any, i int) any { switch v := v.(*WriteObjectSpec); i { case 0: return &v.state @@ -9365,7 +9493,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[21].Exporter = func(v any, i int) any { switch v := v.(*WriteObjectRequest); i { case 0: return &v.state @@ -9377,7 +9505,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[22].Exporter = func(v any, i int) any { switch v := v.(*WriteObjectResponse); i { case 0: return &v.state @@ -9389,7 +9517,7 @@ func file_google_storage_v2_storage_proto_init() 
{ return nil } } - file_google_storage_v2_storage_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[23].Exporter = func(v any, i int) any { switch v := v.(*BidiWriteObjectRequest); i { case 0: return &v.state @@ -9401,7 +9529,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[24].Exporter = func(v any, i int) any { switch v := v.(*BidiWriteObjectResponse); i { case 0: return &v.state @@ -9413,7 +9541,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[25].Exporter = func(v any, i int) any { switch v := v.(*ListObjectsRequest); i { case 0: return &v.state @@ -9425,7 +9553,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[26].Exporter = func(v any, i int) any { switch v := v.(*QueryWriteStatusRequest); i { case 0: return &v.state @@ -9437,7 +9565,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[27].Exporter = func(v any, i int) any { switch v := v.(*QueryWriteStatusResponse); i { case 0: return &v.state @@ -9449,7 +9577,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[28].Exporter = func(v any, i int) any { switch v := v.(*RewriteObjectRequest); i { case 0: return &v.state @@ -9461,7 +9589,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[29].Exporter = func(v any, i int) any { switch v := v.(*RewriteResponse); i { case 0: return &v.state @@ -9473,7 +9601,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[30].Exporter = func(v any, i int) any { switch v := v.(*StartResumableWriteRequest); i { case 0: return &v.state @@ -9485,7 +9613,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[31].Exporter = func(v any, i int) any { switch v := v.(*StartResumableWriteResponse); i { case 0: return &v.state @@ -9497,7 +9625,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[32].Exporter = func(v any, i int) any { switch v := v.(*UpdateObjectRequest); i { case 0: return &v.state @@ -9509,7 +9637,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - 
file_google_storage_v2_storage_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[33].Exporter = func(v any, i int) any { switch v := v.(*GetServiceAccountRequest); i { case 0: return &v.state @@ -9521,7 +9649,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[34].Exporter = func(v any, i int) any { switch v := v.(*CreateHmacKeyRequest); i { case 0: return &v.state @@ -9533,7 +9661,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[35].Exporter = func(v any, i int) any { switch v := v.(*CreateHmacKeyResponse); i { case 0: return &v.state @@ -9545,7 +9673,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[36].Exporter = func(v any, i int) any { switch v := v.(*DeleteHmacKeyRequest); i { case 0: return &v.state @@ -9557,7 +9685,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[37].Exporter = func(v any, i int) any { switch v := v.(*GetHmacKeyRequest); i { case 0: return &v.state @@ -9569,7 +9697,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[38].Exporter = func(v any, i int) any { switch v := v.(*ListHmacKeysRequest); i { case 0: return &v.state @@ -9581,7 +9709,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[39].Exporter = func(v any, i int) any { switch v := v.(*ListHmacKeysResponse); i { case 0: return &v.state @@ -9593,7 +9721,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[40].Exporter = func(v any, i int) any { switch v := v.(*UpdateHmacKeyRequest); i { case 0: return &v.state @@ -9605,7 +9733,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[41].Exporter = func(v any, i int) any { switch v := v.(*CommonObjectRequestParams); i { case 0: return &v.state @@ -9617,7 +9745,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[42].Exporter = func(v any, i int) any { switch v := v.(*ServiceConstants); i { case 0: return &v.state @@ -9629,7 +9757,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[43].Exporter = func(v interface{}, i 
int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[43].Exporter = func(v any, i int) any { switch v := v.(*Bucket); i { case 0: return &v.state @@ -9641,7 +9769,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[44].Exporter = func(v any, i int) any { switch v := v.(*BucketAccessControl); i { case 0: return &v.state @@ -9653,7 +9781,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[45].Exporter = func(v any, i int) any { switch v := v.(*ChecksummedData); i { case 0: return &v.state @@ -9665,7 +9793,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[46].Exporter = func(v any, i int) any { switch v := v.(*ObjectChecksums); i { case 0: return &v.state @@ -9677,7 +9805,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[47].Exporter = func(v any, i int) any { switch v := v.(*HmacKeyMetadata); i { case 0: return &v.state @@ -9689,7 +9817,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[48].Exporter = func(v any, i int) any { switch v := v.(*NotificationConfig); i { case 0: return &v.state @@ -9701,7 +9829,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[49].Exporter = func(v any, i int) any { switch v := v.(*CustomerEncryption); i { case 0: return &v.state @@ -9713,7 +9841,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[50].Exporter = func(v any, i int) any { switch v := v.(*Object); i { case 0: return &v.state @@ -9725,7 +9853,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[51].Exporter = func(v any, i int) any { switch v := v.(*ObjectAccessControl); i { case 0: return &v.state @@ -9737,7 +9865,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[52].Exporter = func(v any, i int) any { switch v := v.(*ListObjectsResponse); i { case 0: return &v.state @@ -9749,7 +9877,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[53].Exporter = func(v any, i int) any { switch v := 
v.(*ProjectTeam); i { case 0: return &v.state @@ -9761,7 +9889,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[54].Exporter = func(v any, i int) any { switch v := v.(*ServiceAccount); i { case 0: return &v.state @@ -9773,7 +9901,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[55].Exporter = func(v any, i int) any { switch v := v.(*Owner); i { case 0: return &v.state @@ -9785,7 +9913,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[56].Exporter = func(v any, i int) any { switch v := v.(*ContentRange); i { case 0: return &v.state @@ -9797,7 +9925,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[57].Exporter = func(v any, i int) any { switch v := v.(*ComposeObjectRequest_SourceObject); i { case 0: return &v.state @@ -9809,7 +9937,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[58].Exporter = func(v any, i int) any { switch v := v.(*ComposeObjectRequest_SourceObject_ObjectPreconditions); i { case 0: return &v.state @@ -9821,7 +9949,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[59].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Billing); i { case 0: return &v.state @@ -9833,7 +9961,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[60].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Cors); i { case 0: return &v.state @@ -9845,7 +9973,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[61].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Encryption); i { case 0: return &v.state @@ -9857,7 +9985,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[62].Exporter = func(v any, i int) any { switch v := v.(*Bucket_IamConfig); i { case 0: return &v.state @@ -9869,7 +9997,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[63].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Lifecycle); i { case 0: return &v.state @@ -9881,7 +10009,7 @@ func 
file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[64].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Logging); i { case 0: return &v.state @@ -9893,7 +10021,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[65].Exporter = func(v any, i int) any { switch v := v.(*Bucket_RetentionPolicy); i { case 0: return &v.state @@ -9905,7 +10033,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[66].Exporter = func(v any, i int) any { switch v := v.(*Bucket_SoftDeletePolicy); i { case 0: return &v.state @@ -9917,7 +10045,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[67].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Versioning); i { case 0: return &v.state @@ -9929,7 +10057,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[68].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Website); i { case 0: return &v.state @@ -9941,7 +10069,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[69].Exporter = func(v any, i int) any { switch v := v.(*Bucket_CustomPlacementConfig); i { case 0: return &v.state @@ -9953,7 +10081,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[70].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Autoclass); i { case 0: return &v.state @@ -9965,7 +10093,19 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[71].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_HierarchicalNamespace); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[73].Exporter = func(v any, i int) any { switch v := v.(*Bucket_IamConfig_UniformBucketLevelAccess); i { case 0: return &v.state @@ -9977,7 +10117,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[74].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Lifecycle_Rule); i { case 0: return &v.state @@ -9989,7 +10129,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[74].Exporter = func(v 
interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[75].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Lifecycle_Rule_Action); i { case 0: return &v.state @@ -10001,7 +10141,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[76].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Lifecycle_Rule_Condition); i { case 0: return &v.state @@ -10014,55 +10154,55 @@ func file_google_storage_v2_storage_proto_init() { } } } - file_google_storage_v2_storage_proto_msgTypes[0].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[1].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[3].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[6].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[12].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[13].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[14].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[17].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[21].OneofWrappers = []interface{}{ + file_google_storage_v2_storage_proto_msgTypes[0].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[1].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[3].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[6].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[12].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[13].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[14].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[17].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[21].OneofWrappers = []any{ (*WriteObjectRequest_UploadId)(nil), (*WriteObjectRequest_WriteObjectSpec)(nil), (*WriteObjectRequest_ChecksummedData)(nil), } - file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []interface{}{ + file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []any{ (*WriteObjectResponse_PersistedSize)(nil), (*WriteObjectResponse_Resource)(nil), } - file_google_storage_v2_storage_proto_msgTypes[23].OneofWrappers = []interface{}{ + file_google_storage_v2_storage_proto_msgTypes[23].OneofWrappers = []any{ (*BidiWriteObjectRequest_UploadId)(nil), (*BidiWriteObjectRequest_WriteObjectSpec)(nil), (*BidiWriteObjectRequest_ChecksummedData)(nil), } - file_google_storage_v2_storage_proto_msgTypes[24].OneofWrappers = []interface{}{ + file_google_storage_v2_storage_proto_msgTypes[24].OneofWrappers = []any{ (*BidiWriteObjectResponse_PersistedSize)(nil), (*BidiWriteObjectResponse_Resource)(nil), } - file_google_storage_v2_storage_proto_msgTypes[25].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = []interface{}{ + file_google_storage_v2_storage_proto_msgTypes[25].OneofWrappers = []any{} + 
file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = []any{ (*QueryWriteStatusResponse_PersistedSize)(nil), (*QueryWriteStatusResponse_Resource)(nil), } - file_google_storage_v2_storage_proto_msgTypes[28].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[32].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[45].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[46].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[50].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[58].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[66].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[70].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[75].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[28].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[32].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[45].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[46].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[50].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[58].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[66].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[70].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[76].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_storage_v2_storage_proto_rawDesc, NumEnums: 1, - NumMessages: 78, + NumMessages: 79, NumExtensions: 0, NumServices: 1, }, @@ -10099,15 +10239,13 @@ type StorageClient interface { ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error) // Locks retention policy on a bucket. LockBucketRetentionPolicy(ctx context.Context, in *LockBucketRetentionPolicyRequest, opts ...grpc.CallOption) (*Bucket, error) - // Gets the IAM policy for a specified bucket or object. + // Gets the IAM policy for a specified bucket. // The `resource` field in the request should be - // `projects/_/buckets/{bucket}` for a bucket or - // `projects/_/buckets/{bucket}/objects/{object}` for an object. + // `projects/_/buckets/{bucket}`. GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) - // Updates an IAM policy for the specified bucket or object. + // Updates an IAM policy for the specified bucket. // The `resource` field in the request should be - // `projects/_/buckets/{bucket}` for a bucket or - // `projects/_/buckets/{bucket}/objects/{object}` for an object. + // `projects/_/buckets/{bucket}`. SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) // Tests a set of permissions on the given bucket or object to see which, if // any, are held by the caller. @@ -10647,15 +10785,13 @@ type StorageServer interface { ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error) // Locks retention policy on a bucket. LockBucketRetentionPolicy(context.Context, *LockBucketRetentionPolicyRequest) (*Bucket, error) - // Gets the IAM policy for a specified bucket or object. + // Gets the IAM policy for a specified bucket. 
 	// The `resource` field in the request should be
-	// `projects/_/buckets/{bucket}` for a bucket or
-	// `projects/_/buckets/{bucket}/objects/{object}` for an object.
+	// `projects/_/buckets/{bucket}`.
 	GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error)
-	// Updates an IAM policy for the specified bucket or object.
+	// Updates an IAM policy for the specified bucket.
 	// The `resource` field in the request should be
-	// `projects/_/buckets/{bucket}` for a bucket or
-	// `projects/_/buckets/{bucket}/objects/{object}` for an object.
+	// `projects/_/buckets/{bucket}`.
 	SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error)
 	// Tests a set of permissions on the given bucket or object to see which, if
 	// any, are held by the caller.
diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go
index 2d5cf890..e5b2de09 100644
--- a/vendor/cloud.google.com/go/storage/internal/version.go
+++ b/vendor/cloud.google.com/go/storage/internal/version.go
@@ -15,4 +15,4 @@ package internal
 
 // Version is the current tagged release of the library.
-const Version = "1.36.0"
+const Version = "1.43.0"
diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go
index dc79fd88..de57b4bb 100644
--- a/vendor/cloud.google.com/go/storage/invoke.go
+++ b/vendor/cloud.google.com/go/storage/invoke.go
@@ -70,6 +70,9 @@ func run(ctx context.Context, call func(ctx context.Context) error, retry *retry
 	return internal.Retry(ctx, bo, func() (stop bool, err error) {
 		ctxWithHeaders := setInvocationHeaders(ctx, invocationID, attempts)
 		err = call(ctxWithHeaders)
+		if err != nil && retry.maxAttempts != nil && attempts >= *retry.maxAttempts {
+			return true, fmt.Errorf("storage: retry failed after %v attempts; last error: %w", *retry.maxAttempts, err)
+		}
 		attempts++
 		return !errorFunc(err), err
 	})
@@ -81,7 +84,21 @@ func setInvocationHeaders(ctx context.Context, invocationID string, attempts int
 	invocationHeader := fmt.Sprintf("gccl-invocation-id/%v gccl-attempt-count/%v", invocationID, attempts)
 	xGoogHeader := strings.Join([]string{invocationHeader, xGoogDefaultHeader}, " ")
 
-	ctx = callctx.SetHeaders(ctx, xGoogHeaderKey, xGoogHeader)
+	// TODO: remove this once the respective transport packages merge xGoogHeader.
+	// Also remove gl-go at that time, as it will be repeated.
+	hdrs := callctx.HeadersFromContext(ctx)
+	for _, v := range hdrs[xGoogHeaderKey] {
+		xGoogHeader = strings.Join([]string{xGoogHeader, v}, " ")
+	}
+
+	if hdrs[xGoogHeaderKey] != nil {
+		// Replace the key instead of adding it, if there was anything to merge with.
+		hdrs[xGoogHeaderKey] = []string{xGoogHeader}
+	} else {
+		// TODO: keep this line when removing the above code.
+		ctx = callctx.SetHeaders(ctx, xGoogHeaderKey, xGoogHeader)
+	}
+
 	ctx = callctx.SetHeaders(ctx, idempotencyHeaderKey, invocationID)
 	return ctx
 }
@@ -102,22 +119,20 @@ func ShouldRetry(err error) bool {
 	if errors.Is(err, io.ErrUnexpectedEOF) {
 		return true
 	}
+	if errors.Is(err, net.ErrClosed) {
+		return true
+	}
 
 	switch e := err.(type) {
-	case *net.OpError:
-		if strings.Contains(e.Error(), "use of closed network connection") {
-			// TODO: check against net.ErrClosed (go 1.16+) instead of string
-			return true
-		}
 	case *googleapi.Error:
 		// Retry on 408, 429, and 5xx, according to
 		// https://cloud.google.com/storage/docs/exponential-backoff.
 		return e.Code == 408 || e.Code == 429 || (e.Code >= 500 && e.Code < 600)
-	case *url.Error:
+	case *net.OpError, *url.Error:
 		// Retry socket-level errors ECONNREFUSED and ECONNRESET (from syscall).
 		// Unfortunately the error type is unexported, so we resort to string
 		// matching.
-		retriable := []string{"connection refused", "connection reset"}
+		retriable := []string{"connection refused", "connection reset", "broken pipe"}
 		for _, s := range retriable {
 			if strings.Contains(e.Error(), s) {
 				return true
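The `run` change above makes the retry loop bail out once a configured attempt cap is exceeded, wrapping the last error. A minimal sketch of how a caller can exercise that cap through the public retry options (assuming `storage.WithMaxAttempts` is the public setting behind `retry.maxAttempts`; bucket and object names are placeholders):

```go
package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/storage"
	"github.com/googleapis/gax-go/v2"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Cap retries at 5 attempts. With the run() change above, the final
	// failure surfaces as:
	//   storage: retry failed after 5 attempts; last error: ...
	obj := client.Bucket("my-bucket").Object("my-object").Retryer(
		storage.WithMaxAttempts(5),
		storage.WithBackoff(gax.Backoff{Initial: 2 * time.Second}),
		storage.WithPolicy(storage.RetryAlways),
	)
	if _, err := obj.Attrs(ctx); err != nil {
		log.Println(err)
	}
}
```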
diff --git a/vendor/cloud.google.com/go/storage/notifications.go b/vendor/cloud.google.com/go/storage/notifications.go
index 56f3e3da..1d6cfdf5 100644
--- a/vendor/cloud.google.com/go/storage/notifications.go
+++ b/vendor/cloud.google.com/go/storage/notifications.go
@@ -116,7 +116,7 @@ func toProtoNotification(n *Notification) *storagepb.NotificationConfig {
 	}
 }
 
-var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)")
+var topicRE = regexp.MustCompile(`^//pubsub\.googleapis\.com/projects/([^/]+)/topics/([^/]+)`)
 
 // parseNotificationTopic extracts the project and topic IDs from the full
 // resource name returned by the service. If the name is malformed, it returns
diff --git a/vendor/cloud.google.com/go/storage/option.go b/vendor/cloud.google.com/go/storage/option.go
index e72ceb78..debdb0f5 100644
--- a/vendor/cloud.google.com/go/storage/option.go
+++ b/vendor/cloud.google.com/go/storage/option.go
@@ -44,10 +44,14 @@ type storageClientOption interface {
 	ApplyStorageOpt(*storageConfig)
 }
 
-// WithJSONReads is an option that may be passed to a Storage Client on creation.
-// It sets the client to use the JSON API for object reads. Currently, the
-// default API used for reads is XML.
-// Setting this option is required to use the GenerationNotMatch condition.
+// WithJSONReads is an option that may be passed to [NewClient].
+// It sets the client to use the Cloud Storage JSON API for object
+// reads. Currently, the default API used for reads is XML, but JSON will
+// become the default in a future release.
+//
+// Setting this option is required to use the GenerationNotMatch condition. We
+// also recommend using JSON reads to ensure consistency with other client
+// operations (all of which use JSON by default).
 //
 // Note that when this option is set, reads will return a zero date for
 // [ReaderObjectAttrs].LastModified and may return a different value for
@@ -56,10 +60,11 @@ func WithJSONReads() option.ClientOption {
 	return &withReadAPI{useJSON: true}
 }
 
-// WithXMLReads is an option that may be passed to a Storage Client on creation.
-// It sets the client to use the XML API for object reads.
+// WithXMLReads is an option that may be passed to [NewClient].
+// It sets the client to use the Cloud Storage XML API for object reads.
 //
-// This is the current default.
+// This is the current default, but the default will switch to JSON in a future
+// release.
 func WithXMLReads() option.ClientOption {
 	return &withReadAPI{useJSON: false}
 }
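With the reworded `WithJSONReads` docs flagging an upcoming default switch, opting into JSON reads early is a one-line change at client construction. A minimal sketch (nothing here is specific to this repo):

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	// Use the JSON API for reads; required for the GenerationNotMatch read
	// condition and consistent with the other, JSON-based client operations.
	client, err := storage.NewClient(ctx, storage.WithJSONReads())
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
```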
diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go
index 4673a68d..6da2432f 100644
--- a/vendor/cloud.google.com/go/storage/reader.go
+++ b/vendor/cloud.google.com/go/storage/reader.go
@@ -72,6 +72,12 @@ type ReaderObjectAttrs struct {
 // ErrObjectNotExist will be returned if the object is not found.
 //
 // The caller must call Close on the returned Reader when done reading.
+//
+// By default, reads are made using the Cloud Storage XML API. We recommend
+// using the JSON API instead, which can be done by setting [WithJSONReads]
+// when calling [NewClient]. This ensures consistency with other client
+// operations, which all use JSON. JSON will become the default in a future
+// release.
 func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
 	return o.NewRangeReader(ctx, 0, -1)
 }
@@ -86,6 +92,12 @@ func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
 // decompressive transcoding per https://cloud.google.com/storage/docs/transcoding
 // that file will be served back whole, regardless of the requested range as
 // Google Cloud Storage dictates.
+//
+// By default, reads are made using the Cloud Storage XML API. We recommend
+// using the JSON API instead, which can be done by setting [WithJSONReads]
+// when calling [NewClient]. This ensures consistency with other client
+// operations, which all use JSON. JSON will become the default in a future
+// release.
 func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) {
 	// This span covers the life of the reader. It is closed via the context
 	// in Reader.Close.
@@ -198,9 +210,7 @@ var emptyBody = ioutil.NopCloser(strings.NewReader(""))
 type Reader struct {
 	Attrs              ReaderObjectAttrs
 	seen, remain, size int64
-	checkCRC           bool   // should we check the CRC?
-	wantCRC            uint32 // the CRC32c value the server sent in the header
-	gotCRC             uint32 // running crc
+	checkCRC bool // Did we check the CRC? This is now only used by tests.
 
 	reader io.ReadCloser
 	ctx    context.Context
@@ -218,17 +228,17 @@ func (r *Reader) Read(p []byte) (int, error) {
 	if r.remain != -1 {
 		r.remain -= int64(n)
 	}
-	if r.checkCRC {
-		r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n])
-		// Check CRC here. It would be natural to check it in Close, but
-		// everybody defers Close on the assumption that it doesn't return
-		// anything worth looking at.
-		if err == io.EOF {
-			if r.gotCRC != r.wantCRC {
-				return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d",
-					r.gotCRC, r.wantCRC)
-			}
-		}
+	return n, err
+}
+
+// WriteTo writes all the data from the Reader to w. Fulfills the io.WriterTo interface.
+// This is called implicitly when calling io.Copy on a Reader.
+func (r *Reader) WriteTo(w io.Writer) (int64, error) {
+	// This implicitly calls r.reader.WriteTo for gRPC only. JSON and XML don't have an
+	// implementation of WriteTo.
+	n, err := io.Copy(w, r.reader)
+	if r.remain != -1 {
+		r.remain -= int64(n)
+	}
 	return n, err
 }
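The new `Reader.WriteTo` means `io.Copy` from a storage `Reader` now delegates to the transport's own `WriteTo` where one exists (gRPC only, per the comment above). Callers do not need to change anything; a sketch of a download that picks this path up implicitly (bucket, object, and file names are placeholders):

```go
package main

import (
	"context"
	"io"
	"log"
	"os"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	rc, err := client.Bucket("my-bucket").Object("my-object").NewReader(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	f, err := os.Create("my-object.bin")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// io.Copy detects io.WriterTo and calls rc.WriteTo(f) under the hood.
	if _, err := io.Copy(f, rc); err != nil {
		log.Fatal(err)
	}
}
```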
Hostname string } @@ -743,7 +746,7 @@ func signedURLV4(bucket, name string, opts *SignedURLOptions, now time.Time) (st } var headersWithValue []string - headersWithValue = append(headersWithValue, "host:"+u.Host) + headersWithValue = append(headersWithValue, "host:"+u.Hostname()) headersWithValue = append(headersWithValue, opts.Headers...) if opts.ContentType != "" { headersWithValue = append(headersWithValue, "content-type:"+opts.ContentType) @@ -890,6 +893,7 @@ type ObjectHandle struct { readCompressed bool // Accept-Encoding: gzip retry *retryConfig overrideRetention *bool + softDeleted bool } // ACL provides access to the object's access control list. @@ -944,7 +948,7 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error return nil, err } opts := makeStorageOpts(true, o.retry, o.userProject) - return o.c.tc.GetObject(ctx, o.bucket, o.object, o.gen, o.encryptionKey, o.conds, opts...) + return o.c.tc.GetObject(ctx, &getObjectParams{o.bucket, o.object, o.gen, o.encryptionKey, o.conds, o.softDeleted}, opts...) } // Update updates an object with the provided attributes. See @@ -967,7 +971,8 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) ( gen: o.gen, encryptionKey: o.encryptionKey, conds: o.conds, - overrideRetention: o.overrideRetention}, opts...) + overrideRetention: o.overrideRetention, + }, opts...) } // BucketName returns the name of the bucket. @@ -1049,6 +1054,50 @@ func (o *ObjectHandle) OverrideUnlockedRetention(override bool) *ObjectHandle { return &o2 } +// SoftDeleted returns an object handle that can be used to get an object that +// has been soft deleted. To get a soft deleted object, the generation must be +// set on the object using ObjectHandle.Generation. +// Note that an error will be returned if a live object is queried using this. +func (o *ObjectHandle) SoftDeleted() *ObjectHandle { + o2 := *o + o2.softDeleted = true + return &o2 +} + +// RestoreOptions allows you to set options when restoring an object. +type RestoreOptions struct { + /// CopySourceACL indicates whether the restored object should copy the + // access controls of the source object. Only valid for buckets with + // fine-grained access. If uniform bucket-level access is enabled, setting + // CopySourceACL will cause an error. + CopySourceACL bool +} + +// Restore will restore a soft-deleted object to a live object. +// Note that you must specify a generation to use this method. +func (o *ObjectHandle) Restore(ctx context.Context, opts *RestoreOptions) (*ObjectAttrs, error) { + if err := o.validate(); err != nil { + return nil, err + } + + // Since the generation is required by restore calls, we set the default to + // 0 instead of a negative value, which returns a more descriptive error. + gen := o.gen + if o.gen == defaultGen { + gen = 0 + } + + // Restore is always idempotent because Generation is a required param. + sOpts := makeStorageOpts(true, o.retry, o.userProject) + return o.c.tc.RestoreObject(ctx, &restoreObjectParams{ + bucket: o.bucket, + object: o.object, + gen: gen, + conds: o.conds, + copySourceACL: opts.CopySourceACL, + }, sOpts...) +} + // NewWriter returns a storage Writer that writes to the GCS object // associated with this ObjectHandle. // @@ -1088,6 +1137,10 @@ func (o *ObjectHandle) validate() error { if !utf8.ValidString(o.object) { return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object) } + // Names . and .. 
are not valid; see https://cloud.google.com/storage/docs/objects#naming + if o.object == "." || o.object == ".." { + return fmt.Errorf("storage: object name %q is not valid", o.object) + } return nil } @@ -1378,6 +1431,21 @@ type ObjectAttrs struct { // Retention contains the retention configuration for this object. // ObjectRetention cannot be configured or reported through the gRPC API. Retention *ObjectRetention + + // SoftDeleteTime is the time when the object became soft-deleted. + // Soft-deleted objects are only accessible on an object handle returned by + // ObjectHandle.SoftDeleted; if ObjectHandle.SoftDeleted has not been set, + // ObjectHandle.Attrs will return ErrObjectNotExist if the object is soft-deleted. + // This field is read-only. + SoftDeleteTime time.Time + + // HardDeleteTime is the time when the object will be permanently deleted. + // Only set when an object becomes soft-deleted with a soft delete policy. + // Soft-deleted objects are only accessible on an object handle returned by + // ObjectHandle.SoftDeleted; if ObjectHandle.SoftDeleted has not been set, + // ObjectHandle.Attrs will return ErrObjectNotExist if the object is soft-deleted. + // This field is read-only. + HardDeleteTime time.Time } // ObjectRetention contains the retention configuration for this object. @@ -1482,6 +1550,8 @@ func newObject(o *raw.Object) *ObjectAttrs { CustomTime: convertTime(o.CustomTime), ComponentCount: o.ComponentCount, Retention: toObjectRetention(o.Retention), + SoftDeleteTime: convertTime(o.SoftDeleteTime), + HardDeleteTime: convertTime(o.HardDeleteTime), } } @@ -1517,6 +1587,8 @@ func newObjectFromProto(o *storagepb.Object) *ObjectAttrs { Updated: convertProtoTime(o.GetUpdateTime()), CustomTime: convertProtoTime(o.GetCustomTime()), ComponentCount: int64(o.ComponentCount), + SoftDeleteTime: convertProtoTime(o.GetSoftDeleteTime()), + HardDeleteTime: convertProtoTime(o.GetHardDeleteTime()), } } @@ -1620,6 +1692,16 @@ type Query struct { // for syntax details. When Delimiter is set in conjunction with MatchGlob, // it must be set to /. MatchGlob string + + // IncludeFoldersAsPrefixes includes Folders and Managed Folders in the set of + // prefixes returned by the query. Only applicable if Delimiter is set to /. + // IncludeFoldersAsPrefixes is not yet implemented in the gRPC API. + IncludeFoldersAsPrefixes bool + + // SoftDeleted indicates whether to list soft-deleted objects. + // If true, only objects that have been soft-deleted will be listed. + // By default, soft-deleted objects are not listed. + SoftDeleted bool } // attrToFieldMap maps the field names of ObjectAttrs to the underlying field @@ -1655,6 +1737,8 @@ var attrToFieldMap = map[string]string{ "CustomTime": "customTime", "ComponentCount": "componentCount", "Retention": "retention", + "HardDeleteTime": "hardDeleteTime", + "SoftDeleteTime": "softDeleteTime", } // attrToProtoFieldMap maps the field names of ObjectAttrs to the underlying field @@ -1687,6 +1771,8 @@ var attrToProtoFieldMap = map[string]string{ "CustomerKeySHA256": "customer_encryption", "CustomTime": "custom_time", "ComponentCount": "component_count", + "HardDeleteTime": "hard_delete_time", + "SoftDeleteTime": "soft_delete_time", // MediaLink was explicitly excluded from the proto as it is an HTTP-ism. 
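The soft-delete surface added above (SoftDeleted handles, Restore, Query.SoftDeleted, and the new SoftDeleteTime/HardDeleteTime attributes) composes roughly as follows; a sketch assuming a hypothetical bucket, listing soft-deleted objects and restoring each by generation. (Query.IncludeFoldersAsPrefixes is the analogous opt-in for folder prefixes when Delimiter is "/".)

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	bkt := client.Bucket("my-bucket") // hypothetical bucket

	// List only soft-deleted objects. Restore requires a generation, so take
	// it from the listed attributes.
	it := bkt.Objects(ctx, &storage.Query{SoftDeleted: true})
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s (soft-deleted %s, purged %s)\n",
			attrs.Name, attrs.SoftDeleteTime, attrs.HardDeleteTime)

		restored, err := bkt.Object(attrs.Name).
			Generation(attrs.Generation).
			Restore(ctx, &storage.RestoreOptions{CopySourceACL: false})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("restored:", restored.Name)
	}
}
```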
// "MediaLink": "mediaLink", // TODO: add object retention - b/308194853 @@ -2076,6 +2162,26 @@ func (wb *withBackoff) apply(config *retryConfig) { config.backoff = &wb.backoff } +// WithMaxAttempts configures the maximum number of times an API call can be made +// in the case of retryable errors. +// For example, if you set WithMaxAttempts(5), the operation will be attempted up to 5 +// times total (initial call plus 4 retries). +// Without this setting, operations will continue retrying indefinitely +// until either the context is canceled or a deadline is reached. +func WithMaxAttempts(maxAttempts int) RetryOption { + return &withMaxAttempts{ + maxAttempts: maxAttempts, + } +} + +type withMaxAttempts struct { + maxAttempts int +} + +func (wb *withMaxAttempts) apply(config *retryConfig) { + config.maxAttempts = &wb.maxAttempts +} + // RetryPolicy describes the available policies for which operations should be // retried. The default is `RetryIdempotent`. type RetryPolicy int @@ -2148,6 +2254,7 @@ type retryConfig struct { backoff *gax.Backoff policy RetryPolicy shouldRetry func(err error) bool + maxAttempts *int } func (r *retryConfig) clone() *retryConfig { @@ -2168,6 +2275,7 @@ func (r *retryConfig) clone() *retryConfig { backoff: bo, policy: r.policy, shouldRetry: r.shouldRetry, + maxAttempts: r.maxAttempts, } } @@ -2245,7 +2353,6 @@ func toProtoChecksums(sendCRC32C bool, attrs *ObjectAttrs) *storagepb.ObjectChec func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) { o := makeStorageOpts(true, c.retry, "") return c.tc.GetServiceAccount(ctx, projectID, o...) - } // bucketResourceName formats the given project ID and bucketResourceName ID diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go index aeb7ed41..43a0f0d1 100644 --- a/vendor/cloud.google.com/go/storage/writer.go +++ b/vendor/cloud.google.com/go/storage/writer.go @@ -88,6 +88,11 @@ type Writer struct { // cancellation. ChunkRetryDeadline time.Duration + // ForceEmptyContentType is an optional parameter that is used to disable + // auto-detection of Content-Type. By default, if a blank Content-Type + // is provided, then gax.DetermineContentType is called to sniff the type. + ForceEmptyContentType bool + // ProgressFunc can be used to monitor the progress of a large write // operation. 
If ProgressFunc is not nil and writing requires multiple // calls to the underlying service (see @@ -180,18 +185,19 @@ func (w *Writer) openWriter() (err error) { isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist == true) opts := makeStorageOpts(isIdempotent, w.o.retry, w.o.userProject) params := &openWriterParams{ - ctx: w.ctx, - chunkSize: w.ChunkSize, - chunkRetryDeadline: w.ChunkRetryDeadline, - bucket: w.o.bucket, - attrs: &w.ObjectAttrs, - conds: w.o.conds, - encryptionKey: w.o.encryptionKey, - sendCRC32C: w.SendCRC32C, - donec: w.donec, - setError: w.error, - progress: w.progress, - setObj: func(o *ObjectAttrs) { w.obj = o }, + ctx: w.ctx, + chunkSize: w.ChunkSize, + chunkRetryDeadline: w.ChunkRetryDeadline, + bucket: w.o.bucket, + attrs: &w.ObjectAttrs, + conds: w.o.conds, + encryptionKey: w.o.encryptionKey, + sendCRC32C: w.SendCRC32C, + donec: w.donec, + setError: w.error, + progress: w.progress, + setObj: func(o *ObjectAttrs) { w.obj = o }, + forceEmptyContentType: w.ForceEmptyContentType, } if err := w.ctx.Err(); err != nil { return err // short-circuit diff --git a/vendor/github.com/imdario/mergo/.deepsource.toml b/vendor/dario.cat/mergo/.deepsource.toml similarity index 72% rename from vendor/github.com/imdario/mergo/.deepsource.toml rename to vendor/dario.cat/mergo/.deepsource.toml index 8a0681af..a8bc979e 100644 --- a/vendor/github.com/imdario/mergo/.deepsource.toml +++ b/vendor/dario.cat/mergo/.deepsource.toml @@ -9,4 +9,4 @@ name = "go" enabled = true [analyzers.meta] - import_path = "github.com/imdario/mergo" \ No newline at end of file + import_path = "dario.cat/mergo" \ No newline at end of file diff --git a/vendor/github.com/imdario/mergo/.gitignore b/vendor/dario.cat/mergo/.gitignore similarity index 100% rename from vendor/github.com/imdario/mergo/.gitignore rename to vendor/dario.cat/mergo/.gitignore diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/dario.cat/mergo/.travis.yml similarity index 88% rename from vendor/github.com/imdario/mergo/.travis.yml rename to vendor/dario.cat/mergo/.travis.yml index dad29725..d324c43b 100644 --- a/vendor/github.com/imdario/mergo/.travis.yml +++ b/vendor/dario.cat/mergo/.travis.yml @@ -1,4 +1,7 @@ language: go +arch: + - amd64 + - ppc64le install: - go get -t - go get golang.org/x/tools/cmd/cover diff --git a/vendor/github.com/imdario/mergo/BUILD.bazel b/vendor/dario.cat/mergo/BUILD.bazel similarity index 66% rename from vendor/github.com/imdario/mergo/BUILD.bazel rename to vendor/dario.cat/mergo/BUILD.bazel index c8d591d9..6d30f9dd 100644 --- a/vendor/github.com/imdario/mergo/BUILD.bazel +++ b/vendor/dario.cat/mergo/BUILD.bazel @@ -8,7 +8,7 @@ go_library( "merge.go", "mergo.go", ], - importmap = "peridot.resf.org/vendor/github.com/imdario/mergo", - importpath = "github.com/imdario/mergo", + importmap = "peridot.resf.org/vendor/dario.cat/mergo", + importpath = "dario.cat/mergo", visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/dario.cat/mergo/CODE_OF_CONDUCT.md similarity index 100% rename from vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md rename to vendor/dario.cat/mergo/CODE_OF_CONDUCT.md diff --git a/vendor/dario.cat/mergo/CONTRIBUTING.md b/vendor/dario.cat/mergo/CONTRIBUTING.md new file mode 100644 index 00000000..0a1ff9f9 --- /dev/null +++ b/vendor/dario.cat/mergo/CONTRIBUTING.md @@ -0,0 +1,112 @@ + +# Contributing to mergo + +First off, thanks for taking the time to contribute! 
❤️ + +All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉 + +> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about: +> - Star the project +> - Tweet about it +> - Refer this project in your project's readme +> - Mention the project at local meetups and tell your friends/colleagues + + +## Table of Contents + +- [Code of Conduct](#code-of-conduct) +- [I Have a Question](#i-have-a-question) +- [I Want To Contribute](#i-want-to-contribute) +- [Reporting Bugs](#reporting-bugs) +- [Suggesting Enhancements](#suggesting-enhancements) + +## Code of Conduct + +This project and everyone participating in it are governed by the +[mergo Code of Conduct](https://github.com/imdario/mergo/blob/master/CODE_OF_CONDUCT.md). +By participating, you are expected to uphold this code. Please report unacceptable behavior +to <>. + + +## I Have a Question + +> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo). + +Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first. + +If you then still feel the need to ask a question and need clarification, we recommend the following: + +- Open an [Issue](https://github.com/imdario/mergo/issues/new). +- Provide as much context as you can about what you're running into. +- Provide project and platform versions (nodejs, npm, etc.), depending on what seems relevant. + +We will then take care of the issue as soon as possible. + +## I Want To Contribute + +> ### Legal Notice +> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license. + +### Reporting Bugs + + +#### Before Submitting a Bug Report + +A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible. + +- Make sure that you are using the latest version. +- Determine if your bug is really a bug and not an error on your side, e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)). +- To see if other users have experienced (and potentially already solved) the same issue you are having, check whether a bug report already exists for your bug or error in the [bug tracker](https://github.com/imdario/mergo/issues?q=label%3Abug).
+- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue. +- Collect information about the bug: +- Stack trace (Traceback) +- OS, Platform and Version (Windows, Linux, macOS, x86, ARM) +- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant. +- Possibly your input and the output +- Can you reliably reproduce the issue? And can you also reproduce it with older versions? + + +#### How Do I Submit a Good Bug Report? + +> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to . + + +We use GitHub issues to track bugs and errors. If you run into an issue with the project: + +- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.) +- Explain the behavior you would expect and the actual behavior. +- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case. +- Provide the information you collected in the previous section. + +Once it's filed: + +- The project team will label the issue accordingly. +- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced. +- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone. + +### Suggesting Enhancements + +This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions. + + +#### Before Submitting an Enhancement + +- Make sure that you are using the latest version. +- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration. +- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one. +- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library. + + +#### How Do I Submit a Good Enhancement Suggestion? + +Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues). + +- Use a **clear and descriptive title** for the issue to identify the suggestion. +- Provide a **step-by-step description of the suggested enhancement** in as many details as possible. 
+- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you. +- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. +- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration. + + +## Attribution +This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)! diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/dario.cat/mergo/LICENSE similarity index 100% rename from vendor/github.com/imdario/mergo/LICENSE rename to vendor/dario.cat/mergo/LICENSE diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/dario.cat/mergo/README.md similarity index 79% rename from vendor/github.com/imdario/mergo/README.md rename to vendor/dario.cat/mergo/README.md index 876abb50..7d0cf9f3 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/dario.cat/mergo/README.md @@ -1,18 +1,20 @@ # Mergo - -[![GoDoc][3]][4] [![GitHub release][5]][6] [![GoCard][7]][8] -[![Build Status][1]][2] -[![Coverage Status][9]][10] +[![Test status][1]][2] +[![OpenSSF Scorecard][21]][22] +[![OpenSSF Best Practices][19]][20] +[![Coverage status][9]][10] [![Sourcegraph][11]][12] -[![FOSSA Status][13]][14] +[![FOSSA status][13]][14] -[![GoCenter Kudos][15]][16] +[![GoDoc][3]][4] +[![Become my sponsor][15]][16] +[![Tidelift][17]][18] -[1]: https://travis-ci.org/imdario/mergo.png -[2]: https://travis-ci.org/imdario/mergo +[1]: https://github.com/imdario/mergo/workflows/tests/badge.svg?branch=master +[2]: https://github.com/imdario/mergo/actions/workflows/tests.yml [3]: https://godoc.org/github.com/imdario/mergo?status.svg [4]: https://godoc.org/github.com/imdario/mergo [5]: https://img.shields.io/github/release/imdario/mergo.svg @@ -25,8 +27,14 @@ [12]: https://sourcegraph.com/github.com/imdario/mergo?badge [13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield [14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield -[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo -[16]: https://search.gocenter.io/github.com/imdario/mergo +[15]: https://img.shields.io/github/sponsors/imdario +[16]: https://github.com/sponsors/imdario +[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo +[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo +[19]: https://bestpractices.coreinfrastructure.org/projects/7177/badge +[20]: https://bestpractices.coreinfrastructure.org/projects/7177 +[21]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo/badge +[22]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. @@ -36,24 +44,29 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the ## Status -It is ready for production use. 
[It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). +It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild). -### Important note +### Important notes -Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds suppot for go modules. +#### 1.0.0 + +In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`. + +#### 0.3.9 + +Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules. Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). +If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u dario.cat/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). ### Donations If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. 
:heart_eyes: Buy Me a Coffee at ko-fi.com -[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) -[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo) Donate using Liberapay +Become my sponsor ### Mergo in the wild @@ -97,15 +110,17 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont - [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) - [jnuthong/item_search](https://github.com/jnuthong/item_search) - [bukalapak/snowboard](https://github.com/bukalapak/snowboard) -- [janoszen/containerssh](https://github.com/janoszen/containerssh) +- [containerssh/containerssh](https://github.com/containerssh/containerssh) +- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) +- [tjpnz/structbot](https://github.com/tjpnz/structbot) ## Install - go get github.com/imdario/mergo + go get dario.cat/mergo // use in your .go code import ( - "github.com/imdario/mergo" + "dario.cat/mergo" ) ## Usage @@ -143,7 +158,7 @@ package main import ( "fmt" - "github.com/imdario/mergo" + "dario.cat/mergo" ) type Foo struct { @@ -168,7 +183,7 @@ func main() { Note: if test are failing due missing package, please execute: - go get gopkg.in/yaml.v2 + go get gopkg.in/yaml.v3 ### Transformers @@ -179,9 +194,9 @@ package main import ( "fmt" - "github.com/imdario/mergo" - "reflect" - "time" + "dario.cat/mergo" + "reflect" + "time" ) type timeTransformer struct { @@ -218,7 +233,6 @@ func main() { } ``` - ## Contact me If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) @@ -227,21 +241,8 @@ If I can help you, you have an idea or you are using Mergo in your projects, don Written by [Dario Castañé](http://dario.im). -## Top Contributors - -[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0) -[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1) -[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2) -[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3) -[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4) -[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5) -[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6) -[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7) - - ## License [BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). 
- [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/dario.cat/mergo/SECURITY.md b/vendor/dario.cat/mergo/SECURITY.md new file mode 100644 index 00000000..a5de61f7 --- /dev/null +++ b/vendor/dario.cat/mergo/SECURITY.md @@ -0,0 +1,14 @@ +# Security Policy + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| 0.3.x | :white_check_mark: | +| < 0.3 | :x: | + +## Security contact information + +To report a security vulnerability, please use the +[Tidelift security contact](https://tidelift.com/security). +Tidelift will coordinate the fix and disclosure. diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/dario.cat/mergo/doc.go similarity index 88% rename from vendor/github.com/imdario/mergo/doc.go rename to vendor/dario.cat/mergo/doc.go index fcd985f9..7d96ec05 100644 --- a/vendor/github.com/imdario/mergo/doc.go +++ b/vendor/dario.cat/mergo/doc.go @@ -8,30 +8,36 @@ A helper to merge structs and maps in Golang. Useful for configuration default v Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). -Status +# Status It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc. -Important note +# Important notes + +1.0.0 + +In 1.0.0 Mergo moves to a vanity URL `dario.cat/mergo`. + +0.3.9 Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. . Also, this version adds suppot for go modules. Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code. -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). +If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u dario.cat/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). -Install +# Install Do your usual installation procedure: - go get github.com/imdario/mergo + go get dario.cat/mergo - // use in your .go code - import ( - "github.com/imdario/mergo" - ) + // use in your .go code + import ( + "dario.cat/mergo" + ) -Usage +# Usage You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). 
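With the module renamed to its vanity path, existing call sites only need to swap the import. A small self-contained sketch against dario.cat/mergo that also exercises the WithoutDereference option added later in this patch; the Config struct and values here are illustrative only:

```go
package main

import (
	"fmt"
	"log"

	"dario.cat/mergo"
)

type Config struct {
	Host    string
	Port    int
	Verbose *bool
}

func main() {
	yes := true
	defaults := Config{Host: "localhost", Port: 8080, Verbose: &yes}
	cfg := Config{Port: 9090}

	// Fill zero-value fields of cfg from defaults. WithoutDereference keeps
	// non-nil pointers as-is instead of inspecting what they point to.
	if err := mergo.Merge(&cfg, defaults, mergo.WithoutDereference); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Host, cfg.Port, *cfg.Verbose) // localhost 9090 true
}
```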
@@ -59,7 +65,7 @@ Here is a nice example: import ( "fmt" - "github.com/imdario/mergo" + "dario.cat/mergo" ) type Foo struct { @@ -81,7 +87,7 @@ Here is a nice example: // {two 2} } -Transformers +# Transformers Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time? @@ -89,9 +95,9 @@ Transformers allow to merge specific types differently than in the default behav import ( "fmt" - "github.com/imdario/mergo" - "reflect" - "time" + "dario.cat/mergo" + "reflect" + "time" ) type timeTransformer struct { @@ -127,17 +133,16 @@ Transformers allow to merge specific types differently than in the default behav // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } } -Contact me +# Contact me If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario -About +# About Written by Dario Castañé: https://da.rio.hn -License +# License BSD 3-Clause license, as Go language. - */ package mergo diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/dario.cat/mergo/map.go similarity index 97% rename from vendor/github.com/imdario/mergo/map.go rename to vendor/dario.cat/mergo/map.go index a13a7ee4..b50d5c2a 100644 --- a/vendor/github.com/imdario/mergo/map.go +++ b/vendor/dario.cat/mergo/map.go @@ -44,7 +44,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf } } // Remember, remember... - visited[h] = &visit{addr, typ, seen} + visited[h] = &visit{typ, seen, addr} } zeroValue := reflect.Value{} switch dst.Kind() { @@ -58,7 +58,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf } fieldName := field.Name fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { + if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) { dstMap[fieldName] = src.Field(i).Interface() } } @@ -142,7 +142,7 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { func _map(dst, src interface{}, opts ...func(*Config)) error { if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument + return ErrNonPointerArgument } var ( vDst, vSrc reflect.Value diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/dario.cat/mergo/merge.go similarity index 80% rename from vendor/github.com/imdario/mergo/merge.go rename to vendor/dario.cat/mergo/merge.go index afa84a1e..0ef9b213 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/dario.cat/mergo/merge.go @@ -38,10 +38,11 @@ func isExportedComponent(field *reflect.StructField) bool { } type Config struct { + Transformers Transformers Overwrite bool + ShouldNotDereference bool AppendSlice bool TypeCheck bool - Transformers Transformers overwriteWithEmptyValue bool overwriteSliceWithEmptyValue bool sliceDeepCopy bool @@ -76,10 +77,10 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } } // Remember, remember... 
- visited[h] = &visit{addr, typ, seen} + visited[h] = &visit{typ, seen, addr} } - if config.Transformers != nil && !isEmptyValue(dst) { + if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { if fn := config.Transformers.Transformer(dst.Type()); fn != nil { err = fn(dst, src) return @@ -95,17 +96,22 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } } } else { - if (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { + if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) { dst.Set(src) } } case reflect.Map: if dst.IsNil() && !src.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) + if dst.CanSet() { + dst.Set(reflect.MakeMap(dst.Type())) + } else { + dst = src + return + } } if src.Kind() != reflect.Map { - if overwrite { + if overwrite && dst.CanSet() { dst.Set(src) } return @@ -157,7 +163,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dstSlice = reflect.ValueOf(dstElement.Interface()) } - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { if typeCheck && srcSlice.Type() != dstSlice.Type() { return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) } @@ -189,22 +195,38 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dst.SetMapIndex(key, dstSlice) } } - if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { - continue + + if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) { + if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice { + continue + } + if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map { + continue + } } - if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) { + if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) { if dst.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) } dst.SetMapIndex(key, srcElement) } } + + // Ensure that all keys in dst are deleted if they are not in src. 
+ if overwriteWithEmptySrc { + for _, key := range dst.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + dst.SetMapIndex(key, reflect.Value{}) + } + } + } case reflect.Slice: if !dst.CanSet() { break } - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { dst.Set(src) } else if config.AppendSlice { if src.Type() != dst.Type() { @@ -239,12 +261,18 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co if src.Kind() != reflect.Interface { if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { dst.Set(src) } } else if src.Kind() == reflect.Ptr { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return + if !config.ShouldNotDereference { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + } else { + if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() { + dst.Set(src) + } } } else if dst.Elem().Type() == src.Type() { if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { @@ -257,7 +285,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { dst.Set(src) } break @@ -270,7 +298,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co break } default: - mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) + mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) if mustSet { if dst.CanSet() { dst.Set(src) @@ -321,6 +349,12 @@ func WithOverrideEmptySlice(config *Config) { config.overwriteSliceWithEmptyValue = true } +// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty +// (i.e. a non-nil pointer is never considered empty). +func WithoutDereference(config *Config) { + config.ShouldNotDereference = true +} + // WithAppendSlice will make merge append slices instead of overwriting it. 
func WithAppendSlice(config *Config) { config.AppendSlice = true @@ -339,7 +373,7 @@ func WithSliceDeepCopy(config *Config) { func merge(dst, src interface{}, opts ...func(*Config)) error { if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument + return ErrNonPointerArgument } var ( vDst, vSrc reflect.Value diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/dario.cat/mergo/mergo.go similarity index 85% rename from vendor/github.com/imdario/mergo/mergo.go rename to vendor/dario.cat/mergo/mergo.go index 3cc926c7..0a721e2d 100644 --- a/vendor/github.com/imdario/mergo/mergo.go +++ b/vendor/dario.cat/mergo/mergo.go @@ -17,10 +17,10 @@ import ( var ( ErrNilArguments = errors.New("src and dst must not be nil") ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") - ErrNotSupported = errors.New("only structs and maps are supported") + ErrNotSupported = errors.New("only structs, maps, and slices are supported") ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") - ErrNonPointerAgument = errors.New("dst must be a pointer") + ErrNonPointerArgument = errors.New("dst must be a pointer") ) // During deepMerge, must keep track of checks that are @@ -28,13 +28,13 @@ var ( // checks in progress are true when it reencounters them. // Visited are stored in a map indexed by 17 * a1 + a2; type visit struct { - ptr uintptr typ reflect.Type next *visit + ptr uintptr } // From src/pkg/encoding/json/encode.go. -func isEmptyValue(v reflect.Value) bool { +func isEmptyValue(v reflect.Value, shouldDereference bool) bool { switch v.Kind() { case reflect.Array, reflect.Map, reflect.Slice, reflect.String: return v.Len() == 0 @@ -50,7 +50,10 @@ func isEmptyValue(v reflect.Value) bool { if v.IsNil() { return true } - return isEmptyValue(v.Elem()) + if shouldDereference { + return isEmptyValue(v.Elem(), shouldDereference) + } + return false case reflect.Func: return v.IsNil() case reflect.Invalid: @@ -65,7 +68,7 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { return } vDst = reflect.ValueOf(dst).Elem() - if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { + if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice { err = ErrNotSupported return } diff --git a/vendor/github.com/Microsoft/go-winio/.gitattributes b/vendor/github.com/Microsoft/go-winio/.gitattributes new file mode 100644 index 00000000..94f480de --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/.gitattributes @@ -0,0 +1 @@ +* text=auto eol=lf \ No newline at end of file diff --git a/vendor/github.com/Microsoft/go-winio/.gitignore b/vendor/github.com/Microsoft/go-winio/.gitignore new file mode 100644 index 00000000..815e2066 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/.gitignore @@ -0,0 +1,10 @@ +.vscode/ + +*.exe + +# testing +testdata + +# go workspaces +go.work +go.work.sum diff --git a/vendor/github.com/Microsoft/go-winio/.golangci.yml b/vendor/github.com/Microsoft/go-winio/.golangci.yml new file mode 100644 index 00000000..faedfe93 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/.golangci.yml @@ -0,0 +1,147 @@ +linters: + enable: + # style + - containedctx # struct contains a context + - dupl # duplicate code + - errname # erorrs are named correctly + - nolintlint # "//nolint" directives are properly explained + - revive # golint replacement + - unconvert # unnecessary 
conversions + - wastedassign + + # bugs, performance, unused, etc ... + - contextcheck # function uses a non-inherited context + - errorlint # errors not wrapped for 1.13 + - exhaustive # check exhaustiveness of enum switch statements + - gofmt # files are gofmt'ed + - gosec # security + - nilerr # returns nil even with non-nil error + - thelper # test helpers without t.Helper() + - unparam # unused function params + +issues: + exclude-dirs: + - pkg/etw/sample + + exclude-rules: + # err is very often shadowed in nested scopes + - linters: + - govet + text: '^shadow: declaration of "err" shadows declaration' + + # ignore long lines for skip autogen directives + - linters: + - revive + text: "^line-length-limit: " + source: "^//(go:generate|sys) " + + #TODO: remove after upgrading to go1.18 + # ignore comment spacing for nolint and sys directives + - linters: + - revive + text: "^comment-spacings: no space between comment delimiter and comment text" + source: "//(cspell:|nolint:|sys |todo)" + + # not on go 1.18 yet, so no any + - linters: + - revive + text: "^use-any: since GO 1.18 'interface{}' can be replaced by 'any'" + + # allow unjustified ignores of error checks in defer statements + - linters: + - nolintlint + text: "^directive `//nolint:errcheck` should provide explanation" + source: '^\s*defer ' + + # allow unjustified ignores of error lints for io.EOF + - linters: + - nolintlint + text: "^directive `//nolint:errorlint` should provide explanation" + source: '[=|!]= io.EOF' + + +linters-settings: + exhaustive: + default-signifies-exhaustive: true + govet: + enable-all: true + disable: + # struct order is often for Win32 compat + # also, ignore pointer bytes/GC issues for now until performance becomes an issue + - fieldalignment + nolintlint: + require-explanation: true + require-specific: true + revive: + # revive is more configurable than static check, so likely the preferred alternative to static-check + # (once the perf issue is solved: https://github.com/golangci/golangci-lint/issues/2997) + enable-all-rules: + true + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md + rules: + # rules with required arguments + - name: argument-limit + disabled: true + - name: banned-characters + disabled: true + - name: cognitive-complexity + disabled: true + - name: cyclomatic + disabled: true + - name: file-header + disabled: true + - name: function-length + disabled: true + - name: function-result-limit + disabled: true + - name: max-public-structs + disabled: true + # geneally annoying rules + - name: add-constant # complains about any and all strings and integers + disabled: true + - name: confusing-naming # we frequently use "Foo()" and "foo()" together + disabled: true + - name: flag-parameter # excessive, and a common idiom we use + disabled: true + - name: unhandled-error # warns over common fmt.Print* and io.Close; rely on errcheck instead + disabled: true + # general config + - name: line-length-limit + arguments: + - 140 + - name: var-naming + arguments: + - [] + - - CID + - CRI + - CTRD + - DACL + - DLL + - DOS + - ETW + - FSCTL + - GCS + - GMSA + - HCS + - HV + - IO + - LCOW + - LDAP + - LPAC + - LTSC + - MMIO + - NT + - OCI + - PMEM + - PWSH + - RX + - SACl + - SID + - SMB + - TX + - VHD + - VHDX + - VMID + - VPCI + - WCOW + - WIM diff --git a/vendor/github.com/Microsoft/go-winio/BUILD.bazel b/vendor/github.com/Microsoft/go-winio/BUILD.bazel new file mode 100644 index 00000000..fefad41b --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/BUILD.bazel @@ 
-0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go-winio", + srcs = [ + "backup.go", + "doc.go", + "ea.go", + "file.go", + "fileinfo.go", + "hvsock.go", + "pipe.go", + "privilege.go", + "reparse.go", + "sd.go", + "syscall.go", + "zsyscall_windows.go", + ], + importmap = "peridot.resf.org/vendor/github.com/Microsoft/go-winio", + importpath = "github.com/Microsoft/go-winio", + visibility = ["//visibility:public"], + deps = select({ + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/github.com/Microsoft/go-winio/internal/fs", + "//vendor/github.com/Microsoft/go-winio/internal/socket", + "//vendor/github.com/Microsoft/go-winio/pkg/guid", + "//vendor/golang.org/x/sys/windows", + ], + "//conditions:default": [], + }), +) diff --git a/vendor/github.com/Microsoft/go-winio/CODEOWNERS b/vendor/github.com/Microsoft/go-winio/CODEOWNERS new file mode 100644 index 00000000..ae1b4942 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/CODEOWNERS @@ -0,0 +1 @@ + * @microsoft/containerplat diff --git a/vendor/github.com/spf13/jwalterweatherman/LICENSE b/vendor/github.com/Microsoft/go-winio/LICENSE similarity index 96% rename from vendor/github.com/spf13/jwalterweatherman/LICENSE rename to vendor/github.com/Microsoft/go-winio/LICENSE index 4527efb9..b8b569d7 100644 --- a/vendor/github.com/spf13/jwalterweatherman/LICENSE +++ b/vendor/github.com/Microsoft/go-winio/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2014 Steve Francia +Copyright (c) 2015 Microsoft Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -18,4 +18,5 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file +SOFTWARE. + diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md new file mode 100644 index 00000000..7474b4f0 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/README.md @@ -0,0 +1,89 @@ +# go-winio [![Build Status](https://github.com/microsoft/go-winio/actions/workflows/ci.yml/badge.svg)](https://github.com/microsoft/go-winio/actions/workflows/ci.yml) + +This repository contains utilities for efficiently performing Win32 IO operations in +Go. Currently, this is focused on accessing named pipes and other file handles, and +for using named pipes as a net transport. + +This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go +to reuse the thread to schedule another goroutine. This limits support to Windows Vista and +newer operating systems. This is similar to the implementation of network sockets in Go's net +package. + +Please see the LICENSE file for licensing information. + +## Contributing + +This project welcomes contributions and suggestions. +Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that +you have the right to, and actually do, grant us the rights to use your contribution. +For details, visit [Microsoft CLA](https://cla.microsoft.com). + +When you submit a pull request, a CLA-bot will automatically determine whether you need to +provide a CLA and decorate the PR appropriately (e.g., label, comment). 
+Simply follow the instructions provided by the bot. +You will only need to do this once across all repos using our CLA. + +Additionally, the pull request pipeline requires the following steps to be performed before +merging. + +### Code Sign-Off + +We require that contributors sign their commits using [`git commit --signoff`][git-commit-s] +to certify they either authored the work themselves or otherwise have permission to use it in this project. + +A range of commits can be signed off using [`git rebase --signoff`][git-rebase-s]. + +Please see [the developer certificate](https://developercertificate.org) for more info, +as well as to make sure that you can attest to the rules listed. +Our CI uses the DCO GitHub app to ensure that all commits in a given PR are signed-off. + +### Linting + +Code must pass a linting stage, which uses [`golangci-lint`][lint]. +The linting settings are stored in [`.golangci.yaml`](./.golangci.yaml), and the linter can be run +automatically with VSCode by adding the following to your workspace or folder settings: + +```json + "go.lintTool": "golangci-lint", + "go.lintOnSave": "package", +``` + +Additional editor [integration options are also available][lint-ide]. + +Alternatively, `golangci-lint` can be [installed locally][lint-install] and run from the repo root: + +```shell +# use . or specify a path to only lint a package +# to show all lint errors, use flags "--max-issues-per-linter=0 --max-same-issues=0" +> golangci-lint run ./... +``` + +### Go Generate + +The pipeline checks that code generated via `go generate` is up to date. + +This can be done for the entire repo: + +```shell +> go generate ./... +``` + +## Code of Conduct + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or +contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + +## Special Thanks + +Thanks to [natefinch][natefinch] for the inspiration for this library. +See [npipe](https://github.com/natefinch/npipe) for another named pipe implementation. + +[lint]: https://golangci-lint.run/ +[lint-ide]: https://golangci-lint.run/usage/integrations/#editor-integration +[lint-install]: https://golangci-lint.run/usage/install/#local-installation + +[git-commit-s]: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s +[git-rebase-s]: https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---signoff + +[natefinch]: https://github.com/natefinch diff --git a/vendor/github.com/Microsoft/go-winio/SECURITY.md b/vendor/github.com/Microsoft/go-winio/SECURITY.md new file mode 100644 index 00000000..869fdfe2 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/SECURITY.md @@ -0,0 +1,41 @@ + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
+ +If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). + +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) + * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). + + diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go new file mode 100644 index 00000000..b54341da --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backup.go @@ -0,0 +1,287 @@ +//go:build windows +// +build windows + +package winio + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "os" + "runtime" + "unicode/utf16" + + "github.com/Microsoft/go-winio/internal/fs" + "golang.org/x/sys/windows" +) + +//sys backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead +//sys backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite + +const ( + BackupData = uint32(iota + 1) + BackupEaData + BackupSecurity + BackupAlternateData + BackupLink + BackupPropertyData + BackupObjectId //revive:disable-line:var-naming ID, not Id + BackupReparseData + BackupSparseBlock + BackupTxfsData +) + +const ( + StreamSparseAttributes = uint32(8) +) + +//nolint:revive // var-naming: ALL_CAPS +const ( + WRITE_DAC = windows.WRITE_DAC + WRITE_OWNER = windows.WRITE_OWNER + ACCESS_SYSTEM_SECURITY = windows.ACCESS_SYSTEM_SECURITY +) + +// BackupHeader represents a backup stream of a file. 
+type BackupHeader struct { + //revive:disable-next-line:var-naming ID, not Id + Id uint32 // The backup stream ID + Attributes uint32 // Stream attributes + Size int64 // The size of the stream in bytes + Name string // The name of the stream (for BackupAlternateData only). + Offset int64 // The offset of the stream in the file (for BackupSparseBlock only). +} + +type win32StreamID struct { + StreamID uint32 + Attributes uint32 + Size uint64 + NameSize uint32 +} + +// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series +// of BackupHeader values. +type BackupStreamReader struct { + r io.Reader + bytesLeft int64 +} + +// NewBackupStreamReader produces a BackupStreamReader from any io.Reader. +func NewBackupStreamReader(r io.Reader) *BackupStreamReader { + return &BackupStreamReader{r, 0} +} + +// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if +// it was not completely read. +func (r *BackupStreamReader) Next() (*BackupHeader, error) { + if r.bytesLeft > 0 { //nolint:nestif // todo: flatten this + if s, ok := r.r.(io.Seeker); ok { + // Make sure Seek on io.SeekCurrent sometimes succeeds + // before trying the actual seek. + if _, err := s.Seek(0, io.SeekCurrent); err == nil { + if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil { + return nil, err + } + r.bytesLeft = 0 + } + } + if _, err := io.Copy(io.Discard, r); err != nil { + return nil, err + } + } + var wsi win32StreamID + if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { + return nil, err + } + hdr := &BackupHeader{ + Id: wsi.StreamID, + Attributes: wsi.Attributes, + Size: int64(wsi.Size), + } + if wsi.NameSize != 0 { + name := make([]uint16, int(wsi.NameSize/2)) + if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { + return nil, err + } + hdr.Name = windows.UTF16ToString(name) + } + if wsi.StreamID == BackupSparseBlock { + if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { + return nil, err + } + hdr.Size -= 8 + } + r.bytesLeft = hdr.Size + return hdr, nil +} + +// Read reads from the current backup stream. +func (r *BackupStreamReader) Read(b []byte) (int, error) { + if r.bytesLeft == 0 { + return 0, io.EOF + } + if int64(len(b)) > r.bytesLeft { + b = b[:r.bytesLeft] + } + n, err := r.r.Read(b) + r.bytesLeft -= int64(n) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } else if r.bytesLeft == 0 && err == nil { + err = io.EOF + } + return n, err +} + +// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API. +type BackupStreamWriter struct { + w io.Writer + bytesLeft int64 +} + +// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer. +func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter { + return &BackupStreamWriter{w, 0} +} + +// WriteHeader writes the next backup stream header and prepares for calls to Write(). 
+func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error { + if w.bytesLeft != 0 { + return fmt.Errorf("missing %d bytes", w.bytesLeft) + } + name := utf16.Encode([]rune(hdr.Name)) + wsi := win32StreamID{ + StreamID: hdr.Id, + Attributes: hdr.Attributes, + Size: uint64(hdr.Size), + NameSize: uint32(len(name) * 2), + } + if hdr.Id == BackupSparseBlock { + // Include space for the int64 block offset + wsi.Size += 8 + } + if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil { + return err + } + if len(name) != 0 { + if err := binary.Write(w.w, binary.LittleEndian, name); err != nil { + return err + } + } + if hdr.Id == BackupSparseBlock { + if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil { + return err + } + } + w.bytesLeft = hdr.Size + return nil +} + +// Write writes to the current backup stream. +func (w *BackupStreamWriter) Write(b []byte) (int, error) { + if w.bytesLeft < int64(len(b)) { + return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft) + } + n, err := w.w.Write(b) + w.bytesLeft -= int64(n) + return n, err +} + +// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API. +type BackupFileReader struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true, +// Read will attempt to read the security descriptor of the file. +func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { + r := &BackupFileReader{f, includeSecurity, 0} + return r +} + +// Read reads a backup stream from the file by calling the Win32 API BackupRead(). +func (r *BackupFileReader) Read(b []byte) (int, error) { + var bytesRead uint32 + err := backupRead(windows.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) + if err != nil { + return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err} + } + runtime.KeepAlive(r.f) + if bytesRead == 0 { + return 0, io.EOF + } + return int(bytesRead), nil +} + +// Close frees Win32 resources associated with the BackupFileReader. It does not close +// the underlying file. +func (r *BackupFileReader) Close() error { + if r.ctx != 0 { + _ = backupRead(windows.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) + runtime.KeepAlive(r.f) + r.ctx = 0 + } + return nil +} + +// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API. +type BackupFileWriter struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true, +// Write() will attempt to restore the security descriptor from the stream. +func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter { + w := &BackupFileWriter{f, includeSecurity, 0} + return w +} + +// Write restores a portion of the file using the provided backup stream. +func (w *BackupFileWriter) Write(b []byte) (int, error) { + var bytesWritten uint32 + err := backupWrite(windows.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) + if err != nil { + return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err} + } + runtime.KeepAlive(w.f) + if int(bytesWritten) != len(b) { + return int(bytesWritten), errors.New("not all bytes could be written") + } + return len(b), nil +} + +// Close frees Win32 resources associated with the BackupFileWriter. It does not +// close the underlying file. 
+func (w *BackupFileWriter) Close() error {
+ if w.ctx != 0 {
+ _ = backupWrite(windows.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
+ runtime.KeepAlive(w.f)
+ w.ctx = 0
+ }
+ return nil
+}
+
+// OpenForBackup opens a file or directory, potentially skipping access checks if the backup
+// or restore privileges have been acquired.
+//
+// If the file opened was a directory, it cannot be used with Readdir().
+func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) {
+ h, err := fs.CreateFile(path,
+ fs.AccessMask(access),
+ fs.FileShareMode(share),
+ nil,
+ fs.FileCreationDisposition(createmode),
+ fs.FILE_FLAG_BACKUP_SEMANTICS|fs.FILE_FLAG_OPEN_REPARSE_POINT,
+ 0,
+ )
+ if err != nil {
+ err = &os.PathError{Op: "open", Path: path, Err: err}
+ return nil, err
+ }
+ return os.NewFile(uintptr(h), path), nil
+}
diff --git a/vendor/github.com/Microsoft/go-winio/doc.go b/vendor/github.com/Microsoft/go-winio/doc.go
new file mode 100644
index 00000000..1f5bfe2d
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/doc.go
@@ -0,0 +1,22 @@
+// This package provides utilities for efficiently performing Win32 IO operations in Go.
+// Currently, this package provides support for general IO and management of
+// - named pipes
+// - files
+// - [Hyper-V sockets]
+//
+// This code is similar to Go's [net] package, and uses IO completion ports to avoid
+// blocking IO on system threads, allowing Go to reuse the thread to schedule other goroutines.
+//
+// This limits support to Windows Vista and newer operating systems.
+//
+// Additionally, this package provides support for:
+// - creating and managing GUIDs
+// - writing to [ETW]
+// - opening and managing VHDs
+// - parsing [Windows Image files]
+// - auto-generating Win32 API code
+//
+// [Hyper-V sockets]: https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service
+// [ETW]: https://docs.microsoft.com/en-us/windows-hardware/drivers/devtest/event-tracing-for-windows--etw-
+// [Windows Image files]: https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/work-with-windows-images
+package winio
diff --git a/vendor/github.com/Microsoft/go-winio/ea.go b/vendor/github.com/Microsoft/go-winio/ea.go
new file mode 100644
index 00000000..e104dbdf
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/ea.go
@@ -0,0 +1,137 @@
+package winio
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+)
+
+type fileFullEaInformation struct {
+ NextEntryOffset uint32
+ Flags uint8
+ NameLength uint8
+ ValueLength uint16
+}
+
+var (
+ fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})
+
+ errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
+ errEaNameTooLarge = errors.New("extended attribute name too large")
+ errEaValueTooLarge = errors.New("extended attribute value too large")
+)
+
+// ExtendedAttribute represents a single Windows EA.
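+// An EA is a name/value pair attached to a file. On the wire it is encoded as a
+// FILE_FULL_EA_INFORMATION entry; the Decode/Encode helpers in this file convert
+// between that buffer layout and this struct.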
+type ExtendedAttribute struct { + Name string + Value []byte + Flags uint8 +} + +func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { + var info fileFullEaInformation + err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) + if err != nil { + err = errInvalidEaBuffer + return ea, nb, err + } + + nameOffset := fileFullEaInformationSize + nameLen := int(info.NameLength) + valueOffset := nameOffset + int(info.NameLength) + 1 + valueLen := int(info.ValueLength) + nextOffset := int(info.NextEntryOffset) + if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { + err = errInvalidEaBuffer + return ea, nb, err + } + + ea.Name = string(b[nameOffset : nameOffset+nameLen]) + ea.Value = b[valueOffset : valueOffset+valueLen] + ea.Flags = info.Flags + if info.NextEntryOffset != 0 { + nb = b[info.NextEntryOffset:] + } + return ea, nb, err +} + +// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION +// buffer retrieved from BackupRead, ZwQueryEaFile, etc. +func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { + for len(b) != 0 { + ea, nb, err := parseEa(b) + if err != nil { + return nil, err + } + + eas = append(eas, ea) + b = nb + } + return eas, err +} + +func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { + if int(uint8(len(ea.Name))) != len(ea.Name) { + return errEaNameTooLarge + } + if int(uint16(len(ea.Value))) != len(ea.Value) { + return errEaValueTooLarge + } + entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) + withPadding := (entrySize + 3) &^ 3 + nextOffset := uint32(0) + if !last { + nextOffset = withPadding + } + info := fileFullEaInformation{ + NextEntryOffset: nextOffset, + Flags: ea.Flags, + NameLength: uint8(len(ea.Name)), + ValueLength: uint16(len(ea.Value)), + } + + err := binary.Write(buf, binary.LittleEndian, &info) + if err != nil { + return err + } + + _, err = buf.Write([]byte(ea.Name)) + if err != nil { + return err + } + + err = buf.WriteByte(0) + if err != nil { + return err + } + + _, err = buf.Write(ea.Value) + if err != nil { + return err + } + + _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) + if err != nil { + return err + } + + return nil +} + +// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION +// buffer for use with BackupWrite, ZwSetEaFile, etc. 
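+//
+// A minimal round-trip sketch (the EA name and value are illustrative):
+//
+//	buf, err := EncodeExtendedAttributes([]ExtendedAttribute{
+//		{Name: "user.example", Value: []byte("v")},
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	eas, err := DecodeExtendedAttributes(buf)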
+func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { + var buf bytes.Buffer + for i := range eas { + last := false + if i == len(eas)-1 { + last = true + } + + err := writeEa(&buf, &eas[i], last) + if err != nil { + return nil, err + } + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go new file mode 100644 index 00000000..fe82a180 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/file.go @@ -0,0 +1,320 @@ +//go:build windows +// +build windows + +package winio + +import ( + "errors" + "io" + "runtime" + "sync" + "sync/atomic" + "syscall" + "time" + + "golang.org/x/sys/windows" +) + +//sys cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) = CancelIoEx +//sys createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) = CreateIoCompletionPort +//sys getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus +//sys setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes +//sys wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult + +var ( + ErrFileClosed = errors.New("file has already been closed") + ErrTimeout = &timeoutError{} +) + +type timeoutError struct{} + +func (*timeoutError) Error() string { return "i/o timeout" } +func (*timeoutError) Timeout() bool { return true } +func (*timeoutError) Temporary() bool { return true } + +type timeoutChan chan struct{} + +var ioInitOnce sync.Once +var ioCompletionPort windows.Handle + +// ioResult contains the result of an asynchronous IO operation. +type ioResult struct { + bytes uint32 + err error +} + +// ioOperation represents an outstanding asynchronous Win32 IO. +type ioOperation struct { + o windows.Overlapped + ch chan ioResult +} + +func initIO() { + h, err := createIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff) + if err != nil { + panic(err) + } + ioCompletionPort = h + go ioCompletionProcessor(h) +} + +// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. +// It takes ownership of this handle and will close it if it is garbage collected. +type win32File struct { + handle windows.Handle + wg sync.WaitGroup + wgLock sync.RWMutex + closing atomic.Bool + socket bool + readDeadline deadlineHandler + writeDeadline deadlineHandler +} + +type deadlineHandler struct { + setLock sync.Mutex + channel timeoutChan + channelLock sync.RWMutex + timer *time.Timer + timedout atomic.Bool +} + +// makeWin32File makes a new win32File from an existing file handle. +func makeWin32File(h windows.Handle) (*win32File, error) { + f := &win32File{handle: h} + ioInitOnce.Do(initIO) + _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) + if err != nil { + return nil, err + } + err = setFileCompletionNotificationModes(h, windows.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS|windows.FILE_SKIP_SET_EVENT_ON_HANDLE) + if err != nil { + return nil, err + } + f.readDeadline.channel = make(timeoutChan) + f.writeDeadline.channel = make(timeoutChan) + return f, nil +} + +// Deprecated: use NewOpenFile instead. 
+func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { + return NewOpenFile(windows.Handle(h)) +} + +func NewOpenFile(h windows.Handle) (io.ReadWriteCloser, error) { + // If we return the result of makeWin32File directly, it can result in an + // interface-wrapped nil, rather than a nil interface value. + f, err := makeWin32File(h) + if err != nil { + return nil, err + } + return f, nil +} + +// closeHandle closes the resources associated with a Win32 handle. +func (f *win32File) closeHandle() { + f.wgLock.Lock() + // Atomically set that we are closing, releasing the resources only once. + if !f.closing.Swap(true) { + f.wgLock.Unlock() + // cancel all IO and wait for it to complete + _ = cancelIoEx(f.handle, nil) + f.wg.Wait() + // at this point, no new IO can start + windows.Close(f.handle) + f.handle = 0 + } else { + f.wgLock.Unlock() + } +} + +// Close closes a win32File. +func (f *win32File) Close() error { + f.closeHandle() + return nil +} + +// IsClosed checks if the file has been closed. +func (f *win32File) IsClosed() bool { + return f.closing.Load() +} + +// prepareIO prepares for a new IO operation. +// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. +func (f *win32File) prepareIO() (*ioOperation, error) { + f.wgLock.RLock() + if f.closing.Load() { + f.wgLock.RUnlock() + return nil, ErrFileClosed + } + f.wg.Add(1) + f.wgLock.RUnlock() + c := &ioOperation{} + c.ch = make(chan ioResult) + return c, nil +} + +// ioCompletionProcessor processes completed async IOs forever. +func ioCompletionProcessor(h windows.Handle) { + for { + var bytes uint32 + var key uintptr + var op *ioOperation + err := getQueuedCompletionStatus(h, &bytes, &key, &op, windows.INFINITE) + if op == nil { + panic(err) + } + op.ch <- ioResult{bytes, err} + } +} + +// todo: helsaawy - create an asyncIO version that takes a context + +// asyncIO processes the return value from ReadFile or WriteFile, blocking until +// the operation has actually completed. +func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { + if err != windows.ERROR_IO_PENDING { //nolint:errorlint // err is Errno + return int(bytes), err + } + + if f.closing.Load() { + _ = cancelIoEx(f.handle, &c.o) + } + + var timeout timeoutChan + if d != nil { + d.channelLock.Lock() + timeout = d.channel + d.channelLock.Unlock() + } + + var r ioResult + select { + case r = <-c.ch: + err = r.err + if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno + if f.closing.Load() { + err = ErrFileClosed + } + } else if err != nil && f.socket { + // err is from Win32. Query the overlapped structure to get the winsock error. + var bytes, flags uint32 + err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags) + } + case <-timeout: + _ = cancelIoEx(f.handle, &c.o) + r = <-c.ch + err = r.err + if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno + err = ErrTimeout + } + } + + // runtime.KeepAlive is needed, as c is passed via native + // code to ioCompletionProcessor, c must remain alive + // until the channel read is complete. + // todo: (de)allocate *ioOperation via win32 heap functions, instead of needing to KeepAlive? + runtime.KeepAlive(c) + return int(r.bytes), err +} + +// Read reads from a file handle. 
+func (f *win32File) Read(b []byte) (int, error) { + c, err := f.prepareIO() + if err != nil { + return 0, err + } + defer f.wg.Done() + + if f.readDeadline.timedout.Load() { + return 0, ErrTimeout + } + + var bytes uint32 + err = windows.ReadFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIO(c, &f.readDeadline, bytes, err) + runtime.KeepAlive(b) + + // Handle EOF conditions. + if err == nil && n == 0 && len(b) != 0 { + return 0, io.EOF + } else if err == windows.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno + return 0, io.EOF + } + return n, err +} + +// Write writes to a file handle. +func (f *win32File) Write(b []byte) (int, error) { + c, err := f.prepareIO() + if err != nil { + return 0, err + } + defer f.wg.Done() + + if f.writeDeadline.timedout.Load() { + return 0, ErrTimeout + } + + var bytes uint32 + err = windows.WriteFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIO(c, &f.writeDeadline, bytes, err) + runtime.KeepAlive(b) + return n, err +} + +func (f *win32File) SetReadDeadline(deadline time.Time) error { + return f.readDeadline.set(deadline) +} + +func (f *win32File) SetWriteDeadline(deadline time.Time) error { + return f.writeDeadline.set(deadline) +} + +func (f *win32File) Flush() error { + return windows.FlushFileBuffers(f.handle) +} + +func (f *win32File) Fd() uintptr { + return uintptr(f.handle) +} + +func (d *deadlineHandler) set(deadline time.Time) error { + d.setLock.Lock() + defer d.setLock.Unlock() + + if d.timer != nil { + if !d.timer.Stop() { + <-d.channel + } + d.timer = nil + } + d.timedout.Store(false) + + select { + case <-d.channel: + d.channelLock.Lock() + d.channel = make(chan struct{}) + d.channelLock.Unlock() + default: + } + + if deadline.IsZero() { + return nil + } + + timeoutIO := func() { + d.timedout.Store(true) + close(d.channel) + } + + now := time.Now() + duration := deadline.Sub(now) + if deadline.After(now) { + // Deadline is in the future, set a timer to wait + d.timer = time.AfterFunc(duration, timeoutIO) + } else { + // Deadline is in the past. Cancel all pending IO now. + timeoutIO() + } + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go new file mode 100644 index 00000000..c860eb99 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -0,0 +1,106 @@ +//go:build windows +// +build windows + +package winio + +import ( + "os" + "runtime" + "unsafe" + + "golang.org/x/sys/windows" +) + +// FileBasicInfo contains file access time and file attributes information. +type FileBasicInfo struct { + CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime + FileAttributes uint32 + _ uint32 // padding +} + +// alignedFileBasicInfo is a FileBasicInfo, but aligned to uint64 by containing +// uint64 rather than windows.Filetime. Filetime contains two uint32s. uint64 +// alignment is necessary to pass this as FILE_BASIC_INFO. +type alignedFileBasicInfo struct { + CreationTime, LastAccessTime, LastWriteTime, ChangeTime uint64 + FileAttributes uint32 + _ uint32 // padding +} + +// GetFileBasicInfo retrieves times and attributes for a file. 
+func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
+ bi := &alignedFileBasicInfo{}
+ if err := windows.GetFileInformationByHandleEx(
+ windows.Handle(f.Fd()),
+ windows.FileBasicInfo,
+ (*byte)(unsafe.Pointer(bi)),
+ uint32(unsafe.Sizeof(*bi)),
+ ); err != nil {
+ return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
+ }
+ runtime.KeepAlive(f)
+ // Reinterpret the alignedFileBasicInfo as a FileBasicInfo so it matches the
+ // public API of this module. The data may be unnecessarily aligned.
+ return (*FileBasicInfo)(unsafe.Pointer(bi)), nil
+}
+
+// SetFileBasicInfo sets times and attributes for a file.
+func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
+ // Create an alignedFileBasicInfo based on a FileBasicInfo. The copy is
+ // suitable to pass to GetFileInformationByHandleEx.
+ biAligned := *(*alignedFileBasicInfo)(unsafe.Pointer(bi))
+ if err := windows.SetFileInformationByHandle(
+ windows.Handle(f.Fd()),
+ windows.FileBasicInfo,
+ (*byte)(unsafe.Pointer(&biAligned)),
+ uint32(unsafe.Sizeof(biAligned)),
+ ); err != nil {
+ return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
+ }
+ runtime.KeepAlive(f)
+ return nil
+}
+
+// FileStandardInfo contains extended information for the file.
+// FILE_STANDARD_INFO in WinBase.h
+// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info
+type FileStandardInfo struct {
+ AllocationSize, EndOfFile int64
+ NumberOfLinks uint32
+ DeletePending, Directory bool
+}
+
+// GetFileStandardInfo retrieves extended information for the file.
+func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) {
+ si := &FileStandardInfo{}
+ if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()),
+ windows.FileStandardInfo,
+ (*byte)(unsafe.Pointer(si)),
+ uint32(unsafe.Sizeof(*si))); err != nil {
+ return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
+ }
+ runtime.KeepAlive(f)
+ return si, nil
+}
+
+// FileIDInfo contains the volume serial number and file ID for a file. This pair should be
+// unique on a system.
+type FileIDInfo struct {
+ VolumeSerialNumber uint64
+ FileID [16]byte
+}
+
+// GetFileID retrieves the unique (volume, file ID) pair for a file.
+func GetFileID(f *os.File) (*FileIDInfo, error) {
+ fileID := &FileIDInfo{}
+ if err := windows.GetFileInformationByHandleEx(
+ windows.Handle(f.Fd()),
+ windows.FileIdInfo,
+ (*byte)(unsafe.Pointer(fileID)),
+ uint32(unsafe.Sizeof(*fileID)),
+ ); err != nil {
+ return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
+ }
+ runtime.KeepAlive(f)
+ return fileID, nil
+}
diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go
new file mode 100644
index 00000000..c4fdd9d4
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/hvsock.go
@@ -0,0 +1,582 @@
+//go:build windows
+// +build windows
+
+package winio
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "time"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+
+ "github.com/Microsoft/go-winio/internal/socket"
+ "github.com/Microsoft/go-winio/pkg/guid"
+)
+
+const afHVSock = 34 // AF_HYPERV
+
+// Well known Service and VM IDs
+// https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards
+
+// HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions.
+func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000 + return guid.GUID{} +} + +// HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions. +func HvsockGUIDBroadcast() guid.GUID { // ffffffff-ffff-ffff-ffff-ffffffffffff + return guid.GUID{ + Data1: 0xffffffff, + Data2: 0xffff, + Data3: 0xffff, + Data4: [8]uint8{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + } +} + +// HvsockGUIDLoopback is the Loopback VmId for accepting connections to the same partition as the connector. +func HvsockGUIDLoopback() guid.GUID { // e0e16197-dd56-4a10-9195-5ee7a155a838 + return guid.GUID{ + Data1: 0xe0e16197, + Data2: 0xdd56, + Data3: 0x4a10, + Data4: [8]uint8{0x91, 0x95, 0x5e, 0xe7, 0xa1, 0x55, 0xa8, 0x38}, + } +} + +// HvsockGUIDSiloHost is the address of a silo's host partition: +// - The silo host of a hosted silo is the utility VM. +// - The silo host of a silo on a physical host is the physical host. +func HvsockGUIDSiloHost() guid.GUID { // 36bd0c5c-7276-4223-88ba-7d03b654c568 + return guid.GUID{ + Data1: 0x36bd0c5c, + Data2: 0x7276, + Data3: 0x4223, + Data4: [8]byte{0x88, 0xba, 0x7d, 0x03, 0xb6, 0x54, 0xc5, 0x68}, + } +} + +// HvsockGUIDChildren is the wildcard VmId for accepting connections from the connector's child partitions. +func HvsockGUIDChildren() guid.GUID { // 90db8b89-0d35-4f79-8ce9-49ea0ac8b7cd + return guid.GUID{ + Data1: 0x90db8b89, + Data2: 0xd35, + Data3: 0x4f79, + Data4: [8]uint8{0x8c, 0xe9, 0x49, 0xea, 0xa, 0xc8, 0xb7, 0xcd}, + } +} + +// HvsockGUIDParent is the wildcard VmId for accepting connections from the connector's parent partition. +// Listening on this VmId accepts connection from: +// - Inside silos: silo host partition. +// - Inside hosted silo: host of the VM. +// - Inside VM: VM host. +// - Physical host: Not supported. +func HvsockGUIDParent() guid.GUID { // a42e7cda-d03f-480c-9cc2-a4de20abb878 + return guid.GUID{ + Data1: 0xa42e7cda, + Data2: 0xd03f, + Data3: 0x480c, + Data4: [8]uint8{0x9c, 0xc2, 0xa4, 0xde, 0x20, 0xab, 0xb8, 0x78}, + } +} + +// hvsockVsockServiceTemplate is the Service GUID used for the VSOCK protocol. +func hvsockVsockServiceTemplate() guid.GUID { // 00000000-facb-11e6-bd58-64006a7986d3 + return guid.GUID{ + Data2: 0xfacb, + Data3: 0x11e6, + Data4: [8]uint8{0xbd, 0x58, 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3}, + } +} + +// An HvsockAddr is an address for a AF_HYPERV socket. +type HvsockAddr struct { + VMID guid.GUID + ServiceID guid.GUID +} + +type rawHvsockAddr struct { + Family uint16 + _ uint16 + VMID guid.GUID + ServiceID guid.GUID +} + +var _ socket.RawSockaddr = &rawHvsockAddr{} + +// Network returns the address's network name, "hvsock". +func (*HvsockAddr) Network() string { + return "hvsock" +} + +func (addr *HvsockAddr) String() string { + return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID) +} + +// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port. +func VsockServiceID(port uint32) guid.GUID { + g := hvsockVsockServiceTemplate() // make a copy + g.Data1 = port + return g +} + +func (addr *HvsockAddr) raw() rawHvsockAddr { + return rawHvsockAddr{ + Family: afHVSock, + VMID: addr.VMID, + ServiceID: addr.ServiceID, + } +} + +func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) { + addr.VMID = raw.VMID + addr.ServiceID = raw.ServiceID +} + +// Sockaddr returns a pointer to and the size of this struct. +// +// Implements the [socket.RawSockaddr] interface, and allows use in +// [socket.Bind] and [socket.ConnectEx]. 
+func (r *rawHvsockAddr) Sockaddr() (unsafe.Pointer, int32, error) {
+ return unsafe.Pointer(r), int32(unsafe.Sizeof(rawHvsockAddr{})), nil
+}
+
+// FromBytes populates the rawHvsockAddr from the raw address bytes in b.
+func (r *rawHvsockAddr) FromBytes(b []byte) error {
+ n := int(unsafe.Sizeof(rawHvsockAddr{}))
+
+ if len(b) < n {
+ return fmt.Errorf("got %d, want %d: %w", len(b), n, socket.ErrBufferSize)
+ }
+
+ copy(unsafe.Slice((*byte)(unsafe.Pointer(r)), n), b[:n])
+ if r.Family != afHVSock {
+ return fmt.Errorf("got %d, want %d: %w", r.Family, afHVSock, socket.ErrAddrFamily)
+ }
+
+ return nil
+}
+
+// HvsockListener is a socket listener for the AF_HYPERV address family.
+type HvsockListener struct {
+ sock *win32File
+ addr HvsockAddr
+}
+
+var _ net.Listener = &HvsockListener{}
+
+// HvsockConn is a connected socket of the AF_HYPERV address family.
+type HvsockConn struct {
+ sock *win32File
+ local, remote HvsockAddr
+}
+
+var _ net.Conn = &HvsockConn{}
+
+func newHVSocket() (*win32File, error) {
+ fd, err := windows.Socket(afHVSock, windows.SOCK_STREAM, 1)
+ if err != nil {
+ return nil, os.NewSyscallError("socket", err)
+ }
+ f, err := makeWin32File(fd)
+ if err != nil {
+ windows.Close(fd)
+ return nil, err
+ }
+ f.socket = true
+ return f, nil
+}
+
+// ListenHvsock listens for connections on the specified hvsock address.
+func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) {
+ l := &HvsockListener{addr: *addr}
+
+ var sock *win32File
+ sock, err = newHVSocket()
+ if err != nil {
+ return nil, l.opErr("listen", err)
+ }
+ defer func() {
+ if err != nil {
+ _ = sock.Close()
+ }
+ }()
+
+ sa := addr.raw()
+ err = socket.Bind(sock.handle, &sa)
+ if err != nil {
+ return nil, l.opErr("listen", os.NewSyscallError("socket", err))
+ }
+ err = windows.Listen(sock.handle, 16)
+ if err != nil {
+ return nil, l.opErr("listen", os.NewSyscallError("listen", err))
+ }
+ return &HvsockListener{sock: sock, addr: *addr}, nil
+}
+
+func (l *HvsockListener) opErr(op string, err error) error {
+ return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err}
+}
+
+// Addr returns the listener's network address.
+func (l *HvsockListener) Addr() net.Addr {
+ return &l.addr
+}
+
+// Accept waits for the next connection and returns it.
+func (l *HvsockListener) Accept() (_ net.Conn, err error) {
+ sock, err := newHVSocket()
+ if err != nil {
+ return nil, l.opErr("accept", err)
+ }
+ defer func() {
+ if sock != nil {
+ sock.Close()
+ }
+ }()
+ c, err := l.sock.prepareIO()
+ if err != nil {
+ return nil, l.opErr("accept", err)
+ }
+ defer l.sock.wg.Done()
+
+ // AcceptEx, per documentation, requires an extra 16 bytes per address.
+ //
+ // https://docs.microsoft.com/en-us/windows/win32/api/mswsock/nf-mswsock-acceptex
+ const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{}))
+ var addrbuf [addrlen * 2]byte
+
+ var bytes uint32
+ err = windows.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o)
+ if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil {
+ return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
+ }
+
+ conn := &HvsockConn{
+ sock: sock,
+ }
+ // The local address returned in the AcceptEx buffer is the same as the Listener socket's
+ // address. However, the service GUID reported by GetSockName is different from the Listener's
+ // socket, and is sometimes the same as the local address of the socket that dialed the
+ // address, with the service GUID.Data1 incremented, but other times is different.
+ // todo: does the local address matter? is the listener's address or the actual address appropriate?
+ conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0])))
+ conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen])))
+
+ // initialize the accepted socket and update its properties with those of the listening socket
+ if err = windows.Setsockopt(sock.handle,
+ windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT,
+ (*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil {
+ return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err))
+ }
+
+ sock = nil
+ return conn, nil
+}
+
+// Close closes the listener, causing any pending Accept calls to fail.
+func (l *HvsockListener) Close() error {
+ return l.sock.Close()
+}
+
+// HvsockDialer configures and dials a Hyper-V Socket (i.e., [HvsockConn]).
+type HvsockDialer struct {
+ // Deadline is the time the Dial operation must connect before erroring.
+ Deadline time.Time
+
+ // Retries is the number of additional connects to try if the connection times out, is refused,
+ // or the host is unreachable
+ Retries uint
+
+ // RetryWait is the time to wait after a connection error to retry
+ RetryWait time.Duration
+
+ rt *time.Timer // redial wait timer
+}
+
+// Dial the Hyper-V socket at addr.
+//
+// See [HvsockDialer.Dial] for more information.
+func Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) {
+ return (&HvsockDialer{}).Dial(ctx, addr)
+}
+
+// Dial attempts to connect to the Hyper-V socket at addr, and returns a connection if successful.
+// It will retry up to (HvsockDialer).Retries additional times if dialing fails, waiting
+// (HvsockDialer).RetryWait between retries.
+//
+// Dialing can be cancelled either by providing (HvsockDialer).Deadline, or cancelling ctx.
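+//
+// A minimal usage sketch, assuming ctx is a context.Context in scope (the
+// service port and retry policy are illustrative):
+//
+//	d := &HvsockDialer{Retries: 2, RetryWait: time.Second}
+//	conn, err := d.Dial(ctx, &HvsockAddr{
+//		VMID:      HvsockGUIDLoopback(),
+//		ServiceID: VsockServiceID(5000),
+//	})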
+func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) { + op := "dial" + // create the conn early to use opErr() + conn = &HvsockConn{ + remote: *addr, + } + + if !d.Deadline.IsZero() { + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, d.Deadline) + defer cancel() + } + + // preemptive timeout/cancellation check + if err = ctx.Err(); err != nil { + return nil, conn.opErr(op, err) + } + + sock, err := newHVSocket() + if err != nil { + return nil, conn.opErr(op, err) + } + defer func() { + if sock != nil { + sock.Close() + } + }() + + sa := addr.raw() + err = socket.Bind(sock.handle, &sa) + if err != nil { + return nil, conn.opErr(op, os.NewSyscallError("bind", err)) + } + + c, err := sock.prepareIO() + if err != nil { + return nil, conn.opErr(op, err) + } + defer sock.wg.Done() + var bytes uint32 + for i := uint(0); i <= d.Retries; i++ { + err = socket.ConnectEx( + sock.handle, + &sa, + nil, // sendBuf + 0, // sendDataLen + &bytes, + (*windows.Overlapped)(unsafe.Pointer(&c.o))) + _, err = sock.asyncIO(c, nil, bytes, err) + if i < d.Retries && canRedial(err) { + if err = d.redialWait(ctx); err == nil { + continue + } + } + break + } + if err != nil { + return nil, conn.opErr(op, os.NewSyscallError("connectex", err)) + } + + // update the connection properties, so shutdown can be used + if err = windows.Setsockopt( + sock.handle, + windows.SOL_SOCKET, + windows.SO_UPDATE_CONNECT_CONTEXT, + nil, // optvalue + 0, // optlen + ); err != nil { + return nil, conn.opErr(op, os.NewSyscallError("setsockopt", err)) + } + + // get the local name + var sal rawHvsockAddr + err = socket.GetSockName(sock.handle, &sal) + if err != nil { + return nil, conn.opErr(op, os.NewSyscallError("getsockname", err)) + } + conn.local.fromRaw(&sal) + + // one last check for timeout, since asyncIO doesn't check the context + if err = ctx.Err(); err != nil { + return nil, conn.opErr(op, err) + } + + conn.sock = sock + sock = nil + + return conn, nil +} + +// redialWait waits before attempting to redial, resetting the timer as appropriate. +func (d *HvsockDialer) redialWait(ctx context.Context) (err error) { + if d.RetryWait == 0 { + return nil + } + + if d.rt == nil { + d.rt = time.NewTimer(d.RetryWait) + } else { + // should already be stopped and drained + d.rt.Reset(d.RetryWait) + } + + select { + case <-ctx.Done(): + case <-d.rt.C: + return nil + } + + // stop and drain the timer + if !d.rt.Stop() { + <-d.rt.C + } + return ctx.Err() +} + +// assumes error is a plain, unwrapped windows.Errno provided by direct syscall. 
+func canRedial(err error) bool { + //nolint:errorlint // guaranteed to be an Errno + switch err { + case windows.WSAECONNREFUSED, windows.WSAENETUNREACH, windows.WSAETIMEDOUT, + windows.ERROR_CONNECTION_REFUSED, windows.ERROR_CONNECTION_UNAVAIL: + return true + default: + return false + } +} + +func (conn *HvsockConn) opErr(op string, err error) error { + // translate from "file closed" to "socket closed" + if errors.Is(err, ErrFileClosed) { + err = socket.ErrSocketClosed + } + return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err} +} + +func (conn *HvsockConn) Read(b []byte) (int, error) { + c, err := conn.sock.prepareIO() + if err != nil { + return 0, conn.opErr("read", err) + } + defer conn.sock.wg.Done() + buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))} + var flags, bytes uint32 + err = windows.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) + n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err) + if err != nil { + var eno windows.Errno + if errors.As(err, &eno) { + err = os.NewSyscallError("wsarecv", eno) + } + return 0, conn.opErr("read", err) + } else if n == 0 { + err = io.EOF + } + return n, err +} + +func (conn *HvsockConn) Write(b []byte) (int, error) { + t := 0 + for len(b) != 0 { + n, err := conn.write(b) + if err != nil { + return t + n, err + } + t += n + b = b[n:] + } + return t, nil +} + +func (conn *HvsockConn) write(b []byte) (int, error) { + c, err := conn.sock.prepareIO() + if err != nil { + return 0, conn.opErr("write", err) + } + defer conn.sock.wg.Done() + buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))} + var bytes uint32 + err = windows.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) + n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err) + if err != nil { + var eno windows.Errno + if errors.As(err, &eno) { + err = os.NewSyscallError("wsasend", eno) + } + return 0, conn.opErr("write", err) + } + return n, err +} + +// Close closes the socket connection, failing any pending read or write calls. +func (conn *HvsockConn) Close() error { + return conn.sock.Close() +} + +func (conn *HvsockConn) IsClosed() bool { + return conn.sock.IsClosed() +} + +// shutdown disables sending or receiving on a socket. +func (conn *HvsockConn) shutdown(how int) error { + if conn.IsClosed() { + return socket.ErrSocketClosed + } + + err := windows.Shutdown(conn.sock.handle, how) + if err != nil { + // If the connection was closed, shutdowns fail with "not connected" + if errors.Is(err, windows.WSAENOTCONN) || + errors.Is(err, windows.WSAESHUTDOWN) { + err = socket.ErrSocketClosed + } + return os.NewSyscallError("shutdown", err) + } + return nil +} + +// CloseRead shuts down the read end of the socket, preventing future read operations. +func (conn *HvsockConn) CloseRead() error { + err := conn.shutdown(windows.SHUT_RD) + if err != nil { + return conn.opErr("closeread", err) + } + return nil +} + +// CloseWrite shuts down the write end of the socket, preventing future write operations and +// notifying the other endpoint that no more data will be written. +func (conn *HvsockConn) CloseWrite() error { + err := conn.shutdown(windows.SHUT_WR) + if err != nil { + return conn.opErr("closewrite", err) + } + return nil +} + +// LocalAddr returns the local address of the connection. +func (conn *HvsockConn) LocalAddr() net.Addr { + return &conn.local +} + +// RemoteAddr returns the remote address of the connection. 
+func (conn *HvsockConn) RemoteAddr() net.Addr { + return &conn.remote +} + +// SetDeadline implements the net.Conn SetDeadline method. +func (conn *HvsockConn) SetDeadline(t time.Time) error { + // todo: implement `SetDeadline` for `win32File` + if err := conn.SetReadDeadline(t); err != nil { + return fmt.Errorf("set read deadline: %w", err) + } + if err := conn.SetWriteDeadline(t); err != nil { + return fmt.Errorf("set write deadline: %w", err) + } + return nil +} + +// SetReadDeadline implements the net.Conn SetReadDeadline method. +func (conn *HvsockConn) SetReadDeadline(t time.Time) error { + return conn.sock.SetReadDeadline(t) +} + +// SetWriteDeadline implements the net.Conn SetWriteDeadline method. +func (conn *HvsockConn) SetWriteDeadline(t time.Time) error { + return conn.sock.SetWriteDeadline(t) +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/BUILD.bazel b/vendor/github.com/Microsoft/go-winio/internal/fs/BUILD.bazel new file mode 100644 index 00000000..56159395 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/BUILD.bazel @@ -0,0 +1,21 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "fs", + srcs = [ + "doc.go", + "fs.go", + "security.go", + "zsyscall_windows.go", + ], + importmap = "peridot.resf.org/vendor/github.com/Microsoft/go-winio/internal/fs", + importpath = "github.com/Microsoft/go-winio/internal/fs", + visibility = ["//vendor/github.com/Microsoft/go-winio:__subpackages__"], + deps = select({ + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/github.com/Microsoft/go-winio/internal/stringbuffer", + "//vendor/golang.org/x/sys/windows", + ], + "//conditions:default": [], + }), +) diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go b/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go new file mode 100644 index 00000000..1f653881 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go @@ -0,0 +1,2 @@ +// This package contains Win32 filesystem functionality. +package fs diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go new file mode 100644 index 00000000..0cd9621d --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go @@ -0,0 +1,262 @@ +//go:build windows + +package fs + +import ( + "golang.org/x/sys/windows" + + "github.com/Microsoft/go-winio/internal/stringbuffer" +) + +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go + +// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew +//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW + +const NullHandle windows.Handle = 0 + +// AccessMask defines standard, specific, and generic rights. +// +// Used with CreateFile and NtCreateFile (and co.). 
+//
+// Bitmask:
+// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+// +---------------+---------------+-------------------------------+
+// |G|G|G|G|Resvd|A| StandardRights| SpecificRights |
+// |R|W|E|A| |S| | |
+// +-+-------------+---------------+-------------------------------+
+//
+// GR Generic Read
+// GW Generic Write
+// GE Generic Execute
+// GA Generic All
+// Resvd Reserved
+// AS Access Security System
+//
+// https://learn.microsoft.com/en-us/windows/win32/secauthz/access-mask
+//
+// https://learn.microsoft.com/en-us/windows/win32/secauthz/generic-access-rights
+//
+// https://learn.microsoft.com/en-us/windows/win32/fileio/file-access-rights-constants
+type AccessMask = windows.ACCESS_MASK
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+ // Not actually any.
+ //
+ // For CreateFile: "query certain metadata such as file, directory, or device attributes without accessing that file or device"
+ // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters
+ FILE_ANY_ACCESS AccessMask = 0
+
+ GENERIC_READ AccessMask = 0x8000_0000
+ GENERIC_WRITE AccessMask = 0x4000_0000
+ GENERIC_EXECUTE AccessMask = 0x2000_0000
+ GENERIC_ALL AccessMask = 0x1000_0000
+ ACCESS_SYSTEM_SECURITY AccessMask = 0x0100_0000
+
+ // Specific Object Access
+ // from ntioapi.h
+
+ FILE_READ_DATA AccessMask = (0x0001) // file & pipe
+ FILE_LIST_DIRECTORY AccessMask = (0x0001) // directory
+
+ FILE_WRITE_DATA AccessMask = (0x0002) // file & pipe
+ FILE_ADD_FILE AccessMask = (0x0002) // directory
+
+ FILE_APPEND_DATA AccessMask = (0x0004) // file
+ FILE_ADD_SUBDIRECTORY AccessMask = (0x0004) // directory
+ FILE_CREATE_PIPE_INSTANCE AccessMask = (0x0004) // named pipe
+
+ FILE_READ_EA AccessMask = (0x0008) // file & directory
+ FILE_READ_PROPERTIES AccessMask = FILE_READ_EA
+
+ FILE_WRITE_EA AccessMask = (0x0010) // file & directory
+ FILE_WRITE_PROPERTIES AccessMask = FILE_WRITE_EA
+
+ FILE_EXECUTE AccessMask = (0x0020) // file
+ FILE_TRAVERSE AccessMask = (0x0020) // directory
+
+ FILE_DELETE_CHILD AccessMask = (0x0040) // directory
+
+ FILE_READ_ATTRIBUTES AccessMask = (0x0080) // all
+
+ FILE_WRITE_ATTRIBUTES AccessMask = (0x0100) // all
+
+ FILE_ALL_ACCESS AccessMask = (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x1FF)
+ FILE_GENERIC_READ AccessMask = (STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE)
+ FILE_GENERIC_WRITE AccessMask = (STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE)
+ FILE_GENERIC_EXECUTE AccessMask = (STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE)
+
+ SPECIFIC_RIGHTS_ALL AccessMask = 0x0000FFFF
+
+ // Standard Access
+ // from ntseapi.h
+
+ DELETE AccessMask = 0x0001_0000
+ READ_CONTROL AccessMask = 0x0002_0000
+ WRITE_DAC AccessMask = 0x0004_0000
+ WRITE_OWNER AccessMask = 0x0008_0000
+ SYNCHRONIZE AccessMask = 0x0010_0000
+
+ STANDARD_RIGHTS_REQUIRED AccessMask = 0x000F_0000
+
+ STANDARD_RIGHTS_READ AccessMask = READ_CONTROL
+ STANDARD_RIGHTS_WRITE AccessMask = READ_CONTROL
+ STANDARD_RIGHTS_EXECUTE AccessMask = READ_CONTROL
+
+ STANDARD_RIGHTS_ALL AccessMask = 0x001F_0000
+)
+
+type FileShareMode uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const ( + FILE_SHARE_NONE FileShareMode = 0x00 + FILE_SHARE_READ FileShareMode = 0x01 + FILE_SHARE_WRITE FileShareMode = 0x02 + FILE_SHARE_DELETE FileShareMode = 0x04 + FILE_SHARE_VALID_FLAGS FileShareMode = 0x07 +) + +type FileCreationDisposition uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + // from winbase.h + + CREATE_NEW FileCreationDisposition = 0x01 + CREATE_ALWAYS FileCreationDisposition = 0x02 + OPEN_EXISTING FileCreationDisposition = 0x03 + OPEN_ALWAYS FileCreationDisposition = 0x04 + TRUNCATE_EXISTING FileCreationDisposition = 0x05 +) + +// Create disposition values for NtCreate* +type NTFileCreationDisposition uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + // From ntioapi.h + + FILE_SUPERSEDE NTFileCreationDisposition = 0x00 + FILE_OPEN NTFileCreationDisposition = 0x01 + FILE_CREATE NTFileCreationDisposition = 0x02 + FILE_OPEN_IF NTFileCreationDisposition = 0x03 + FILE_OVERWRITE NTFileCreationDisposition = 0x04 + FILE_OVERWRITE_IF NTFileCreationDisposition = 0x05 + FILE_MAXIMUM_DISPOSITION NTFileCreationDisposition = 0x05 +) + +// CreateFile and co. take flags or attributes together as one parameter. +// Define alias until we can use generics to allow both +// +// https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants +type FileFlagOrAttribute uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + // from winnt.h + + FILE_FLAG_WRITE_THROUGH FileFlagOrAttribute = 0x8000_0000 + FILE_FLAG_OVERLAPPED FileFlagOrAttribute = 0x4000_0000 + FILE_FLAG_NO_BUFFERING FileFlagOrAttribute = 0x2000_0000 + FILE_FLAG_RANDOM_ACCESS FileFlagOrAttribute = 0x1000_0000 + FILE_FLAG_SEQUENTIAL_SCAN FileFlagOrAttribute = 0x0800_0000 + FILE_FLAG_DELETE_ON_CLOSE FileFlagOrAttribute = 0x0400_0000 + FILE_FLAG_BACKUP_SEMANTICS FileFlagOrAttribute = 0x0200_0000 + FILE_FLAG_POSIX_SEMANTICS FileFlagOrAttribute = 0x0100_0000 + FILE_FLAG_OPEN_REPARSE_POINT FileFlagOrAttribute = 0x0020_0000 + FILE_FLAG_OPEN_NO_RECALL FileFlagOrAttribute = 0x0010_0000 + FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000 +) + +// NtCreate* functions take a dedicated CreateOptions parameter. +// +// https://learn.microsoft.com/en-us/windows/win32/api/Winternl/nf-winternl-ntcreatefile +// +// https://learn.microsoft.com/en-us/windows/win32/devnotes/nt-create-named-pipe-file +type NTCreateOptions uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. 
+const ( + // From ntioapi.h + + FILE_DIRECTORY_FILE NTCreateOptions = 0x0000_0001 + FILE_WRITE_THROUGH NTCreateOptions = 0x0000_0002 + FILE_SEQUENTIAL_ONLY NTCreateOptions = 0x0000_0004 + FILE_NO_INTERMEDIATE_BUFFERING NTCreateOptions = 0x0000_0008 + + FILE_SYNCHRONOUS_IO_ALERT NTCreateOptions = 0x0000_0010 + FILE_SYNCHRONOUS_IO_NONALERT NTCreateOptions = 0x0000_0020 + FILE_NON_DIRECTORY_FILE NTCreateOptions = 0x0000_0040 + FILE_CREATE_TREE_CONNECTION NTCreateOptions = 0x0000_0080 + + FILE_COMPLETE_IF_OPLOCKED NTCreateOptions = 0x0000_0100 + FILE_NO_EA_KNOWLEDGE NTCreateOptions = 0x0000_0200 + FILE_DISABLE_TUNNELING NTCreateOptions = 0x0000_0400 + FILE_RANDOM_ACCESS NTCreateOptions = 0x0000_0800 + + FILE_DELETE_ON_CLOSE NTCreateOptions = 0x0000_1000 + FILE_OPEN_BY_FILE_ID NTCreateOptions = 0x0000_2000 + FILE_OPEN_FOR_BACKUP_INTENT NTCreateOptions = 0x0000_4000 + FILE_NO_COMPRESSION NTCreateOptions = 0x0000_8000 +) + +type FileSQSFlag = FileFlagOrAttribute + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + // from winbase.h + + SECURITY_ANONYMOUS FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16) + SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16) + SECURITY_IMPERSONATION FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16) + SECURITY_DELEGATION FileSQSFlag = FileSQSFlag(SecurityDelegation << 16) + + SECURITY_SQOS_PRESENT FileSQSFlag = 0x0010_0000 + SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F_0000 +) + +// GetFinalPathNameByHandle flags +// +// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew#parameters +type GetFinalPathFlag uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + GetFinalPathDefaultFlag GetFinalPathFlag = 0x0 + + FILE_NAME_NORMALIZED GetFinalPathFlag = 0x0 + FILE_NAME_OPENED GetFinalPathFlag = 0x8 + + VOLUME_NAME_DOS GetFinalPathFlag = 0x0 + VOLUME_NAME_GUID GetFinalPathFlag = 0x1 + VOLUME_NAME_NT GetFinalPathFlag = 0x2 + VOLUME_NAME_NONE GetFinalPathFlag = 0x4 +) + +// getFinalPathNameByHandle facilitates calling the Windows API GetFinalPathNameByHandle +// with the given handle and flags. It transparently takes care of creating a buffer of the +// correct size for the call. +// +// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew +func GetFinalPathNameByHandle(h windows.Handle, flags GetFinalPathFlag) (string, error) { + b := stringbuffer.NewWString() + //TODO: can loop infinitely if Win32 keeps returning the same (or a larger) n? + for { + n, err := windows.GetFinalPathNameByHandle(h, b.Pointer(), b.Cap(), uint32(flags)) + if err != nil { + return "", err + } + // If the buffer wasn't large enough, n will be the total size needed (including null terminator). + // Resize and try again. + if n > b.Cap() { + b.ResizeTo(n) + continue + } + // If the buffer is large enough, n will be the size not including the null terminator. + // Convert to a Go string and return. 
+ return b.String(), nil + } +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/security.go b/vendor/github.com/Microsoft/go-winio/internal/fs/security.go new file mode 100644 index 00000000..81760ac6 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/security.go @@ -0,0 +1,12 @@ +package fs + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level +type SecurityImpersonationLevel int32 // C default enums underlying type is `int`, which is Go `int32` + +// Impersonation levels +const ( + SecurityAnonymous SecurityImpersonationLevel = 0 + SecurityIdentification SecurityImpersonationLevel = 1 + SecurityImpersonation SecurityImpersonationLevel = 2 + SecurityDelegation SecurityImpersonationLevel = 3 +) diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go new file mode 100644 index 00000000..a94e234c --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go @@ -0,0 +1,61 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package fs + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + return e +} + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procCreateFileW = modkernel32.NewProc("CreateFileW") +) + +func CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile) +} + +func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { + r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile)) + handle = windows.Handle(r0) + if handle == windows.InvalidHandle { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/BUILD.bazel b/vendor/github.com/Microsoft/go-winio/internal/socket/BUILD.bazel new file mode 100644 index 00000000..fbe01736 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/socket/BUILD.bazel @@ -0,0 +1,20 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "socket", + srcs = [ + "rawaddr.go", + "socket.go", + "zsyscall_windows.go", + ], + importmap = "peridot.resf.org/vendor/github.com/Microsoft/go-winio/internal/socket", + importpath = "github.com/Microsoft/go-winio/internal/socket", + visibility = 
["//vendor/github.com/Microsoft/go-winio:__subpackages__"], + deps = select({ + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/github.com/Microsoft/go-winio/pkg/guid", + "//vendor/golang.org/x/sys/windows", + ], + "//conditions:default": [], + }), +) diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go b/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go new file mode 100644 index 00000000..7e82f9af --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go @@ -0,0 +1,20 @@ +package socket + +import ( + "unsafe" +) + +// RawSockaddr allows structs to be used with [Bind] and [ConnectEx]. The +// struct must meet the Win32 sockaddr requirements specified here: +// https://docs.microsoft.com/en-us/windows/win32/winsock/sockaddr-2 +// +// Specifically, the struct size must be least larger than an int16 (unsigned short) +// for the address family. +type RawSockaddr interface { + // Sockaddr returns a pointer to the RawSockaddr and its struct size, allowing + // for the RawSockaddr's data to be overwritten by syscalls (if necessary). + // + // It is the callers responsibility to validate that the values are valid; invalid + // pointers or size can cause a panic. + Sockaddr() (unsafe.Pointer, int32, error) +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go new file mode 100644 index 00000000..88580d97 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go @@ -0,0 +1,177 @@ +//go:build windows + +package socket + +import ( + "errors" + "fmt" + "net" + "sync" + "syscall" + "unsafe" + + "github.com/Microsoft/go-winio/pkg/guid" + "golang.org/x/sys/windows" +) + +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go socket.go + +//sys getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getsockname +//sys getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getpeername +//sys bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind + +const socketError = uintptr(^uint32(0)) + +var ( + // todo(helsaawy): create custom error types to store the desired vs actual size and addr family? + + ErrBufferSize = errors.New("buffer size") + ErrAddrFamily = errors.New("address family") + ErrInvalidPointer = errors.New("invalid pointer") + ErrSocketClosed = fmt.Errorf("socket closed: %w", net.ErrClosed) +) + +// todo(helsaawy): replace these with generics, ie: GetSockName[S RawSockaddr](s windows.Handle) (S, error) + +// GetSockName writes the local address of socket s to the [RawSockaddr] rsa. +// If rsa is not large enough, the [windows.WSAEFAULT] is returned. +func GetSockName(s windows.Handle, rsa RawSockaddr) error { + ptr, l, err := rsa.Sockaddr() + if err != nil { + return fmt.Errorf("could not retrieve socket pointer and size: %w", err) + } + + // although getsockname returns WSAEFAULT if the buffer is too small, it does not set + // &l to the correct size, so--apart from doubling the buffer repeatedly--there is no remedy + return getsockname(s, ptr, &l) +} + +// GetPeerName returns the remote address the socket is connected to. +// +// See [GetSockName] for more information. 
+func GetPeerName(s windows.Handle, rsa RawSockaddr) error { + ptr, l, err := rsa.Sockaddr() + if err != nil { + return fmt.Errorf("could not retrieve socket pointer and size: %w", err) + } + + return getpeername(s, ptr, &l) +} + +func Bind(s windows.Handle, rsa RawSockaddr) (err error) { + ptr, l, err := rsa.Sockaddr() + if err != nil { + return fmt.Errorf("could not retrieve socket pointer and size: %w", err) + } + + return bind(s, ptr, l) +} + +// "golang.org/x/sys/windows".ConnectEx and .Bind only accept internal implementations of +// their sockaddr interface, so they cannot be used with HvsockAddr. +// Replicate functionality here from +// https://cs.opensource.google/go/x/sys/+/master:windows/syscall_windows.go + +// The function pointers to `AcceptEx`, `ConnectEx` and `GetAcceptExSockaddrs` must be loaded at +// runtime via a WSAIoctl call: +// https://docs.microsoft.com/en-us/windows/win32/api/Mswsock/nc-mswsock-lpfn_connectex#remarks + +type runtimeFunc struct { + id guid.GUID + once sync.Once + addr uintptr + err error +} + +func (f *runtimeFunc) Load() error { + f.once.Do(func() { + var s windows.Handle + s, f.err = windows.Socket(windows.AF_INET, windows.SOCK_STREAM, windows.IPPROTO_TCP) + if f.err != nil { + return + } + defer windows.CloseHandle(s) //nolint:errcheck + + var n uint32 + f.err = windows.WSAIoctl(s, + windows.SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&f.id)), + uint32(unsafe.Sizeof(f.id)), + (*byte)(unsafe.Pointer(&f.addr)), + uint32(unsafe.Sizeof(f.addr)), + &n, + nil, // overlapped + 0, // completionRoutine + ) + }) + return f.err +} + +var ( + // todo: add `AcceptEx` and `GetAcceptExSockaddrs` + WSAID_CONNECTEX = guid.GUID{ //revive:disable-line:var-naming ALL_CAPS + Data1: 0x25a207b9, + Data2: 0xddf3, + Data3: 0x4660, + Data4: [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}, + } + + connectExFunc = runtimeFunc{id: WSAID_CONNECTEX} +) + +func ConnectEx( + fd windows.Handle, + rsa RawSockaddr, + sendBuf *byte, + sendDataLen uint32, + bytesSent *uint32, + overlapped *windows.Overlapped, +) error { + if err := connectExFunc.Load(); err != nil { + return fmt.Errorf("failed to load ConnectEx function pointer: %w", err) + } + ptr, n, err := rsa.Sockaddr() + if err != nil { + return err + } + return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped) +} + +// BOOL LpfnConnectex( +// [in] SOCKET s, +// [in] const sockaddr *name, +// [in] int namelen, +// [in, optional] PVOID lpSendBuffer, +// [in] DWORD dwSendDataLength, +// [out] LPDWORD lpdwBytesSent, +// [in] LPOVERLAPPED lpOverlapped +// ) + +func connectEx( + s windows.Handle, + name unsafe.Pointer, + namelen int32, + sendBuf *byte, + sendDataLen uint32, + bytesSent *uint32, + overlapped *windows.Overlapped, +) (err error) { + r1, _, e1 := syscall.SyscallN(connectExFunc.addr, + uintptr(s), + uintptr(name), + uintptr(namelen), + uintptr(unsafe.Pointer(sendBuf)), + uintptr(sendDataLen), + uintptr(unsafe.Pointer(bytesSent)), + uintptr(unsafe.Pointer(overlapped)), + ) + + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return err +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go new file mode 100644 index 00000000..e1504126 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go @@ -0,0 +1,69 @@ +//go:build windows + +// Code generated by 'go generate' using 
"github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package socket + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + return e +} + +var ( + modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") + + procbind = modws2_32.NewProc("bind") + procgetpeername = modws2_32.NewProc("getpeername") + procgetsockname = modws2_32.NewProc("getsockname") +) + +func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socketError { + err = errnoErr(e1) + } + return +} + +func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { + r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) + if r1 == socketError { + err = errnoErr(e1) + } + return +} + +func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { + r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) + if r1 == socketError { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/BUILD.bazel b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/BUILD.bazel new file mode 100644 index 00000000..c2fbeabc --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "stringbuffer", + srcs = ["wstring.go"], + importmap = "peridot.resf.org/vendor/github.com/Microsoft/go-winio/internal/stringbuffer", + importpath = "github.com/Microsoft/go-winio/internal/stringbuffer", + visibility = ["//vendor/github.com/Microsoft/go-winio:__subpackages__"], +) diff --git a/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go new file mode 100644 index 00000000..42ebc019 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go @@ -0,0 +1,132 @@ +package stringbuffer + +import ( + "sync" + "unicode/utf16" +) + +// TODO: worth exporting and using in mkwinsyscall? + +// Uint16BufferSize is the buffer size in the pool, chosen somewhat arbitrarily to accommodate +// large path strings: +// MAX_PATH (260) + size of volume GUID prefix (49) + null terminator = 310. +const MinWStringCap = 310 + +// use *[]uint16 since []uint16 creates an extra allocation where the slice header +// is copied to heap and then referenced via pointer in the interface header that sync.Pool +// stores. +var pathPool = sync.Pool{ // if go1.18+ adds Pool[T], use that to store []uint16 directly + New: func() interface{} { + b := make([]uint16, MinWStringCap) + return &b + }, +} + +func newBuffer() []uint16 { return *(pathPool.Get().(*[]uint16)) } + +// freeBuffer copies the slice header data, and puts a pointer to that in the pool. 
+// This avoids taking a pointer to the slice header in WString, which can be set to nil. +func freeBuffer(b []uint16) { pathPool.Put(&b) } + +// WString is a wide string buffer ([]uint16) meant for storing UTF-16 encoded strings +// for interacting with Win32 APIs. +// Sizes are specified as uint32 and not int. +// +// It is not thread safe. +type WString struct { + // type-def allows casting to []uint16 directly, use struct to prevent that and allow adding fields in the future. + + // raw buffer + b []uint16 +} + +// NewWString returns a [WString] allocated from a shared pool with an +// initial capacity of at least [MinWStringCap]. +// Since the buffer may have been previously used, its contents are not guaranteed to be empty. +// +// The buffer should be freed via [WString.Free]. +func NewWString() *WString { + return &WString{ + b: newBuffer(), + } +} + +func (b *WString) Free() { + if b.empty() { + return + } + freeBuffer(b.b) + b.b = nil +} + +// ResizeTo grows the buffer to at least c and returns the new capacity, freeing the +// previous buffer back into the pool. +func (b *WString) ResizeTo(c uint32) uint32 { + // already sufficient (or c is 0) + if c <= b.Cap() { + return b.Cap() + } + + if c <= MinWStringCap { + c = MinWStringCap + } + // allocate at least double the buffer size, as is done in [bytes.Buffer] and other places + if c <= 2*b.Cap() { + c = 2 * b.Cap() + } + + b2 := make([]uint16, c) + if !b.empty() { + copy(b2, b.b) + freeBuffer(b.b) + } + b.b = b2 + return c +} + +// Buffer returns the underlying []uint16 buffer. +func (b *WString) Buffer() []uint16 { + if b.empty() { + return nil + } + return b.b +} + +// Pointer returns a pointer to the first uint16 in the buffer. +// If the [WString.Free] has already been called, the pointer will be nil. +func (b *WString) Pointer() *uint16 { + if b.empty() { + return nil + } + return &b.b[0] +} + +// String returns the UTF-8 encoding of the UTF-16 string in the buffer. +// +// It assumes that the data is null-terminated. +func (b *WString) String() string { + // Using [windows.UTF16ToString] would require importing "golang.org/x/sys/windows" + // and would make this code Windows-only, which makes no sense. + // So copy UTF16ToString code into here. + // If other windows-specific code is added, switch to [windows.UTF16ToString] + + s := b.b + for i, v := range s { + if v == 0 { + s = s[:i] + break + } + } + return string(utf16.Decode(s)) +} + +// Cap returns the underlying buffer capacity. 
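+//
+// A minimal sketch of the intended buffer lifecycle (sizes illustrative):
+//
+//	buf := stringbuffer.NewWString() // backed by the shared pool
+//	defer buf.Free()                 // return the buffer to the pool
+//	buf.ResizeTo(512)                // grow if more than MinWStringCap is needed
+//	_ = buf.Pointer()                // hand to a Win32 call that fills it
+//	_ = buf.String()                 // decode the NUL-terminated UTF-16 result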
+func (b *WString) Cap() uint32 { + if b.empty() { + return 0 + } + return b.cap() +} + +func (b *WString) cap() uint32 { return uint32(cap(b.b)) } +func (b *WString) empty() bool { return b == nil || b.cap() == 0 } diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go new file mode 100644 index 00000000..a2da6639 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -0,0 +1,586 @@ +//go:build windows +// +build windows + +package winio + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "os" + "runtime" + "time" + "unsafe" + + "golang.org/x/sys/windows" + + "github.com/Microsoft/go-winio/internal/fs" +) + +//sys connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) = ConnectNamedPipe +//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateNamedPipeW +//sys disconnectNamedPipe(pipe windows.Handle) (err error) = DisconnectNamedPipe +//sys getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo +//sys getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW +//sys ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile +//sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb +//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U +//sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl + +type PipeConn interface { + net.Conn + Disconnect() error + Flush() error +} + +// type aliases for mkwinsyscall code +type ( + ntAccessMask = fs.AccessMask + ntFileShareMode = fs.FileShareMode + ntFileCreationDisposition = fs.NTFileCreationDisposition + ntFileOptions = fs.NTCreateOptions +) + +type ioStatusBlock struct { + Status, Information uintptr +} + +// typedef struct _OBJECT_ATTRIBUTES { +// ULONG Length; +// HANDLE RootDirectory; +// PUNICODE_STRING ObjectName; +// ULONG Attributes; +// PVOID SecurityDescriptor; +// PVOID SecurityQualityOfService; +// } OBJECT_ATTRIBUTES; +// +// https://learn.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_object_attributes +type objectAttributes struct { + Length uintptr + RootDirectory uintptr + ObjectName *unicodeString + Attributes uintptr + SecurityDescriptor *securityDescriptor + SecurityQoS uintptr +} + +type unicodeString struct { + Length uint16 + MaximumLength uint16 + Buffer uintptr +} + +// typedef struct _SECURITY_DESCRIPTOR { +// BYTE Revision; +// BYTE Sbz1; +// SECURITY_DESCRIPTOR_CONTROL Control; +// PSID Owner; +// PSID Group; +// PACL Sacl; +// PACL Dacl; +// } SECURITY_DESCRIPTOR, *PISECURITY_DESCRIPTOR; +// +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-security_descriptor +type securityDescriptor struct { + Revision 
byte + Sbz1 byte + Control uint16 + Owner uintptr + Group uintptr + Sacl uintptr //revive:disable-line:var-naming SACL, not Sacl + Dacl uintptr //revive:disable-line:var-naming DACL, not Dacl +} + +type ntStatus int32 + +func (status ntStatus) Err() error { + if status >= 0 { + return nil + } + return rtlNtStatusToDosError(status) +} + +var ( + // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. + ErrPipeListenerClosed = net.ErrClosed + + errPipeWriteClosed = errors.New("pipe has been closed for write") +) + +type win32Pipe struct { + *win32File + path string +} + +var _ PipeConn = (*win32Pipe)(nil) + +type win32MessageBytePipe struct { + win32Pipe + writeClosed bool + readEOF bool +} + +type pipeAddress string + +func (f *win32Pipe) LocalAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) RemoteAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) SetDeadline(t time.Time) error { + if err := f.SetReadDeadline(t); err != nil { + return err + } + return f.SetWriteDeadline(t) +} + +func (f *win32Pipe) Disconnect() error { + return disconnectNamedPipe(f.win32File.handle) +} + +// CloseWrite closes the write side of a message pipe in byte mode. +func (f *win32MessageBytePipe) CloseWrite() error { + if f.writeClosed { + return errPipeWriteClosed + } + err := f.win32File.Flush() + if err != nil { + return err + } + _, err = f.win32File.Write(nil) + if err != nil { + return err + } + f.writeClosed = true + return nil +} + +// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since +// they are used to implement CloseWrite(). +func (f *win32MessageBytePipe) Write(b []byte) (int, error) { + if f.writeClosed { + return 0, errPipeWriteClosed + } + if len(b) == 0 { + return 0, nil + } + return f.win32File.Write(b) +} + +// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message +// mode pipe will return io.EOF, as will all subsequent reads. +func (f *win32MessageBytePipe) Read(b []byte) (int, error) { + if f.readEOF { + return 0, io.EOF + } + n, err := f.win32File.Read(b) + if err == io.EOF { //nolint:errorlint + // If this was the result of a zero-byte read, then + // it is possible that the read was due to a zero-size + // message. Since we are simulating CloseWrite with a + // zero-byte message, ensure that all future Read() calls + // also return EOF. + f.readEOF = true + } else if err == windows.ERROR_MORE_DATA { //nolint:errorlint // err is Errno + // ERROR_MORE_DATA indicates that the pipe's read mode is message mode + // and the message still has more bytes. Treat this as a success, since + // this package presents all named pipes as byte streams. + err = nil + } + return n, err +} + +func (pipeAddress) Network() string { + return "pipe" +} + +func (s pipeAddress) String() string { + return string(s) +} + +// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. 
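+//
+// A minimal caller-level sketch of the dialing helpers built on top of it
+// (the pipe name is illustrative):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+//	defer cancel()
+//	conn, err := winio.DialPipeContext(ctx, `\\.\pipe\mypipe`)
+//	if err != nil {
+//		return err // ERROR_PIPE_BUSY is retried internally until ctx expires
+//	}
+//	defer conn.Close()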
+func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask, impLevel PipeImpLevel) (windows.Handle, error) { + for { + select { + case <-ctx.Done(): + return windows.Handle(0), ctx.Err() + default: + h, err := fs.CreateFile(*path, + access, + 0, // mode + nil, // security attributes + fs.OPEN_EXISTING, + fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.FileSQSFlag(impLevel), + 0, // template file handle + ) + if err == nil { + return h, nil + } + if err != windows.ERROR_PIPE_BUSY { //nolint:errorlint // err is Errno + return h, &os.PathError{Err: err, Op: "open", Path: *path} + } + // Wait 10 msec and try again. This is a rather simplistic + // approach, as we always retry every 10 milliseconds. + time.Sleep(10 * time.Millisecond) + } + } +} + +// DialPipe connects to a named pipe by path, timing out if the connection +// takes longer than the specified duration. If timeout is nil, then we use +// a default timeout of 2 seconds. (We do not use WaitNamedPipe.) +func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { + var absTimeout time.Time + if timeout != nil { + absTimeout = time.Now().Add(*timeout) + } else { + absTimeout = time.Now().Add(2 * time.Second) + } + ctx, cancel := context.WithDeadline(context.Background(), absTimeout) + defer cancel() + conn, err := DialPipeContext(ctx, path) + if errors.Is(err, context.DeadlineExceeded) { + return nil, ErrTimeout + } + return conn, err +} + +// DialPipeContext attempts to connect to a named pipe by `path` until `ctx` +// cancellation or timeout. +func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { + return DialPipeAccess(ctx, path, uint32(fs.GENERIC_READ|fs.GENERIC_WRITE)) +} + +// PipeImpLevel is an enumeration of impersonation levels that may be set +// when calling DialPipeAccessImpLevel. +type PipeImpLevel uint32 + +const ( + PipeImpLevelAnonymous = PipeImpLevel(fs.SECURITY_ANONYMOUS) + PipeImpLevelIdentification = PipeImpLevel(fs.SECURITY_IDENTIFICATION) + PipeImpLevelImpersonation = PipeImpLevel(fs.SECURITY_IMPERSONATION) + PipeImpLevelDelegation = PipeImpLevel(fs.SECURITY_DELEGATION) +) + +// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx` +// cancellation or timeout. +func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) { + return DialPipeAccessImpLevel(ctx, path, access, PipeImpLevelAnonymous) +} + +// DialPipeAccessImpLevel attempts to connect to a named pipe by `path` with +// `access` at `impLevel` until `ctx` cancellation or timeout. The other +// DialPipe* implementations use PipeImpLevelAnonymous. +func DialPipeAccessImpLevel(ctx context.Context, path string, access uint32, impLevel PipeImpLevel) (net.Conn, error) { + var err error + var h windows.Handle + h, err = tryDialPipe(ctx, &path, fs.AccessMask(access), impLevel) + if err != nil { + return nil, err + } + + var flags uint32 + err = getNamedPipeInfo(h, &flags, nil, nil, nil) + if err != nil { + return nil, err + } + + f, err := makeWin32File(h) + if err != nil { + windows.Close(h) + return nil, err + } + + // If the pipe is in message mode, return a message byte pipe, which + // supports CloseWrite(). 
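+	// The message-mode case below is what enables the CloseWrite convention;
+	// a caller-side sketch (a hedged illustration, names from this file):
+	//
+	//	c, _ := winio.DialPipeContext(ctx, path) // server created with MessageMode: true
+	//	if cw, ok := c.(interface{ CloseWrite() error }); ok {
+	//		_ = cw.CloseWrite() // sends a zero-byte message; peer's Read returns io.EOF
+	//	}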
+ if flags&windows.PIPE_TYPE_MESSAGE != 0 { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: f, path: path}, + }, nil + } + return &win32Pipe{win32File: f, path: path}, nil +} + +type acceptResponse struct { + f *win32File + err error +} + +type win32PipeListener struct { + firstHandle windows.Handle + path string + config PipeConfig + acceptCh chan (chan acceptResponse) + closeCh chan int + doneCh chan int +} + +func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (windows.Handle, error) { + path16, err := windows.UTF16FromString(path) + if err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + + var oa objectAttributes + oa.Length = unsafe.Sizeof(oa) + + var ntPath unicodeString + if err := rtlDosPathNameToNtPathName(&path16[0], + &ntPath, + 0, + 0, + ).Err(); err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + defer windows.LocalFree(windows.Handle(ntPath.Buffer)) //nolint:errcheck + oa.ObjectName = &ntPath + oa.Attributes = windows.OBJ_CASE_INSENSITIVE + + // The security descriptor is only needed for the first pipe. + if first { + if sd != nil { + //todo: does `sdb` need to be allocated on the heap, or can go allocate it? + l := uint32(len(sd)) + sdb, err := windows.LocalAlloc(0, l) + if err != nil { + return 0, fmt.Errorf("LocalAlloc for security descriptor with of length %d: %w", l, err) + } + defer windows.LocalFree(windows.Handle(sdb)) //nolint:errcheck + copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) + oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) + } else { + // Construct the default named pipe security descriptor. + var dacl uintptr + if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { + return 0, fmt.Errorf("getting default named pipe ACL: %w", err) + } + defer windows.LocalFree(windows.Handle(dacl)) //nolint:errcheck + + sdb := &securityDescriptor{ + Revision: 1, + Control: windows.SE_DACL_PRESENT, + Dacl: dacl, + } + oa.SecurityDescriptor = sdb + } + } + + typ := uint32(windows.FILE_PIPE_REJECT_REMOTE_CLIENTS) + if c.MessageMode { + typ |= windows.FILE_PIPE_MESSAGE_TYPE + } + + disposition := fs.FILE_OPEN + access := fs.GENERIC_READ | fs.GENERIC_WRITE | fs.SYNCHRONIZE + if first { + disposition = fs.FILE_CREATE + // By not asking for read or write access, the named pipe file system + // will put this pipe into an initially disconnected state, blocking + // client connections until the next call with first == false. + access = fs.SYNCHRONIZE + } + + timeout := int64(-50 * 10000) // 50ms + + var ( + h windows.Handle + iosb ioStatusBlock + ) + err = ntCreateNamedPipeFile(&h, + access, + &oa, + &iosb, + fs.FILE_SHARE_READ|fs.FILE_SHARE_WRITE, + disposition, + 0, + typ, + 0, + 0, + 0xffffffff, + uint32(c.InputBufferSize), + uint32(c.OutputBufferSize), + &timeout).Err() + if err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + + runtime.KeepAlive(ntPath) + return h, nil +} + +func (l *win32PipeListener) makeServerPipe() (*win32File, error) { + h, err := makeServerPipeHandle(l.path, nil, &l.config, false) + if err != nil { + return nil, err + } + f, err := makeWin32File(h) + if err != nil { + windows.Close(h) + return nil, err + } + return f, nil +} + +func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) { + p, err := l.makeServerPipe() + if err != nil { + return nil, err + } + + // Wait for the client to connect. 
+ ch := make(chan error) + go func(p *win32File) { + ch <- connectPipe(p) + }(p) + + select { + case err = <-ch: + if err != nil { + p.Close() + p = nil + } + case <-l.closeCh: + // Abort the connect request by closing the handle. + p.Close() + p = nil + err = <-ch + if err == nil || err == ErrFileClosed { //nolint:errorlint // err is Errno + err = ErrPipeListenerClosed + } + } + return p, err +} + +func (l *win32PipeListener) listenerRoutine() { + closed := false + for !closed { + select { + case <-l.closeCh: + closed = true + case responseCh := <-l.acceptCh: + var ( + p *win32File + err error + ) + for { + p, err = l.makeConnectedServerPipe() + // If the connection was immediately closed by the client, try + // again. + if err != windows.ERROR_NO_DATA { //nolint:errorlint // err is Errno + break + } + } + responseCh <- acceptResponse{p, err} + closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno + } + } + windows.Close(l.firstHandle) + l.firstHandle = 0 + // Notify Close() and Accept() callers that the handle has been closed. + close(l.doneCh) +} + +// PipeConfig contains configuration for the pipe listener. +type PipeConfig struct { + // SecurityDescriptor contains a Windows security descriptor in SDDL format. + SecurityDescriptor string + + // MessageMode determines whether the pipe is in byte or message mode. In either + // case the pipe is read in byte mode by default. The only practical difference in + // this implementation is that CloseWrite() is only supported for message mode pipes; + // CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only + // transferred to the reader (and returned as io.EOF in this implementation) + // when the pipe is in message mode. + MessageMode bool + + // InputBufferSize specifies the size of the input buffer, in bytes. + InputBufferSize int32 + + // OutputBufferSize specifies the size of the output buffer, in bytes. + OutputBufferSize int32 +} + +// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe. +// The pipe must not already exist. 
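+//
+// A minimal server sketch (pipe name and handle func are illustrative):
+//
+//	l, err := winio.ListenPipe(`\\.\pipe\mypipe`, &winio.PipeConfig{MessageMode: true})
+//	if err != nil {
+//		return err
+//	}
+//	defer l.Close()
+//	for {
+//		conn, err := l.Accept()
+//		if err != nil {
+//			break // ErrPipeListenerClosed once the listener is closed
+//		}
+//		go handle(conn)
+//	}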
+func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { + var ( + sd []byte + err error + ) + if c == nil { + c = &PipeConfig{} + } + if c.SecurityDescriptor != "" { + sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor) + if err != nil { + return nil, err + } + } + h, err := makeServerPipeHandle(path, sd, c, true) + if err != nil { + return nil, err + } + l := &win32PipeListener{ + firstHandle: h, + path: path, + config: *c, + acceptCh: make(chan (chan acceptResponse)), + closeCh: make(chan int), + doneCh: make(chan int), + } + go l.listenerRoutine() + return l, nil +} + +func connectPipe(p *win32File) error { + c, err := p.prepareIO() + if err != nil { + return err + } + defer p.wg.Done() + + err = connectNamedPipe(p.handle, &c.o) + _, err = p.asyncIO(c, nil, 0, err) + if err != nil && err != windows.ERROR_PIPE_CONNECTED { //nolint:errorlint // err is Errno + return err + } + return nil +} + +func (l *win32PipeListener) Accept() (net.Conn, error) { + ch := make(chan acceptResponse) + select { + case l.acceptCh <- ch: + response := <-ch + err := response.err + if err != nil { + return nil, err + } + if l.config.MessageMode { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: response.f, path: l.path}, + }, nil + } + return &win32Pipe{win32File: response.f, path: l.path}, nil + case <-l.doneCh: + return nil, ErrPipeListenerClosed + } +} + +func (l *win32PipeListener) Close() error { + select { + case l.closeCh <- 1: + <-l.doneCh + case <-l.doneCh: + } + return nil +} + +func (l *win32PipeListener) Addr() net.Addr { + return pipeAddress(l.path) +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/BUILD.bazel b/vendor/github.com/Microsoft/go-winio/pkg/guid/BUILD.bazel new file mode 100644 index 00000000..08ec4d27 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/BUILD.bazel @@ -0,0 +1,20 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "guid", + srcs = [ + "guid.go", + "guid_nonwindows.go", + "guid_windows.go", + "variant_string.go", + ], + importmap = "peridot.resf.org/vendor/github.com/Microsoft/go-winio/pkg/guid", + importpath = "github.com/Microsoft/go-winio/pkg/guid", + visibility = ["//visibility:public"], + deps = select({ + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/golang.org/x/sys/windows", + ], + "//conditions:default": [], + }), +) diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go new file mode 100644 index 00000000..48ce4e92 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go @@ -0,0 +1,232 @@ +// Package guid provides a GUID type. The backing structure for a GUID is +// identical to that used by the golang.org/x/sys/windows GUID type. +// There are two main binary encodings used for a GUID, the big-endian encoding, +// and the Windows (mixed-endian) encoding. See here for details: +// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding +package guid + +import ( + "crypto/rand" + "crypto/sha1" //nolint:gosec // not used for secure application + "encoding" + "encoding/binary" + "fmt" + "strconv" +) + +//go:generate go run golang.org/x/tools/cmd/stringer -type=Variant -trimprefix=Variant -linecomment + +// Variant specifies which GUID variant (or "type") of the GUID. It determines +// how the entirety of the rest of the GUID is interpreted. +type Variant uint8 + +// The variants specified by RFC 4122 section 4.1.1. 
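+//
+// For instance, a freshly generated version 4 GUID (see [NewV4] below)
+// reports the RFC 4122 variant; a sketch (both checks hold by construction):
+//
+//	g, _ := guid.NewV4()
+//	_ = g.Variant() == guid.VariantRFC4122 // true
+//	_ = g.Version() == 4                   // true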
+const ( + // VariantUnknown specifies a GUID variant which does not conform to one of + // the variant encodings specified in RFC 4122. + VariantUnknown Variant = iota + VariantNCS + VariantRFC4122 // RFC 4122 + VariantMicrosoft + VariantFuture +) + +// Version specifies how the bits in the GUID were generated. For instance, a +// version 4 GUID is randomly generated, and a version 5 is generated from the +// hash of an input string. +type Version uint8 + +func (v Version) String() string { + return strconv.FormatUint(uint64(v), 10) +} + +var _ = (encoding.TextMarshaler)(GUID{}) +var _ = (encoding.TextUnmarshaler)(&GUID{}) + +// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122. +func NewV4() (GUID, error) { + var b [16]byte + if _, err := rand.Read(b[:]); err != nil { + return GUID{}, err + } + + g := FromArray(b) + g.setVersion(4) // Version 4 means randomly generated. + g.setVariant(VariantRFC4122) + + return g, nil +} + +// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing) +// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name, +// and the sample code treats it as a series of bytes, so we do the same here. +// +// Some implementations, such as those found on Windows, treat the name as a +// big-endian UTF16 stream of bytes. If that is desired, the string can be +// encoded as such before being passed to this function. +func NewV5(namespace GUID, name []byte) (GUID, error) { + b := sha1.New() //nolint:gosec // not used for secure application + namespaceBytes := namespace.ToArray() + b.Write(namespaceBytes[:]) + b.Write(name) + + a := [16]byte{} + copy(a[:], b.Sum(nil)) + + g := FromArray(a) + g.setVersion(5) // Version 5 means generated from a string. + g.setVariant(VariantRFC4122) + + return g, nil +} + +func fromArray(b [16]byte, order binary.ByteOrder) GUID { + var g GUID + g.Data1 = order.Uint32(b[0:4]) + g.Data2 = order.Uint16(b[4:6]) + g.Data3 = order.Uint16(b[6:8]) + copy(g.Data4[:], b[8:16]) + return g +} + +func (g GUID) toArray(order binary.ByteOrder) [16]byte { + b := [16]byte{} + order.PutUint32(b[0:4], g.Data1) + order.PutUint16(b[4:6], g.Data2) + order.PutUint16(b[6:8], g.Data3) + copy(b[8:16], g.Data4[:]) + return b +} + +// FromArray constructs a GUID from a big-endian encoding array of 16 bytes. +func FromArray(b [16]byte) GUID { + return fromArray(b, binary.BigEndian) +} + +// ToArray returns an array of 16 bytes representing the GUID in big-endian +// encoding. +func (g GUID) ToArray() [16]byte { + return g.toArray(binary.BigEndian) +} + +// FromWindowsArray constructs a GUID from a Windows encoding array of bytes. +func FromWindowsArray(b [16]byte) GUID { + return fromArray(b, binary.LittleEndian) +} + +// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows +// encoding. +func (g GUID) ToWindowsArray() [16]byte { + return g.toArray(binary.LittleEndian) +} + +func (g GUID) String() string { + return fmt.Sprintf( + "%08x-%04x-%04x-%04x-%012x", + g.Data1, + g.Data2, + g.Data3, + g.Data4[:2], + g.Data4[2:]) +} + +// FromString parses a string containing a GUID and returns the GUID. The only +// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` +// format. 
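+//
+// A round-trip sketch (the GUID value is illustrative):
+//
+//	g, err := guid.FromString("c200e360-38c5-11ce-ae62-08002b2b79ef")
+//	if err != nil {
+//		return err
+//	}
+//	_ = g.String() // "c200e360-38c5-11ce-ae62-08002b2b79ef" again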
+func FromString(s string) (GUID, error) { + if len(s) != 36 { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + + var g GUID + + data1, err := strconv.ParseUint(s[0:8], 16, 32) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data1 = uint32(data1) + + data2, err := strconv.ParseUint(s[9:13], 16, 16) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data2 = uint16(data2) + + data3, err := strconv.ParseUint(s[14:18], 16, 16) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data3 = uint16(data3) + + for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} { + v, err := strconv.ParseUint(s[x:x+2], 16, 8) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data4[i] = uint8(v) + } + + return g, nil +} + +func (g *GUID) setVariant(v Variant) { + d := g.Data4[0] + switch v { + case VariantNCS: + d = (d & 0x7f) + case VariantRFC4122: + d = (d & 0x3f) | 0x80 + case VariantMicrosoft: + d = (d & 0x1f) | 0xc0 + case VariantFuture: + d = (d & 0x0f) | 0xe0 + case VariantUnknown: + fallthrough + default: + panic(fmt.Sprintf("invalid variant: %d", v)) + } + g.Data4[0] = d +} + +// Variant returns the GUID variant, as defined in RFC 4122. +func (g GUID) Variant() Variant { + b := g.Data4[0] + if b&0x80 == 0 { + return VariantNCS + } else if b&0xc0 == 0x80 { + return VariantRFC4122 + } else if b&0xe0 == 0xc0 { + return VariantMicrosoft + } else if b&0xe0 == 0xe0 { + return VariantFuture + } + return VariantUnknown +} + +func (g *GUID) setVersion(v Version) { + g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12) +} + +// Version returns the GUID version, as defined in RFC 4122. +func (g GUID) Version() Version { + return Version((g.Data3 & 0xF000) >> 12) +} + +// MarshalText returns the textual representation of the GUID. +func (g GUID) MarshalText() ([]byte, error) { + return []byte(g.String()), nil +} + +// UnmarshalText takes the textual representation of a GUID, and unmarshals it +// into this GUID. +func (g *GUID) UnmarshalText(text []byte) error { + g2, err := FromString(string(text)) + if err != nil { + return err + } + *g = g2 + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go new file mode 100644 index 00000000..805bd354 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go @@ -0,0 +1,16 @@ +//go:build !windows +// +build !windows + +package guid + +// GUID represents a GUID/UUID. It has the same structure as +// golang.org/x/sys/windows.GUID so that it can be used with functions expecting +// that type. It is defined as its own type because golang.org/x/sys/windows.GUID +// is only available to builds targeted at `windows`. The representation matches +// that used by native Windows code. +type GUID struct { + Data1 uint32 + Data2 uint16 + Data3 uint16 + Data4 [8]byte +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go new file mode 100644 index 00000000..27e45ee5 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go @@ -0,0 +1,13 @@ +//go:build windows +// +build windows + +package guid + +import "golang.org/x/sys/windows" + +// GUID represents a GUID/UUID. 
It has the same structure as +// golang.org/x/sys/windows.GUID so that it can be used with functions expecting +// that type. It is defined as its own type so that stringification and +// marshaling can be supported. The representation matches that used by native +// Windows code. +type GUID windows.GUID diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go new file mode 100644 index 00000000..4076d313 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go @@ -0,0 +1,27 @@ +// Code generated by "stringer -type=Variant -trimprefix=Variant -linecomment"; DO NOT EDIT. + +package guid + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[VariantUnknown-0] + _ = x[VariantNCS-1] + _ = x[VariantRFC4122-2] + _ = x[VariantMicrosoft-3] + _ = x[VariantFuture-4] +} + +const _Variant_name = "UnknownNCSRFC 4122MicrosoftFuture" + +var _Variant_index = [...]uint8{0, 7, 10, 18, 27, 33} + +func (i Variant) String() string { + if i >= Variant(len(_Variant_index)-1) { + return "Variant(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Variant_name[_Variant_index[i]:_Variant_index[i+1]] +} diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go new file mode 100644 index 00000000..d9b90b6e --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/privilege.go @@ -0,0 +1,196 @@ +//go:build windows +// +build windows + +package winio + +import ( + "bytes" + "encoding/binary" + "fmt" + "runtime" + "sync" + "unicode/utf16" + + "golang.org/x/sys/windows" +) + +//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges +//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf +//sys revertToSelf() (err error) = advapi32.RevertToSelf +//sys openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken +//sys getCurrentThread() (h windows.Handle) = GetCurrentThread +//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW +//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW +//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW + +const ( + //revive:disable-next-line:var-naming ALL_CAPS + SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED + + //revive:disable-next-line:var-naming ALL_CAPS + ERROR_NOT_ALL_ASSIGNED windows.Errno = windows.ERROR_NOT_ALL_ASSIGNED + + SeBackupPrivilege = "SeBackupPrivilege" + SeRestorePrivilege = "SeRestorePrivilege" + SeSecurityPrivilege = "SeSecurityPrivilege" +) + +var ( + privNames = make(map[string]uint64) + privNameMutex sync.Mutex +) + +// PrivilegeError represents an error enabling privileges. 
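+//
+// It is returned by the privilege helpers below; a minimal sketch of the
+// usual pattern (the callback body is illustrative):
+//
+//	err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
+//		return nil // open files with backup semantics here
+//	})
+//	var pErr *winio.PrivilegeError
+//	if errors.As(err, &pErr) {
+//		// the privilege could not be enabled for the current token
+//	}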
+type PrivilegeError struct { + privileges []uint64 +} + +func (e *PrivilegeError) Error() string { + s := "Could not enable privilege " + if len(e.privileges) > 1 { + s = "Could not enable privileges " + } + for i, p := range e.privileges { + if i != 0 { + s += ", " + } + s += `"` + s += getPrivilegeName(p) + s += `"` + } + return s +} + +// RunWithPrivilege enables a single privilege for a function call. +func RunWithPrivilege(name string, fn func() error) error { + return RunWithPrivileges([]string{name}, fn) +} + +// RunWithPrivileges enables privileges for a function call. +func RunWithPrivileges(names []string, fn func() error) error { + privileges, err := mapPrivileges(names) + if err != nil { + return err + } + runtime.LockOSThread() + defer runtime.UnlockOSThread() + token, err := newThreadToken() + if err != nil { + return err + } + defer releaseThreadToken(token) + err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED) + if err != nil { + return err + } + return fn() +} + +func mapPrivileges(names []string) ([]uint64, error) { + privileges := make([]uint64, 0, len(names)) + privNameMutex.Lock() + defer privNameMutex.Unlock() + for _, name := range names { + p, ok := privNames[name] + if !ok { + err := lookupPrivilegeValue("", name, &p) + if err != nil { + return nil, err + } + privNames[name] = p + } + privileges = append(privileges, p) + } + return privileges, nil +} + +// EnableProcessPrivileges enables privileges globally for the process. +func EnableProcessPrivileges(names []string) error { + return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED) +} + +// DisableProcessPrivileges disables privileges globally for the process. +func DisableProcessPrivileges(names []string) error { + return enableDisableProcessPrivilege(names, 0) +} + +func enableDisableProcessPrivilege(names []string, action uint32) error { + privileges, err := mapPrivileges(names) + if err != nil { + return err + } + + p := windows.CurrentProcess() + var token windows.Token + err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token) + if err != nil { + return err + } + + defer token.Close() + return adjustPrivileges(token, privileges, action) +} + +func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error { + var b bytes.Buffer + _ = binary.Write(&b, binary.LittleEndian, uint32(len(privileges))) + for _, p := range privileges { + _ = binary.Write(&b, binary.LittleEndian, p) + _ = binary.Write(&b, binary.LittleEndian, action) + } + prevState := make([]byte, b.Len()) + reqSize := uint32(0) + success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize) + if !success { + return err + } + if err == ERROR_NOT_ALL_ASSIGNED { //nolint:errorlint // err is Errno + return &PrivilegeError{privileges} + } + return nil +} + +func getPrivilegeName(luid uint64) string { + var nameBuffer [256]uint16 + bufSize := uint32(len(nameBuffer)) + err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize) + if err != nil { + return fmt.Sprintf("<unknown privilege %d>", luid) + } + + var displayNameBuffer [256]uint16 + displayBufSize := uint32(len(displayNameBuffer)) + var langID uint32 + err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID) + if err != nil { + return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize]))) + } + + return string(utf16.Decode(displayNameBuffer[:displayBufSize])) +} + +func newThreadToken() (windows.Token, error) { + err := 
impersonateSelf(windows.SecurityImpersonation) + if err != nil { + return 0, err + } + + var token windows.Token + err = openThreadToken(getCurrentThread(), windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, false, &token) + if err != nil { + rerr := revertToSelf() + if rerr != nil { + panic(rerr) + } + return 0, err + } + return token, nil +} + +func releaseThreadToken(h windows.Token) { + err := revertToSelf() + if err != nil { + panic(err) + } + h.Close() +} diff --git a/vendor/github.com/Microsoft/go-winio/reparse.go b/vendor/github.com/Microsoft/go-winio/reparse.go new file mode 100644 index 00000000..67d1a104 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/reparse.go @@ -0,0 +1,131 @@ +//go:build windows +// +build windows + +package winio + +import ( + "bytes" + "encoding/binary" + "fmt" + "strings" + "unicode/utf16" + "unsafe" +) + +const ( + reparseTagMountPoint = 0xA0000003 + reparseTagSymlink = 0xA000000C +) + +type reparseDataBuffer struct { + ReparseTag uint32 + ReparseDataLength uint16 + Reserved uint16 + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 +} + +// ReparsePoint describes a Win32 symlink or mount point. +type ReparsePoint struct { + Target string + IsMountPoint bool +} + +// UnsupportedReparsePointError is returned when trying to decode a non-symlink or +// mount point reparse point. +type UnsupportedReparsePointError struct { + Tag uint32 +} + +func (e *UnsupportedReparsePointError) Error() string { + return fmt.Sprintf("unsupported reparse point %x", e.Tag) +} + +// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink +// or a mount point. +func DecodeReparsePoint(b []byte) (*ReparsePoint, error) { + tag := binary.LittleEndian.Uint32(b[0:4]) + return DecodeReparsePointData(tag, b[8:]) +} + +func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) { + isMountPoint := false + switch tag { + case reparseTagMountPoint: + isMountPoint = true + case reparseTagSymlink: + default: + return nil, &UnsupportedReparsePointError{tag} + } + nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6]) + if !isMountPoint { + nameOffset += 4 + } + nameLength := binary.LittleEndian.Uint16(b[6:8]) + name := make([]uint16, nameLength/2) + err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name) + if err != nil { + return nil, err + } + return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil +} + +func isDriveLetter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or +// mount point. +func EncodeReparsePoint(rp *ReparsePoint) []byte { + // Generate an NT path and determine if this is a relative path. + var ntTarget string + relative := false + if strings.HasPrefix(rp.Target, `\\?\`) { + ntTarget = `\??\` + rp.Target[4:] + } else if strings.HasPrefix(rp.Target, `\\`) { + ntTarget = `\??\UNC\` + rp.Target[2:] + } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' { + ntTarget = `\??\` + rp.Target + } else { + ntTarget = rp.Target + relative = true + } + + // The paths must be NUL-terminated even though they are counted strings. 
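+	// For example, a five-rune target like `C:\go` is encoded as six uint16s
+	// (12 bytes) including the NUL, while PrintNameLength below records only
+	// the counted portion: (6-1)*2 = 10 bytes.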
+ target16 := utf16.Encode([]rune(rp.Target + "\x00")) + ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00")) + + size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8 + size += len(ntTarget16)*2 + len(target16)*2 + + tag := uint32(reparseTagMountPoint) + if !rp.IsMountPoint { + tag = reparseTagSymlink + size += 4 // Add room for symlink flags + } + + data := reparseDataBuffer{ + ReparseTag: tag, + ReparseDataLength: uint16(size), + SubstituteNameOffset: 0, + SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2), + PrintNameOffset: uint16(len(ntTarget16) * 2), + PrintNameLength: uint16((len(target16) - 1) * 2), + } + + var b bytes.Buffer + _ = binary.Write(&b, binary.LittleEndian, &data) + if !rp.IsMountPoint { + flags := uint32(0) + if relative { + flags |= 1 + } + _ = binary.Write(&b, binary.LittleEndian, flags) + } + + _ = binary.Write(&b, binary.LittleEndian, ntTarget16) + _ = binary.Write(&b, binary.LittleEndian, target16) + return b.Bytes() +} diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go new file mode 100644 index 00000000..c3685e98 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/sd.go @@ -0,0 +1,133 @@ +//go:build windows +// +build windows + +package winio + +import ( + "errors" + "fmt" + "unsafe" + + "golang.org/x/sys/windows" +) + +//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW +//sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW +//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW +//sys convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW + +type AccountLookupError struct { + Name string + Err error +} + +func (e *AccountLookupError) Error() string { + if e.Name == "" { + return "lookup account: empty account name specified" + } + var s string + switch { + case errors.Is(e.Err, windows.ERROR_INVALID_SID): + s = "the security ID structure is invalid" + case errors.Is(e.Err, windows.ERROR_NONE_MAPPED): + s = "not found" + default: + s = e.Err.Error() + } + return "lookup account " + e.Name + ": " + s +} + +func (e *AccountLookupError) Unwrap() error { return e.Err } + +type SddlConversionError struct { + Sddl string + Err error +} + +func (e *SddlConversionError) Error() string { + return "convert " + e.Sddl + ": " + e.Err.Error() +} + +func (e *SddlConversionError) Unwrap() error { return e.Err } + +// LookupSidByName looks up the SID of an account by name +// +//revive:disable-next-line:var-naming SID, not Sid +func LookupSidByName(name string) (sid string, err error) { + if name == "" { + return "", &AccountLookupError{name, windows.ERROR_NONE_MAPPED} + } + + var sidSize, sidNameUse, refDomainSize uint32 + err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) + if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno + return "", &AccountLookupError{name, err} + } + sidBuffer := make([]byte, sidSize) + refDomainBuffer := make([]uint16, refDomainSize) + err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) + if err != nil { + return "", &AccountLookupError{name, err} + } + var strBuffer *uint16 + err = 
convertSidToStringSid(&sidBuffer[0], &strBuffer) + if err != nil { + return "", &AccountLookupError{name, err} + } + sid = windows.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:]) + _, _ = windows.LocalFree(windows.Handle(unsafe.Pointer(strBuffer))) + return sid, nil +} + +// LookupNameBySid looks up the name of an account by SID +// +//revive:disable-next-line:var-naming SID, not Sid +func LookupNameBySid(sid string) (name string, err error) { + if sid == "" { + return "", &AccountLookupError{sid, windows.ERROR_NONE_MAPPED} + } + + sidBuffer, err := windows.UTF16PtrFromString(sid) + if err != nil { + return "", &AccountLookupError{sid, err} + } + + var sidPtr *byte + if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil { + return "", &AccountLookupError{sid, err} + } + defer windows.LocalFree(windows.Handle(unsafe.Pointer(sidPtr))) //nolint:errcheck + + var nameSize, refDomainSize, sidNameUse uint32 + err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse) + if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno + return "", &AccountLookupError{sid, err} + } + + nameBuffer := make([]uint16, nameSize) + refDomainBuffer := make([]uint16, refDomainSize) + err = lookupAccountSid(nil, sidPtr, &nameBuffer[0], &nameSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) + if err != nil { + return "", &AccountLookupError{sid, err} + } + + name = windows.UTF16ToString(nameBuffer) + return name, nil +} + +func SddlToSecurityDescriptor(sddl string) ([]byte, error) { + sd, err := windows.SecurityDescriptorFromString(sddl) + if err != nil { + return nil, &SddlConversionError{Sddl: sddl, Err: err} + } + b := unsafe.Slice((*byte)(unsafe.Pointer(sd)), sd.Length()) + return b, nil +} + +func SecurityDescriptorToSddl(sd []byte) (string, error) { + if l := int(unsafe.Sizeof(windows.SECURITY_DESCRIPTOR{})); len(sd) < l { + return "", fmt.Errorf("SecurityDescriptor (%d) smaller than expected (%d): %w", len(sd), l, windows.ERROR_INCORRECT_SIZE) + } + s := (*windows.SECURITY_DESCRIPTOR)(unsafe.Pointer(&sd[0])) + return s.String(), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go new file mode 100644 index 00000000..a6ca111b --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/syscall.go @@ -0,0 +1,5 @@ +//go:build windows + +package winio + +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./*.go diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go new file mode 100644 index 00000000..89b66eda --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -0,0 +1,378 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package winio + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. 
+func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modntdll = windows.NewLazySystemDLL("ntdll.dll") + modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") + + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") + procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") + procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procBackupRead = modkernel32.NewProc("BackupRead") + procBackupWrite = modkernel32.NewProc("BackupWrite") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") + procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") + procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") + procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") + procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") + procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") +) + +func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { + var _p0 uint32 + if releaseAll { + _p0 = 1 + } + r0, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) + success = r0 != 0 + if true { + err = errnoErr(e1) + } + return +} + +func convertSidToStringSid(sid *byte, str **uint16) (err error) { + r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func convertStringSidToSid(str *uint16, sid **byte) (err error) { + r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func impersonateSelf(level uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(level)) + if r1 == 0 { + err = errnoErr(e1) + 
} + return +} + +func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(accountName) + if err != nil { + return + } + return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) +} + +func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) +} + +func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeDisplayNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeName(_p0, luid, buffer, size) +} + +func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + var _p1 *uint16 + _p1, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _lookupPrivilegeValue(_p0, _p1, luid) +} + +func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) 
(err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } + r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func revertToSelf() (err error) { + r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr()) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } + r1, _, e1 := syscall.SyscallN(procBackupRead.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } + r1, _, e1 := syscall.SyscallN(procBackupWrite.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) { + r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(file), uintptr(unsafe.Pointer(o))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) { + r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(o))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) { + r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount)) + newport = windows.Handle(r0) + if newport == 0 { + err = errnoErr(e1) + } + return +} + +func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) +} + +func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) { + r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa))) + handle = windows.Handle(r0) + if handle == windows.InvalidHandle { + err = errnoErr(e1) + } + return +} + +func disconnectNamedPipe(pipe windows.Handle) (err error) { + r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getCurrentThread() (h windows.Handle) { + r0, _, 
_ := syscall.SyscallN(procGetCurrentThread.Addr()) + h = windows.Handle(r0) + return +} + +func getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) { + r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(h), uintptr(flags)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) { + r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout))) + status = ntStatus(r0) + return +} + +func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) { + r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(dacl))) + status = ntStatus(r0) + return +} + +func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) { + r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved)) + status = ntStatus(r0) + return +} + +func rtlNtStatusToDosError(status ntStatus) (winerr error) { + r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(status)) + if r0 != 0 { + winerr = syscall.Errno(r0) + } + return +} + +func wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } + r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags))) + if r1 == 0 { + err = 
errnoErr(e1) + } + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go b/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go index 3ed3f435..c85e6bef 100644 --- a/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go +++ b/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go @@ -191,7 +191,7 @@ func (bitCurve *BitCurve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, return x3, y3, z3 } -//TODO: double check if it is okay +// TODO: double check if it is okay // ScalarMult returns k*(Bx,By) where k is a number in big-endian form. func (bitCurve *BitCurve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) { // We have a slight problem in that the identity of the group (the @@ -239,7 +239,7 @@ func (bitCurve *BitCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) { var mask = []byte{0xff, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f} -//TODO: double check if it is okay +// TODO: double check if it is okay // GenerateKey returns a public/private key pair. The private key is generated // using the given reader, which must return random data. func (bitCurve *BitCurve) GenerateKey(rand io.Reader) (priv []byte, x, y *big.Int, err error) { diff --git a/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go b/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go index 77fb8b9a..cb6676de 100644 --- a/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go +++ b/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go @@ -1,7 +1,7 @@ // Package brainpool implements Brainpool elliptic curves. // Implementation of rcurves is from github.com/ebfe/brainpool // Note that these curves are implemented with naive, non-constant time operations -// and are likely not suitable for enviroments where timing attacks are a concern. +// and are likely not suitable for environments where timing attacks are a concern. 
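The go-winio wrappers earlier in this diff all share one generated shape: convert any Go string arguments with syscall.UTF16PtrFromString, invoke the procedure through syscall.SyscallN, and translate a zero return value into the thread's last error. A minimal hand-written sketch of that pattern, assuming a Windows build target (LookupPrivilegeValueW is a real advapi32 export, but this standalone wrapper is illustrative and not part of the vendored file):

```go
//go:build windows

package main

import (
	"fmt"
	"syscall"
	"unsafe"
)

var (
	modadvapi32               = syscall.NewLazyDLL("advapi32.dll")
	procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
)

// lookupPrivilegeValue resolves a privilege name to its LUID, mirroring the
// generated wrapper style above: UTF-16 conversion, SyscallN, errno mapping.
func lookupPrivilegeValue(name string, luid *uint64) error {
	p, err := syscall.UTF16PtrFromString(name) // NUL-terminated UTF-16
	if err != nil {
		return err
	}
	r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(),
		0, // lpSystemName == NULL: query the local system
		uintptr(unsafe.Pointer(p)),
		uintptr(unsafe.Pointer(luid)))
	if r1 == 0 { // BOOL FALSE: the Errno carries GetLastError
		if e1 != 0 {
			return e1
		}
		return syscall.EINVAL
	}
	return nil
}

func main() {
	var luid uint64
	if err := lookupPrivilegeValue("SeBackupPrivilege", &luid); err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Printf("SeBackupPrivilege LUID: %#x\n", luid)
}
```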
package brainpool import ( diff --git a/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go b/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go index 2d535508..7e291d6a 100644 --- a/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go +++ b/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go @@ -80,4 +80,4 @@ func (curve *rcurve) ScalarMult(x1, y1 *big.Int, scalar []byte) (x, y *big.Int) func (curve *rcurve) ScalarBaseMult(scalar []byte) (x, y *big.Int) { return curve.fromTwisted(curve.twisted.ScalarBaseMult(scalar)) -} \ No newline at end of file +} diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/eax.go b/vendor/github.com/ProtonMail/go-crypto/eax/eax.go index 6b6bc7ae..3ae91d59 100644 --- a/vendor/github.com/ProtonMail/go-crypto/eax/eax.go +++ b/vendor/github.com/ProtonMail/go-crypto/eax/eax.go @@ -67,7 +67,7 @@ func (e *eax) Seal(dst, nonce, plaintext, adata []byte) []byte { if len(nonce) > e.nonceSize { panic("crypto/eax: Nonce too long for this instance") } - ret, out := byteutil.SliceForAppend(dst, len(plaintext) + e.tagSize) + ret, out := byteutil.SliceForAppend(dst, len(plaintext)+e.tagSize) omacNonce := e.omacT(0, nonce) omacAdata := e.omacT(1, adata) @@ -85,7 +85,7 @@ func (e *eax) Seal(dst, nonce, plaintext, adata []byte) []byte { return ret } -func (e* eax) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) { +func (e *eax) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) { if len(nonce) > e.nonceSize { panic("crypto/eax: Nonce too long for this instance") } diff --git a/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go b/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go index a6bdf512..affb74a7 100644 --- a/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go +++ b/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go @@ -41,7 +41,7 @@ func ShiftNBytesLeft(dst, x []byte, n int) { bits := uint(n % 8) l := len(dst) for i := 0; i < l-1; i++ { - dst[i] = (dst[i] << bits) | (dst[i+1] >> uint(8 - bits)) + dst[i] = (dst[i] << bits) | (dst[i+1] >> uint(8-bits)) } dst[l-1] = dst[l-1] << bits @@ -56,7 +56,6 @@ func XorBytesMut(X, Y []byte) { } } - // XorBytes assumes equal input length, puts X XOR Y into Z func XorBytes(Z, X, Y []byte) { for i := 0; i < len(X); i++ { @@ -67,10 +66,10 @@ func XorBytes(Z, X, Y []byte) { // RightXor XORs smaller input (assumed Y) at the right of the larger input (assumed X) func RightXor(X, Y []byte) []byte { offset := len(X) - len(Y) - xored := make([]byte, len(X)); + xored := make([]byte, len(X)) copy(xored, X) for i := 0; i < len(Y); i++ { - xored[offset + i] ^= Y[i] + xored[offset+i] ^= Y[i] } return xored } @@ -89,4 +88,3 @@ func SliceForAppend(in []byte, n int) (head, tail []byte) { tail = head[len(in):] return } - diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go index 7f78cfa7..1a6f7350 100644 --- a/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go @@ -93,13 +93,13 @@ func NewOCBWithNonceAndTagSize( return nil, ocbError("Custom tag length exceeds blocksize") } return &ocb{ - block: block, - tagSize: tagSize, - nonceSize: nonceSize, - mask: initializeMaskTable(block), + block: block, + tagSize: tagSize, + nonceSize: nonceSize, + mask: initializeMaskTable(block), reusableKtop: reusableKtop{ noncePrefix: nil, - Ktop: nil, + Ktop: nil, }, }, nil } diff --git 
a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go index 84308558..330309ff 100644 --- a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go @@ -5,7 +5,7 @@ import ( ) // Test vectors from https://tools.ietf.org/html/rfc7253. Note that key is -// shared accross tests. +// shared across tests. var testKey, _ = hex.DecodeString("000102030405060708090A0B0C0D0E0F") var rfc7253testVectors = []struct { diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go index 5dc158f0..14a3c336 100644 --- a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go @@ -4,21 +4,22 @@ package ocb var rfc7253TestVectorTaglen96 = struct { key, nonce, header, plaintext, ciphertext string }{"0F0E0D0C0B0A09080706050403020100", - "BBAA9988776655443322110D", - "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", - "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", - "1792A4E31E0755FB03E31B22116E6C2DDF9EFD6E33D536F1A0124B0A55BAE884ED93481529C76B6AD0C515F4D1CDD4FDAC4F02AA"} + "BBAA9988776655443322110D", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "1792A4E31E0755FB03E31B22116E6C2DDF9EFD6E33D536F1A0124B0A55BAE884ED93481529C76B6AD0C515F4D1CDD4FDAC4F02AA"} var rfc7253AlgorithmTest = []struct { KEYLEN, TAGLEN int - OUTPUT string }{ - {128, 128, "67E944D23256C5E0B6C61FA22FDF1EA2"}, - {192, 128, "F673F2C3E7174AAE7BAE986CA9F29E17"}, - {256, 128, "D90EB8E9C977C88B79DD793D7FFA161C"}, - {128, 96, "77A3D8E73589158D25D01209"}, - {192, 96, "05D56EAD2752C86BE6932C5E"}, - {256, 96, "5458359AC23B0CBA9E6330DD"}, - {128, 64, "192C9B7BD90BA06A"}, - {192, 64, "0066BC6E0EF34E24"}, - {256, 64, "7D4EA5D445501CBE"}, - } + OUTPUT string +}{ + {128, 128, "67E944D23256C5E0B6C61FA22FDF1EA2"}, + {192, 128, "F673F2C3E7174AAE7BAE986CA9F29E17"}, + {256, 128, "D90EB8E9C977C88B79DD793D7FFA161C"}, + {128, 96, "77A3D8E73589158D25D01209"}, + {192, 96, "05D56EAD2752C86BE6932C5E"}, + {256, 96, "5458359AC23B0CBA9E6330DD"}, + {128, 64, "192C9B7BD90BA06A"}, + {192, 64, "0066BC6E0EF34E24"}, + {256, 64, "7D4EA5D445501CBE"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go index 3b357e58..d7af9141 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go @@ -10,19 +10,22 @@ import ( "bufio" "bytes" "encoding/base64" - "github.com/ProtonMail/go-crypto/openpgp/errors" "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" ) // A Block represents an OpenPGP armored structure. // // The encoded form is: -// -----BEGIN Type----- -// Headers // -// base64-encoded Bytes -// '=' base64 encoded checksum -// -----END Type----- +// -----BEGIN Type----- +// Headers +// +// base64-encoded Bytes +// '=' base64 encoded checksum +// -----END Type----- +// // where Headers is a possibly empty sequence of Key: Value lines. 
// // Since the armored data can be very large, this package presents a streaming @@ -206,12 +209,16 @@ TryNextBlock: break } - i := bytes.Index(line, []byte(": ")) + i := bytes.Index(line, []byte(":")) if i == -1 { goto TryNextBlock } lastKey = string(line[:i]) - p.Header[lastKey] = string(line[i+2:]) + var value string + if len(line) > i+2 { + value = string(line[i+2:]) + } + p.Header[lastKey] = value } p.lReader.in = r diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go index 6f07582c..5b6e16c1 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go @@ -96,7 +96,8 @@ func (l *lineBreaker) Close() (err error) { // trailer. // // It's built into a stack of io.Writers: -// encoding -> base64 encoder -> lineBreaker -> out +// +// encoding -> base64 encoder -> lineBreaker -> out type encoding struct { out io.Writer breaker *lineBreaker diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/clearsign/clearsign.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/clearsign/clearsign.go index ec66699c..8f70b373 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/clearsign/clearsign.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/clearsign/clearsign.go @@ -307,6 +307,10 @@ func (d *dashEscaper) Close() (err error) { sig.Hash = d.hashType sig.CreationTime = t sig.IssuerKeyId = &k.KeyId + sig.IssuerFingerprint = k.Fingerprint + sig.Notations = d.config.Notations() + sigLifetimeSecs := d.config.SigLifetime() + sig.SigLifetimeSecs = &sigLifetimeSecs if err = sig.Sign(d.hashers[i], k, d.config); err != nil { return @@ -421,12 +425,6 @@ func (b *Block) VerifySignature(keyring openpgp.KeyRing, config *packet.Config) // if the name isn't known. See RFC 4880, section 9.4. func nameOfHash(h crypto.Hash) string { switch h { - case crypto.MD5: - return "MD5" - case crypto.SHA1: - return "SHA1" - case crypto.RIPEMD160: - return "RIPEMD160" case crypto.SHA224: return "SHA224" case crypto.SHA256: @@ -435,6 +433,10 @@ func nameOfHash(h crypto.Hash) string { return "SHA384" case crypto.SHA512: return "SHA512" + case crypto.SHA3_256: + return "SHA3-256" + case crypto.SHA3_512: + return "SHA3-512" } return "" } @@ -443,12 +445,8 @@ func nameOfHash(h crypto.Hash) string { // if the name isn't known. See RFC 4880, section 9.4. 
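The armor change above is subtle: header lines are now split on ":" rather than ": ", and the value is only sliced out when the line is long enough, so a valueless header such as "Comment:" no longer indexes past the end of the line. A standalone sketch of the same parsing logic, using only the standard library:

```go
package main

import (
	"bytes"
	"fmt"
)

// parseArmorHeader mirrors the fixed logic above: split on ":" and treat a
// missing value as the empty string rather than slicing out of range.
func parseArmorHeader(line []byte) (key, value string, ok bool) {
	i := bytes.Index(line, []byte(":"))
	if i == -1 {
		return "", "", false // not a Key: Value line
	}
	key = string(line[:i])
	if len(line) > i+2 { // room for the ": " separator plus a value
		value = string(line[i+2:])
	}
	return key, value, true
}

func main() {
	for _, l := range [][]byte{
		[]byte("Version: GopenPGP 2.7.5"),
		[]byte("Comment:"), // valueless header that previously mis-parsed
	} {
		k, v, ok := parseArmorHeader(l)
		fmt.Printf("ok=%v key=%q value=%q\n", ok, k, v)
	}
}
```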
func nameToHash(h string) crypto.Hash { switch h { - case "MD5": - return crypto.MD5 case "SHA1": return crypto.SHA1 - case "RIPEMD160": - return crypto.RIPEMD160 case "SHA224": return crypto.SHA224 case "SHA256": @@ -457,6 +455,10 @@ func nameToHash(h string) crypto.Hash { return crypto.SHA384 case "SHA512": return crypto.SHA512 + case "SHA3-256": + return crypto.SHA3_256 + case "SHA3-512": + return crypto.SHA3_512 } return crypto.Hash(0) } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go index 0b49be4b..c895bad6 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go @@ -8,10 +8,8 @@ package ecdh import ( "bytes" - "crypto/elliptic" "errors" "io" - "math/big" "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap" "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" @@ -24,9 +22,8 @@ type KDF struct { } type PublicKey struct { - ecc.CurveType - elliptic.Curve - X, Y *big.Int + curve ecc.ECDHCurve + Point []byte KDF } @@ -35,11 +32,56 @@ type PrivateKey struct { D []byte } -func GenerateKey(c elliptic.Curve, kdf KDF, rand io.Reader) (priv *PrivateKey, err error) { +func NewPublicKey(curve ecc.ECDHCurve, kdfHash algorithm.Hash, kdfCipher algorithm.Cipher) *PublicKey { + return &PublicKey{ + curve: curve, + KDF: KDF{ + Hash: kdfHash, + Cipher: kdfCipher, + }, + } +} + +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +func (pk *PublicKey) GetCurve() ecc.ECDHCurve { + return pk.curve +} + +func (pk *PublicKey) MarshalPoint() []byte { + return pk.curve.MarshalBytePoint(pk.Point) +} + +func (pk *PublicKey) UnmarshalPoint(p []byte) error { + pk.Point = pk.curve.UnmarshalBytePoint(p) + if pk.Point == nil { + return errors.New("ecdh: failed to parse EC point") + } + return nil +} + +func (sk *PrivateKey) MarshalByteSecret() []byte { + return sk.curve.MarshalByteSecret(sk.D) +} + +func (sk *PrivateKey) UnmarshalByteSecret(d []byte) error { + sk.D = sk.curve.UnmarshalByteSecret(d) + + if sk.D == nil { + return errors.New("ecdh: failed to parse scalar") + } + return nil +} + +func GenerateKey(rand io.Reader, c ecc.ECDHCurve, kdf KDF) (priv *PrivateKey, err error) { priv = new(PrivateKey) - priv.PublicKey.Curve = c + priv.PublicKey.curve = c priv.PublicKey.KDF = kdf - priv.D, priv.PublicKey.X, priv.PublicKey.Y, err = elliptic.GenerateKey(c, rand) + priv.PublicKey.Point, priv.D, err = c.GenerateECDH(rand) return } @@ -56,22 +98,12 @@ func Encrypt(random io.Reader, pub *PublicKey, msg, curveOID, fingerprint []byte } m := append(msg, padding...) 
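The `m := append(msg, padding...)` context line above is the RFC 6637 §8 session-key padding: the block "symmetric algorithm id || session key || checksum" is padded to a fixed size with PKCS#5-style bytes, each carrying the pad length, and the rewritten Decrypt below strips it by reading the final byte. A toy round trip of that scheme (the 40-octet target is an assumption inferred from the surrounding code, which this hunk does not show):

```go
package main

import "fmt"

const paddedLen = 40 // assumed fixed size of "alg id || session key || checksum" after padding

// pad appends PKCS#5-style padding: every pad byte holds the pad length.
func pad(m []byte) []byte {
	n := byte(paddedLen - len(m))
	for i := byte(0); i < n; i++ {
		m = append(m, n)
	}
	return m
}

// unpad strips as many bytes as the final byte indicates, exactly as the
// vendored Decrypt does with m[:len(m)-int(m[len(m)-1])].
func unpad(m []byte) []byte {
	return m[:len(m)-int(m[len(m)-1])]
}

func main() {
	msg := []byte{0x07, 0xAA, 0xBB, 0xCC} // toy session-key block
	padded := pad(append([]byte(nil), msg...))
	fmt.Printf("padded to %d bytes, unpadded: % x\n", len(padded), unpad(padded))
}
```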
- if pub.CurveType == ecc.Curve25519 { - return X25519Encrypt(random, pub, m, curveOID, fingerprint) - } - - d, x, y, err := elliptic.GenerateKey(pub.Curve, random) + ephemeral, zb, err := pub.curve.Encaps(random, pub.Point) if err != nil { return nil, nil, err } - vsG = elliptic.Marshal(pub.Curve, x, y) - zbBig, _ := pub.Curve.ScalarMult(pub.X, pub.Y, d) - - byteLen := (pub.Curve.Params().BitSize + 7) >> 3 - zb := make([]byte, byteLen) - zbBytes := zbBig.Bytes() - copy(zb[byteLen-len(zbBytes):], zbBytes) + vsG = pub.curve.MarshalBytePoint(ephemeral) z, err := buildKey(pub, zb, curveOID, fingerprint, false, false) if err != nil { @@ -86,29 +118,34 @@ func Encrypt(random io.Reader, pub *PublicKey, msg, curveOID, fingerprint []byte } -func Decrypt(priv *PrivateKey, vsG, m, curveOID, fingerprint []byte) (msg []byte, err error) { - if priv.PublicKey.CurveType == ecc.Curve25519 { - return X25519Decrypt(priv, vsG, m, curveOID, fingerprint) +func Decrypt(priv *PrivateKey, vsG, c, curveOID, fingerprint []byte) (msg []byte, err error) { + var m []byte + zb, err := priv.PublicKey.curve.Decaps(priv.curve.UnmarshalBytePoint(vsG), priv.D) + + // Try buildKey three times to workaround an old bug, see comments in buildKey. + for i := 0; i < 3; i++ { + var z []byte + // RFC6637 §8: "Compute Z = KDF( S, Z_len, Param );" + z, err = buildKey(&priv.PublicKey, zb, curveOID, fingerprint, i == 1, i == 2) + if err != nil { + return nil, err + } + + // RFC6637 §8: "Compute C = AESKeyWrap( Z, c ) as per [RFC3394]" + m, err = keywrap.Unwrap(z, c) + if err == nil { + break + } } - x, y := elliptic.Unmarshal(priv.Curve, vsG) - zbBig, _ := priv.Curve.ScalarMult(x, y, priv.D) - byteLen := (priv.Curve.Params().BitSize + 7) >> 3 - zb := make([]byte, byteLen) - zbBytes := zbBig.Bytes() - copy(zb[byteLen-len(zbBytes):], zbBytes) - - z, err := buildKey(&priv.PublicKey, zb, curveOID, fingerprint, false, false) + // Only return an error after we've tried all (required) variants of buildKey. if err != nil { return nil, err } - c, err := keywrap.Unwrap(z, m) - if err != nil { - return nil, err - } - - return c[:len(c)-int(c[len(c)-1])], nil + // RFC6637 §8: "m = symm_alg_ID || session key || checksum || pkcs5_padding" + // The last byte should be the length of the padding, as per PKCS5; strip it off. + return m[:len(m)-int(m[len(m)-1])], nil } func buildKey(pub *PublicKey, zb []byte, curveOID, fingerprint []byte, stripLeading, stripTrailing bool) ([]byte, error) { @@ -130,7 +167,7 @@ func buildKey(pub *PublicKey, zb []byte, curveOID, fingerprint []byte, stripLead if _, err := param.Write(fingerprint[:20]); err != nil { return nil, err } - if param.Len() - len(curveOID) != 45 { + if param.Len()-len(curveOID) != 45 { return nil, errors.New("ecdh: malformed KDF Param") } @@ -144,15 +181,19 @@ func buildKey(pub *PublicKey, zb []byte, curveOID, fingerprint []byte, stripLead j := zbLen - 1 if stripLeading { // Work around old go crypto bug where the leading zeros are missing. - for ; i < zbLen && zb[i] == 0; i++ {} + for i < zbLen && zb[i] == 0 { + i++ + } } if stripTrailing { // Work around old OpenPGP.js bug where insignificant trailing zeros in // this little-endian number are missing. // (See https://github.com/openpgpjs/openpgpjs/pull/853.) 
- for ; j >= 0 && zb[j] == 0; j-- {} + for j >= 0 && zb[j] == 0 { + j-- + } } - if _, err := h.Write(zb[i:j+1]); err != nil { + if _, err := h.Write(zb[i : j+1]); err != nil { return nil, err } if _, err := h.Write(param.Bytes()); err != nil { @@ -163,3 +204,7 @@ func buildKey(pub *PublicKey, zb []byte, curveOID, fingerprint []byte, stripLead return mb[:pub.KDF.Cipher.KeySize()], nil // return oBits leftmost bits of MB. } + +func Validate(priv *PrivateKey) error { + return priv.curve.ValidateECDH(priv.Point, priv.D) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/x25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/x25519.go deleted file mode 100644 index 15b41a31..00000000 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/x25519.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ecdh implements ECDH encryption, suitable for OpenPGP, -// as specified in RFC 6637, section 8. -package ecdh - -import ( - "errors" - "io" - "math/big" - - "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap" - "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" - "golang.org/x/crypto/curve25519" -) - -// Generates a private-public key-pair. -// 'priv' is a private key; a scalar belonging to the set -// 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of the -// curve. 'pub' is simply 'priv' * G where G is the base point. -// See https://cr.yp.to/ecdh.html and RFC7748, sec 5. -func x25519GenerateKeyPairBytes(rand io.Reader) (priv [32]byte, pub [32]byte, err error) { - var n, helper = new(big.Int), new(big.Int) - n.SetUint64(1) - n.Lsh(n, 252) - helper.SetString("27742317777372353535851937790883648493", 10) - n.Add(n, helper) - - for true { - _, err = io.ReadFull(rand, priv[:]) - if err != nil { - return - } - // The following ensures that the private key is a number of the form - // 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of - // of the curve. - priv[0] &= 248 - priv[31] &= 127 - priv[31] |= 64 - - // If the scalar is out of range, sample another random number. - if new(big.Int).SetBytes(priv[:]).Cmp(n) >= 0 { - continue - } - - curve25519.ScalarBaseMult(&pub, &priv) - return - } - return -} - -// X25519GenerateKey samples the key pair according to the correct distribution. -// It also sets the given key-derivation function and returns the *PrivateKey -// object along with an error. -func X25519GenerateKey(rand io.Reader, kdf KDF) (priv *PrivateKey, err error) { - ci := ecc.FindByName("Curve25519") - priv = new(PrivateKey) - priv.PublicKey.Curve = ci.Curve - d, pubKey, err := x25519GenerateKeyPairBytes(rand) - if err != nil { - return nil, err - } - priv.PublicKey.KDF = kdf - priv.D = make([]byte, 32) - copyReversed(priv.D, d[:]) - priv.PublicKey.CurveType = ci.CurveType - priv.PublicKey.Curve = ci.Curve - /* - * Note that ECPoint.point differs from the definition of public keys in - * [Curve25519] in two ways: (1) the byte-ordering is big-endian, which is - * more uniform with how big integers are represented in TLS, and (2) there - * is an additional length byte (so ECpoint.point is actually 33 bytes), - * again for uniformity (and extensibility). 
- */ - var encodedKey = make([]byte, 33) - encodedKey[0] = 0x40 - copy(encodedKey[1:], pubKey[:]) - priv.PublicKey.X = new(big.Int).SetBytes(encodedKey[:]) - priv.PublicKey.Y = new(big.Int) - return priv, nil -} - -func X25519Encrypt(random io.Reader, pub *PublicKey, msg, curveOID, fingerprint []byte) (vsG, c []byte, err error) { - d, ephemeralKey, err := x25519GenerateKeyPairBytes(random) - if err != nil { - return nil, nil, err - } - var pubKey [32]byte - - if pub.X.BitLen() > 33*264 { - return nil, nil, errors.New("ecdh: invalid key") - } - copy(pubKey[:], pub.X.Bytes()[1:]) - - var zb [32]byte - curve25519.ScalarBaseMult(&zb, &d) - curve25519.ScalarMult(&zb, &d, &pubKey) - z, err := buildKey(pub, zb[:], curveOID, fingerprint, false, false) - - if err != nil { - return nil, nil, err - } - - if c, err = keywrap.Wrap(z, msg); err != nil { - return nil, nil, err - } - - var vsg [33]byte - vsg[0] = 0x40 - copy(vsg[1:], ephemeralKey[:]) - - return vsg[:], c, nil -} - -func X25519Decrypt(priv *PrivateKey, vsG, m, curveOID, fingerprint []byte) (msg []byte, err error) { - var zb, d, ephemeralKey [32]byte - if len(vsG) != 33 || vsG[0] != 0x40 { - return nil, errors.New("ecdh: invalid key") - } - copy(ephemeralKey[:], vsG[1:33]) - - copyReversed(d[:], priv.D) - curve25519.ScalarBaseMult(&zb, &d) - curve25519.ScalarMult(&zb, &d, &ephemeralKey) - - var c []byte - - for i := 0; i < 3; i++ { - // Try buildKey three times for compat, see comments in buildKey. - z, err := buildKey(&priv.PublicKey, zb[:], curveOID, fingerprint, i == 1, i == 2) - if err != nil { - return nil, err - } - - res, err := keywrap.Unwrap(z, m) - if i == 2 && err != nil { - // Only return an error after we've tried all variants of buildKey. - return nil, err - } - - c = res - if err == nil { - break - } - } - - return c[:len(c)-int(c[len(c)-1])], nil -} - -func copyReversed(out []byte, in []byte) { - l := len(in) - for i := 0; i < l; i++ { - out[i] = in[l-i-1] - } -} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdsa/ecdsa.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdsa/ecdsa.go new file mode 100644 index 00000000..f94ae1b2 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdsa/ecdsa.go @@ -0,0 +1,80 @@ +// Package ecdsa implements ECDSA signature, suitable for OpenPGP, +// as specified in RFC 6637, section 5. 
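The new file below replaces the old crypto/elliptic-backed key types with curve-agnostic wrappers over an ecc.ECDSACurve. A hedged usage sketch of that API: because openpgp/internal/ecc is an internal package, this compiles only from within go-crypto's own module (say, as an ecdsa_test.go file there); elsewhere, read it as pseudocode for the API shape.

```go
package ecdsa_test

import (
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"testing"

	"github.com/ProtonMail/go-crypto/openpgp/ecdsa"
	"github.com/ProtonMail/go-crypto/openpgp/internal/ecc" // internal: in-module use only
)

func TestSignVerifySketch(t *testing.T) {
	// Wrap a stdlib curve in the generic adapter the new API expects.
	curve := ecc.NewGenericCurve(elliptic.P256())
	priv, err := ecdsa.GenerateKey(rand.Reader, curve)
	if err != nil {
		t.Fatal(err)
	}
	digest := sha256.Sum256([]byte("signed data"))
	r, s, err := ecdsa.Sign(rand.Reader, priv, digest[:])
	if err != nil {
		t.Fatal(err)
	}
	if !ecdsa.Verify(&priv.PublicKey, digest[:], r, s) {
		t.Fatal("signature failed to verify")
	}
}
```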
+package ecdsa + +import ( + "errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" + "io" + "math/big" +) + +type PublicKey struct { + X, Y *big.Int + curve ecc.ECDSACurve +} + +type PrivateKey struct { + PublicKey + D *big.Int +} + +func NewPublicKey(curve ecc.ECDSACurve) *PublicKey { + return &PublicKey{ + curve: curve, + } +} + +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +func (pk *PublicKey) GetCurve() ecc.ECDSACurve { + return pk.curve +} + +func (pk *PublicKey) MarshalPoint() []byte { + return pk.curve.MarshalIntegerPoint(pk.X, pk.Y) +} + +func (pk *PublicKey) UnmarshalPoint(p []byte) error { + pk.X, pk.Y = pk.curve.UnmarshalIntegerPoint(p) + if pk.X == nil { + return errors.New("ecdsa: failed to parse EC point") + } + return nil +} + +func (sk *PrivateKey) MarshalIntegerSecret() []byte { + return sk.curve.MarshalIntegerSecret(sk.D) +} + +func (sk *PrivateKey) UnmarshalIntegerSecret(d []byte) error { + sk.D = sk.curve.UnmarshalIntegerSecret(d) + + if sk.D == nil { + return errors.New("ecdsa: failed to parse scalar") + } + return nil +} + +func GenerateKey(rand io.Reader, c ecc.ECDSACurve) (priv *PrivateKey, err error) { + priv = new(PrivateKey) + priv.PublicKey.curve = c + priv.PublicKey.X, priv.PublicKey.Y, priv.D, err = c.GenerateECDSA(rand) + return +} + +func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err error) { + return priv.PublicKey.curve.Sign(rand, priv.X, priv.Y, priv.D, hash) +} + +func Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool { + return pub.curve.Verify(pub.X, pub.Y, hash, r, s) +} + +func Validate(priv *PrivateKey) error { + return priv.curve.ValidateECDSA(priv.X, priv.Y, priv.D.Bytes()) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/eddsa/eddsa.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/eddsa/eddsa.go new file mode 100644 index 00000000..99ecfc7f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/eddsa/eddsa.go @@ -0,0 +1,91 @@ +// Package eddsa implements EdDSA signature, suitable for OpenPGP, as specified in +// https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-13.7 +package eddsa + +import ( + "errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" + "io" +) + +type PublicKey struct { + X []byte + curve ecc.EdDSACurve +} + +type PrivateKey struct { + PublicKey + D []byte +} + +func NewPublicKey(curve ecc.EdDSACurve) *PublicKey { + return &PublicKey{ + curve: curve, + } +} + +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +func (pk *PublicKey) GetCurve() ecc.EdDSACurve { + return pk.curve +} + +func (pk *PublicKey) MarshalPoint() []byte { + return pk.curve.MarshalBytePoint(pk.X) +} + +func (pk *PublicKey) UnmarshalPoint(x []byte) error { + pk.X = pk.curve.UnmarshalBytePoint(x) + + if pk.X == nil { + return errors.New("eddsa: failed to parse EC point") + } + return nil +} + +func (sk *PrivateKey) MarshalByteSecret() []byte { + return sk.curve.MarshalByteSecret(sk.D) +} + +func (sk *PrivateKey) UnmarshalByteSecret(d []byte) error { + sk.D = sk.curve.UnmarshalByteSecret(d) + + if sk.D == nil { + return errors.New("eddsa: failed to parse scalar") + } + return nil +} + +func GenerateKey(rand io.Reader, c ecc.EdDSACurve) (priv *PrivateKey, err error) { + priv = new(PrivateKey) + priv.PublicKey.curve = c + priv.PublicKey.X, priv.D, err = c.GenerateEdDSA(rand) + return +} + +func Sign(priv *PrivateKey, message []byte) (r, s []byte, err 
error) { + sig, err := priv.PublicKey.curve.Sign(priv.PublicKey.X, priv.D, message) + if err != nil { + return nil, nil, err + } + + r, s = priv.PublicKey.curve.MarshalSignature(sig) + return +} + +func Verify(pub *PublicKey, message, r, s []byte) bool { + sig := pub.curve.UnmarshalSignature(r, s) + if sig == nil { + return false + } + + return pub.curve.Verify(pub.X, message, sig) +} + +func Validate(priv *PrivateKey) error { + return priv.curve.ValidateEdDSA(priv.PublicKey.X, priv.D) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go index 6a07d8ff..bad27743 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go @@ -71,8 +71,8 @@ func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err // returns the plaintext of the message. An error can result only if the // ciphertext is invalid. Users should keep in mind that this is a padding // oracle and thus, if exposed to an adaptive chosen ciphertext attack, can -// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks -// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel +// be used to break the cryptosystem. See “Chosen Ciphertext Attacks +// Against Protocols Based on the RSA Encryption Standard PKCS #1”, Daniel // Bleichenbacher, Advances in Cryptology (Crypto '98), func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) { s := new(big.Int).Exp(c1, priv.X, priv.P) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/hash.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/hash.go new file mode 100644 index 00000000..526bd777 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/hash.go @@ -0,0 +1,24 @@ +package openpgp + +import ( + "crypto" + + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" +) + +// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP +// hash id. +func HashIdToHash(id byte) (h crypto.Hash, ok bool) { + return algorithm.HashIdToHash(id) +} + +// HashIdToString returns the name of the hash function corresponding to the +// given OpenPGP hash id. +func HashIdToString(id byte) (name string, ok bool) { + return algorithm.HashIdToString(id) +} + +// HashToHashId returns an OpenPGP hash id which corresponds the given Hash. +func HashToHashId(h crypto.Hash) (id byte, ok bool) { + return algorithm.HashToHashId(h) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go index 17a1bfe9..d0670651 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go @@ -16,7 +16,7 @@ type AEADMode uint8 const ( AEADModeEAX = AEADMode(1) AEADModeOCB = AEADMode(2) - AEADModeGCM = AEADMode(100) + AEADModeGCM = AEADMode(3) ) // TagLength returns the length in bytes of authentication tags. 
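The one-line aead.go change above is load-bearing: OCB keeps id 2, but GCM moves from the old experimental id 100 to the registered id 3 used by the OpenPGP crypto-refresh, so packets written with the old id will not interoperate. A small sketch of what the mode id selects, wiring up only GCM via the standard library (EAX and OCB come from the vendored packages, not crypto/cipher):

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"fmt"
)

type AEADMode uint8

const (
	AEADModeEAX = AEADMode(1)
	AEADModeOCB = AEADMode(2)
	AEADModeGCM = AEADMode(3) // previously AEADMode(100)
)

// newAEAD maps a wire-format mode id to an AEAD construction; only the GCM
// arm is implemented in this sketch.
func newAEAD(mode AEADMode, key []byte) (cipher.AEAD, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	switch mode {
	case AEADModeGCM:
		return cipher.NewGCM(block)
	default:
		return nil, fmt.Errorf("aead mode %d not wired up in this sketch", mode)
	}
}

func main() {
	aead, err := newAEAD(AEADModeGCM, make([]byte, 16)) // AES-128
	if err != nil {
		panic(err)
	}
	fmt.Println("GCM nonce size:", aead.NonceSize(), "tag size:", aead.Overhead())
}
```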
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go index 3f1b61b8..d1a00fc7 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go @@ -32,26 +32,25 @@ type Hash interface { // The following vars mirror the crypto/Hash supported hash functions. var ( - MD5 Hash = cryptoHash{1, crypto.MD5} - SHA1 Hash = cryptoHash{2, crypto.SHA1} - RIPEMD160 Hash = cryptoHash{3, crypto.RIPEMD160} - SHA256 Hash = cryptoHash{8, crypto.SHA256} - SHA384 Hash = cryptoHash{9, crypto.SHA384} - SHA512 Hash = cryptoHash{10, crypto.SHA512} - SHA224 Hash = cryptoHash{11, crypto.SHA224} + SHA1 Hash = cryptoHash{2, crypto.SHA1} + SHA256 Hash = cryptoHash{8, crypto.SHA256} + SHA384 Hash = cryptoHash{9, crypto.SHA384} + SHA512 Hash = cryptoHash{10, crypto.SHA512} + SHA224 Hash = cryptoHash{11, crypto.SHA224} + SHA3_256 Hash = cryptoHash{12, crypto.SHA3_256} + SHA3_512 Hash = cryptoHash{14, crypto.SHA3_512} ) // HashById represents the different hash functions specified for OpenPGP. See // http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-14 var ( HashById = map[uint8]Hash{ - MD5.Id(): MD5, - SHA1.Id(): SHA1, - RIPEMD160.Id(): RIPEMD160, - SHA256.Id(): SHA256, - SHA384.Id(): SHA384, - SHA512.Id(): SHA512, - SHA224.Id(): SHA224, + SHA256.Id(): SHA256, + SHA384.Id(): SHA384, + SHA512.Id(): SHA512, + SHA224.Id(): SHA224, + SHA3_256.Id(): SHA3_256, + SHA3_512.Id(): SHA3_512, } ) @@ -68,13 +67,12 @@ func (h cryptoHash) Id() uint8 { } var hashNames = map[uint8]string{ - MD5.Id(): "MD5", - SHA1.Id(): "SHA1", - RIPEMD160.Id(): "RIPEMD160", - SHA256.Id(): "SHA256", - SHA384.Id(): "SHA384", - SHA512.Id(): "SHA512", - SHA224.Id(): "SHA224", + SHA256.Id(): "SHA256", + SHA384.Id(): "SHA384", + SHA512.Id(): "SHA512", + SHA224.Id(): "SHA224", + SHA3_256.Id(): "SHA3-256", + SHA3_512.Id(): "SHA3-512", } func (h cryptoHash) String() string { @@ -84,3 +82,62 @@ func (h cryptoHash) String() string { } return s } + +// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP +// hash id. +func HashIdToHash(id byte) (h crypto.Hash, ok bool) { + if hash, ok := HashById[id]; ok { + return hash.HashFunc(), true + } + return 0, false +} + +// HashIdToHashWithSha1 returns a crypto.Hash which corresponds to the given OpenPGP +// hash id, allowing sha1. +func HashIdToHashWithSha1(id byte) (h crypto.Hash, ok bool) { + if hash, ok := HashById[id]; ok { + return hash.HashFunc(), true + } + + if id == SHA1.Id() { + return SHA1.HashFunc(), true + } + + return 0, false +} + +// HashIdToString returns the name of the hash function corresponding to the +// given OpenPGP hash id. +func HashIdToString(id byte) (name string, ok bool) { + if hash, ok := HashById[id]; ok { + return hash.String(), true + } + return "", false +} + +// HashToHashId returns an OpenPGP hash id which corresponds the given Hash. 
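The table surgery above removes MD5, RIPEMD-160, and notably SHA-1 from the default HashById map, then reintroduces SHA-1 only through the explicit ...WithSha1 lookups so that legacy verification paths must opt in. A standalone restatement of that lookup-with-fallback pattern (the ids follow the IANA OpenPGP registry; this is not the vendored code):

```go
package main

import (
	"crypto"
	"fmt"
)

// hashByID mirrors the pruned default table: SHA-1 (id 2) is deliberately absent.
var hashByID = map[uint8]crypto.Hash{
	8:  crypto.SHA256,
	9:  crypto.SHA384,
	10: crypto.SHA512,
	11: crypto.SHA224,
	12: crypto.SHA3_256,
	14: crypto.SHA3_512,
}

func hashIDToHash(id byte) (crypto.Hash, bool) {
	h, ok := hashByID[id]
	return h, ok
}

// hashIDToHashWithSha1 adds the opt-in SHA-1 fallback for legacy paths.
func hashIDToHashWithSha1(id byte) (crypto.Hash, bool) {
	if h, ok := hashByID[id]; ok {
		return h, true
	}
	if id == 2 { // OpenPGP hash id 2 == SHA-1
		return crypto.SHA1, true
	}
	return 0, false
}

func main() {
	_, ok := hashIDToHash(2)
	h, okLegacy := hashIDToHashWithSha1(2)
	fmt.Printf("default lookup ok=%v; legacy lookup ok=%v hash=%v\n", ok, okLegacy, h)
}
```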
+func HashToHashId(h crypto.Hash) (id byte, ok bool) { + for id, hash := range HashById { + if hash.HashFunc() == h { + return id, true + } + } + + return 0, false +} + +// HashToHashIdWithSha1 returns an OpenPGP hash id which corresponds the given Hash, +// allowing instances of SHA1 +func HashToHashIdWithSha1(h crypto.Hash) (id byte, ok bool) { + for id, hash := range HashById { + if hash.HashFunc() == h { + return id, true + } + } + + if h == SHA1.HashFunc() { + return SHA1.Id(), true + } + + return 0, false +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519.go new file mode 100644 index 00000000..888767c4 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519.go @@ -0,0 +1,171 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. +package ecc + +import ( + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + x25519lib "github.com/cloudflare/circl/dh/x25519" +) + +type curve25519 struct{} + +func NewCurve25519() *curve25519 { + return &curve25519{} +} + +func (c *curve25519) GetCurveName() string { + return "curve25519" +} + +// MarshalBytePoint encodes the public point from native format, adding the prefix. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6 +func (c *curve25519) MarshalBytePoint(point []byte) []byte { + return append([]byte{0x40}, point...) +} + +// UnmarshalBytePoint decodes the public point to native format, removing the prefix. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6 +func (c *curve25519) UnmarshalBytePoint(point []byte) []byte { + if len(point) != x25519lib.Size+1 { + return nil + } + + // Remove prefix + return point[1:] +} + +// MarshalByteSecret encodes the secret scalar from native format. +// Note that the EC secret scalar differs from the definition of public keys in +// [Curve25519] in two ways: (1) the byte-ordering is big-endian, which is +// more uniform with how big integers are represented in OpenPGP, and (2) the +// leading zeros are truncated. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.1 +// Note that leading zero bytes are stripped later when encoding as an MPI. +func (c *curve25519) MarshalByteSecret(secret []byte) []byte { + d := make([]byte, x25519lib.Size) + copyReversed(d, secret) + + // The following ensures that the private key is a number of the form + // 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of + // the curve. + // + // This masking is done internally in the underlying lib and so is unnecessary + // for security, but OpenPGP implementations require that private keys be + // pre-masked. + d[0] &= 127 + d[0] |= 64 + d[31] &= 248 + + return d +} + +// UnmarshalByteSecret decodes the secret scalar from native format. +// Note that the EC secret scalar differs from the definition of public keys in +// [Curve25519] in two ways: (1) the byte-ordering is big-endian, which is +// more uniform with how big integers are represented in OpenPGP, and (2) the +// leading zeros are truncated. 
+// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.1 +func (c *curve25519) UnmarshalByteSecret(d []byte) []byte { + if len(d) > x25519lib.Size { + return nil + } + + // Ensure truncated leading bytes are re-added + secret := make([]byte, x25519lib.Size) + copyReversed(secret, d) + + return secret +} + +// generateKeyPairBytes Generates a private-public key-pair. +// 'priv' is a private key; a little-endian scalar belonging to the set +// 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of the +// curve. 'pub' is simply 'priv' * G where G is the base point. +// See https://cr.yp.to/ecdh.html and RFC7748, sec 5. +func (c *curve25519) generateKeyPairBytes(rand io.Reader) (priv, pub x25519lib.Key, err error) { + _, err = io.ReadFull(rand, priv[:]) + if err != nil { + return + } + + x25519lib.KeyGen(&pub, &priv) + return +} + +func (c *curve25519) GenerateECDH(rand io.Reader) (point []byte, secret []byte, err error) { + priv, pub, err := c.generateKeyPairBytes(rand) + if err != nil { + return + } + + return pub[:], priv[:], nil +} + +func (c *curve25519) Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) { + // RFC6637 §8: "Generate an ephemeral key pair {v, V=vG}" + // ephemeralPrivate corresponds to `v`. + // ephemeralPublic corresponds to `V`. + ephemeralPrivate, ephemeralPublic, err := c.generateKeyPairBytes(rand) + if err != nil { + return nil, nil, err + } + + // RFC6637 §8: "Obtain the authenticated recipient public key R" + // pubKey corresponds to `R`. + var pubKey x25519lib.Key + copy(pubKey[:], point) + + // RFC6637 §8: "Compute the shared point S = vR" + // "VB = convert point V to the octet string" + // sharedPoint corresponds to `VB`. + var sharedPoint x25519lib.Key + x25519lib.Shared(&sharedPoint, &ephemeralPrivate, &pubKey) + + return ephemeralPublic[:], sharedPoint[:], nil +} + +func (c *curve25519) Decaps(vsG, secret []byte) (sharedSecret []byte, err error) { + var ephemeralPublic, decodedPrivate, sharedPoint x25519lib.Key + // RFC6637 §8: "The decryption is the inverse of the method given." + // All quoted descriptions in comments below describe encryption, and + // the reverse is performed. + // vsG corresponds to `VB` in RFC6637 §8 . + + // RFC6637 §8: "VB = convert point V to the octet string" + copy(ephemeralPublic[:], vsG) + + // decodedPrivate corresponds to `r` in RFC6637 §8 . + copy(decodedPrivate[:], secret) + + // RFC6637 §8: "Note that the recipient obtains the shared secret by calculating + // S = rV = rvG, where (r,R) is the recipient's key pair." + // sharedPoint corresponds to `S`. 
+ x25519lib.Shared(&sharedPoint, &decodedPrivate, &ephemeralPublic) + + return sharedPoint[:], nil +} + +func (c *curve25519) ValidateECDH(point []byte, secret []byte) (err error) { + var pk, sk x25519lib.Key + copy(sk[:], secret) + x25519lib.KeyGen(&pk, &sk) + + if subtle.ConstantTimeCompare(point, pk[:]) == 0 { + return errors.KeyInvalidError("ecc: invalid curve25519 public point") + } + + return nil +} + +func copyReversed(out []byte, in []byte) { + l := len(in) + for i := 0; i < l; i++ { + out[i] = in[l-i-1] + } +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveInfo.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveInfo.go deleted file mode 100644 index f91042fd..00000000 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveInfo.go +++ /dev/null @@ -1,118 +0,0 @@ -package ecc - -import ( - "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" - "crypto/elliptic" - "bytes" - "github.com/ProtonMail/go-crypto/bitcurves" - "github.com/ProtonMail/go-crypto/brainpool" -) - -type SignatureAlgorithm uint8 - -const ( - ECDSA SignatureAlgorithm = 1 - EdDSA SignatureAlgorithm = 2 -) - -type CurveInfo struct { - Name string - Oid *encoding.OID - Curve elliptic.Curve - SigAlgorithm SignatureAlgorithm - CurveType CurveType -} - -var curves = []CurveInfo{ - { - Name: "NIST curve P-256", - Oid: encoding.NewOID([]byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}), - Curve: elliptic.P256(), - CurveType: NISTCurve, - SigAlgorithm: ECDSA, - }, - { - Name: "NIST curve P-384", - Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x22}), - Curve: elliptic.P384(), - CurveType: NISTCurve, - SigAlgorithm: ECDSA, - }, - { - Name: "NIST curve P-521", - Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x23}), - Curve: elliptic.P521(), - CurveType: NISTCurve, - SigAlgorithm: ECDSA, - }, - { - Name: "SecP256k1", - Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x0A}), - Curve: bitcurves.S256(), - CurveType: BitCurve, - SigAlgorithm: ECDSA, - }, - { - Name: "Curve25519", - Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0x97, 0x55, 0x01, 0x05, 0x01}), - Curve: elliptic.P256(),// filler - CurveType: Curve25519, - SigAlgorithm: ECDSA, - }, - { - Name: "Ed25519", - Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01}), - Curve: elliptic.P256(), // filler - CurveType: Curve25519, - SigAlgorithm: EdDSA, - }, - { - Name: "BrainpoolP256r1", - Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x07}), - Curve: brainpool.P256r1(), - CurveType: BrainpoolCurve, - SigAlgorithm: ECDSA, - }, - { - Name: "BrainpoolP384r1", - Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0B}), - Curve: brainpool.P384r1(), - CurveType: BrainpoolCurve, - SigAlgorithm: ECDSA, - }, - { - Name: "BrainpoolP512r1", - Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0D}), - Curve: brainpool.P512r1(), - CurveType: BrainpoolCurve, - SigAlgorithm: ECDSA, - }, -} - -func FindByCurve(curve elliptic.Curve) *CurveInfo { - for _, curveInfo := range curves { - if curveInfo.Curve == curve { - return &curveInfo - } - } - return nil - } - -func FindByOid(oid encoding.Field) *CurveInfo { - var rawBytes = oid.Bytes() - for _, curveInfo := range curves { - if bytes.Equal(curveInfo.Oid.Bytes(), rawBytes) { - return &curveInfo - } - } - return nil -} - -func FindByName(name string) *CurveInfo { - for _, curveInfo := range curves { - if curveInfo.Name == name { 
return &curveInfo - } - } - return nil -} \ No newline at end of file diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveType.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveType.go deleted file mode 100644 index de8bca0a..00000000 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveType.go +++ /dev/null @@ -1,10 +0,0 @@ -package ecc - -type CurveType uint8 - -const ( - NISTCurve CurveType = 1 - Curve25519 CurveType = 2 - BitCurve CurveType = 3 - BrainpoolCurve CurveType = 4 -) \ No newline at end of file diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go new file mode 100644 index 00000000..35751034 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go @@ -0,0 +1,140 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. +package ecc + +import ( + "bytes" + "crypto/elliptic" + "github.com/ProtonMail/go-crypto/bitcurves" + "github.com/ProtonMail/go-crypto/brainpool" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" +) + +type CurveInfo struct { + GenName string + Oid *encoding.OID + Curve Curve +} + +var Curves = []CurveInfo{ + { + // NIST P-256 + GenName: "P256", + Oid: encoding.NewOID([]byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}), + Curve: NewGenericCurve(elliptic.P256()), + }, + { + // NIST P-384 + GenName: "P384", + Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x22}), + Curve: NewGenericCurve(elliptic.P384()), + }, + { + // NIST P-521 + GenName: "P521", + Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x23}), + Curve: NewGenericCurve(elliptic.P521()), + }, + { + // SecP256k1 + GenName: "SecP256k1", + Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x0A}), + Curve: NewGenericCurve(bitcurves.S256()), + }, + { + // Curve25519 + GenName: "Curve25519", + Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0x97, 0x55, 0x01, 0x05, 0x01}), + Curve: NewCurve25519(), + }, + { + // X448 + GenName: "Curve448", + Oid: encoding.NewOID([]byte{0x2B, 0x65, 0x6F}), + Curve: NewX448(), + }, + { + // Ed25519 + GenName: "Curve25519", + Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01}), + Curve: NewEd25519(), + }, + { + // Ed448 + GenName: "Curve448", + Oid: encoding.NewOID([]byte{0x2B, 0x65, 0x71}), + Curve: NewEd448(), + }, + { + // BrainpoolP256r1 + GenName: "BrainpoolP256", + Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x07}), + Curve: NewGenericCurve(brainpool.P256r1()), + }, + { + // BrainpoolP384r1 + GenName: "BrainpoolP384", + Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0B}), + Curve: NewGenericCurve(brainpool.P384r1()), + }, + { + // BrainpoolP512r1 + GenName: "BrainpoolP512", + Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0D}), + Curve: NewGenericCurve(brainpool.P512r1()), + }, +} + +func FindByCurve(curve Curve) *CurveInfo { + for _, curveInfo := range Curves { + if curveInfo.Curve.GetCurveName() == curve.GetCurveName() { + return &curveInfo + } + } + return nil +} + +func FindByOid(oid encoding.Field) *CurveInfo { + var rawBytes = oid.Bytes() + for _, curveInfo := range Curves { + if bytes.Equal(curveInfo.Oid.Bytes(), rawBytes) { + return &curveInfo + } + } + return nil +} + +func FindEdDSAByGenName(curveGenName string) EdDSACurve { + for _, curveInfo := range Curves { + if 
curveInfo.GenName == curveGenName { + curve, ok := curveInfo.Curve.(EdDSACurve) + if ok { + return curve + } + } + } + return nil +} + +func FindECDSAByGenName(curveGenName string) ECDSACurve { + for _, curveInfo := range Curves { + if curveInfo.GenName == curveGenName { + curve, ok := curveInfo.Curve.(ECDSACurve) + if ok { + return curve + } + } + } + return nil +} + +func FindECDHByGenName(curveGenName string) ECDHCurve { + for _, curveInfo := range Curves { + if curveInfo.GenName == curveGenName { + curve, ok := curveInfo.Curve.(ECDHCurve) + if ok { + return curve + } + } + } + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curves.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curves.go new file mode 100644 index 00000000..5ed9c93b --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curves.go @@ -0,0 +1,48 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. +package ecc + +import ( + "io" + "math/big" +) + +type Curve interface { + GetCurveName() string +} + +type ECDSACurve interface { + Curve + MarshalIntegerPoint(x, y *big.Int) []byte + UnmarshalIntegerPoint([]byte) (x, y *big.Int) + MarshalIntegerSecret(d *big.Int) []byte + UnmarshalIntegerSecret(d []byte) *big.Int + GenerateECDSA(rand io.Reader) (x, y, secret *big.Int, err error) + Sign(rand io.Reader, x, y, d *big.Int, hash []byte) (r, s *big.Int, err error) + Verify(x, y *big.Int, hash []byte, r, s *big.Int) bool + ValidateECDSA(x, y *big.Int, secret []byte) error +} + +type EdDSACurve interface { + Curve + MarshalBytePoint(x []byte) []byte + UnmarshalBytePoint([]byte) (x []byte) + MarshalByteSecret(d []byte) []byte + UnmarshalByteSecret(d []byte) []byte + MarshalSignature(sig []byte) (r, s []byte) + UnmarshalSignature(r, s []byte) (sig []byte) + GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) + Sign(publicKey, privateKey, message []byte) (sig []byte, err error) + Verify(publicKey, message, sig []byte) bool + ValidateEdDSA(publicKey, privateKey []byte) (err error) +} +type ECDHCurve interface { + Curve + MarshalBytePoint([]byte) (encoded []byte) + UnmarshalBytePoint(encoded []byte) []byte + MarshalByteSecret(d []byte) []byte + UnmarshalByteSecret(d []byte) []byte + GenerateECDH(rand io.Reader) (point []byte, secret []byte, err error) + Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) + Decaps(ephemeral, secret []byte) (sharedSecret []byte, err error) + ValidateECDH(public []byte, secret []byte) error +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go new file mode 100644 index 00000000..54a08a8a --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go @@ -0,0 +1,112 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. +package ecc + +import ( + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + ed25519lib "github.com/cloudflare/circl/sign/ed25519" +) + +const ed25519Size = 32 + +type ed25519 struct{} + +func NewEd25519() *ed25519 { + return &ed25519{} +} + +func (c *ed25519) GetCurveName() string { + return "ed25519" +} + +// MarshalBytePoint encodes the public point from native format, adding the prefix. 
+// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed25519) MarshalBytePoint(x []byte) []byte { + return append([]byte{0x40}, x...) +} + +// UnmarshalBytePoint decodes a point from prefixed format to native. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed25519) UnmarshalBytePoint(point []byte) (x []byte) { + if len(point) != ed25519lib.PublicKeySize+1 { + return nil + } + + // Return unprefixed + return point[1:] +} + +// MarshalByteSecret encodes a scalar in native format. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed25519) MarshalByteSecret(d []byte) []byte { + return d +} + +// UnmarshalByteSecret decodes a scalar in native format and re-adds the stripped leading zeroes +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed25519) UnmarshalByteSecret(s []byte) (d []byte) { + if len(s) > ed25519lib.SeedSize { + return nil + } + + // Handle stripped leading zeroes + d = make([]byte, ed25519lib.SeedSize) + copy(d[ed25519lib.SeedSize-len(s):], s) + return +} + +// MarshalSignature splits a signature in R and S. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.1 +func (c *ed25519) MarshalSignature(sig []byte) (r, s []byte) { + return sig[:ed25519Size], sig[ed25519Size:] +} + +// UnmarshalSignature decodes R and S in the native format, re-adding the stripped leading zeroes +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.1 +func (c *ed25519) UnmarshalSignature(r, s []byte) (sig []byte) { + // Check size + if len(r) > 32 || len(s) > 32 { + return nil + } + + sig = make([]byte, ed25519lib.SignatureSize) + + // Handle stripped leading zeroes + copy(sig[ed25519Size-len(r):ed25519Size], r) + copy(sig[ed25519lib.SignatureSize-len(s):], s) + return sig +} + +func (c *ed25519) GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) { + pk, sk, err := ed25519lib.GenerateKey(rand) + + if err != nil { + return nil, nil, err + } + + return pk, sk[:ed25519lib.SeedSize], nil +} + +func getEd25519Sk(publicKey, privateKey []byte) ed25519lib.PrivateKey { + return append(privateKey, publicKey...) +} + +func (c *ed25519) Sign(publicKey, privateKey, message []byte) (sig []byte, err error) { + sig = ed25519lib.Sign(getEd25519Sk(publicKey, privateKey), message) + return sig, nil +} + +func (c *ed25519) Verify(publicKey, message, sig []byte) bool { + return ed25519lib.Verify(publicKey, message, sig) +} + +func (c *ed25519) ValidateEdDSA(publicKey, privateKey []byte) (err error) { + priv := getEd25519Sk(publicKey, privateKey) + expectedPriv := ed25519lib.NewKeyFromSeed(priv.Seed()) + if subtle.ConstantTimeCompare(priv, expectedPriv) == 0 { + return errors.KeyInvalidError("ecc: invalid ed25519 secret") + } + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go new file mode 100644 index 00000000..18cd8043 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go @@ -0,0 +1,111 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. 
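The ed25519 signature handling above exists because OpenPGP stores R and S as MPIs, which strip leading zero bytes, while the underlying Ed25519 signature is two fixed 32-byte halves; UnmarshalSignature therefore left-pads each half back to size. A standalone sketch of that round trip (constants chosen to match the ed25519Size used above):

```go
package main

import (
	"bytes"
	"fmt"
)

const halfSize = 32 // ed25519Size: each of R and S is a fixed 32-byte half

func marshalSignature(sig []byte) (r, s []byte) {
	return sig[:halfSize], sig[halfSize:]
}

// unmarshalSignature restores leading zeros that MPI encoding stripped.
func unmarshalSignature(r, s []byte) []byte {
	if len(r) > halfSize || len(s) > halfSize {
		return nil
	}
	sig := make([]byte, 2*halfSize)
	copy(sig[halfSize-len(r):halfSize], r)
	copy(sig[2*halfSize-len(s):], s)
	return sig
}

func main() {
	sig := make([]byte, 2*halfSize)
	sig[3] = 0xAB  // R begins with three zero bytes an MPI would strip
	sig[63] = 0xCD // S is all zeros except its last byte
	r, s := marshalSignature(sig)
	r = bytes.TrimLeft(r, "\x00") // simulate MPI stripping
	s = bytes.TrimLeft(s, "\x00")
	fmt.Println("round-trips:", bytes.Equal(unmarshalSignature(r, s), sig))
}
```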
+package ecc + +import ( + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + ed448lib "github.com/cloudflare/circl/sign/ed448" +) + +type ed448 struct{} + +func NewEd448() *ed448 { + return &ed448{} +} + +func (c *ed448) GetCurveName() string { + return "ed448" +} + +// MarshalBytePoint encodes the public point from native format, adding the prefix. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed448) MarshalBytePoint(x []byte) []byte { + // Return prefixed + return append([]byte{0x40}, x...) +} + +// UnmarshalBytePoint decodes a point from prefixed format to native. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed448) UnmarshalBytePoint(point []byte) (x []byte) { + if len(point) != ed448lib.PublicKeySize+1 { + return nil + } + + // Strip prefix + return point[1:] +} + +// MarshalByteSecret encodes a scalar from native format to prefixed. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed448) MarshalByteSecret(d []byte) []byte { + // Return prefixed + return append([]byte{0x40}, d...) +} + +// UnmarshalByteSecret decodes a scalar from prefixed format to native. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed448) UnmarshalByteSecret(s []byte) (d []byte) { + // Check prefixed size + if len(s) != ed448lib.SeedSize+1 { + return nil + } + + // Strip prefix + return s[1:] +} + +// MarshalSignature splits a signature in R and S, where R is in prefixed native format and +// S is an MPI with value zero. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.2 +func (c *ed448) MarshalSignature(sig []byte) (r, s []byte) { + return append([]byte{0x40}, sig...), []byte{} +} + +// UnmarshalSignature decodes R and S in the native format. Only R is used, in prefixed native format. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.2 +func (c *ed448) UnmarshalSignature(r, s []byte) (sig []byte) { + if len(r) != ed448lib.SignatureSize+1 { + return nil + } + + return r[1:] +} + +func (c *ed448) GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) { + pk, sk, err := ed448lib.GenerateKey(rand) + + if err != nil { + return nil, nil, err + } + + return pk, sk[:ed448lib.SeedSize], nil +} + +func getEd448Sk(publicKey, privateKey []byte) ed448lib.PrivateKey { + return append(privateKey, publicKey...) +} + +func (c *ed448) Sign(publicKey, privateKey, message []byte) (sig []byte, err error) { + // Ed448 is used with the empty string as a context string. + // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-13.7 + sig = ed448lib.Sign(getEd448Sk(publicKey, privateKey), message, "") + + return sig, nil +} + +func (c *ed448) Verify(publicKey, message, sig []byte) bool { + // Ed448 is used with the empty string as a context string.
+ // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-13.7 + return ed448lib.Verify(publicKey, message, sig, "") +} + +func (c *ed448) ValidateEdDSA(publicKey, privateKey []byte) (err error) { + priv := getEd448Sk(publicKey, privateKey) + expectedPriv := ed448lib.NewKeyFromSeed(priv.Seed()) + if subtle.ConstantTimeCompare(priv, expectedPriv) == 0 { + return errors.KeyInvalidError("ecc: invalid ed448 secret") + } + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/generic.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/generic.go new file mode 100644 index 00000000..e28d7c71 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/generic.go @@ -0,0 +1,149 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. +package ecc + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "fmt" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "io" + "math/big" +) + +type genericCurve struct { + Curve elliptic.Curve +} + +func NewGenericCurve(c elliptic.Curve) *genericCurve { + return &genericCurve{ + Curve: c, + } +} + +func (c *genericCurve) GetCurveName() string { + return c.Curve.Params().Name +} + +func (c *genericCurve) MarshalBytePoint(point []byte) []byte { + return point +} + +func (c *genericCurve) UnmarshalBytePoint(point []byte) []byte { + return point +} + +func (c *genericCurve) MarshalIntegerPoint(x, y *big.Int) []byte { + return elliptic.Marshal(c.Curve, x, y) +} + +func (c *genericCurve) UnmarshalIntegerPoint(point []byte) (x, y *big.Int) { + return elliptic.Unmarshal(c.Curve, point) +} + +func (c *genericCurve) MarshalByteSecret(d []byte) []byte { + return d +} + +func (c *genericCurve) UnmarshalByteSecret(d []byte) []byte { + return d +} + +func (c *genericCurve) MarshalIntegerSecret(d *big.Int) []byte { + return d.Bytes() +} + +func (c *genericCurve) UnmarshalIntegerSecret(d []byte) *big.Int { + return new(big.Int).SetBytes(d) +} + +func (c *genericCurve) GenerateECDH(rand io.Reader) (point, secret []byte, err error) { + secret, x, y, err := elliptic.GenerateKey(c.Curve, rand) + if err != nil { + return nil, nil, err + } + + point = elliptic.Marshal(c.Curve, x, y) + return point, secret, nil +} + +func (c *genericCurve) GenerateECDSA(rand io.Reader) (x, y, secret *big.Int, err error) { + priv, err := ecdsa.GenerateKey(c.Curve, rand) + if err != nil { + return + } + + return priv.X, priv.Y, priv.D, nil +} + +func (c *genericCurve) Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) { + xP, yP := elliptic.Unmarshal(c.Curve, point) + if xP == nil { + panic("invalid point") + } + + d, x, y, err := elliptic.GenerateKey(c.Curve, rand) + if err != nil { + return nil, nil, err + } + + vsG := elliptic.Marshal(c.Curve, x, y) + zbBig, _ := c.Curve.ScalarMult(xP, yP, d) + + byteLen := (c.Curve.Params().BitSize + 7) >> 3 + zb := make([]byte, byteLen) + zbBytes := zbBig.Bytes() + copy(zb[byteLen-len(zbBytes):], zbBytes) + + return vsG, zb, nil +} + +func (c *genericCurve) Decaps(ephemeral, secret []byte) (sharedSecret []byte, err error) { + x, y := elliptic.Unmarshal(c.Curve, ephemeral) + zbBig, _ := c.Curve.ScalarMult(x, y, secret) + byteLen := (c.Curve.Params().BitSize + 7) >> 3 + zb := make([]byte, byteLen) + zbBytes := zbBig.Bytes() + copy(zb[byteLen-len(zbBytes):], zbBytes) + + return zb, nil +} + +func (c *genericCurve) Sign(rand io.Reader, x, y, d *big.Int, hash []byte) (r, s *big.Int, err error) { + priv := 
&ecdsa.PrivateKey{D: d, PublicKey: ecdsa.PublicKey{X: x, Y: y, Curve: c.Curve}} + return ecdsa.Sign(rand, priv, hash) +} + +func (c *genericCurve) Verify(x, y *big.Int, hash []byte, r, s *big.Int) bool { + pub := &ecdsa.PublicKey{X: x, Y: y, Curve: c.Curve} + return ecdsa.Verify(pub, hash, r, s) +} + +func (c *genericCurve) validate(xP, yP *big.Int, secret []byte) error { + // the public point should not be at infinity (0,0) + zero := new(big.Int) + if xP.Cmp(zero) == 0 && yP.Cmp(zero) == 0 { + return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): infinity point", c.Curve.Params().Name)) + } + + // re-derive the public point Q' = (X,Y) = dG + // to compare to declared Q in public key + expectedX, expectedY := c.Curve.ScalarBaseMult(secret) + if xP.Cmp(expectedX) != 0 || yP.Cmp(expectedY) != 0 { + return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): invalid point", c.Curve.Params().Name)) + } + + return nil +} + +func (c *genericCurve) ValidateECDSA(xP, yP *big.Int, secret []byte) error { + return c.validate(xP, yP, secret) +} + +func (c *genericCurve) ValidateECDH(point []byte, secret []byte) error { + xP, yP := elliptic.Unmarshal(c.Curve, point) + if xP == nil { + return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): invalid point", c.Curve.Params().Name)) + } + + return c.validate(xP, yP, secret) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go new file mode 100644 index 00000000..df04262e --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go @@ -0,0 +1,107 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. +package ecc + +import ( + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + x448lib "github.com/cloudflare/circl/dh/x448" +) + +type x448 struct{} + +func NewX448() *x448 { + return &x448{} +} + +func (c *x448) GetCurveName() string { + return "x448" +} + +// MarshalBytePoint encodes the public point from native format, adding the prefix. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6 +func (c *x448) MarshalBytePoint(point []byte) []byte { + return append([]byte{0x40}, point...) +} + +// UnmarshalBytePoint decodes a point from prefixed format to native. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6 +func (c *x448) UnmarshalBytePoint(point []byte) []byte { + if len(point) != x448lib.Size+1 { + return nil + } + + return point[1:] +} + +// MarshalByteSecret encodes a scalar from native format to prefixed. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.2 +func (c *x448) MarshalByteSecret(d []byte) []byte { + return append([]byte{0x40}, d...) +} + +// UnmarshalByteSecret decodes a scalar from prefixed format to native.
+// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.2 +func (c *x448) UnmarshalByteSecret(d []byte) []byte { + if len(d) != x448lib.Size+1 { + return nil + } + + // Store without prefix + return d[1:] +} + +func (c *x448) generateKeyPairBytes(rand io.Reader) (sk, pk x448lib.Key, err error) { + if _, err = rand.Read(sk[:]); err != nil { + return + } + + x448lib.KeyGen(&pk, &sk) + return +} + +func (c *x448) GenerateECDH(rand io.Reader) (point []byte, secret []byte, err error) { + priv, pub, err := c.generateKeyPairBytes(rand) + if err != nil { + return + } + + return pub[:], priv[:], nil +} + +func (c *x448) Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) { + var pk, ss x448lib.Key + seed, e, err := c.generateKeyPairBytes(rand) + if err != nil { + return nil, nil, err + } + copy(pk[:], point) + x448lib.Shared(&ss, &seed, &pk) + + return e[:], ss[:], nil +} + +func (c *x448) Decaps(ephemeral, secret []byte) (sharedSecret []byte, err error) { + var ss, sk, e x448lib.Key + + copy(sk[:], secret) + copy(e[:], ephemeral) + x448lib.Shared(&ss, &sk, &e) + + return ss[:], nil +} + +func (c *x448) ValidateECDH(point []byte, secret []byte) error { + var sk, pk, expectedPk x448lib.Key + + copy(pk[:], point) + copy(sk[:], secret) + x448lib.KeyGen(&expectedPk, &sk) + + if subtle.ConstantTimeCompare(expectedPk[:], pk[:]) == 0 { + return errors.KeyInvalidError("ecc: invalid curve25519 public point") + } + + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go index ee39fd6b..c9df9fe2 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go @@ -32,7 +32,7 @@ func NewOID(bytes []byte) *OID { panic("encoding: NewOID argument length is reserved") default: if len(bytes) > maxOID { - panic("encoding: NewOID argment too large") + panic("encoding: NewOID argument too large") } } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go index 0952d8c7..0e71934c 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go @@ -11,12 +11,15 @@ import ( goerrors "errors" "io" "math/big" + "time" "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/ecdsa" + "github.com/ProtonMail/go-crypto/openpgp/eddsa" "github.com/ProtonMail/go-crypto/openpgp/errors" "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" "github.com/ProtonMail/go-crypto/openpgp/packet" - "golang.org/x/crypto/ed25519" ) // NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a @@ -27,11 +30,6 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err creationTime := config.Now() keyLifetimeSecs := config.KeyLifetime() - uid := packet.NewUserId(name, comment, email) - if uid == nil { - return nil, errors.InvalidArgumentError("user id field contained invalid characters") - } - // Generate a primary signing key primaryPrivRaw, err := newSigner(config) if err != nil { @@ -42,28 +40,66 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err primary.UpgradeToV5() } - isPrimaryId := true - selfSignature := 
&packet.Signature{ - Version: primary.PublicKey.Version, - SigType: packet.SigTypePositiveCert, - PubKeyAlgo: primary.PublicKey.PubKeyAlgo, - Hash: config.Hash(), - CreationTime: creationTime, - KeyLifetimeSecs: &keyLifetimeSecs, - IssuerKeyId: &primary.PublicKey.KeyId, - IssuerFingerprint: primary.PublicKey.Fingerprint, - IsPrimaryId: &isPrimaryId, - FlagsValid: true, - FlagSign: true, - FlagCertify: true, - MDC: true, // true by default, see 5.8 vs. 5.14 - AEAD: config.AEAD() != nil, - V5Keys: config != nil && config.V5Keys, + e := &Entity{ + PrimaryKey: &primary.PublicKey, + PrivateKey: primary, + Identities: make(map[string]*Identity), + Subkeys: []Subkey{}, } + err = e.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs) + if err != nil { + return nil, err + } + + // NOTE: No key expiry here, but we will not return this subkey in EncryptionKey() + // if the primary/master key has expired. + err = e.addEncryptionSubkey(config, creationTime, 0) + if err != nil { + return nil, err + } + + return e, nil +} + +func (t *Entity) AddUserId(name, comment, email string, config *packet.Config) error { + creationTime := config.Now() + keyLifetimeSecs := config.KeyLifetime() + return t.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs) +} + +func (t *Entity) addUserId(name, comment, email string, config *packet.Config, creationTime time.Time, keyLifetimeSecs uint32) error { + uid := packet.NewUserId(name, comment, email) + if uid == nil { + return errors.InvalidArgumentError("user id field contained invalid characters") + } + + if _, ok := t.Identities[uid.Id]; ok { + return errors.InvalidArgumentError("user id exist") + } + + primary := t.PrivateKey + + isPrimaryId := len(t.Identities) == 0 + + selfSignature := createSignaturePacket(&primary.PublicKey, packet.SigTypePositiveCert, config) + selfSignature.CreationTime = creationTime + selfSignature.KeyLifetimeSecs = &keyLifetimeSecs + selfSignature.IsPrimaryId = &isPrimaryId + selfSignature.FlagsValid = true + selfSignature.FlagSign = true + selfSignature.FlagCertify = true + selfSignature.SEIPDv1 = true // true by default, see 5.8 vs. 5.14 + selfSignature.SEIPDv2 = config.AEAD() != nil + // Set the PreferredHash for the SelfSignature from the packet.Config. // If it is not the must-implement algorithm from rfc4880bis, append that. - selfSignature.PreferredHash = []uint8{hashToHashId(config.Hash())} + hash, ok := algorithm.HashToHashId(config.Hash()) + if !ok { + return errors.UnsupportedError("unsupported preferred hash function") + } + + selfSignature.PreferredHash = []uint8{hash} if config.Hash() != crypto.SHA256 { selfSignature.PreferredHash = append(selfSignature.PreferredHash, hashToHashId(crypto.SHA256)) } @@ -84,66 +120,30 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err } // And for DefaultMode. 
- selfSignature.PreferredAEAD = []uint8{uint8(config.AEAD().Mode())} - if config.AEAD().Mode() != packet.AEADModeEAX { - selfSignature.PreferredAEAD = append(selfSignature.PreferredAEAD, uint8(packet.AEADModeEAX)) + modes := []uint8{uint8(config.AEAD().Mode())} + if config.AEAD().Mode() != packet.AEADModeOCB { + modes = append(modes, uint8(packet.AEADModeOCB)) + } + + // For preferred (AES256, GCM), we'll generate (AES256, GCM), (AES256, OCB), (AES128, GCM), (AES128, OCB) + for _, cipher := range selfSignature.PreferredSymmetric { + for _, mode := range modes { + selfSignature.PreferredCipherSuites = append(selfSignature.PreferredCipherSuites, [2]uint8{cipher, mode}) + } } // User ID binding signature - err = selfSignature.SignUserId(uid.Id, &primary.PublicKey, primary, config) + err := selfSignature.SignUserId(uid.Id, &primary.PublicKey, primary, config) if err != nil { - return nil, err + return err } - - // Generate an encryption subkey - subPrivRaw, err := newDecrypter(config) - if err != nil { - return nil, err + t.Identities[uid.Id] = &Identity{ + Name: uid.Id, + UserId: uid, + SelfSignature: selfSignature, + Signatures: []*packet.Signature{selfSignature}, } - sub := packet.NewDecrypterPrivateKey(creationTime, subPrivRaw) - sub.IsSubkey = true - sub.PublicKey.IsSubkey = true - if config != nil && config.V5Keys { - sub.UpgradeToV5() - } - - // NOTE: No KeyLifetimeSecs here, but we will not return this subkey in EncryptionKey() - // if the primary/master key has expired. - subKey := Subkey{ - PublicKey: &sub.PublicKey, - PrivateKey: sub, - Sig: &packet.Signature{ - Version: primary.PublicKey.Version, - CreationTime: creationTime, - SigType: packet.SigTypeSubkeyBinding, - PubKeyAlgo: primary.PublicKey.PubKeyAlgo, - Hash: config.Hash(), - FlagsValid: true, - FlagEncryptStorage: true, - FlagEncryptCommunications: true, - IssuerKeyId: &primary.PublicKey.KeyId, - }, - } - - // Subkey binding signature - err = subKey.Sig.SignKey(subKey.PublicKey, primary, config) - if err != nil { - return nil, err - } - - return &Entity{ - PrimaryKey: &primary.PublicKey, - PrivateKey: primary, - Identities: map[string]*Identity{ - uid.Id: &Identity{ - Name: uid.Id, - UserId: uid, - SelfSignature: selfSignature, - Signatures: []*packet.Signature{selfSignature}, - }, - }, - Subkeys: []Subkey{subKey}, - }, nil + return nil } // AddSigningSubkey adds a signing keypair as a subkey to the Entity. 
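The hunks above replace NewEntity's inline self-signature construction with the addUserId helper and expose it as Entity.AddUserId, so additional user IDs can be attached after key generation. Below is a minimal caller-side sketch of that surface, assuming the vendored module resolves on the import path; the name and e-mail addresses are placeholders, and a nil *packet.Config falls back to the library's defaults:

package main

import (
	"bytes"
	"log"

	"github.com/ProtonMail/go-crypto/openpgp"
)

func main() {
	// Generate a fresh entity; a nil config selects the package defaults
	// (a primary signing key plus one encryption subkey, as wired up above).
	e, err := openpgp.NewEntity("Alice", "", "alice@example.com", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Attach a second, non-primary user ID to the same primary key.
	// addUserId marks only the first identity with IsPrimaryId.
	if err := e.AddUserId("Alice Work", "", "alice@work.example", nil); err != nil {
		log.Fatal(err)
	}

	var buf bytes.Buffer
	if err := e.SerializePrivate(&buf, nil); err != nil {
		log.Fatal(err)
	}
	log.Printf("entity has %d identities (%d bytes serialized)", len(e.Identities), buf.Len())
}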
@@ -157,42 +157,30 @@ func (e *Entity) AddSigningSubkey(config *packet.Config) error { return err } sub := packet.NewSignerPrivateKey(creationTime, subPrivRaw) + sub.IsSubkey = true + if config != nil && config.V5Keys { + sub.UpgradeToV5() + } subkey := Subkey{ PublicKey: &sub.PublicKey, PrivateKey: sub, - Sig: &packet.Signature{ - Version: e.PrimaryKey.Version, - CreationTime: creationTime, - KeyLifetimeSecs: &keyLifetimeSecs, - SigType: packet.SigTypeSubkeyBinding, - PubKeyAlgo: e.PrimaryKey.PubKeyAlgo, - Hash: config.Hash(), - FlagsValid: true, - FlagSign: true, - IssuerKeyId: &e.PrimaryKey.KeyId, - EmbeddedSignature: &packet.Signature{ - Version: e.PrimaryKey.Version, - CreationTime: creationTime, - SigType: packet.SigTypePrimaryKeyBinding, - PubKeyAlgo: sub.PublicKey.PubKeyAlgo, - Hash: config.Hash(), - IssuerKeyId: &e.PrimaryKey.KeyId, - }, - }, - } - if config != nil && config.V5Keys { - subkey.PublicKey.UpgradeToV5() } + subkey.Sig = createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyBinding, config) + subkey.Sig.CreationTime = creationTime + subkey.Sig.KeyLifetimeSecs = &keyLifetimeSecs + subkey.Sig.FlagsValid = true + subkey.Sig.FlagSign = true + subkey.Sig.EmbeddedSignature = createSignaturePacket(subkey.PublicKey, packet.SigTypePrimaryKeyBinding, config) + subkey.Sig.EmbeddedSignature.CreationTime = creationTime err = subkey.Sig.EmbeddedSignature.CrossSignKey(subkey.PublicKey, e.PrimaryKey, subkey.PrivateKey, config) if err != nil { return err } - subkey.PublicKey.IsSubkey = true - subkey.PrivateKey.IsSubkey = true - if err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config); err != nil { + err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) + if err != nil { return err } @@ -205,36 +193,33 @@ func (e *Entity) AddSigningSubkey(config *packet.Config) error { func (e *Entity) AddEncryptionSubkey(config *packet.Config) error { creationTime := config.Now() keyLifetimeSecs := config.KeyLifetime() + return e.addEncryptionSubkey(config, creationTime, keyLifetimeSecs) +} +func (e *Entity) addEncryptionSubkey(config *packet.Config, creationTime time.Time, keyLifetimeSecs uint32) error { subPrivRaw, err := newDecrypter(config) if err != nil { return err } sub := packet.NewDecrypterPrivateKey(creationTime, subPrivRaw) + sub.IsSubkey = true + if config != nil && config.V5Keys { + sub.UpgradeToV5() + } subkey := Subkey{ PublicKey: &sub.PublicKey, PrivateKey: sub, - Sig: &packet.Signature{ - Version: e.PrimaryKey.Version, - CreationTime: creationTime, - KeyLifetimeSecs: &keyLifetimeSecs, - SigType: packet.SigTypeSubkeyBinding, - PubKeyAlgo: e.PrimaryKey.PubKeyAlgo, - Hash: config.Hash(), - FlagsValid: true, - FlagEncryptStorage: true, - FlagEncryptCommunications: true, - IssuerKeyId: &e.PrimaryKey.KeyId, - }, - } - if config != nil && config.V5Keys { - subkey.PublicKey.UpgradeToV5() } + subkey.Sig = createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyBinding, config) + subkey.Sig.CreationTime = creationTime + subkey.Sig.KeyLifetimeSecs = &keyLifetimeSecs + subkey.Sig.FlagsValid = true + subkey.Sig.FlagEncryptStorage = true + subkey.Sig.FlagEncryptCommunications = true - subkey.PublicKey.IsSubkey = true - subkey.PrivateKey.IsSubkey = true - if err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config); err != nil { + err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) + if err != nil { return err } @@ -243,7 +228,7 @@ func (e *Entity) AddEncryptionSubkey(config *packet.Config) error { } // Generates a signing key -func newSigner(config 
*packet.Config) (signer crypto.Signer, err error) { +func newSigner(config *packet.Config) (signer interface{}, err error) { switch config.PublicKeyAlgorithm() { case packet.PubKeyAlgoRSA: bits := config.RSAModulusBits() @@ -257,11 +242,27 @@ func newSigner(config *packet.Config) (signer crypto.Signer, err error) { } return rsa.GenerateKey(config.Random(), bits) case packet.PubKeyAlgoEdDSA: - _, priv, err := ed25519.GenerateKey(config.Random()) + curve := ecc.FindEdDSAByGenName(string(config.CurveName())) + if curve == nil { + return nil, errors.InvalidArgumentError("unsupported curve") + } + + priv, err := eddsa.GenerateKey(config.Random(), curve) if err != nil { return nil, err } - return &priv, nil + return priv, nil + case packet.PubKeyAlgoECDSA: + curve := ecc.FindECDSAByGenName(string(config.CurveName())) + if curve == nil { + return nil, errors.InvalidArgumentError("unsupported curve") + } + + priv, err := ecdsa.GenerateKey(config.Random(), curve) + if err != nil { + return nil, err + } + return priv, nil default: return nil, errors.InvalidArgumentError("unsupported public key algorithm") } @@ -281,14 +282,18 @@ func newDecrypter(config *packet.Config) (decrypter interface{}, err error) { return generateRSAKeyWithPrimes(config.Random(), 2, bits, primes) } return rsa.GenerateKey(config.Random(), bits) - case packet.PubKeyAlgoEdDSA: - fallthrough // When passing EdDSA, we generate an ECDH subkey + case packet.PubKeyAlgoEdDSA, packet.PubKeyAlgoECDSA: + fallthrough // When passing EdDSA or ECDSA, we generate an ECDH subkey case packet.PubKeyAlgoECDH: var kdf = ecdh.KDF{ Hash: algorithm.SHA512, Cipher: algorithm.AES256, } - return ecdh.X25519GenerateKey(config.Random(), kdf) + curve := ecc.FindECDHByGenName(string(config.CurveName())) + if curve == nil { + return nil, errors.InvalidArgumentError("unsupported curve") + } + return ecdh.GenerateKey(config.Random(), curve, kdf) default: return nil, errors.InvalidArgumentError("unsupported public key algorithm") } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go index cef3613b..2d7b0cf3 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go @@ -87,25 +87,29 @@ func (e *Entity) PrimaryIdentity() *Identity { } func shouldPreferIdentity(existingId, potentialNewId *Identity) bool { - if (existingId == nil) { + if existingId == nil { return true } - if (len(existingId.Revocations) > len(potentialNewId.Revocations)) { + if len(existingId.Revocations) > len(potentialNewId.Revocations) { return true } - if (len(existingId.Revocations) < len(potentialNewId.Revocations)) { + if len(existingId.Revocations) < len(potentialNewId.Revocations) { return false } - if (existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId && - !(potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId)) { + if existingId.SelfSignature == nil { + return true + } + + if existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId && + !(potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId) { return false } - if (!(existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId) && - potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId) { + if !(existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId) && + 
potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId { return true } @@ -118,6 +122,7 @@ func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { // Fail to find any encryption key if the... i := e.PrimaryIdentity() if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired + i.SelfSignature == nil || // user ID has no self-signature i.SelfSignature.SigExpired(now) || // user ID self-signature has expired e.Revoked(now) || // primary key has been revoked i.Revoked(now) { // user ID has been revoked @@ -145,11 +150,9 @@ func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true } - // If we don't have any candidate subkeys for encryption and - // the primary key doesn't have any usage metadata then we - // assume that the primary key is ok. Or, if the primary key is - // marked as ok to encrypt with, then we can obviously use it. - if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications && + // If we don't have any subkeys for encryption and the primary key + // is marked as OK to encrypt with, then we can use it. + if i.SelfSignature.FlagsValid && i.SelfSignature.FlagEncryptCommunications && e.PrimaryKey.PubKeyAlgo.CanEncrypt() { return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true } @@ -157,6 +160,18 @@ func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { return Key{}, false } +// CertificationKey returns the best candidate Key for certifying a key with this +// Entity. +func (e *Entity) CertificationKey(now time.Time) (Key, bool) { + return e.CertificationKeyById(now, 0) +} + +// CertificationKeyById returns the Key for key certification with this +// Entity and keyID. +func (e *Entity) CertificationKeyById(now time.Time, id uint64) (Key, bool) { + return e.signingKeyByIdUsage(now, id, packet.KeyFlagCertify) +} + // SigningKey return the best candidate Key for signing a message with this // Entity. func (e *Entity) SigningKey(now time.Time) (Key, bool) { @@ -166,9 +181,14 @@ func (e *Entity) SigningKey(now time.Time) (Key, bool) { // SigningKeyById return the Key for signing a message with this // Entity and keyID. func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) { + return e.signingKeyByIdUsage(now, id, packet.KeyFlagSign) +} + +func (e *Entity) signingKeyByIdUsage(now time.Time, id uint64, flags int) (Key, bool) { // Fail to find any signing key if the...
i := e.PrimaryIdentity() if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired + i.SelfSignature == nil || // user ID has no self-signature i.SelfSignature.SigExpired(now) || // user ID self-signature has expired e.Revoked(now) || // primary key has been revoked i.Revoked(now) { // user ID has been revoked @@ -180,7 +200,8 @@ func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) { var maxTime time.Time for idx, subkey := range e.Subkeys { if subkey.Sig.FlagsValid && - subkey.Sig.FlagSign && + (flags&packet.KeyFlagCertify == 0 || subkey.Sig.FlagCertify) && + (flags&packet.KeyFlagSign == 0 || subkey.Sig.FlagSign) && subkey.PublicKey.PubKeyAlgo.CanSign() && !subkey.PublicKey.KeyExpired(subkey.Sig, now) && !subkey.Sig.SigExpired(now) && @@ -197,10 +218,11 @@ func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) { return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true } - // If we have no candidate subkey then we assume that it's ok to sign - // with the primary key. Or, if the primary key is marked as ok to - // sign with, then we can use it. - if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign && + // If we don't have any subkeys for signing and the primary key + // is marked as OK to sign with, then we can use it. + if i.SelfSignature.FlagsValid && + (flags&packet.KeyFlagCertify == 0 || i.SelfSignature.FlagCertify) && + (flags&packet.KeyFlagSign == 0 || i.SelfSignature.FlagSign) && e.PrimaryKey.PubKeyAlgo.CanSign() && (id == 0 || e.PrimaryKey.KeyId == id) { return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true @@ -230,6 +252,44 @@ func (e *Entity) Revoked(now time.Time) bool { return revoked(e.Revocations, now) } +// EncryptPrivateKeys encrypts all non-encrypted keys in the entity with the same key +// derived from the provided passphrase. Public keys and dummy keys are ignored, +// and don't cause an error to be returned. +func (e *Entity) EncryptPrivateKeys(passphrase []byte, config *packet.Config) error { + var keysToEncrypt []*packet.PrivateKey + // Add entity private key to encrypt. + if e.PrivateKey != nil && !e.PrivateKey.Dummy() && !e.PrivateKey.Encrypted { + keysToEncrypt = append(keysToEncrypt, e.PrivateKey) + } + + // Add subkeys to encrypt. + for _, sub := range e.Subkeys { + if sub.PrivateKey != nil && !sub.PrivateKey.Dummy() && !sub.PrivateKey.Encrypted { + keysToEncrypt = append(keysToEncrypt, sub.PrivateKey) + } + } + return packet.EncryptPrivateKeys(keysToEncrypt, passphrase, config) +} + +// DecryptPrivateKeys decrypts all encrypted keys in the entity with the given passphrase. +// Avoids recomputation of similar s2k key derivations. Public keys and dummy keys are ignored, +// and don't cause an error to be returned. +func (e *Entity) DecryptPrivateKeys(passphrase []byte) error { + var keysToDecrypt []*packet.PrivateKey + // Add entity private key to decrypt. + if e.PrivateKey != nil && !e.PrivateKey.Dummy() && e.PrivateKey.Encrypted { + keysToDecrypt = append(keysToDecrypt, e.PrivateKey) + } + + // Add subkeys to decrypt. + for _, sub := range e.Subkeys { + if sub.PrivateKey != nil && !sub.PrivateKey.Dummy() && sub.PrivateKey.Encrypted { + keysToDecrypt = append(keysToDecrypt, sub.PrivateKey) + } + } + return packet.DecryptPrivateKeys(keysToDecrypt, passphrase) +} + // Revoked returns whether the identity has been revoked by a self-signature. // Note that third-party revocation signatures are not supported.
func (i *Identity) Revoked(now time.Time) bool { @@ -277,7 +337,11 @@ func (el EntityList) KeysById(id uint64) (keys []Key) { // the bitwise-OR of packet.KeyFlag* values. func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { for _, key := range el.KeysById(id) { - if key.SelfSignature.FlagsValid && requiredUsage != 0 { + if requiredUsage != 0 { + if key.SelfSignature == nil || !key.SelfSignature.FlagsValid { + continue + } + var usage byte if key.SelfSignature.FlagCertify { usage |= packet.KeyFlagCertify @@ -305,7 +369,7 @@ func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { func (el EntityList) DecryptionKeys() (keys []Key) { for _, e := range el { for _, subKey := range e.Subkeys { - if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { + if subKey.PrivateKey != nil && subKey.Sig.FlagsValid && (subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations}) } } @@ -440,7 +504,7 @@ EachPacket: // Else, ignoring the signature as it does not follow anything // we would know to attach it to. case *packet.PrivateKey: - if pkt.IsSubkey == false { + if !pkt.IsSubkey { packets.Unread(p) break EachPacket } @@ -449,7 +513,7 @@ EachPacket: return nil, err } case *packet.PublicKey: - if pkt.IsSubkey == false { + if !pkt.IsSubkey { packets.Unread(p) break EachPacket } @@ -509,7 +573,6 @@ func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error { return errors.StructuralError("user ID signature with wrong type") } - if sig.CheckKeyIdOrFingerprint(e.PrimaryKey) { if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { return errors.StructuralError("user ID self-signature invalid: " + err.Error()) @@ -615,21 +678,20 @@ func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign boo return } if reSign { + if ident.SelfSignature == nil { + return goerrors.New("openpgp: can't re-sign identity without valid self-signature") + } err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) if err != nil { return } } - for _, revocation := range ident.Revocations { - err := revocation.Serialize(w) + for _, sig := range ident.Signatures { + err = sig.Serialize(w) if err != nil { return err } } - err = ident.SelfSignature.Serialize(w) - if err != nil { - return - } } for _, subkey := range e.Subkeys { err = subkey.PrivateKey.Serialize(w) @@ -713,26 +775,31 @@ func (e *Entity) Serialize(w io.Writer) error { // necessary. // If config is nil, sensible defaults will be used. 
func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error { - if signer.PrivateKey == nil { - return errors.InvalidArgumentError("signing Entity must have a private key") + certificationKey, ok := signer.CertificationKey(config.Now()) + if !ok { + return errors.InvalidArgumentError("no valid certification key found") } - if signer.PrivateKey.Encrypted { + + if certificationKey.PrivateKey.Encrypted { return errors.InvalidArgumentError("signing Entity's private key must be decrypted") } + ident, ok := e.Identities[identity] if !ok { return errors.InvalidArgumentError("given identity string not found in Entity") } - sig := &packet.Signature{ - Version: signer.PrivateKey.Version, - SigType: packet.SigTypeGenericCert, - PubKeyAlgo: signer.PrivateKey.PubKeyAlgo, - Hash: config.Hash(), - CreationTime: config.Now(), - IssuerKeyId: &signer.PrivateKey.KeyId, + sig := createSignaturePacket(certificationKey.PublicKey, packet.SigTypeGenericCert, config) + + signingUserID := config.SigningUserId() + if signingUserID != "" { + if _, ok := signer.Identities[signingUserID]; !ok { + return errors.InvalidArgumentError("signer identity string not found in signer Entity") + } + sig.SignerUserId = &signingUserID } - if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil { + + if err := sig.SignUserId(identity, e.PrimaryKey, certificationKey.PrivateKey, config); err != nil { return err } ident.Signatures = append(ident.Signatures, sig) @@ -743,16 +810,9 @@ func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Co // specified reason code and text (RFC4880 section-5.2.3.23). // If config is nil, sensible defaults will be used. func (e *Entity) RevokeKey(reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error { - revSig := &packet.Signature{ - Version: e.PrimaryKey.Version, - CreationTime: config.Now(), - SigType: packet.SigTypeKeyRevocation, - PubKeyAlgo: e.PrimaryKey.PubKeyAlgo, - Hash: config.Hash(), - RevocationReason: &reason, - RevocationReasonText: reasonText, - IssuerKeyId: &e.PrimaryKey.KeyId, - } + revSig := createSignaturePacket(e.PrimaryKey, packet.SigTypeKeyRevocation, config) + revSig.RevocationReason = &reason + revSig.RevocationReasonText = reasonText if err := revSig.RevokeKey(e.PrimaryKey, e.PrivateKey, config); err != nil { return err @@ -769,16 +829,9 @@ func (e *Entity) RevokeSubkey(sk *Subkey, reason packet.ReasonForRevocation, rea return errors.InvalidArgumentError("given subkey is not associated with this key") } - revSig := &packet.Signature{ - Version: e.PrimaryKey.Version, - CreationTime: config.Now(), - SigType: packet.SigTypeSubkeyRevocation, - PubKeyAlgo: e.PrimaryKey.PubKeyAlgo, - Hash: config.Hash(), - RevocationReason: &reason, - RevocationReasonText: reasonText, - IssuerKeyId: &e.PrimaryKey.KeyId, - } + revSig := createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyRevocation, config) + revSig.RevocationReason = &reason + revSig.RevocationReasonText = reasonText if err := revSig.RevokeSubkey(sk.PublicKey, e.PrivateKey, config); err != nil { return err diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go index a2219e64..108fd096 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go @@ -398,3 +398,141 @@ sr92MA2+k901WeISR7qEzcI0Fdg8AyFAExaEK6VyjP7SXGLwvfisw34OxuZr3qmx 
I+KgLycyeDvGoBj0HCLO3gVaBe4ubVrj5KjhX2PVNEJd3XZRzaXZE2aAMQ== =AmgT -----END PGP PUBLIC KEY BLOCK-----` + +const rsa2048PrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Comment: gpg (GnuPG) 2.2.27 with libgcrypt 1.9.4 + +lQPGBGL07P0BCADL0etN8efyAXA6sL2WfQvHe5wEKYXPWeN2+jiqSppfeRZAOlzP +kZ3U+cloeJriplYvVJwI3ID2aw52Z/TRn8iKRP5eOUFrEgcgl06lazLtOndK7o7p +oBV5mLtHEirFHm6W61fNt10jzM0jx0PV6nseLhFB2J42F1cmU/aBgFo41wjLSZYr +owR+v+O9S5sUXblQF6sEDcY01sBEu09zrIgT49VFwQ1Cvdh9XZEOTQBfdiugoj5a +DS3fAqAka3r1VoQK4eR7/upnYSgSACGeaQ4pUelKku5rpm50gdWTY8ppq0k9e1eT +y2x0OQcW3hWE+j4os1ca0ZEADMdqr/99MOxrABEBAAH+BwMCJWxU4VOZOJ7/I6vX +FxdfBhIBEXlJ52FM3S/oYtXqLhkGyrtmZOeEazVvUtuCe3M3ScHI8xCthcmE8E0j +bi+ZEHPS2NiBZtgHFF27BLn7zZuTc+oD5WKduZdK3463egnyThTqIIMl25WZBuab +k5ycwYrWwBH0jfA4gwJ13ai4pufKC2RM8qIu6YAVPglYBKFLKGvvJHa5vI+LuA0E +K+k35hIic7yVUcQneNnAF2598X5yWiieYnOZpmHlRw1zfbMwOJr3ZNj2v94u7b+L +sTa/1Uv9887Vb6sJp0c2Sh4cwEccoPYkvMqFn3ZrJUr3UdDu1K2vWohPtswzhrYV ++RdPZE5RLoCQufKvlPezk0Pzhzb3bBU7XjUbdGY1nH/EyQeBNp+Gw6qldKvzcBaB +cyOK1c6hPSszpJX93m5UxCN55IeifmcNjmbDh8vGCCdajy6d56qV2n4F3k7vt1J1 +0UlxIGhqijJoaTCX66xjLMC6VXkSz6aHQ35rnXosm/cqPcQshsZTdlfSyWkorfdr +4Hj8viBER26mjYurTMLBKDtUN724ZrR0Ev5jorX9uoKlgl87bDZHty2Ku2S+vR68 +VAvnj6Fi1BYNclnDoqxdRB2z5T9JbWE52HuG83/QsplhEqXxESDxriTyTHMbNxEe +88soVCDh4tgflZFa2ucUr6gEKJKij7jgahARnyaXfPZlQBUAS1YUeILYmN+VR+M/ +sHENpwDWc7TInn8VN638nJV+ScZGMih3AwWZTIoiLju3MMt1K0YZ3NuiqwGH4Jwg +/BbEdTWeCci9y3NEQHQ3uZZ5p6j2CwFVlK11idemCMvAiTVxF+gKdaLMkeCwKxru +J3YzhKEo+iDVYbPYBYizx/EHBn2U5kITQ5SBXzjTaaFMNZJEf9JYsL1ybPB6HOFY +VNVB2KT8CGVwtCJHb2xhbmcgR29waGVyIDxnb2xhbmdAZXhhbXBsZS5vcmc+iQFO +BBMBCgA4FiEEC6K7U7f4qesybTnqSkra7gHusm0FAmL07P0CGwMFCwkIBwIGFQoJ +CAsCBBYCAwECHgECF4AACgkQSkra7gHusm1MvwgAxpClWkeSqIhMQfbiuz0+lOkE +89y1DCFw8bHjZoUf4/4K8hFA3dGkk+q72XFgiyaCpfXxMt6Gi+dN47t+tTv9NIqC +sukbaoJBmJDhN6+djmJOgOYy+FWsW2LAk2LOwKYulpnBZdcA5rlMAhBg7gevQpF+ +ruSU69P7UUaFJl/DC7hDmaIcj+4cjBE/HO26SnVQjoTfjZT82rDh1Wsuf8LnkJUk +b3wezBLpXKjDvdHikdv4gdlR4AputVM38aZntYYglh/EASo5TneyZ7ZscdLNRdcF +r5O2fKqrOJLOdaoYRFZZWOvP5GtEVFDU7WGivOSVfiszBE0wZR3dgZRJipHCXJ0D +xgRi9Oz9AQgAtMJcJqLLVANJHl90tWuoizDkm+Imcwq2ubQAjpclnNrODnDK+7o4 +pBsWmXbZSdkC4gY+LhOQA6bPDD0JEHM58DOnrm49BddxXAyK0HPsk4sGGt2SS86B +OawWNdfJVyqw4bAiHWDmQg4PcjBbt3ocOIxAR6I5kBSiQVxuGQs9T+Zvg3G1r3Or +fS6DzlgY3HFUML5YsGH4lOxNSOoKAP68GIH/WNdUZ+feiRg9knIib6I3Hgtf5eO8 +JRH7aWE/TD7eNu36bLLjT5TZPq5r6xaD2plbtPOyXbNPWs9qI1yG+VnErfaLY0w8 +Qo0aqzbgID+CTZVomXSOpOcQseaFKw8ZfQARAQAB/gcDArha6+/+d4OY/w9N32K9 +hFNYt4LufTETMQ+k/sBeaMuAVzmT47DlAXzkrZhGW4dZOtXMu1rXaUwHlqkhEyzL +L4MYEWVXfD+LbZNEK3MEFss6RK+UAMeT/PTV9aA8cXQVPcSJYzfBXHQ1U1hnOgrO +apn92MN8RmkhX8wJLyeWTMMuP4lXByJMmmGo8WvifeRD2kFY4y0WVBDAXJAV4Ljf +Di/bBiwoc5a+gxHuZT2W9ZSxBQJNXdt4Un2IlyZuo58s5MLx2N0EaNJ8PwRUE6fM +RZYO8aZCEPUtINE4njbvsWOMCtrblsMPwZ1B0SiIaWmLaNyGdCNKea+fCIW7kasC +JYMhnLumpUTXg5HNexkCsl7ABWj0PYBflOE61h8EjWpnQ7JBBVKS2ua4lMjwHRX7 +5o5yxym9k5UZNFdGoXVL7xpizCcdGawxTJvwhs3vBqu1ZWYCegOAZWDrOkCyhUpq +8uKMROZFbn+FwE+7tjt+v2ed62FVEvD6g4V3ThCA6mQqeOARfJWN8GZY8BDm8lht +crOXriUkrx+FlrgGtm2CkwjW5/9Xd7AhFpHnQdFeozOHyq1asNSgJF9sNi9Lz94W +skQSVRi0IExxSXYGI3Y0nnAZUe2BAQflYPJdEveSr3sKlUqXiETTA1VXsTPK3kOC +92CbLzj/Hz199jZvywwyu53I+GKMpF42rMq7zxr2oa61YWY4YE/GDezwwys/wLx/ +QpCW4X3ppI7wJjCSSqEV0baYZSSli1ayheS6dxi8QnSpX1Bmpz6gU7m/M9Sns+hl +J7ZvgpjCAiV7KJTjtclr5/S02zP78LTVkoTWoz/6MOTROwaP63VBUXX8pbJhf/vu +DLmNnDk8joMJxoDXWeNU0EnNl4hP7Z/jExRBOEO4oAnUf/Sf6gCWQhL5qcajtg6w +tGv7vx3f2IkBNgQYAQoAIBYhBAuiu1O3+KnrMm056kpK2u4B7rJtBQJi9Oz9AhsM +AAoJEEpK2u4B7rJt6lgIAMBWqP4BCOGnQXBbgJ0+ACVghpkFUXZTb/tXJc8UUvTM 
+8uov6k/RsqDGZrvhhufD7Wwt7j9v7dD7VPp7bPyjVWyimglQzWguTUUqLDGlstYH +5uYv1pzma0ZsAGNqFeGlTLsKOSGKFMH4rB2KfN2n51L8POvtp1y7GKZQbWIWneaB +cZr3BINU5GMvYYU7pAYcoR+mJPdJx5Up3Ocn+bn8Tu1sy9C/ArtCQucazGnoE9u1 +HhNLrh0CdzzX7TNH6TQ8LwPOvq0K5l/WqbN9lE0WBBhMv2HydxhluO8AhU+A5GqC +C+wET7nVDnhoOm/fstIeb7/LN7OYejKPeHdFBJEL9GA= +=u442 +-----END PGP PRIVATE KEY BLOCK-----` + +const curve25519PrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Comment: gpg (GnuPG) 2.2.27 with libgcrypt 1.9.4 + +lFgEYvTtQBYJKwYBBAHaRw8BAQdAxsNXLbrk5xOjpO24VhOMvQ0/F+JcyIkckMDH +X3FIGxcAAQDFOlunZWYuPsCx5JLp78vKqUTfgef9TGG4oD6I/Sa0zBMstCJHb2xh +bmcgR29waGVyIDxnb2xhbmdAZXhhbXBsZS5vcmc+iJAEExYIADgWIQSFQHEOazmo +h1ldII4MvfnLQ4JBNwUCYvTtQAIbAwULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAK +CRAMvfnLQ4JBN5yeAQCKdry8B5ScCPrev2+UByMCss7Sdu5RhomCFsHdNPLcKAEA +8ugei+1owHsV+3cGwWWzKk6sLa8ZN87i3SKuOGp9DQycXQRi9O1AEgorBgEEAZdV +AQUBAQdA5CubPp8l7lrVQ25h7Hx5XN2C8xanRnnpcjzEooCaEA0DAQgHAAD/Rpc+ +sOZUXrFk9HOWB1XU41LoWbDBoG8sP8RWAVYwD5AQRYh4BBgWCAAgFiEEhUBxDms5 +qIdZXSCODL35y0OCQTcFAmL07UACGwwACgkQDL35y0OCQTcvdwEA7lb5g/YisrEf +iq660uwMGoepLUfvtqKzuQ6heYe83y0BAN65Ffg5HYOJzUEi0kZQRf7OhdtuL2kJ +SRXn8DmCTfEB +=cELM +-----END PGP PRIVATE KEY BLOCK-----` + +const curve448PrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Comment: C1DB 65D5 80D7 B922 7254 4B1E A699 9895 FABA CE52 + +xYUEYV2UmRYDK2VxAc9AFyxgh5xnSbyt50TWl558mw9xdMN+/UBLr5+UMP8IsrvV +MdXuTIE8CyaUQKSotHtH2RkYEXj5nsMAAAHPQIbTMSzjIWug8UFECzAex5FHgAgH +gYF3RK+TS8D24wX8kOu2C/NoVxwGY+p+i0JHaB+7yljriSKAGxs6wsBEBB8WCgCD +BYJhXZSZBYkFpI+9AwsJBwkQppmYlfq6zlJHFAAAAAAAHgAgc2FsdEBub3RhdGlv +bnMuc2VxdW9pYS1wZ3Aub3Jn5wSpIutJ5HncJWk4ruUV8GzQF390rR5+qWEAnAoY +akcDFQoIApsBAh4BFiEEwdtl1YDXuSJyVEseppmYlfq6zlIAALzdA5dA/fsgYg/J +qaQriYKaPUkyHL7EB3BXhV2d1h/gk+qJLvXQuU2WEJ/XSs3GrsBRiiZwvPH4o+7b +mleAxjy5wpS523vqrrBR2YZ5FwIku7WS4litSdn4AtVam/TlLdMNIf41CtFeZKBe +c5R5VNdQy8y7qy8AAADNEUN1cnZlNDQ4IE9wdGlvbiA4wsBHBBMWCgCGBYJhXZSZ +BYkFpI+9AwsJBwkQppmYlfq6zlJHFAAAAAAAHgAgc2FsdEBub3RhdGlvbnMuc2Vx +dW9pYS1wZ3Aub3JnD55UsYMzE6OACP+mgw5zvT+BBgol8/uFQjHg4krjUCMDFQoI +ApkBApsBAh4BFiEEwdtl1YDXuSJyVEseppmYlfq6zlIAAPQJA5dA0Xqwzn/0uwCq +RlsOVCB3f5NOj1exKnlBvRw0xT1VBee1yxvlUt5eIAoCxWoRlWBJob3TTkhm9AEA +8dyhwPmyGfWHzPw5NFG3xsXrZdNXNvit9WMVAPcmsyR7teXuDlJItxRAdJJc/qfJ +YVbBFoaNrhYAAADHhQRhXZSZFgMrZXEBz0BL7THZ9MnCLfSPJ1FMLim9eGkQ3Bfn +M3he5rOwO3t14QI1LjI96OjkeJipMgcFAmEP1Bq/ZHGO7oAAAc9AFnE8iNBaT3OU +EFtxkmWHXtdaYMmGGRdopw9JPXr/UxuunDln5o9dxPxf7q7z26zXrZen+qed/Isa +HsDCwSwEGBYKAWsFgmFdlJkFiQWkj70JEKaZmJX6us5SRxQAAAAAAB4AIHNhbHRA +bm90YXRpb25zLnNlcXVvaWEtcGdwLm9yZxREUizdTcepBzgSMOv2VWQCWbl++3CZ +EbgAWDryvSsyApsCwDGgBBkWCgBvBYJhXZSZCRBKo3SL4S5djkcUAAAAAAAeACBz +YWx0QG5vdGF0aW9ucy5zZXF1b2lhLXBncC5vcmemoGTDjmNQiIzw6HOEddvS0OB7 +UZ/P07jM/EVmnYxTlBYhBAxsnkGpx1UCiH6gUUqjdIvhLl2OAAALYQOXQAMB1oKq +OWxSFmvmgCKNcbAAyA3piF5ERIqs4z07oJvqDYrOWt75UsEIH/04gU/vHc4EmfG2 +JDLJgOLlyTUPkL/08f0ydGZPofFQBhn8HkuFFjnNtJ5oz3GIP4cdWMQFaUw0uvjb +PM9Tm3ptENGd6Ts1AAAAFiEEwdtl1YDXuSJyVEseppmYlfq6zlIAAGpTA5dATR6i +U2GrpUcQgpG+JqfAsGmF4yAOhgFxc1UfidFk3nTup3fLgjipkYY170WLRNbyKkVO +Sodx93GAs58rizO1acDAWiLq3cyEPBFXbyFThbcNPcLl+/77Uk/mgkYrPQFAQWdK +1kSRm4SizDBK37K8ChAAAADHhwRhXZSZEgMrZW8Bx0DMhzvhQo+OsXeqQ6QVw4sF +CaexHh6rLohh7TzL3hQSjoJ27fV6JBkIWdn0LfrMlJIDbSv2SLdlgQMBCgkAAcdA +MO7Dc1myF6Co1fAH+EuP+OxhxP/7V6ljuSCZENDfA49tQkzTta+PniG+pOVB2LHb +huyaKBkqiaogo8LAOQQYFgoAeAWCYV2UmQWJBaSPvQkQppmYlfq6zlJHFAAAAAAA +HgAgc2FsdEBub3RhdGlvbnMuc2VxdW9pYS1wZ3Aub3JnEjBMQAmc/2u45u5FQGmB +QAytjSG2LM3JQN+PPVl5vEkCmwwWIQTB22XVgNe5InJUSx6mmZiV+rrOUgAASdYD 
+l0DXEHQ9ykNP2rZP35ET1dmiFagFtTj/hLQcWlg16LqvJNGqOgYXuqTerbiOOt02 +XLCBln+wdewpU4ChEffMUDRBfqfQco/YsMqWV7bHJHAO0eC/DMKCjyU90xdH7R/d +QgqsfguR1PqPuJxpXV4bSr6CGAAAAA== +=MSvh +-----END PGP PRIVATE KEY BLOCK-----` + +const keyWithNotation = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +xVgEY9gIshYJKwYBBAHaRw8BAQdAF25fSM8OpFlXZhop4Qpqo5ywGZ4jgWlR +ppjhIKDthREAAQC+LFpzFcMJYcjxGKzBGHN0Px2jU4d04YSRnFAik+lVVQ6u +zRdUZXN0IDx0ZXN0QGV4YW1wbGUuY29tPsLACgQQFgoAfAUCY9gIsgQLCQcI +CRD/utJOCym8pR0UgAAAAAAQAAR0ZXh0QGV4YW1wbGUuY29tdGVzdB8UAAAA +AAASAARiaW5hcnlAZXhhbXBsZS5jb20AAQIDAxUICgQWAAIBAhkBAhsDAh4B +FiEEEMCQTUVGKgCX5rDQ/7rSTgspvKUAAPl5AP9Npz90LxzrB97Qr2DrGwfG +wuYn4FSYwtuPfZHHeoIabwD/QEbvpQJ/NBb9EAZuow4Rirlt1yv19mmnF+j5 +8yUzhQjHXQRj2AiyEgorBgEEAZdVAQUBAQdARXAo30DmKcyUg6co7OUm0RNT +z9iqFbDBzA8A47JEt1MDAQgHAAD/XKK3lBm0SqMR558HLWdBrNG6NqKuqb5X +joCML987ZNgRD8J4BBgWCAAqBQJj2AiyCRD/utJOCym8pQIbDBYhBBDAkE1F +RioAl+aw0P+60k4LKbylAADRxgEAg7UfBDiDPp5LHcW9D+SgFHk6+GyEU4ev +VppQxdtxPvAA/34snHBX7Twnip1nMt7P4e2hDiw/hwQ7oqioOvc6jMkP +=Z8YJ +-----END PGP PRIVATE KEY BLOCK----- +` diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go index 7350974e..fec41a0e 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go @@ -4,6 +4,14 @@ package packet import "math/bits" +// CipherSuite contains a combination of Cipher and Mode +type CipherSuite struct { + // The cipher function + Cipher CipherFunction + // The AEAD mode of operation. + Mode AEADMode +} + // AEADConfig collects a number of AEAD parameters along with sensible defaults. // A nil AEADConfig is valid and results in all default values. type AEADConfig struct { @@ -15,12 +23,13 @@ type AEADConfig struct { // Mode returns the AEAD mode of operation. func (conf *AEADConfig) Mode() AEADMode { + // If no preference is specified, OCB is used (which is mandatory to implement). if conf == nil || conf.DefaultMode == 0 { - return AEADModeEAX + return AEADModeOCB } + mode := conf.DefaultMode - if mode != AEADModeEAX && mode != AEADModeOCB && - mode != AEADModeExperimentalGCM { + if mode != AEADModeEAX && mode != AEADModeOCB && mode != AEADModeGCM { panic("AEAD mode unsupported") } return mode @@ -28,6 +37,8 @@ func (conf *AEADConfig) Mode() AEADMode { // ChunkSizeByte returns the byte indicating the chunk size. 
The effective // chunk size is computed with the formula uint64(1) << (chunkSizeByte + 6) +// limit to 16 = 4 MiB +// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2 func (conf *AEADConfig) ChunkSizeByte() byte { if conf == nil || conf.ChunkSize == 0 { return 12 // 1 << (12 + 6) == 262144 bytes @@ -38,8 +49,8 @@ func (conf *AEADConfig) ChunkSizeByte() byte { switch { case exponent < 6: exponent = 6 - case exponent > 27: - exponent = 27 + case exponent > 16: + exponent = 16 } return byte(exponent - 6) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go new file mode 100644 index 00000000..cee83bdc --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go @@ -0,0 +1,264 @@ +// Copyright (C) 2019 ProtonTech AG + +package packet + +import ( + "bytes" + "crypto/cipher" + "encoding/binary" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +// aeadCrypter is an AEAD opener/sealer, its configuration, and data for en/decryption. +type aeadCrypter struct { + aead cipher.AEAD + chunkSize int + initialNonce []byte + associatedData []byte // Chunk-independent associated data + chunkIndex []byte // Chunk counter + packetTag packetType // SEIP packet (v2) or AEAD Encrypted Data packet + bytesProcessed int // Amount of plaintext bytes encrypted/decrypted + buffer bytes.Buffer // Buffered bytes across chunks +} + +// computeNonce takes the incremental index and computes an eXclusive OR with +// the least significant 8 bytes of the receivers' initial nonce (see sec. +// 5.16.1 and 5.16.2). It returns the resulting nonce. +func (wo *aeadCrypter) computeNextNonce() (nonce []byte) { + if wo.packetTag == packetTypeSymmetricallyEncryptedIntegrityProtected { + return append(wo.initialNonce, wo.chunkIndex...) + } + + nonce = make([]byte, len(wo.initialNonce)) + copy(nonce, wo.initialNonce) + offset := len(wo.initialNonce) - 8 + for i := 0; i < 8; i++ { + nonce[i+offset] ^= wo.chunkIndex[i] + } + return +} + +// incrementIndex performs an integer increment by 1 of the integer represented by the +// slice, modifying it accordingly. +func (wo *aeadCrypter) incrementIndex() error { + index := wo.chunkIndex + if len(index) == 0 { + return errors.AEADError("Index has length 0") + } + for i := len(index) - 1; i >= 0; i-- { + if index[i] < 255 { + index[i]++ + return nil + } + index[i] = 0 + } + return errors.AEADError("cannot further increment index") +} + +// aeadDecrypter reads and decrypts bytes. It buffers extra decrypted bytes when +// necessary, similar to aeadEncrypter. +type aeadDecrypter struct { + aeadCrypter // Embedded ciphertext opener + reader io.Reader // 'reader' is a partialLengthReader + peekedBytes []byte // Used to detect last chunk + eof bool +} + +// Read decrypts bytes and reads them into dst. It decrypts when necessary and +// buffers extra decrypted bytes. It returns the number of bytes copied into dst +// and an error. 
+func (ar *aeadDecrypter) Read(dst []byte) (n int, err error) { + // Return buffered plaintext bytes from previous calls + if ar.buffer.Len() > 0 { + return ar.buffer.Read(dst) + } + + // Return EOF if we've previously validated the final tag + if ar.eof { + return 0, io.EOF + } + + // Read a chunk + tagLen := ar.aead.Overhead() + cipherChunkBuf := new(bytes.Buffer) + _, errRead := io.CopyN(cipherChunkBuf, ar.reader, int64(ar.chunkSize+tagLen)) + cipherChunk := cipherChunkBuf.Bytes() + if errRead != nil && errRead != io.EOF { + return 0, errRead + } + decrypted, errChunk := ar.openChunk(cipherChunk) + if errChunk != nil { + return 0, errChunk + } + + // Return decrypted bytes, buffering if necessary + if len(dst) < len(decrypted) { + n = copy(dst, decrypted[:len(dst)]) + ar.buffer.Write(decrypted[len(dst):]) + } else { + n = copy(dst, decrypted) + } + + // Check final authentication tag + if errRead == io.EOF { + errChunk := ar.validateFinalTag(ar.peekedBytes) + if errChunk != nil { + return n, errChunk + } + ar.eof = true // Mark EOF for when we've returned all buffered data + } + return +} + +// Close is noOp. The final authentication tag of the stream was already +// checked in the last Read call. In the future, this function could be used to +// wipe the reader and peeked, decrypted bytes, if necessary. +func (ar *aeadDecrypter) Close() (err error) { + return nil +} + +// openChunk decrypts and checks integrity of an encrypted chunk, returning +// the underlying plaintext and an error. It accesses peeked bytes from next +// chunk, to identify the last chunk and decrypt/validate accordingly. +func (ar *aeadDecrypter) openChunk(data []byte) ([]byte, error) { + tagLen := ar.aead.Overhead() + // Restore carried bytes from last call + chunkExtra := append(ar.peekedBytes, data...) + // 'chunk' contains encrypted bytes, followed by an authentication tag. + chunk := chunkExtra[:len(chunkExtra)-tagLen] + ar.peekedBytes = chunkExtra[len(chunkExtra)-tagLen:] + + adata := ar.associatedData + if ar.aeadCrypter.packetTag == packetTypeAEADEncrypted { + adata = append(ar.associatedData, ar.chunkIndex...) + } + + nonce := ar.computeNextNonce() + plainChunk, err := ar.aead.Open(nil, nonce, chunk, adata) + if err != nil { + return nil, err + } + ar.bytesProcessed += len(plainChunk) + if err = ar.aeadCrypter.incrementIndex(); err != nil { + return nil, err + } + return plainChunk, nil +} + +// Checks the summary tag. It takes into account the total decrypted bytes into +// the associated data. It returns an error, or nil if the tag is valid. +func (ar *aeadDecrypter) validateFinalTag(tag []byte) error { + // Associated: tag, version, cipher, aead, chunk size, ... + amountBytes := make([]byte, 8) + binary.BigEndian.PutUint64(amountBytes, uint64(ar.bytesProcessed)) + + adata := ar.associatedData + if ar.aeadCrypter.packetTag == packetTypeAEADEncrypted { + // ... index ... + adata = append(ar.associatedData, ar.chunkIndex...) + } + + // ... and total number of encrypted octets + adata = append(adata, amountBytes...) + nonce := ar.computeNextNonce() + _, err := ar.aead.Open(nil, nonce, tag, adata) + if err != nil { + return err + } + return nil +} + +// aeadEncrypter encrypts and writes bytes. It encrypts when necessary according +// to the AEAD block size, and buffers the extra encrypted bytes for next write. +type aeadEncrypter struct { + aeadCrypter // Embedded plaintext sealer + writer io.WriteCloser // 'writer' is a partialLengthWriter +} + +// Write encrypts and writes bytes. 
It encrypts when necessary and buffers extra +// plaintext bytes for next call. When the stream is finished, Close() MUST be +// called to append the final tag. +func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) { + // Append plaintextBytes to existing buffered bytes + n, err = aw.buffer.Write(plaintextBytes) + if err != nil { + return n, err + } + // Encrypt and write chunks + for aw.buffer.Len() >= aw.chunkSize { + plainChunk := aw.buffer.Next(aw.chunkSize) + encryptedChunk, err := aw.sealChunk(plainChunk) + if err != nil { + return n, err + } + _, err = aw.writer.Write(encryptedChunk) + if err != nil { + return n, err + } + } + return +} + +// Close encrypts and writes the remaining buffered plaintext if any, appends +// the final authentication tag, and closes the embedded writer. This function +// MUST be called at the end of a stream. +func (aw *aeadEncrypter) Close() (err error) { + // Encrypt and write a chunk if there's buffered data left, or if we haven't + // written any chunks yet. + if aw.buffer.Len() > 0 || aw.bytesProcessed == 0 { + plainChunk := aw.buffer.Bytes() + lastEncryptedChunk, err := aw.sealChunk(plainChunk) + if err != nil { + return err + } + _, err = aw.writer.Write(lastEncryptedChunk) + if err != nil { + return err + } + } + // Compute final tag (associated data: packet tag, version, cipher, aead, + // chunk size... + adata := aw.associatedData + + if aw.aeadCrypter.packetTag == packetTypeAEADEncrypted { + // ... index ... + adata = append(aw.associatedData, aw.chunkIndex...) + } + + // ... and total number of encrypted octets + amountBytes := make([]byte, 8) + binary.BigEndian.PutUint64(amountBytes, uint64(aw.bytesProcessed)) + adata = append(adata, amountBytes...) + + nonce := aw.computeNextNonce() + finalTag := aw.aead.Seal(nil, nonce, nil, adata) + _, err = aw.writer.Write(finalTag) + if err != nil { + return err + } + return aw.writer.Close() +} + +// sealChunk Encrypts and authenticates the given chunk. +func (aw *aeadEncrypter) sealChunk(data []byte) ([]byte, error) { + if len(data) > aw.chunkSize { + return nil, errors.AEADError("chunk exceeds maximum length") + } + if aw.associatedData == nil { + return nil, errors.AEADError("can't seal without headers") + } + adata := aw.associatedData + if aw.aeadCrypter.packetTag == packetTypeAEADEncrypted { + adata = append(aw.associatedData, aw.chunkIndex...) + } + + nonce := aw.computeNextNonce() + encrypted := aw.aead.Seal(nil, nonce, data, adata) + aw.bytesProcessed += len(data) + if err := aw.aeadCrypter.incrementIndex(); err != nil { + return nil, err + } + return encrypted, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go index 85c53f08..98bd876b 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go @@ -3,17 +3,14 @@ package packet import ( - "bytes" - "crypto/cipher" - "crypto/rand" - "encoding/binary" "io" "github.com/ProtonMail/go-crypto/openpgp/errors" "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" ) -// AEADEncrypted represents an AEAD Encrypted Packet (tag 20, RFC4880bis-5.16). +// AEADEncrypted represents an AEAD Encrypted Packet. 
+// See https://www.ietf.org/archive/id/draft-koch-openpgp-2015-rfc4880bis-00.html#name-aead-encrypted-data-packet-t type AEADEncrypted struct { cipher CipherFunction mode AEADMode @@ -25,33 +22,6 @@ type AEADEncrypted struct { // Only currently defined version const aeadEncryptedVersion = 1 -// An AEAD opener/sealer, its configuration, and data for en/decryption. -type aeadCrypter struct { - aead cipher.AEAD - chunkSize int - initialNonce []byte - associatedData []byte // Chunk-independent associated data - chunkIndex []byte // Chunk counter - bytesProcessed int // Amount of plaintext bytes encrypted/decrypted - buffer bytes.Buffer // Buffered bytes accross chunks -} - -// aeadEncrypter encrypts and writes bytes. It encrypts when necessary according -// to the AEAD block size, and buffers the extra encrypted bytes for next write. -type aeadEncrypter struct { - aeadCrypter // Embedded plaintext sealer - writer io.WriteCloser // 'writer' is a partialLengthWriter -} - -// aeadDecrypter reads and decrypts bytes. It buffers extra decrypted bytes when -// necessary, similar to aeadEncrypter. -type aeadDecrypter struct { - aeadCrypter // Embedded ciphertext opener - reader io.Reader // 'reader' is a partialLengthReader - peekedBytes []byte // Used to detect last chunk - eof bool -} - func (ae *AEADEncrypted) parse(buf io.Reader) error { headerData := make([]byte, 4) if n, err := io.ReadFull(buf, headerData); n < 4 { @@ -59,10 +29,14 @@ func (ae *AEADEncrypted) parse(buf io.Reader) error { } // Read initial nonce mode := AEADMode(headerData[2]) - nonceLen := mode.NonceLength() - if nonceLen == 0 { + nonceLen := mode.IvLength() + + // This packet supports only EAX and OCB + // https://www.ietf.org/archive/id/draft-koch-openpgp-2015-rfc4880bis-00.html#name-aead-encrypted-data-packet-t + if nonceLen == 0 || mode > AEADModeOCB { return errors.AEADError("unknown mode") } + initialNonce := make([]byte, nonceLen) if n, err := io.ReadFull(buf, initialNonce); n < nonceLen { return errors.AEADError("could not read aead nonce:" + err.Error()) @@ -75,7 +49,7 @@ func (ae *AEADEncrypted) parse(buf io.Reader) error { } ae.cipher = CipherFunction(c) ae.mode = mode - ae.chunkSizeByte = byte(headerData[3]) + ae.chunkSizeByte = headerData[3] return nil } @@ -105,225 +79,13 @@ func (ae *AEADEncrypted) decrypt(key []byte) (io.ReadCloser, error) { initialNonce: ae.initialNonce, associatedData: ae.associatedData(), chunkIndex: make([]byte, 8), + packetTag: packetTypeAEADEncrypted, }, reader: ae.Contents, peekedBytes: peekedBytes}, nil } -// Read decrypts bytes and reads them into dst. It decrypts when necessary and -// buffers extra decrypted bytes. It returns the number of bytes copied into dst -// and an error. 
-func (ar *aeadDecrypter) Read(dst []byte) (n int, err error) { - // Return buffered plaintext bytes from previous calls - if ar.buffer.Len() > 0 { - return ar.buffer.Read(dst) - } - - // Return EOF if we've previously validated the final tag - if ar.eof { - return 0, io.EOF - } - - // Read a chunk - tagLen := ar.aead.Overhead() - cipherChunkBuf := new(bytes.Buffer) - _, errRead := io.CopyN(cipherChunkBuf, ar.reader, int64(ar.chunkSize + tagLen)) - cipherChunk := cipherChunkBuf.Bytes() - if errRead != nil && errRead != io.EOF { - return 0, errRead - } - decrypted, errChunk := ar.openChunk(cipherChunk) - if errChunk != nil { - return 0, errChunk - } - - // Return decrypted bytes, buffering if necessary - if len(dst) < len(decrypted) { - n = copy(dst, decrypted[:len(dst)]) - ar.buffer.Write(decrypted[len(dst):]) - } else { - n = copy(dst, decrypted) - } - - // Check final authentication tag - if errRead == io.EOF { - errChunk := ar.validateFinalTag(ar.peekedBytes) - if errChunk != nil { - return n, errChunk - } - ar.eof = true // Mark EOF for when we've returned all buffered data - } - return -} - -// Close is noOp. The final authentication tag of the stream was already -// checked in the last Read call. In the future, this function could be used to -// wipe the reader and peeked, decrypted bytes, if necessary. -func (ar *aeadDecrypter) Close() (err error) { - return nil -} - -// SerializeAEADEncrypted initializes the aeadCrypter and returns a writer. -// This writer encrypts and writes bytes (see aeadEncrypter.Write()). -func SerializeAEADEncrypted(w io.Writer, key []byte, cipher CipherFunction, mode AEADMode, config *Config) (io.WriteCloser, error) { - writeCloser := noOpCloser{w} - writer, err := serializeStreamHeader(writeCloser, packetTypeAEADEncrypted) - if err != nil { - return nil, err - } - - // Data for en/decryption: tag, version, cipher, aead mode, chunk size - aeadConf := config.AEAD() - prefix := []byte{ - 0xD4, - aeadEncryptedVersion, - byte(config.Cipher()), - byte(aeadConf.Mode()), - aeadConf.ChunkSizeByte(), - } - n, err := writer.Write(prefix[1:]) - if err != nil || n < 4 { - return nil, errors.AEADError("could not write AEAD headers") - } - // Sample nonce - nonceLen := aeadConf.Mode().NonceLength() - nonce := make([]byte, nonceLen) - n, err = rand.Read(nonce) - if err != nil { - panic("Could not sample random nonce") - } - _, err = writer.Write(nonce) - if err != nil { - return nil, err - } - blockCipher := CipherFunction(config.Cipher()).new(key) - alg := AEADMode(aeadConf.Mode()).new(blockCipher) - - chunkSize := decodeAEADChunkSize(aeadConf.ChunkSizeByte()) - return &aeadEncrypter{ - aeadCrypter: aeadCrypter{ - aead: alg, - chunkSize: chunkSize, - associatedData: prefix, - chunkIndex: make([]byte, 8), - initialNonce: nonce, - }, - writer: writer}, nil -} - -// Write encrypts and writes bytes. It encrypts when necessary and buffers extra -// plaintext bytes for next call. When the stream is finished, Close() MUST be -// called to append the final tag. 
-func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) { - // Append plaintextBytes to existing buffered bytes - n, err = aw.buffer.Write(plaintextBytes) - if err != nil { - return n, err - } - // Encrypt and write chunks - for aw.buffer.Len() >= aw.chunkSize { - plainChunk := aw.buffer.Next(aw.chunkSize) - encryptedChunk, err := aw.sealChunk(plainChunk) - if err != nil { - return n, err - } - _, err = aw.writer.Write(encryptedChunk) - if err != nil { - return n, err - } - } - return -} - -// Close encrypts and writes the remaining buffered plaintext if any, appends -// the final authentication tag, and closes the embedded writer. This function -// MUST be called at the end of a stream. -func (aw *aeadEncrypter) Close() (err error) { - // Encrypt and write a chunk if there's buffered data left, or if we haven't - // written any chunks yet. - if aw.buffer.Len() > 0 || aw.bytesProcessed == 0 { - plainChunk := aw.buffer.Bytes() - lastEncryptedChunk, err := aw.sealChunk(plainChunk) - if err != nil { - return err - } - _, err = aw.writer.Write(lastEncryptedChunk) - if err != nil { - return err - } - } - // Compute final tag (associated data: packet tag, version, cipher, aead, - // chunk size, index, total number of encrypted octets). - adata := append(aw.associatedData[:], aw.chunkIndex[:]...) - adata = append(adata, make([]byte, 8)...) - binary.BigEndian.PutUint64(adata[13:], uint64(aw.bytesProcessed)) - nonce := aw.computeNextNonce() - finalTag := aw.aead.Seal(nil, nonce, nil, adata) - _, err = aw.writer.Write(finalTag) - if err != nil { - return err - } - return aw.writer.Close() -} - -// sealChunk Encrypts and authenticates the given chunk. -func (aw *aeadEncrypter) sealChunk(data []byte) ([]byte, error) { - if len(data) > aw.chunkSize { - return nil, errors.AEADError("chunk exceeds maximum length") - } - if aw.associatedData == nil { - return nil, errors.AEADError("can't seal without headers") - } - adata := append(aw.associatedData, aw.chunkIndex...) - nonce := aw.computeNextNonce() - encrypted := aw.aead.Seal(nil, nonce, data, adata) - aw.bytesProcessed += len(data) - if err := aw.aeadCrypter.incrementIndex(); err != nil { - return nil, err - } - return encrypted, nil -} - -// openChunk decrypts and checks integrity of an encrypted chunk, returning -// the underlying plaintext and an error. It access peeked bytes from next -// chunk, to identify the last chunk and decrypt/validate accordingly. -func (ar *aeadDecrypter) openChunk(data []byte) ([]byte, error) { - tagLen := ar.aead.Overhead() - // Restore carried bytes from last call - chunkExtra := append(ar.peekedBytes, data...) - // 'chunk' contains encrypted bytes, followed by an authentication tag. - chunk := chunkExtra[:len(chunkExtra)-tagLen] - ar.peekedBytes = chunkExtra[len(chunkExtra)-tagLen:] - adata := append(ar.associatedData, ar.chunkIndex...) - nonce := ar.computeNextNonce() - plainChunk, err := ar.aead.Open(nil, nonce, chunk, adata) - if err != nil { - return nil, err - } - ar.bytesProcessed += len(plainChunk) - if err = ar.aeadCrypter.incrementIndex(); err != nil { - return nil, err - } - return plainChunk, nil -} - -// Checks the summary tag. It takes into account the total decrypted bytes into -// the associated data. It returns an error, or nil if the tag is valid. 
-func (ar *aeadDecrypter) validateFinalTag(tag []byte) error { - // Associated: tag, version, cipher, aead, chunk size, index, and octets - amountBytes := make([]byte, 8) - binary.BigEndian.PutUint64(amountBytes, uint64(ar.bytesProcessed)) - adata := append(ar.associatedData, ar.chunkIndex...) - adata = append(adata, amountBytes...) - nonce := ar.computeNextNonce() - _, err := ar.aead.Open(nil, nonce, tag, adata) - if err != nil { - return err - } - return nil -} - -// Associated data for chunks: tag, version, cipher, mode, chunk size byte +// associatedData for chunks: tag, version, cipher, mode, chunk size byte func (ae *AEADEncrypted) associatedData() []byte { return []byte{ 0xD4, @@ -332,33 +94,3 @@ func (ae *AEADEncrypted) associatedData() []byte { byte(ae.mode), ae.chunkSizeByte} } - -// computeNonce takes the incremental index and computes an eXclusive OR with -// the least significant 8 bytes of the receivers' initial nonce (see sec. -// 5.16.1 and 5.16.2). It returns the resulting nonce. -func (wo *aeadCrypter) computeNextNonce() (nonce []byte) { - nonce = make([]byte, len(wo.initialNonce)) - copy(nonce, wo.initialNonce) - offset := len(wo.initialNonce) - 8 - for i := 0; i < 8; i++ { - nonce[i+offset] ^= wo.chunkIndex[i] - } - return -} - -// incrementIndex perfoms an integer increment by 1 of the integer represented by the -// slice, modifying it accordingly. -func (wo *aeadCrypter) incrementIndex() error { - index := wo.chunkIndex - if len(index) == 0 { - return errors.AEADError("Index has length 0") - } - for i := len(index) - 1; i >= 0; i-- { - if index[i] < 255 { - index[i]++ - return nil - } - index[i] = 0 - } - return errors.AEADError("cannot further increment index") -} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go index c55cecd3..2f5cad71 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go @@ -47,6 +47,8 @@ func (c *Compressed) parse(r io.Reader) error { } switch buf[0] { + case 0: + c.Body = r case 1: c.Body = flate.NewReader(r) case 2: diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go index 5652dd81..04994bec 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go @@ -10,6 +10,8 @@ import ( "io" "math/big" "time" + + "github.com/ProtonMail/go-crypto/openpgp/s2k" ) // Config collects a number of parameters along with sensible defaults. @@ -33,16 +35,24 @@ type Config struct { DefaultCompressionAlgo CompressionAlgo // CompressionConfig configures the compression settings. CompressionConfig *CompressionConfig - // S2KCount is only used for symmetric encryption. It - // determines the strength of the passphrase stretching when + // S2K (String to Key) config, used for key derivation in the context of secret key encryption + // and password-encrypted data. + // If nil, the default configuration is used + S2KConfig *s2k.Config + // Iteration count for Iterated S2K (String to Key). + // Only used if sk2.Mode is nil. + // This value is duplicated here from s2k.Config for backwards compatibility. + // It determines the strength of the passphrase stretching when // the said passphrase is hashed to produce a key. S2KCount - // should be between 1024 and 65011712, inclusive. 
If Config
-	// is nil or S2KCount is 0, the value 65536 used. Not all
+	// should be between 65536 and 65011712, inclusive. If Config
+	// is nil or S2KCount is 0, the value 16777216 is used. Not all
 	// values in the above range can be represented. S2KCount will
 	// be rounded up to the next representable value if it cannot
 	// be encoded exactly. When set, it is strongly encouraged to
 	// use a value that is at least 65536. See RFC 4880 Section
 	// 3.7.1.3.
+	//
+	// Deprecated: S2KCount should be configured in S2KConfig instead.
 	S2KCount int
 	// RSABits is the number of bits in new RSA keys made with NewEntity.
 	// If zero, then 2048 bit keys are created.
@@ -52,6 +62,9 @@
 	Algorithm PublicKeyAlgorithm
 	// Some known primes that are optionally prepopulated by the caller
 	RSAPrimes []*big.Int
+	// Curve configures the desired packet.Curve if the Algorithm is PubKeyAlgoECDSA,
+	// PubKeyAlgoEdDSA, or PubKeyAlgoECDH. If empty, Curve25519 is used.
+	Curve Curve
 	// AEADConfig configures the use of the new AEAD Encrypted Data Packet,
 	// defined in the draft of the next version of the OpenPGP specification.
 	// If a non-nil AEADConfig is passed, usage of this packet is enabled. By
@@ -78,6 +91,25 @@
 	// By default, the signing key is selected automatically, preferring
 	// signing subkeys if available.
 	SigningKeyId uint64
+	// SigningIdentity is used to specify a user ID (packet Signer's User ID, type 28)
+	// when producing a generic certification signature onto an existing user ID.
+	// The identity must be present in the signer Entity.
+	SigningIdentity string
+	// InsecureAllowUnauthenticatedMessages controls whether it is tolerated to read
+	// encrypted messages without a Modification Detection Code (MDC).
+	// MDC is mandated by the IETF OpenPGP Crypto Refresh draft and has long been implemented
+	// in most OpenPGP implementations. Messages without MDC are considered unnecessarily
+	// insecure and should be prevented whenever possible.
+	// In case one needs to deal with messages from very old OpenPGP implementations, there
+	// might be no other way than to tolerate the missing MDC. Setting this flag allows this
+	// mode of operation. It should be considered a measure of last resort.
+	InsecureAllowUnauthenticatedMessages bool
+	// KnownNotations is a map of Notation Data names to bools, which controls
+	// the notation names that are allowed to be present in critical Notation Data
+	// signature subpackets.
+	KnownNotations map[string]bool
+	// SignatureNotations is a list of Notations to be added to any signatures.
+	SignatureNotations []*Notation
 }

 func (c *Config) Random() io.Reader {
@@ -103,9 +135,9 @@ func (c *Config) Cipher() CipherFunction {

 func (c *Config) Now() time.Time {
 	if c == nil || c.Time == nil {
-		return time.Now()
+		return time.Now().Truncate(time.Second)
 	}
-	return c.Time()
+	return c.Time().Truncate(time.Second)
 }

 // KeyLifetime returns the validity period of the key.
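For orientation, a minimal sketch of how a caller might populate the new Config fields introduced in the hunks above. exampleConfig is a hypothetical helper, the import paths follow this vendored tree, and the chosen values are illustrative assumptions rather than recommended defaults:

package example

import (
	"crypto"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
	"github.com/ProtonMail/go-crypto/openpgp/s2k"
)

// exampleConfig exercises the fields added in the hunks above.
func exampleConfig() *packet.Config {
	return &packet.Config{
		// S2KConfig supersedes the deprecated S2KCount field.
		S2KConfig: &s2k.Config{
			S2KMode:  s2k.IteratedSaltedS2K,
			S2KCount: 65536,
			Hash:     crypto.SHA256,
		},
		// Curve selects the ECC curve for newly generated ECDSA/EdDSA/ECDH keys.
		Curve: packet.Curve25519,
		// Kept false, so messages without an MDC remain rejected.
		InsecureAllowUnauthenticatedMessages: false,
	}
}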
@@ -131,13 +163,6 @@ func (c *Config) Compression() CompressionAlgo { return c.DefaultCompressionAlgo } -func (c *Config) PasswordHashIterations() int { - if c == nil || c.S2KCount == 0 { - return 0 - } - return c.S2KCount -} - func (c *Config) RSAModulusBits() int { if c == nil || c.RSABits == 0 { return 2048 @@ -152,6 +177,34 @@ func (c *Config) PublicKeyAlgorithm() PublicKeyAlgorithm { return c.Algorithm } +func (c *Config) CurveName() Curve { + if c == nil || c.Curve == "" { + return Curve25519 + } + return c.Curve +} + +// Deprecated: The hash iterations should now be queried via the S2K() method. +func (c *Config) PasswordHashIterations() int { + if c == nil || c.S2KCount == 0 { + return 0 + } + return c.S2KCount +} + +func (c *Config) S2K() *s2k.Config { + if c == nil { + return nil + } + // for backwards compatibility + if c != nil && c.S2KCount > 0 && c.S2KConfig == nil { + return &s2k.Config{ + S2KCount: c.S2KCount, + } + } + return c.S2KConfig +} + func (c *Config) AEAD() *AEADConfig { if c == nil { return nil @@ -165,3 +218,31 @@ func (c *Config) SigningKey() uint64 { } return c.SigningKeyId } + +func (c *Config) SigningUserId() string { + if c == nil { + return "" + } + return c.SigningIdentity +} + +func (c *Config) AllowUnauthenticatedMessages() bool { + if c == nil { + return false + } + return c.InsecureAllowUnauthenticatedMessages +} + +func (c *Config) KnownNotation(notationName string) bool { + if c == nil { + return false + } + return c.KnownNotations[notationName] +} + +func (c *Config) Notations() []*Notation { + if c == nil { + return nil + } + return c.SignatureNotations +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go index 801aec92..eeff2902 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go @@ -25,7 +25,7 @@ const encryptedKeyVersion = 3 type EncryptedKey struct { KeyId uint64 Algo PublicKeyAlgorithm - CipherFunc CipherFunction // only valid after a successful Decrypt + CipherFunc CipherFunction // only valid after a successful Decrypt for a v3 packet Key []byte // only valid after a successful Decrypt encryptedMPI1, encryptedMPI2 encoding.Field @@ -123,6 +123,10 @@ func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { } e.CipherFunc = CipherFunction(b[0]) + if !e.CipherFunc.IsSupported() { + return errors.UnsupportedError("unsupported encryption function") + } + e.Key = b[1 : len(b)-2] expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1]) checksum := checksumKeyMaterial(e.Key) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/notation.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/notation.go new file mode 100644 index 00000000..2c3e3f50 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/notation.go @@ -0,0 +1,29 @@ +package packet + +// Notation type represents a Notation Data subpacket +// see https://tools.ietf.org/html/rfc4880#section-5.2.3.16 +type Notation struct { + Name string + Value []byte + IsCritical bool + IsHumanReadable bool +} + +func (notation *Notation) getData() []byte { + nameData := []byte(notation.Name) + nameLen := len(nameData) + valueLen := len(notation.Value) + + data := make([]byte, 8+nameLen+valueLen) + if notation.IsHumanReadable { + data[0] = 0x80 + } + + data[4] = byte(nameLen >> 8) + data[5] = byte(nameLen) + data[6] = byte(valueLen 
>> 8) + data[7] = byte(valueLen) + copy(data[8:8+nameLen], nameData) + copy(data[8+nameLen:], notation.Value) + return data +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go index 41c35de2..033fb2d7 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go @@ -8,7 +8,7 @@ import ( "crypto" "encoding/binary" "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/s2k" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" "io" "strconv" ) @@ -37,7 +37,7 @@ func (ops *OnePassSignature) parse(r io.Reader) (err error) { } var ok bool - ops.Hash, ok = s2k.HashIdToHash(buf[2]) + ops.Hash, ok = algorithm.HashIdToHashWithSha1(buf[2]) if !ok { return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) } @@ -55,7 +55,7 @@ func (ops *OnePassSignature) Serialize(w io.Writer) error { buf[0] = onePassSignatureVersion buf[1] = uint8(ops.SigType) var ok bool - buf[2], ok = s2k.HashToHashId(ops.Hash) + buf[2], ok = algorithm.HashToHashIdWithSha1(ops.Hash) if !ok { return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go index e3879199..4f820407 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go @@ -84,8 +84,9 @@ func (or *OpaqueReader) Next() (op *OpaquePacket, err error) { // OpaqueSubpacket represents an unparsed OpenPGP subpacket, // as found in signature and user attribute packets. type OpaqueSubpacket struct { - SubType uint8 - Contents []byte + SubType uint8 + EncodedLength []byte // Store the original encoded length for signature verifications. 
+ Contents []byte } // OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from @@ -109,6 +110,7 @@ func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) { func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) { // RFC 4880, section 5.2.3.1 var subLen uint32 + var encodedLength []byte if len(contents) < 1 { goto Truncated } @@ -119,6 +121,7 @@ func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacke if len(contents) < subHeaderLen { goto Truncated } + encodedLength = contents[0:1] subLen = uint32(contents[0]) contents = contents[1:] case contents[0] < 255: @@ -126,6 +129,7 @@ func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacke if len(contents) < subHeaderLen { goto Truncated } + encodedLength = contents[0:2] subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192 contents = contents[2:] default: @@ -133,16 +137,19 @@ func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacke if len(contents) < subHeaderLen { goto Truncated } + encodedLength = contents[0:5] subLen = uint32(contents[1])<<24 | uint32(contents[2])<<16 | uint32(contents[3])<<8 | uint32(contents[4]) contents = contents[5:] + } if subLen > uint32(len(contents)) || subLen == 0 { goto Truncated } subPacket.SubType = contents[0] + subPacket.EncodedLength = encodedLength subPacket.Contents = contents[1:subLen] return Truncated: @@ -152,7 +159,9 @@ Truncated: func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) { buf := make([]byte, 6) - n := serializeSubpacketLength(buf, len(osp.Contents)+1) + copy(buf, osp.EncodedLength) + n := len(osp.EncodedLength) + buf[n] = osp.SubType if _, err = w.Write(buf[:n+1]); err != nil { return diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go index 65203813..4d86a7da 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go @@ -302,21 +302,21 @@ func consumeAll(r io.Reader) (n int64, err error) { type packetType uint8 const ( - packetTypeEncryptedKey packetType = 1 - packetTypeSignature packetType = 2 - packetTypeSymmetricKeyEncrypted packetType = 3 - packetTypeOnePassSignature packetType = 4 - packetTypePrivateKey packetType = 5 - packetTypePublicKey packetType = 6 - packetTypePrivateSubkey packetType = 7 - packetTypeCompressed packetType = 8 - packetTypeSymmetricallyEncrypted packetType = 9 - packetTypeLiteralData packetType = 11 - packetTypeUserId packetType = 13 - packetTypePublicSubkey packetType = 14 - packetTypeUserAttribute packetType = 17 - packetTypeSymmetricallyEncryptedMDC packetType = 18 - packetTypeAEADEncrypted packetType = 20 + packetTypeEncryptedKey packetType = 1 + packetTypeSignature packetType = 2 + packetTypeSymmetricKeyEncrypted packetType = 3 + packetTypeOnePassSignature packetType = 4 + packetTypePrivateKey packetType = 5 + packetTypePublicKey packetType = 6 + packetTypePrivateSubkey packetType = 7 + packetTypeCompressed packetType = 8 + packetTypeSymmetricallyEncrypted packetType = 9 + packetTypeLiteralData packetType = 11 + packetTypeUserId packetType = 13 + packetTypePublicSubkey packetType = 14 + packetTypeUserAttribute packetType = 17 + packetTypeSymmetricallyEncryptedIntegrityProtected packetType = 18 + packetTypeAEADEncrypted packetType = 20 ) // EncryptedDataPacket holds encrypted data. 
It is currently implemented by @@ -354,16 +354,16 @@ func Read(r io.Reader) (p Packet, err error) { case packetTypeCompressed: p = new(Compressed) case packetTypeSymmetricallyEncrypted: - err = errors.UnsupportedError("Symmetrically encrypted packets without MDC are not supported") + p = new(SymmetricallyEncrypted) case packetTypeLiteralData: p = new(LiteralData) case packetTypeUserId: p = new(UserId) case packetTypeUserAttribute: p = new(UserAttribute) - case packetTypeSymmetricallyEncryptedMDC: + case packetTypeSymmetricallyEncryptedIntegrityProtected: se := new(SymmetricallyEncrypted) - se.MDC = true + se.IntegrityProtected = true p = se case packetTypeAEADEncrypted: p = new(AEADEncrypted) @@ -384,18 +384,18 @@ func Read(r io.Reader) (p Packet, err error) { type SignatureType uint8 const ( - SigTypeBinary SignatureType = 0x00 - SigTypeText = 0x01 - SigTypeGenericCert = 0x10 - SigTypePersonaCert = 0x11 - SigTypeCasualCert = 0x12 - SigTypePositiveCert = 0x13 - SigTypeSubkeyBinding = 0x18 - SigTypePrimaryKeyBinding = 0x19 - SigTypeDirectSignature = 0x1F - SigTypeKeyRevocation = 0x20 - SigTypeSubkeyRevocation = 0x28 - SigTypeCertificationRevocation = 0x30 + SigTypeBinary SignatureType = 0x00 + SigTypeText = 0x01 + SigTypeGenericCert = 0x10 + SigTypePersonaCert = 0x11 + SigTypeCasualCert = 0x12 + SigTypePositiveCert = 0x13 + SigTypeSubkeyBinding = 0x18 + SigTypePrimaryKeyBinding = 0x19 + SigTypeDirectSignature = 0x1F + SigTypeKeyRevocation = 0x20 + SigTypeSubkeyRevocation = 0x28 + SigTypeCertificationRevocation = 0x30 ) // PublicKeyAlgorithm represents the different public key system specified for @@ -455,6 +455,11 @@ func (cipher CipherFunction) KeySize() int { return algorithm.CipherFunction(cipher).KeySize() } +// IsSupported returns true if the cipher is supported from the library +func (cipher CipherFunction) IsSupported() bool { + return algorithm.CipherFunction(cipher).KeySize() > 0 +} + // blockSize returns the block size, in bytes, of cipher. func (cipher CipherFunction) blockSize() int { return algorithm.CipherFunction(cipher).BlockSize() @@ -490,15 +495,16 @@ const ( // AEADMode represents the different Authenticated Encryption with Associated // Data specified for OpenPGP. +// See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.6 type AEADMode algorithm.AEADMode const ( - AEADModeEAX AEADMode = 1 - AEADModeOCB AEADMode = 2 - AEADModeExperimentalGCM AEADMode = 100 + AEADModeEAX AEADMode = 1 + AEADModeOCB AEADMode = 2 + AEADModeGCM AEADMode = 3 ) -func (mode AEADMode) NonceLength() int { +func (mode AEADMode) IvLength() int { return algorithm.AEADMode(mode).NonceLength() } @@ -521,3 +527,25 @@ const ( KeyCompromised ReasonForRevocation = 2 KeyRetired ReasonForRevocation = 3 ) + +// Curve is a mapping to supported ECC curves for key generation. 
+// See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-06.html#name-curve-specific-wire-formats +type Curve string + +const ( + Curve25519 Curve = "Curve25519" + Curve448 Curve = "Curve448" + CurveNistP256 Curve = "P256" + CurveNistP384 Curve = "P384" + CurveNistP521 Curve = "P521" + CurveSecP256k1 Curve = "SecP256k1" + CurveBrainpoolP256 Curve = "BrainpoolP256" + CurveBrainpoolP384 Curve = "BrainpoolP384" + CurveBrainpoolP512 Curve = "BrainpoolP512" +) + +// TrustLevel represents a trust level per RFC4880 5.2.3.13 +type TrustLevel uint8 + +// TrustAmount represents a trust amount per RFC4880 5.2.3.13 +type TrustAmount uint8 diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go index 854f9c4b..2fc43864 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go @@ -9,27 +9,22 @@ import ( "crypto" "crypto/cipher" "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" "crypto/rand" "crypto/rsa" "crypto/sha1" - "fmt" "io" "io/ioutil" "math/big" "strconv" "time" - "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" - "golang.org/x/crypto/curve25519" - "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/ecdsa" + "github.com/ProtonMail/go-crypto/openpgp/eddsa" "github.com/ProtonMail/go-crypto/openpgp/elgamal" "github.com/ProtonMail/go-crypto/openpgp/errors" "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" "github.com/ProtonMail/go-crypto/openpgp/s2k" - "golang.org/x/crypto/ed25519" ) // PrivateKey represents a possibly encrypted private key. See RFC 4880, @@ -54,7 +49,7 @@ type PrivateKey struct { s2kParams *s2k.Params } -//S2KType s2k packet type +// S2KType s2k packet type type S2KType uint8 const ( @@ -94,10 +89,9 @@ func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *Private return pk } -func NewEdDSAPrivateKey(creationTime time.Time, priv *ed25519.PrivateKey) *PrivateKey { +func NewEdDSAPrivateKey(creationTime time.Time, priv *eddsa.PrivateKey) *PrivateKey { pk := new(PrivateKey) - pub := priv.Public().(ed25519.PublicKey) - pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pub) + pk.PublicKey = *NewEdDSAPublicKey(creationTime, &priv.PublicKey) pk.PrivateKey = priv return pk } @@ -111,25 +105,25 @@ func NewECDHPrivateKey(creationTime time.Time, priv *ecdh.PrivateKey) *PrivateKe // NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that // implements RSA, ECDSA or EdDSA. -func NewSignerPrivateKey(creationTime time.Time, signer crypto.Signer) *PrivateKey { +func NewSignerPrivateKey(creationTime time.Time, signer interface{}) *PrivateKey { pk := new(PrivateKey) // In general, the public Keys should be used as pointers. We still // type-switch on the values, for backwards-compatibility. 
- switch pubkey := signer.Public().(type) { - case *rsa.PublicKey: - pk.PublicKey = *NewRSAPublicKey(creationTime, pubkey) - case rsa.PublicKey: - pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey) - case *ecdsa.PublicKey: - pk.PublicKey = *NewECDSAPublicKey(creationTime, pubkey) - case ecdsa.PublicKey: - pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey) - case *ed25519.PublicKey: - pk.PublicKey = *NewEdDSAPublicKey(creationTime, pubkey) - case ed25519.PublicKey: - pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey) + switch pubkey := signer.(type) { + case *rsa.PrivateKey: + pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey.PublicKey) + case rsa.PrivateKey: + pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey.PublicKey) + case *ecdsa.PrivateKey: + pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey.PublicKey) + case ecdsa.PrivateKey: + pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey.PublicKey) + case *eddsa.PrivateKey: + pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey.PublicKey) + case eddsa.PrivateKey: + pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey.PublicKey) default: - panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey") + panic("openpgp: unknown signer type in NewSignerPrivateKey") } pk.PrivateKey = signer return pk @@ -185,6 +179,9 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) { return } pk.cipher = CipherFunction(buf[0]) + if pk.cipher != 0 && !pk.cipher.IsSupported() { + return errors.UnsupportedError("unsupported cipher function in private key") + } pk.s2kParams, err = s2k.ParseIntoParams(r) if err != nil { return @@ -239,6 +236,18 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) { } } if !pk.Encrypted { + if len(privateKeyData) < 2 { + return errors.StructuralError("truncated private key data") + } + var sum uint16 + for i := 0; i < len(privateKeyData)-2; i++ { + sum += uint16(privateKeyData[i]) + } + if privateKeyData[len(privateKeyData)-2] != uint8(sum>>8) || + privateKeyData[len(privateKeyData)-1] != uint8(sum) { + return errors.StructuralError("private key checksum failure") + } + privateKeyData = privateKeyData[:len(privateKeyData)-2] return pk.parsePrivateKey(privateKeyData) } @@ -294,14 +303,8 @@ func (pk *PrivateKey) Serialize(w io.Writer) (err error) { return err } l = buf.Len() - if pk.sha1Checksum { - h := sha1.New() - h.Write(buf.Bytes()) - buf.Write(h.Sum(nil)) - } else { - checksum := mod64kHash(buf.Bytes()) - buf.Write([]byte{byte(checksum >> 8), byte(checksum)}) - } + checksum := mod64kHash(buf.Bytes()) + buf.Write([]byte{byte(checksum >> 8), byte(checksum)}) priv = buf.Bytes() } else { priv, l = pk.encryptedData, len(pk.encryptedData) @@ -353,23 +356,22 @@ func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error { } func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { - _, err := w.Write(new(encoding.MPI).SetBig(priv.D).EncodedBytes()) + _, err := w.Write(encoding.NewMPI(priv.MarshalIntegerSecret()).EncodedBytes()) return err } -func serializeEdDSAPrivateKey(w io.Writer, priv *ed25519.PrivateKey) error { - keySize := ed25519.PrivateKeySize - ed25519.PublicKeySize - _, err := w.Write(encoding.NewMPI((*priv)[:keySize]).EncodedBytes()) +func serializeEdDSAPrivateKey(w io.Writer, priv *eddsa.PrivateKey) error { + _, err := w.Write(encoding.NewMPI(priv.MarshalByteSecret()).EncodedBytes()) return err } func serializeECDHPrivateKey(w io.Writer, priv *ecdh.PrivateKey) error { - _, err := w.Write(encoding.NewMPI(priv.D).EncodedBytes()) + _, err := 
w.Write(encoding.NewMPI(priv.MarshalByteSecret()).EncodedBytes())
 	return err
 }

-// Decrypt decrypts an encrypted private key using a passphrase.
-func (pk *PrivateKey) Decrypt(passphrase []byte) error {
+// decrypt decrypts an encrypted private key using a decryption key.
+func (pk *PrivateKey) decrypt(decryptionKey []byte) error {
 	if pk.Dummy() {
 		return errors.ErrDummyPrivateKey("dummy key found")
 	}
@@ -377,9 +379,7 @@
 		return nil
 	}

-	key := make([]byte, pk.cipher.KeySize())
-	pk.s2k(key, passphrase)
-	block := pk.cipher.new(key)
+	block := pk.cipher.new(decryptionKey)
 	cfb := cipher.NewCFBDecrypter(block, pk.iv)

 	data := make([]byte, len(pk.encryptedData))
@@ -428,35 +428,79 @@
 	return nil
 }

-// Encrypt encrypts an unencrypted private key using a passphrase.
-func (pk *PrivateKey) Encrypt(passphrase []byte) error {
+func (pk *PrivateKey) decryptWithCache(passphrase []byte, keyCache *s2k.Cache) error {
+	if pk.Dummy() {
+		return errors.ErrDummyPrivateKey("dummy key found")
+	}
+	if !pk.Encrypted {
+		return nil
+	}
+
+	key, err := keyCache.GetOrComputeDerivedKey(passphrase, pk.s2kParams, pk.cipher.KeySize())
+	if err != nil {
+		return err
+	}
+	return pk.decrypt(key)
+}
+
+// Decrypt decrypts an encrypted private key using a passphrase.
+func (pk *PrivateKey) Decrypt(passphrase []byte) error {
+	if pk.Dummy() {
+		return errors.ErrDummyPrivateKey("dummy key found")
+	}
+	if !pk.Encrypted {
+		return nil
+	}
+
+	key := make([]byte, pk.cipher.KeySize())
+	pk.s2k(key, passphrase)
+	return pk.decrypt(key)
+}
+
+// DecryptPrivateKeys decrypts all encrypted keys with the given passphrase.
+// Avoids recomputation of similar s2k key derivations.
+func DecryptPrivateKeys(keys []*PrivateKey, passphrase []byte) error {
+	// Create a cache to avoid recomputation of key derivations for the same passphrase.
+	s2kCache := &s2k.Cache{}
+	for _, key := range keys {
+		if key != nil && !key.Dummy() && key.Encrypted {
+			err := key.decryptWithCache(passphrase, s2kCache)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// encrypt encrypts an unencrypted private key.
+func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, cipherFunction CipherFunction) error { + if pk.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + if pk.Encrypted { + return nil + } + // check if encryptionKey has the correct size + if len(key) != cipherFunction.KeySize() { + return errors.InvalidArgumentError("supplied encryption key has the wrong size") + } + priv := bytes.NewBuffer(nil) err := pk.serializePrivateKey(priv) if err != nil { return err } - //Default config of private key encryption - pk.cipher = CipherAES256 - s2kConfig := &s2k.Config{ - S2KMode: 3, //Iterated - S2KCount: 65536, - Hash: crypto.SHA256, - } - - pk.s2kParams, err = s2k.Generate(rand.Reader, s2kConfig) - if err != nil { - return err - } - privateKeyBytes := priv.Bytes() - key := make([]byte, pk.cipher.KeySize()) - - pk.sha1Checksum = true + pk.cipher = cipherFunction + pk.s2kParams = params pk.s2k, err = pk.s2kParams.Function() if err != nil { return err - } - pk.s2k(key, passphrase) + } + + privateKeyBytes := priv.Bytes() + pk.sha1Checksum = true block := pk.cipher.new(key) pk.iv = make([]byte, pk.cipher.blockSize()) _, err = rand.Read(pk.iv) @@ -487,6 +531,62 @@ func (pk *PrivateKey) Encrypt(passphrase []byte) error { return err } +// EncryptWithConfig encrypts an unencrypted private key using the passphrase and the config. +func (pk *PrivateKey) EncryptWithConfig(passphrase []byte, config *Config) error { + params, err := s2k.Generate(config.Random(), config.S2K()) + if err != nil { + return err + } + // Derive an encryption key with the configured s2k function. + key := make([]byte, config.Cipher().KeySize()) + s2k, err := params.Function() + if err != nil { + return err + } + s2k(key, passphrase) + // Encrypt the private key with the derived encryption key. + return pk.encrypt(key, params, config.Cipher()) +} + +// EncryptPrivateKeys encrypts all unencrypted keys with the given config and passphrase. +// Only derives one key from the passphrase, which is then used to encrypt each key. +func EncryptPrivateKeys(keys []*PrivateKey, passphrase []byte, config *Config) error { + params, err := s2k.Generate(config.Random(), config.S2K()) + if err != nil { + return err + } + // Derive an encryption key with the configured s2k function. + encryptionKey := make([]byte, config.Cipher().KeySize()) + s2k, err := params.Function() + if err != nil { + return err + } + s2k(encryptionKey, passphrase) + for _, key := range keys { + if key != nil && !key.Dummy() && !key.Encrypted { + err = key.encrypt(encryptionKey, params, config.Cipher()) + if err != nil { + return err + } + } + } + return nil +} + +// Encrypt encrypts an unencrypted private key using a passphrase. 
+func (pk *PrivateKey) Encrypt(passphrase []byte) error { + // Default config of private key encryption + config := &Config{ + S2KConfig: &s2k.Config{ + S2KMode: s2k.IteratedSaltedS2K, + S2KCount: 65536, + Hash: crypto.SHA256, + } , + DefaultCipher: CipherAES256, + } + return pk.EncryptWithConfig(passphrase, config) +} + func (pk *PrivateKey) serializePrivateKey(w io.Writer) (err error) { switch priv := pk.PrivateKey.(type) { case *rsa.PrivateKey: @@ -497,7 +597,7 @@ func (pk *PrivateKey) serializePrivateKey(w io.Writer) (err error) { err = serializeElGamalPrivateKey(w, priv) case *ecdsa.PrivateKey: err = serializeECDSAPrivateKey(w, priv) - case *ed25519.PrivateKey: + case *eddsa.PrivateKey: err = serializeEdDSAPrivateKey(w, priv) case *ecdh.PrivateKey: err = serializeECDHPrivateKey(w, priv) @@ -601,8 +701,7 @@ func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) { func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) - ecdsaPriv := new(ecdsa.PrivateKey) - ecdsaPriv.PublicKey = *ecdsaPub + ecdsaPriv := ecdsa.NewPrivateKey(*ecdsaPub) buf := bytes.NewBuffer(data) d := new(encoding.MPI) @@ -610,8 +709,10 @@ func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { return err } - ecdsaPriv.D = new(big.Int).SetBytes(d.Bytes()) - if err := validateECDSAParameters(ecdsaPriv); err != nil { + if err := ecdsaPriv.UnmarshalIntegerSecret(d.Bytes()); err != nil { + return err + } + if err := ecdsa.Validate(ecdsaPriv); err != nil { return err } pk.PrivateKey = ecdsaPriv @@ -621,8 +722,7 @@ func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { func (pk *PrivateKey) parseECDHPrivateKey(data []byte) (err error) { ecdhPub := pk.PublicKey.PublicKey.(*ecdh.PublicKey) - ecdhPriv := new(ecdh.PrivateKey) - ecdhPriv.PublicKey = *ecdhPub + ecdhPriv := ecdh.NewPrivateKey(*ecdhPub) buf := bytes.NewBuffer(data) d := new(encoding.MPI) @@ -630,18 +730,23 @@ func (pk *PrivateKey) parseECDHPrivateKey(data []byte) (err error) { return err } - ecdhPriv.D = d.Bytes() - if err := validateECDHParameters(ecdhPriv); err != nil { + if err := ecdhPriv.UnmarshalByteSecret(d.Bytes()); err != nil { return err } + + if err := ecdh.Validate(ecdhPriv); err != nil { + return err + } + pk.PrivateKey = ecdhPriv return nil } func (pk *PrivateKey) parseEdDSAPrivateKey(data []byte) (err error) { - eddsaPub := pk.PublicKey.PublicKey.(*ed25519.PublicKey) - eddsaPriv := make(ed25519.PrivateKey, ed25519.PrivateKeySize) + eddsaPub := pk.PublicKey.PublicKey.(*eddsa.PublicKey) + eddsaPriv := eddsa.NewPrivateKey(*eddsaPub) + eddsaPriv.PublicKey = *eddsaPub buf := bytes.NewBuffer(data) d := new(encoding.MPI) @@ -649,64 +754,16 @@ func (pk *PrivateKey) parseEdDSAPrivateKey(data []byte) (err error) { return err } - priv := d.Bytes() - copy(eddsaPriv[32-len(priv):32], priv) - copy(eddsaPriv[32:], (*eddsaPub)[:]) - if err := validateEdDSAParameters(&eddsaPriv); err != nil { + if err = eddsaPriv.UnmarshalByteSecret(d.Bytes()); err != nil { return err } - pk.PrivateKey = &eddsaPriv - return nil -} + if err := eddsa.Validate(eddsaPriv); err != nil { + return err + } -func validateECDSAParameters(priv *ecdsa.PrivateKey) error { - return validateCommonECC(priv.Curve, priv.D.Bytes(), priv.X, priv.Y) -} + pk.PrivateKey = eddsaPriv -func validateECDHParameters(priv *ecdh.PrivateKey) error { - if priv.CurveType != ecc.Curve25519 { - return validateCommonECC(priv.Curve, priv.D, priv.X, priv.Y) - } - // Handle Curve25519 - Q := priv.X.Bytes()[1:] - var 
d [32]byte - // Copy reversed d - l := len(priv.D) - for i := 0; i < l; i++ { - d[i] = priv.D[l-i-1] - } - var expectedQ [32]byte - curve25519.ScalarBaseMult(&expectedQ, &d) - if !bytes.Equal(Q, expectedQ[:]) { - return errors.KeyInvalidError("ECDH curve25519: invalid point") - } - return nil -} - -func validateCommonECC(curve elliptic.Curve, d []byte, X, Y *big.Int) error { - // the public point should not be at infinity (0,0) - zero := new(big.Int) - if X.Cmp(zero) == 0 && Y.Cmp(zero) == 0 { - return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): infinity point", curve.Params().Name)) - } - // re-derive the public point Q' = (X,Y) = dG - // to compare to declared Q in public key - expectedX, expectedY := curve.ScalarBaseMult(d) - if X.Cmp(expectedX) != 0 || Y.Cmp(expectedY) != 0 { - return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): invalid point", curve.Params().Name)) - } - return nil -} - -func validateEdDSAParameters(priv *ed25519.PrivateKey) error { - // In EdDSA, the serialized public point is stored as part of private key (together with the seed), - // hence we can re-derive the key from the seed - seed := priv.Seed() - expectedPriv := ed25519.NewKeyFromSeed(seed) - if !bytes.Equal(*priv, expectedPriv) { - return errors.KeyInvalidError("eddsa: invalid point") - } return nil } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go index 297dc40b..3402b8c1 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go @@ -7,8 +7,6 @@ package packet import ( "crypto" "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" "crypto/rsa" "crypto/sha1" "crypto/sha256" @@ -22,12 +20,13 @@ import ( "time" "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/ecdsa" + "github.com/ProtonMail/go-crypto/openpgp/eddsa" "github.com/ProtonMail/go-crypto/openpgp/elgamal" "github.com/ProtonMail/go-crypto/openpgp/errors" "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" - "golang.org/x/crypto/ed25519" ) type kdfHashFunction byte @@ -124,10 +123,10 @@ func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey CreationTime: creationTime, PubKeyAlgo: PubKeyAlgoECDSA, PublicKey: pub, - p: encoding.NewMPI(elliptic.Marshal(pub.Curve, pub.X, pub.Y)), + p: encoding.NewMPI(pub.MarshalPoint()), } - curveInfo := ecc.FindByCurve(pub.Curve) + curveInfo := ecc.FindByCurve(pub.GetCurve()) if curveInfo == nil { panic("unknown elliptic curve") } @@ -138,39 +137,29 @@ func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey func NewECDHPublicKey(creationTime time.Time, pub *ecdh.PublicKey) *PublicKey { var pk *PublicKey - var curveInfo *ecc.CurveInfo var kdf = encoding.NewOID([]byte{0x1, pub.Hash.Id(), pub.Cipher.Id()}) - if pub.CurveType == ecc.Curve25519 { - pk = &PublicKey{ - Version: 4, - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoECDH, - PublicKey: pub, - p: encoding.NewMPI(pub.X.Bytes()), - kdf: kdf, - } - curveInfo = ecc.FindByName("Curve25519") - } else { - pk = &PublicKey{ - Version: 4, - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoECDH, - PublicKey: pub, - p: encoding.NewMPI(elliptic.Marshal(pub.Curve, pub.X, pub.Y)), - kdf: kdf, - } - curveInfo = ecc.FindByCurve(pub.Curve) + pk = &PublicKey{ + Version: 4, + 
CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDH, + PublicKey: pub, + p: encoding.NewMPI(pub.MarshalPoint()), + kdf: kdf, } + + curveInfo := ecc.FindByCurve(pub.GetCurve()) + if curveInfo == nil { panic("unknown elliptic curve") } + pk.oid = curveInfo.Oid pk.setFingerprintAndKeyId() return pk } -func NewEdDSAPublicKey(creationTime time.Time, pub *ed25519.PublicKey) *PublicKey { - curveInfo := ecc.FindByName("Ed25519") +func NewEdDSAPublicKey(creationTime time.Time, pub *eddsa.PublicKey) *PublicKey { + curveInfo := ecc.FindByCurve(pub.GetCurve()) pk := &PublicKey{ Version: 4, CreationTime: creationTime, @@ -178,7 +167,7 @@ func NewEdDSAPublicKey(creationTime time.Time, pub *ed25519.PublicKey) *PublicKe PublicKey: pub, oid: curveInfo.Oid, // Native point format, see draft-koch-eddsa-for-openpgp-04, Appendix B - p: encoding.NewMPI(append([]byte{0x40}, *pub...)), + p: encoding.NewMPI(pub.MarshalPoint()), } pk.setFingerprintAndKeyId() @@ -340,17 +329,20 @@ func (pk *PublicKey) parseECDSA(r io.Reader) (err error) { return } - var c elliptic.Curve curveInfo := ecc.FindByOid(pk.oid) - if curveInfo == nil || curveInfo.SigAlgorithm != ecc.ECDSA { + if curveInfo == nil { + return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) + } + + c, ok := curveInfo.Curve.(ecc.ECDSACurve) + if !ok { return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) } - c = curveInfo.Curve - x, y := elliptic.Unmarshal(c, pk.p.Bytes()) - if x == nil { - return errors.UnsupportedError("failed to parse EC point") - } - pk.PublicKey = &ecdsa.PublicKey{Curve: c, X: x, Y: y} + + ecdsaKey := ecdsa.NewPublicKey(c) + err = ecdsaKey.UnmarshalPoint(pk.p.Bytes()) + pk.PublicKey = ecdsaKey + return } @@ -371,24 +363,16 @@ func (pk *PublicKey) parseECDH(r io.Reader) (err error) { } curveInfo := ecc.FindByOid(pk.oid) + if curveInfo == nil { + return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) + } + + c, ok := curveInfo.Curve.(ecc.ECDHCurve) + if !ok { return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) } - c := curveInfo.Curve - cType := curveInfo.CurveType - - var x, y *big.Int - if cType == ecc.Curve25519 { - x = new(big.Int) - x.SetBytes(pk.p.Bytes()) - } else { - x, y = elliptic.Unmarshal(c, pk.p.Bytes()) - } - if x == nil { - return errors.UnsupportedError("failed to parse EC point") - } - if kdfLen := len(pk.kdf.Bytes()); kdfLen < 3 { return errors.UnsupportedError("unsupported ECDH KDF length: " + strconv.Itoa(kdfLen)) } @@ -404,16 +388,10 @@ func (pk *PublicKey) parseECDH(r io.Reader) (err error) { return errors.UnsupportedError("unsupported ECDH KDF cipher: " + strconv.Itoa(int(pk.kdf.Bytes()[2]))) } - pk.PublicKey = &ecdh.PublicKey{ - CurveType: cType, - Curve: c, - X: x, - Y: y, - KDF: ecdh.KDF{ - Hash: kdfHash, - Cipher: kdfCipher, - }, - } + ecdhKey := ecdh.NewPublicKey(c, kdfHash, kdfCipher) + err = ecdhKey.UnmarshalPoint(pk.p.Bytes()) + pk.PublicKey = ecdhKey + return } @@ -423,26 +401,37 @@ func (pk *PublicKey) parseEdDSA(r io.Reader) (err error) { return } curveInfo := ecc.FindByOid(pk.oid) - if curveInfo == nil || curveInfo.SigAlgorithm != ecc.EdDSA { + if curveInfo == nil { + return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) + } + + c, ok := curveInfo.Curve.(ecc.EdDSACurve) + if !ok { return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) } + pk.p = new(encoding.MPI) if _, err = pk.p.ReadFrom(r); err != nil { return } - eddsa := make(ed25519.PublicKey, ed25519.PublicKeySize) + if 
len(pk.p.Bytes()) == 0 { + return errors.StructuralError("empty EdDSA public key") + } + + pub := eddsa.NewPublicKey(c) + switch flag := pk.p.Bytes()[0]; flag { case 0x04: // TODO: see _grcy_ecc_eddsa_ensure_compact in grcypt return errors.UnsupportedError("unsupported EdDSA compression: " + strconv.Itoa(int(flag))) case 0x40: - copy(eddsa[:], pk.p.Bytes()[1:]) + err = pub.UnmarshalPoint(pk.p.Bytes()) default: return errors.UnsupportedError("unsupported EdDSA compression: " + strconv.Itoa(int(flag))) } - pk.PublicKey = &eddsa + pk.PublicKey = pub return } @@ -611,7 +600,7 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err erro } signed.Write(sig.HashSuffix) hashBytes := signed.Sum(nil) - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + if sig.Version == 5 && (hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1]) { return errors.SignatureError("hash tag doesn't match") } @@ -645,16 +634,8 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err erro } return nil case PubKeyAlgoEdDSA: - eddsaPublicKey := pk.PublicKey.(*ed25519.PublicKey) - - sigR := sig.EdDSASigR.Bytes() - sigS := sig.EdDSASigS.Bytes() - - eddsaSig := make([]byte, ed25519.SignatureSize) - copy(eddsaSig[32-len(sigR):32], sigR) - copy(eddsaSig[64-len(sigS):], sigS) - - if !ed25519.Verify(*eddsaPublicKey, hashBytes, eddsaSig) { + eddsaPublicKey := pk.PublicKey.(*eddsa.PublicKey) + if !eddsa.Verify(eddsaPublicKey, hashBytes, sig.EdDSASigR.Bytes(), sig.EdDSASigS.Bytes()) { return errors.SignatureError("EdDSA verification failure") } return nil diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go index 94ae9ad9..10215fe5 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go @@ -44,9 +44,17 @@ func (r *Reader) Next() (p Packet, err error) { continue } // TODO: Add strict mode that rejects unknown packets, instead of ignoring them. - if _, ok := err.(errors.UnknownPacketTypeError); !ok { - return nil, err + if _, ok := err.(errors.UnknownPacketTypeError); ok { + continue } + if _, ok := err.(errors.UnsupportedError); ok { + switch p.(type) { + case *SymmetricallyEncrypted, *AEADEncrypted, *Compressed, *LiteralData: + return nil, err + } + continue + } + return nil, err } return nil, io.EOF diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go index 6765ea75..80d0bb98 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go @@ -8,18 +8,17 @@ import ( "bytes" "crypto" "crypto/dsa" - "crypto/ecdsa" - "encoding/asn1" "encoding/binary" "hash" "io" - "math/big" "strconv" "time" + "github.com/ProtonMail/go-crypto/openpgp/ecdsa" + "github.com/ProtonMail/go-crypto/openpgp/eddsa" "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" - "github.com/ProtonMail/go-crypto/openpgp/s2k" ) const ( @@ -28,6 +27,10 @@ const ( KeyFlagSign KeyFlagEncryptCommunications KeyFlagEncryptStorage + KeyFlagSplitKey + KeyFlagAuthenticate + _ + KeyFlagGroupKey ) // Signature represents a signature. See RFC 4880, section 5.2. 
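The extended key-flag constants in the hunk above are bit positions within the flags octet of the Key Flags subpacket (RFC 4880, section 5.2.3.21). A self-contained sketch of the resulting bit values and how a flags octet decodes; describeFlags is a hypothetical helper, not part of the patch:

package example

import "fmt"

// Mirror of the flag octet bit positions from the const block above.
const (
	keyFlagCertify               byte = 1 << iota // 0x01
	keyFlagSign                                   // 0x02
	keyFlagEncryptCommunications                  // 0x04
	keyFlagEncryptStorage                         // 0x08
	keyFlagSplitKey                               // 0x10
	keyFlagAuthenticate                           // 0x20
	_                                             // 0x40 (reserved)
	keyFlagGroupKey                               // 0x80
)

// describeFlags reports a few of the capabilities encoded in a flags octet.
func describeFlags(octet byte) string {
	return fmt.Sprintf("sign=%t authenticate=%t group=%t",
		octet&keyFlagSign != 0,
		octet&keyFlagAuthenticate != 0,
		octet&keyFlagGroupKey != 0)
}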
@@ -63,10 +66,24 @@
 	SigLifetimeSecs, KeyLifetimeSecs                        *uint32
 	PreferredSymmetric, PreferredHash, PreferredCompression []uint8
-	PreferredAEAD                                           []uint8
+	PreferredCipherSuites                                   [][2]uint8
 	IssuerKeyId                                             *uint64
 	IssuerFingerprint                                       []byte
+	SignerUserId                                            *string
 	IsPrimaryId                                             *bool
+	Notations                                               []*Notation
+
+	// TrustLevel and TrustAmount can be set by the signer to assert that
+	// the key is not only valid but also trustworthy at the specified
+	// level.
+	// See RFC 4880, section 5.2.3.13 for details.
+	TrustLevel  TrustLevel
+	TrustAmount TrustAmount
+
+	// TrustRegularExpression can be used in conjunction with trust Signature
+	// packets to limit the scope of the trust that is extended.
+	// See RFC 4880, section 5.2.3.14 for details.
+	TrustRegularExpression *string

 	// PolicyURI can be set to the URI of a document that describes the
 	// policy under which the signature was issued. See RFC 4880, section
@@ -75,8 +92,8 @@

 	// FlagsValid is set if any flags were given. See RFC 4880, section
 	// 5.2.3.21 for details.
-	FlagsValid                                                           bool
-	FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool
+	FlagsValid bool
+	FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage, FlagSplitKey, FlagAuthenticate, FlagGroupKey bool

 	// RevocationReason is set if this signature has been revoked.
 	// See RFC 4880, section 5.2.3.23 for details.
@@ -85,8 +102,8 @@

 	// In a self-signature, these flags are set if there is a features subpacket
 	// indicating that the issuer implementation supports these features
-	// (section 5.2.5.25).
-	MDC, AEAD, V5Keys bool
+	// see https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#features-subpacket
+	SEIPDv1, SEIPDv2 bool

 	// EmbeddedSignature, if non-nil, is a signature of the parent key, by
 	// this key.
This prevents an attacker from claiming another's signing @@ -122,7 +139,13 @@ func (sig *Signature) parse(r io.Reader) (err error) { } var ok bool - sig.Hash, ok = s2k.HashIdToHash(buf[2]) + + if sig.Version < 5 { + sig.Hash, ok = algorithm.HashIdToHashWithSha1(buf[2]) + } else { + sig.Hash, ok = algorithm.HashIdToHash(buf[2]) + } + if !ok { return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) } @@ -133,7 +156,11 @@ func (sig *Signature) parse(r io.Reader) (err error) { if err != nil { return } - sig.buildHashSuffix(hashedSubpackets) + err = sig.buildHashSuffix(hashedSubpackets) + if err != nil { + return + } + err = parseSignatureSubpackets(sig, hashedSubpackets, true) if err != nil { return @@ -217,19 +244,23 @@ type signatureSubpacketType uint8 const ( creationTimeSubpacket signatureSubpacketType = 2 signatureExpirationSubpacket signatureSubpacketType = 3 + trustSubpacket signatureSubpacketType = 5 + regularExpressionSubpacket signatureSubpacketType = 6 keyExpirationSubpacket signatureSubpacketType = 9 prefSymmetricAlgosSubpacket signatureSubpacketType = 11 issuerSubpacket signatureSubpacketType = 16 + notationDataSubpacket signatureSubpacketType = 20 prefHashAlgosSubpacket signatureSubpacketType = 21 prefCompressionSubpacket signatureSubpacketType = 22 primaryUserIdSubpacket signatureSubpacketType = 25 policyUriSubpacket signatureSubpacketType = 26 keyFlagsSubpacket signatureSubpacketType = 27 + signerUserIdSubpacket signatureSubpacketType = 28 reasonForRevocationSubpacket signatureSubpacketType = 29 featuresSubpacket signatureSubpacketType = 30 embeddedSignatureSubpacket signatureSubpacketType = 32 issuerFingerprintSubpacket signatureSubpacketType = 33 - prefAeadAlgosSubpacket signatureSubpacketType = 34 + prefCipherSuitesSubpacket signatureSubpacketType = 39 ) // parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. 
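parseSignatureSubpacket, shown below, begins by decoding the variable-length subpacket length defined in RFC 4880, section 5.2.3.1. For reference, the same scheme in isolation; subpacketLength is a hypothetical helper, not part of the patch, and assumes the caller has already verified that b holds enough octets for the indicated form:

package example

// subpacketLength decodes the 1-, 2-, or 5-octet subpacket length encoding.
func subpacketLength(b []byte) (length uint32, headerLen int) {
	switch {
	case b[0] < 192:
		// One octet encodes lengths 0..191.
		return uint32(b[0]), 1
	case b[0] < 255:
		// Two octets encode lengths 192..16319.
		return uint32(b[0]-192)<<8 + uint32(b[1]) + 192, 2
	default:
		// A 255 marker is followed by a four-octet big-endian length.
		return uint32(b[1])<<24 | uint32(b[2])<<16 | uint32(b[3])<<8 | uint32(b[4]), 5
	}
}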
@@ -240,6 +271,10 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 		packetType signatureSubpacketType
 		isCritical bool
 	)
+	if len(subpacket) == 0 {
+		err = errors.StructuralError("zero length signature subpacket")
+		return
+	}
 	switch {
 	case subpacket[0] < 192:
 		length = uint32(subpacket[0])
@@ -273,12 +308,14 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 	isCritical = subpacket[0]&0x80 == 0x80
 	subpacket = subpacket[1:]
 	sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket})
+	if !isHashed &&
+		packetType != issuerSubpacket &&
+		packetType != issuerFingerprintSubpacket &&
+		packetType != embeddedSignatureSubpacket {
+		return
+	}
 	switch packetType {
 	case creationTimeSubpacket:
-		if !isHashed {
-			err = errors.StructuralError("signature creation time in non-hashed area")
-			return
-		}
 		if len(subpacket) != 4 {
 			err = errors.StructuralError("signature creation time not four bytes")
 			return
@@ -287,20 +324,35 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 		sig.CreationTime = time.Unix(int64(t), 0)
 	case signatureExpirationSubpacket:
 		// Signature expiration time, section 5.2.3.10
-		if !isHashed {
-			return
-		}
 		if len(subpacket) != 4 {
 			err = errors.StructuralError("expiration subpacket with bad length")
 			return
 		}
 		sig.SigLifetimeSecs = new(uint32)
 		*sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket)
-	case keyExpirationSubpacket:
-		// Key expiration time, section 5.2.3.6
-		if !isHashed {
+	case trustSubpacket:
+		if len(subpacket) != 2 {
+			err = errors.StructuralError("trust subpacket with bad length")
 			return
 		}
+		// Trust level and amount, section 5.2.3.13
+		sig.TrustLevel = TrustLevel(subpacket[0])
+		sig.TrustAmount = TrustAmount(subpacket[1])
+	case regularExpressionSubpacket:
+		if len(subpacket) == 0 {
+			err = errors.StructuralError("regexp subpacket with bad length")
+			return
+		}
+		// Trust regular expression, section 5.2.3.14
+		// RFC specifies the string should be null-terminated; remove a null byte from the end
+		if subpacket[len(subpacket)-1] != 0x00 {
+			err = errors.StructuralError("expected regular expression to be null-terminated")
+			return
+		}
+		trustRegularExpression := string(subpacket[:len(subpacket)-1])
+		sig.TrustRegularExpression = &trustRegularExpression
+	case keyExpirationSubpacket:
+		// Key expiration time, section 5.2.3.6
 		if len(subpacket) != 4 {
 			err = errors.StructuralError("key expiration subpacket with bad length")
 			return
@@ -309,41 +361,52 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 		*sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket)
 	case prefSymmetricAlgosSubpacket:
 		// Preferred symmetric algorithms, section 5.2.3.7
-		if !isHashed {
-			return
-		}
 		sig.PreferredSymmetric = make([]byte, len(subpacket))
 		copy(sig.PreferredSymmetric, subpacket)
 	case issuerSubpacket:
+		// Issuer, section 5.2.3.5
 		if sig.Version > 4 {
 			err = errors.StructuralError("issuer subpacket found in v5 key")
+			return
 		}
-		// Issuer, section 5.2.3.5
 		if len(subpacket) != 8 {
 			err = errors.StructuralError("issuer subpacket with bad length")
 			return
 		}
 		sig.IssuerKeyId = new(uint64)
 		*sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket)
-	case prefHashAlgosSubpacket:
-		// Preferred hash algorithms, section 5.2.3.8
-		if !isHashed {
+	case notationDataSubpacket:
+		// Notation data, section 5.2.3.16
+		if len(subpacket) < 8 {
+			err = errors.StructuralError("notation data subpacket with bad length")
 			return
 		}
+
+		nameLength := uint32(subpacket[4])<<8 | uint32(subpacket[5])
+		valueLength := uint32(subpacket[6])<<8 | uint32(subpacket[7])
+		if len(subpacket) != int(nameLength)+int(valueLength)+8 {
+			err = errors.StructuralError("notation data subpacket with bad length")
+			return
+		}
+
+		notation := Notation{
+			IsHumanReadable: (subpacket[0] & 0x80) == 0x80,
+			Name:            string(subpacket[8:(nameLength + 8)]),
+			Value:           subpacket[(nameLength + 8):(valueLength + nameLength + 8)],
+			IsCritical:      isCritical,
+		}
+
+		sig.Notations = append(sig.Notations, &notation)
+	case prefHashAlgosSubpacket:
+		// Preferred hash algorithms, section 5.2.3.8
 		sig.PreferredHash = make([]byte, len(subpacket))
 		copy(sig.PreferredHash, subpacket)
 	case prefCompressionSubpacket:
 		// Preferred compression algorithms, section 5.2.3.9
-		if !isHashed {
-			return
-		}
 		sig.PreferredCompression = make([]byte, len(subpacket))
 		copy(sig.PreferredCompression, subpacket)
 	case primaryUserIdSubpacket:
 		// Primary User ID, section 5.2.3.19
-		if !isHashed {
-			return
-		}
 		if len(subpacket) != 1 {
 			err = errors.StructuralError("primary user id subpacket with bad length")
 			return
@@ -354,9 +417,6 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 		}
 	case keyFlagsSubpacket:
 		// Key flags, section 5.2.3.21
-		if !isHashed {
-			return
-		}
 		if len(subpacket) == 0 {
 			err = errors.StructuralError("empty key flags subpacket")
 			return
@@ -374,11 +434,20 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 		if subpacket[0]&KeyFlagEncryptStorage != 0 {
 			sig.FlagEncryptStorage = true
 		}
+		if subpacket[0]&KeyFlagSplitKey != 0 {
+			sig.FlagSplitKey = true
+		}
+		if subpacket[0]&KeyFlagAuthenticate != 0 {
+			sig.FlagAuthenticate = true
+		}
+		if subpacket[0]&KeyFlagGroupKey != 0 {
+			sig.FlagGroupKey = true
+		}
+	case signerUserIdSubpacket:
+		userId := string(subpacket)
+		sig.SignerUserId = &userId
 	case reasonForRevocationSubpacket:
 		// Reason For Revocation, section 5.2.3.23
-		if !isHashed {
-			return
-		}
 		if len(subpacket) == 0 {
 			err = errors.StructuralError("empty revocation reason subpacket")
 			return
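
The notation-data case above reads a fixed eight-octet header: four flag octets (0x80 in the first marks the notation human-readable), a two-octet big-endian name length, and a two-octet value length, followed by the name and value themselves. A standalone sketch of that wire layout (encodeNotationData is a hypothetical helper, not part of the library):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // encodeNotationData builds the body of a notation-data subpacket:
    // 4 flag octets, 2-octet name length, 2-octet value length, name, value.
    func encodeNotationData(name string, value []byte, humanReadable bool) []byte {
        body := make([]byte, 8, 8+len(name)+len(value))
        if humanReadable {
            body[0] = 0x80
        }
        binary.BigEndian.PutUint16(body[4:6], uint16(len(name)))
        binary.BigEndian.PutUint16(body[6:8], uint16(len(value)))
        body = append(body, name...)
        body = append(body, value...)
        return body
    }

    func main() {
        body := encodeNotationData("test@example.com", []byte("ok"), true)
        fmt.Printf("% x\n", body) // 80 00 00 00 00 10 00 02 ... name ... value
    }
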
@@ -390,18 +459,13 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 		// Features subpacket, section 5.2.3.24 specifies a very general
 		// mechanism for OpenPGP implementations to signal support for new
 		// features.
-		if !isHashed {
-			return
-		}
 		if len(subpacket) > 0 {
 			if subpacket[0]&0x01 != 0 {
-				sig.MDC = true
+				sig.SEIPDv1 = true
 			}
-			if subpacket[0]&0x02 != 0 {
-				sig.AEAD = true
-			}
-			if subpacket[0]&0x04 != 0 {
-				sig.V5Keys = true
+			// 0x02 and 0x04 are reserved
+			if subpacket[0]&0x08 != 0 {
+				sig.SEIPDv2 = true
 			}
 		}
 	case embeddedSignatureSubpacket:
@@ -424,11 +488,12 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 		}
 	case policyUriSubpacket:
 		// Policy URI, section 5.2.3.20
-		if !isHashed {
-			return
-		}
 		sig.PolicyURI = string(subpacket)
 	case issuerFingerprintSubpacket:
+		if len(subpacket) == 0 {
+			err = errors.StructuralError("empty issuer fingerprint subpacket")
+			return
+		}
 		v, l := subpacket[0], len(subpacket[1:])
 		if v == 5 && l != 32 || v != 5 && l != 20 {
 			return nil, errors.StructuralError("bad fingerprint length")
@@ -441,13 +506,19 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 		} else {
 			*sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[13:21])
 		}
-	case prefAeadAlgosSubpacket:
-		// Preferred symmetric algorithms, section 5.2.3.8
-		if !isHashed {
+	case prefCipherSuitesSubpacket:
+		// Preferred AEAD cipher suites
+		// See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#name-preferred-aead-ciphersuites
+		if len(subpacket)%2 != 0 {
+			err = errors.StructuralError("invalid aead cipher suite length")
 			return
 		}
-		sig.PreferredAEAD = make([]byte, len(subpacket))
-		copy(sig.PreferredAEAD, subpacket)
+
+		sig.PreferredCipherSuites = make([][2]byte, len(subpacket)/2)
+
+		for i := 0; i < len(subpacket)/2; i++ {
+			sig.PreferredCipherSuites[i] = [2]uint8{subpacket[2*i], subpacket[2*i+1]}
+		}
 	default:
 		if isCritical {
 			err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType)))
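
The prefCipherSuitesSubpacket case above stores AEAD preferences as (symmetric cipher, AEAD mode) octet pairs and rejects bodies of odd length. A minimal sketch of the same decoding outside the package (decodeCipherSuites is illustrative; the algorithm ids follow the crypto-refresh draft):

    package main

    import (
        "errors"
        "fmt"
    )

    // decodeCipherSuites splits a preferred-AEAD-ciphersuites subpacket body
    // into (cipher, mode) octet pairs, mirroring the length check used above.
    func decodeCipherSuites(body []byte) ([][2]byte, error) {
        if len(body)%2 != 0 {
            return nil, errors.New("invalid aead cipher suite length")
        }
        suites := make([][2]byte, len(body)/2)
        for i := range suites {
            suites[i] = [2]byte{body[2*i], body[2*i+1]}
        }
        return suites, nil
    }

    func main() {
        // 9 = AES-256, 7 = AES-128, 2 = OCB in the crypto-refresh registries.
        suites, err := decodeCipherSuites([]byte{9, 2, 7, 2})
        fmt.Println(suites, err) // [[9 2] [7 2]] <nil>
    }
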
@@ -545,7 +616,15 @@ func (sig *Signature) SigExpired(currentTime time.Time) bool {
 // buildHashSuffix constructs the HashSuffix member of sig in preparation for signing.
 func (sig *Signature) buildHashSuffix(hashedSubpackets []byte) (err error) {
-	hash, ok := s2k.HashToHashId(sig.Hash)
+	var hashId byte
+	var ok bool
+
+	if sig.Version < 5 {
+		hashId, ok = algorithm.HashToHashIdWithSha1(sig.Hash)
+	} else {
+		hashId, ok = algorithm.HashToHashId(sig.Hash)
+	}
+
 	if !ok {
 		sig.HashSuffix = nil
 		return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash)))
@@ -555,7 +634,7 @@ func (sig *Signature) buildHashSuffix(hashedSubpackets []byte) (err error) {
 		uint8(sig.Version),
 		uint8(sig.SigType),
 		uint8(sig.PubKeyAlgo),
-		uint8(hash),
+		uint8(hashId),
 		uint8(len(hashedSubpackets) >> 8),
 		uint8(len(hashedSubpackets)),
 	})
@@ -636,26 +715,19 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e
 			sig.DSASigS = new(encoding.MPI).SetBig(s)
 		}
 	case PubKeyAlgoECDSA:
-		var r, s *big.Int
-		if pk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok {
-			// direct support, avoid asn1 wrapping/unwrapping
-			r, s, err = ecdsa.Sign(config.Random(), pk, digest)
-		} else {
-			var b []byte
-			b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash)
-			if err == nil {
-				r, s, err = unwrapECDSASig(b)
-			}
-		}
+		sk := priv.PrivateKey.(*ecdsa.PrivateKey)
+		r, s, err := ecdsa.Sign(config.Random(), sk, digest)
+
 		if err == nil {
 			sig.ECDSASigR = new(encoding.MPI).SetBig(r)
 			sig.ECDSASigS = new(encoding.MPI).SetBig(s)
 		}
 	case PubKeyAlgoEdDSA:
-		sigdata, err := priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, crypto.Hash(0))
+		sk := priv.PrivateKey.(*eddsa.PrivateKey)
+		r, s, err := eddsa.Sign(sk, digest)
 		if err == nil {
-			sig.EdDSASigR = encoding.NewMPI(sigdata[:32])
-			sig.EdDSASigS = encoding.NewMPI(sigdata[32:])
+			sig.EdDSASigR = encoding.NewMPI(r)
+			sig.EdDSASigS = encoding.NewMPI(s)
 		}
 	default:
 		err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo)))
@@ -664,19 +736,6 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e
 	return
 }
 
-// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA
-// signature.
-func unwrapECDSASig(b []byte) (r, s *big.Int, err error) {
-	var ecsdaSig struct {
-		R, S *big.Int
-	}
-	_, err = asn1.Unmarshal(b, &ecsdaSig)
-	if err != nil {
-		return
-	}
-	return ecsdaSig.R, ecsdaSig.S, nil
-}
-
 // SignUserId computes a signature from priv, asserting that pub is a valid
 // key for the identity id. On success, the signature is stored in sig. Call
 // Serialize to write it out.
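
buildHashSuffix above now maps sig.Hash through algorithm.HashToHashIdWithSha1 for v4 signatures (where SHA-1 is still representable) and algorithm.HashToHashId otherwise, then writes a fixed six-octet header ahead of the hashed subpackets. A sketch of just that header layout, with illustrative constant values:

    package main

    import "fmt"

    // v4SigHeader lays out the first six octets hashed into a v4 signature
    // suffix: version, sig type, pubkey algo, hash id, hashed-area length.
    func v4SigHeader(sigType, pubKeyAlgo, hashId byte, hashedLen int) []byte {
        return []byte{
            4,          // version
            sigType,    // e.g. 0x00 for a binary signature
            pubKeyAlgo, // e.g. 1 for RSA
            hashId,     // e.g. 8 for SHA-256
            byte(hashedLen >> 8),
            byte(hashedLen),
        }
    }

    func main() {
        fmt.Printf("% x\n", v4SigHeader(0x00, 1, 8, 300)) // 04 00 01 08 01 2c
    }
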
@@ -845,12 +904,15 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp
 	if sig.IssuerKeyId != nil && sig.Version == 4 {
 		keyId := make([]byte, 8)
 		binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId)
-		subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, true, keyId})
+		subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId})
 	}
 	if sig.IssuerFingerprint != nil {
 		contents := append([]uint8{uint8(issuer.Version)}, sig.IssuerFingerprint...)
 		subpackets = append(subpackets, outputSubpacket{true, issuerFingerprintSubpacket, sig.Version == 5, contents})
 	}
+	if sig.SignerUserId != nil {
+		subpackets = append(subpackets, outputSubpacket{true, signerUserIdSubpacket, false, []byte(*sig.SignerUserId)})
+	}
 	if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 {
 		sigLifetime := make([]byte, 4)
 		binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs)
@@ -873,26 +935,52 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp
 		if sig.FlagEncryptStorage {
 			flags |= KeyFlagEncryptStorage
 		}
+		if sig.FlagSplitKey {
+			flags |= KeyFlagSplitKey
+		}
+		if sig.FlagAuthenticate {
+			flags |= KeyFlagAuthenticate
+		}
+		if sig.FlagGroupKey {
+			flags |= KeyFlagGroupKey
+		}
 		subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}})
 	}
 
+	for _, notation := range sig.Notations {
+		subpackets = append(
+			subpackets,
+			outputSubpacket{
+				true,
+				notationDataSubpacket,
+				notation.IsCritical,
+				notation.getData(),
+			})
+	}
+
 	// The following subpackets may only appear in self-signatures.
 
 	var features = byte(0x00)
-	if sig.MDC {
+	if sig.SEIPDv1 {
 		features |= 0x01
 	}
-	if sig.AEAD {
-		features |= 0x02
-	}
-	if sig.V5Keys {
-		features |= 0x04
+	if sig.SEIPDv2 {
+		features |= 0x08
 	}
 
 	if features != 0x00 {
 		subpackets = append(subpackets, outputSubpacket{true, featuresSubpacket, false, []byte{features}})
 	}
 
+	if sig.TrustLevel != 0 {
+		subpackets = append(subpackets, outputSubpacket{true, trustSubpacket, true, []byte{byte(sig.TrustLevel), byte(sig.TrustAmount)}})
+	}
+
+	if sig.TrustRegularExpression != nil {
+		// RFC specifies the string should be null-terminated; add a null byte to the end
+		subpackets = append(subpackets, outputSubpacket{true, regularExpressionSubpacket, true, []byte(*sig.TrustRegularExpression + "\000")})
+	}
+
 	if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 {
 		keyLifetime := make([]byte, 4)
 		binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs)
@@ -919,8 +1007,13 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp
 		subpackets = append(subpackets, outputSubpacket{true, policyUriSubpacket, false, []uint8(sig.PolicyURI)})
 	}
 
-	if len(sig.PreferredAEAD) > 0 {
-		subpackets = append(subpackets, outputSubpacket{true, prefAeadAlgosSubpacket, false, sig.PreferredAEAD})
+	if len(sig.PreferredCipherSuites) > 0 {
+		serialized := make([]byte, len(sig.PreferredCipherSuites)*2)
+		for i, cipherSuite := range sig.PreferredCipherSuites {
+			serialized[2*i] = cipherSuite[0]
+			serialized[2*i+1] = cipherSuite[1]
+		}
+		subpackets = append(subpackets, outputSubpacket{true, prefCipherSuitesSubpacket, false, serialized})
 	}
 
 	// Revocation reason appears only in revocation signatures and is serialized as per section 5.2.3.23.
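
In the features serialization above, SEIPDv1 maps to bit 0x01 and SEIPDv2 to bit 0x08, with 0x02 and 0x04 now reserved (they previously advertised AEAD and v5 keys), matching the parser earlier in the file. For example:

    package main

    import "fmt"

    func main() {
        var features byte
        seipdV1, seipdV2 := true, true
        if seipdV1 {
            features |= 0x01 // SEIPD v1 (MDC)
        }
        if seipdV2 {
            features |= 0x08 // SEIPD v2 (AEAD); 0x02 and 0x04 are reserved
        }
        fmt.Printf("features octet: %#02x\n", features) // 0x09
    }
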
@@ -962,7 +1055,7 @@ func (sig *Signature) AddMetadataToHashSuffix() {
 	n := sig.HashSuffix[len(sig.HashSuffix)-8:]
 	l := uint64(
 		uint64(n[0])<<56 | uint64(n[1])<<48 | uint64(n[2])<<40 | uint64(n[3])<<32 |
-		uint64(n[4])<<24 | uint64(n[5])<<16 | uint64(n[6])<<8 | uint64(n[7]))
+			uint64(n[4])<<24 | uint64(n[5])<<16 | uint64(n[6])<<8 | uint64(n[7]))
 
 	suffix := bytes.NewBuffer(nil)
 	suffix.Write(sig.HashSuffix[:l])
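
The trust-regular-expression round trip in the hunks above relies on a NUL terminator: the serializer appends "\000" and the parser verifies and strips it. A quick standalone check of that convention:

    package main

    import (
        "errors"
        "fmt"
    )

    func main() {
        // Serialize: the regular expression body is NUL-terminated on the wire.
        wire := []byte("<[^>]+[@.]example\\.com>$" + "\000")

        // Parse: reject a missing terminator, then trim it.
        if len(wire) == 0 || wire[len(wire)-1] != 0x00 {
            panic(errors.New("expected regular expression to be null-terminated"))
        }
        fmt.Println(string(wire[:len(wire)-1]))
    }
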
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go
index 0e9f51d5..bac2b132 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go
@@ -14,8 +14,8 @@ import (
 	"github.com/ProtonMail/go-crypto/openpgp/s2k"
 )
 
-// This is the largest session key that we'll support. Since no 512-bit cipher
-// has even been seriously used, this is comfortably large.
+// This is the largest session key that we'll support. Since at most 256-bit cipher
+// is supported in OpenPGP, this is large enough to contain also the auth tag.
 const maxSessionKeySizeInBytes = 64
 
 // SymmetricKeyEncrypted represents a passphrase protected session key. See RFC
@@ -25,13 +25,16 @@ type SymmetricKeyEncrypted struct {
 	CipherFunc CipherFunction
 	Mode       AEADMode
 	s2k        func(out, in []byte)
-	aeadNonce    []byte
-	encryptedKey []byte
+	iv           []byte
+	encryptedKey []byte // Contains also the authentication tag for AEAD
 }
 
+// parse parses a SymmetricKeyEncrypted packet as specified in
+// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#name-symmetric-key-encrypted-ses
 func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
-	// RFC 4880, section 5.3.
-	var buf [2]byte
+	var buf [1]byte
+
+	// Version
 	if _, err := readFull(r, buf[:]); err != nil {
 		return err
 	}
@@ -39,17 +42,22 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
 	if ske.Version != 4 && ske.Version != 5 {
 		return errors.UnsupportedError("unknown SymmetricKeyEncrypted version")
 	}
-	ske.CipherFunc = CipherFunction(buf[1])
-	if ske.CipherFunc.KeySize() == 0 {
-		return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1])))
+
+	// Cipher function
+	if _, err := readFull(r, buf[:]); err != nil {
+		return err
+	}
+	ske.CipherFunc = CipherFunction(buf[0])
+	if !ske.CipherFunc.IsSupported() {
+		return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[0])))
 	}
 
 	if ske.Version == 5 {
-		mode := make([]byte, 1)
-		if _, err := r.Read(mode); err != nil {
-			return errors.StructuralError("cannot read AEAD octect from packet")
+		// AEAD mode
+		if _, err := readFull(r, buf[:]); err != nil {
+			return errors.StructuralError("cannot read AEAD octet from packet")
 		}
-		ske.Mode = AEADMode(mode[0])
+		ske.Mode = AEADMode(buf[0])
 	}
 
 	var err error
@@ -61,13 +69,14 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
 	}
 
 	if ske.Version == 5 {
-		// AEAD nonce
-		nonce := make([]byte, ske.Mode.NonceLength())
-		_, err := readFull(r, nonce)
-		if err != nil && err != io.ErrUnexpectedEOF {
-			return err
+		// AEAD IV
+		iv := make([]byte, ske.Mode.IvLength())
+		_, err := readFull(r, iv)
+		if err != nil {
+			return errors.StructuralError("cannot read AEAD IV")
 		}
-		ske.aeadNonce = nonce
+
+		ske.iv = iv
 	}
 
 	encryptedKey := make([]byte, maxSessionKeySizeInBytes)
@@ -128,11 +137,10 @@ func (ske *SymmetricKeyEncrypted) decryptV4(key []byte) ([]byte, CipherFunction,
 }
 
 func (ske *SymmetricKeyEncrypted) decryptV5(key []byte) ([]byte, error) {
-	blockCipher := CipherFunction(ske.CipherFunc).new(key)
-	aead := ske.Mode.new(blockCipher)
-
 	adata := []byte{0xc3, byte(5), byte(ske.CipherFunc), byte(ske.Mode)}
-	plaintextKey, err := aead.Open(nil, ske.aeadNonce, ske.encryptedKey, adata)
+	aead := getEncryptedKeyAeadInstance(ske.CipherFunc, ske.Mode, key, adata)
+
+	plaintextKey, err := aead.Open(nil, ske.iv, ske.encryptedKey, adata)
 	if err != nil {
 		return nil, err
 	}
@@ -142,17 +150,12 @@ func (ske *SymmetricKeyEncrypted) decryptV5(key []byte) ([]byte, error) {
 // SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w.
 // The packet contains a random session key, encrypted by a key derived from
 // the given passphrase. The session key is returned and must be passed to
-// SerializeSymmetricallyEncrypted or SerializeAEADEncrypted, depending on
-// whether config.AEADConfig != nil.
+// SerializeSymmetricallyEncrypted.
 // If config is nil, sensible defaults will be used.
 func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) {
 	cipherFunc := config.Cipher()
-	keySize := cipherFunc.KeySize()
-	if keySize == 0 {
-		return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc)))
-	}
-	sessionKey := make([]byte, keySize)
+	sessionKey := make([]byte, cipherFunc.KeySize())
 	_, err = io.ReadFull(config.Random(), sessionKey)
 	if err != nil {
 		return
@@ -169,9 +172,8 @@ func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Conf
 
 // SerializeSymmetricKeyEncryptedReuseKey serializes a symmetric key packet to w.
 // The packet contains the given session key, encrypted by a key derived from
-// the given passphrase. The session key must be passed to
-// SerializeSymmetricallyEncrypted or SerializeAEADEncrypted, depending on
-// whether config.AEADConfig != nil.
+// the given passphrase. The returned session key must be passed to
+// SerializeSymmetricallyEncrypted.
 // If config is nil, sensible defaults will be used.
 func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, passphrase []byte, config *Config) (err error) {
 	var version int
@@ -181,16 +183,17 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass
 		version = 4
 	}
 	cipherFunc := config.Cipher()
-	keySize := cipherFunc.KeySize()
-	if keySize == 0 {
-		return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc)))
+	// cipherFunc must be AES
+	if !cipherFunc.IsSupported() || cipherFunc < CipherAES128 || cipherFunc > CipherAES256 {
+		return errors.UnsupportedError("unsupported cipher: " + strconv.Itoa(int(cipherFunc)))
 	}
+	keySize := cipherFunc.KeySize()
 
 	s2kBuf := new(bytes.Buffer)
 	keyEncryptingKey := make([]byte, keySize)
 	// s2k.Serialize salts and stretches the passphrase, and writes the
 	// resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf.
-	err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()})
+	err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, config.S2K())
 	if err != nil {
 		return
 	}
@@ -201,20 +204,20 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass
 	case 4:
 		packetLength = 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize
 	case 5:
-		nonceLen := config.AEAD().Mode().NonceLength()
+		ivLen := config.AEAD().Mode().IvLength()
 		tagLen := config.AEAD().Mode().TagLength()
-		packetLength = 3 + len(s2kBytes) + nonceLen + keySize + tagLen
+		packetLength = 3 + len(s2kBytes) + ivLen + keySize + tagLen
 	}
 	err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength)
 	if err != nil {
 		return
 	}
 
-	buf := make([]byte, 2)
 	// Symmetric Key Encrypted Version
-	buf[0] = byte(version)
+	buf := []byte{byte(version)}
+
 	// Cipher function
-	buf[1] = byte(cipherFunc)
+	buf = append(buf, byte(cipherFunc))
 
 	if version == 5 {
 		// AEAD mode
@@ -241,19 +244,20 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass
 			return
 		}
 	case 5:
-		blockCipher := cipherFunc.new(keyEncryptingKey)
 		mode := config.AEAD().Mode()
-		aead := mode.new(blockCipher)
-		// Sample nonce using random reader
-		nonce := make([]byte, config.AEAD().Mode().NonceLength())
-		_, err = io.ReadFull(config.Random(), nonce)
+		adata := []byte{0xc3, byte(5), byte(cipherFunc), byte(mode)}
+		aead := getEncryptedKeyAeadInstance(cipherFunc, mode, keyEncryptingKey, adata)
+
+		// Sample iv using random reader
+		iv := make([]byte, config.AEAD().Mode().IvLength())
+		_, err = io.ReadFull(config.Random(), iv)
 		if err != nil {
 			return
 		}
 		// Seal and write (encryptedData includes auth. tag)
-		adata := []byte{0xc3, byte(5), byte(cipherFunc), byte(mode)}
-		encryptedData := aead.Seal(nil, nonce, sessionKey, adata)
-		_, err = w.Write(nonce)
+
+		encryptedData := aead.Seal(nil, iv, sessionKey, adata)
+		_, err = w.Write(iv)
 		if err != nil {
 			return
 		}
@@ -265,3 +269,8 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass
 
 	return
 }
+
+func getEncryptedKeyAeadInstance(c CipherFunction, mode AEADMode, inputKey, associatedData []byte) (aead cipher.AEAD) {
+	blockCipher := c.new(inputKey)
+	return mode.new(blockCipher)
+}
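
decryptV5 and getEncryptedKeyAeadInstance above bind the session-key encryption to the packet header by passing {0xC3, 5, cipher, mode} as associated data. A self-contained sketch of the same seal/open shape using the standard library's AES-GCM (illustrative only; the real code routes through the package's AEADMode and an S2K-derived key):

    package main

    import (
        "crypto/aes"
        "crypto/cipher"
        "crypto/rand"
        "fmt"
        "io"
    )

    func main() {
        kek := make([]byte, 32)        // key-encryption key (S2K-derived in the real code)
        sessionKey := make([]byte, 32) // key being protected
        io.ReadFull(rand.Reader, kek)
        io.ReadFull(rand.Reader, sessionKey)

        block, err := aes.NewCipher(kek)
        if err != nil {
            panic(err)
        }
        aead, err := cipher.NewGCM(block)
        if err != nil {
            panic(err)
        }

        // Associated data: packet tag 0xC3, version 5, cipher id, AEAD mode id.
        adata := []byte{0xc3, 5, 9 /* AES-256 */, 3 /* GCM */}

        iv := make([]byte, aead.NonceSize())
        io.ReadFull(rand.Reader, iv)

        sealed := aead.Seal(nil, iv, sessionKey, adata)
        opened, err := aead.Open(nil, iv, sealed, adata)
        fmt.Println(err == nil, len(opened)) // true 32
    }
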
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go
index 8b84de17..e9bbf032 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go
@@ -5,36 +5,54 @@
 package packet
 
 import (
-	"crypto/cipher"
-	"crypto/sha1"
-	"crypto/subtle"
-	"hash"
 	"io"
-	"strconv"
 
 	"github.com/ProtonMail/go-crypto/openpgp/errors"
 )
 
+const aeadSaltSize = 32
+
 // SymmetricallyEncrypted represents a symmetrically encrypted byte string. The
 // encrypted Contents will consist of more OpenPGP packets. See RFC 4880,
 // sections 5.7 and 5.13.
 type SymmetricallyEncrypted struct {
-	MDC      bool // true iff this is a type 18 packet and thus has an embedded MAC.
-	Contents io.Reader
-	prefix   []byte
+	Version            int
+	Contents           io.Reader // contains tag for version 2
+	IntegrityProtected bool      // If true it is type 18 (with MDC or AEAD). False is packet type 9
+
+	// Specific to version 1
+	prefix []byte
+
+	// Specific to version 2
+	Cipher        CipherFunction
+	Mode          AEADMode
+	ChunkSizeByte byte
+	Salt          [aeadSaltSize]byte
 }
 
-const symmetricallyEncryptedVersion = 1
+const (
+	symmetricallyEncryptedVersionMdc  = 1
+	symmetricallyEncryptedVersionAead = 2
+)
 
 func (se *SymmetricallyEncrypted) parse(r io.Reader) error {
-	if se.MDC {
+	if se.IntegrityProtected {
 		// See RFC 4880, section 5.13.
 		var buf [1]byte
 		_, err := readFull(r, buf[:])
 		if err != nil {
 			return err
 		}
-		if buf[0] != symmetricallyEncryptedVersion {
+
+		switch buf[0] {
+		case symmetricallyEncryptedVersionMdc:
+			se.Version = symmetricallyEncryptedVersionMdc
+		case symmetricallyEncryptedVersionAead:
+			se.Version = symmetricallyEncryptedVersionAead
+			if err := se.parseAead(r); err != nil {
+				return err
+			}
+		default:
 			return errors.UnsupportedError("unknown SymmetricallyEncrypted version")
 		}
 	}
@@ -46,245 +64,27 @@ func (se *SymmetricallyEncrypted) parse(r io.Reader) error {
 // packet can be read. An incorrect key will only be detected after trying
 // to decrypt the entire data.
 func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) {
-	keySize := c.KeySize()
-	if keySize == 0 {
-		return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c)))
-	}
-	if len(key) != keySize {
-		return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length")
+	if se.Version == symmetricallyEncryptedVersionAead {
+		return se.decryptAead(key)
 	}
 
-	if se.prefix == nil {
-		se.prefix = make([]byte, c.blockSize()+2)
-		_, err := readFull(se.Contents, se.prefix)
-		if err != nil {
-			return nil, err
-		}
-	} else if len(se.prefix) != c.blockSize()+2 {
-		return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths")
-	}
-
-	ocfbResync := OCFBResync
-	if se.MDC {
-		// MDC packets use a different form of OCFB mode.
-		ocfbResync = OCFBNoResync
-	}
-
-	s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync)
-
-	plaintext := cipher.StreamReader{S: s, R: se.Contents}
-
-	if se.MDC {
-		// MDC packets have an embedded hash that we need to check.
-		h := sha1.New()
-		h.Write(se.prefix)
-		return &seMDCReader{in: plaintext, h: h}, nil
-	}
-
-	// Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser.
-	return seReader{plaintext}, nil
-}
-
-// seReader wraps an io.Reader with a no-op Close method.
-type seReader struct {
-	in io.Reader
-}
-
-func (ser seReader) Read(buf []byte) (int, error) {
-	return ser.in.Read(buf)
-}
-
-func (ser seReader) Close() error {
-	return nil
-}
-
-const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size
-
-// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold
-// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an
-// MDC packet containing a hash of the previous Contents which is checked
-// against the running hash. See RFC 4880, section 5.13.
-type seMDCReader struct {
-	in          io.Reader
-	h           hash.Hash
-	trailer     [mdcTrailerSize]byte
-	scratch     [mdcTrailerSize]byte
-	trailerUsed int
-	error       bool
-	eof         bool
-}
-
-func (ser *seMDCReader) Read(buf []byte) (n int, err error) {
-	if ser.error {
-		err = io.ErrUnexpectedEOF
-		return
-	}
-	if ser.eof {
-		err = io.EOF
-		return
-	}
-
-	// If we haven't yet filled the trailer buffer then we must do that
-	// first.
-	for ser.trailerUsed < mdcTrailerSize {
-		n, err = ser.in.Read(ser.trailer[ser.trailerUsed:])
-		ser.trailerUsed += n
-		if err == io.EOF {
-			if ser.trailerUsed != mdcTrailerSize {
-				n = 0
-				err = io.ErrUnexpectedEOF
-				ser.error = true
-				return
-			}
-			ser.eof = true
-			n = 0
-			return
-		}
-
-		if err != nil {
-			n = 0
-			return
-		}
-	}
-
-	// If it's a short read then we read into a temporary buffer and shift
-	// the data into the caller's buffer.
-	if len(buf) <= mdcTrailerSize {
-		n, err = readFull(ser.in, ser.scratch[:len(buf)])
-		copy(buf, ser.trailer[:n])
-		ser.h.Write(buf[:n])
-		copy(ser.trailer[:], ser.trailer[n:])
-		copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:])
-		if n < len(buf) {
-			ser.eof = true
-			err = io.EOF
-		}
-		return
-	}
-
-	n, err = ser.in.Read(buf[mdcTrailerSize:])
-	copy(buf, ser.trailer[:])
-	ser.h.Write(buf[:n])
-	copy(ser.trailer[:], buf[n:])
-
-	if err == io.EOF {
-		ser.eof = true
-	}
-	return
-}
-
-// This is a new-format packet tag byte for a type 19 (MDC) packet.
-const mdcPacketTagByte = byte(0x80) | 0x40 | 19
-
-func (ser *seMDCReader) Close() error {
-	if ser.error {
-		return errors.ErrMDCMissing
-	}
-
-	for !ser.eof {
-		// We haven't seen EOF so we need to read to the end
-		var buf [1024]byte
-		_, err := ser.Read(buf[:])
-		if err == io.EOF {
-			break
-		}
-		if err != nil {
-			return errors.ErrMDCMissing
-		}
-	}
-
-	ser.h.Write(ser.trailer[:2])
-
-	final := ser.h.Sum(nil)
-	if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 {
-		return errors.ErrMDCHashMismatch
-	}
-	// The hash already includes the MDC header, but we still check its value
-	// to confirm encryption correctness
-	if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size {
-		return errors.ErrMDCMissing
-	}
-	return nil
-}
-
-// An seMDCWriter writes through to an io.WriteCloser while maintains a running
-// hash of the data written. On close, it emits an MDC packet containing the
-// running hash.
-type seMDCWriter struct {
-	w io.WriteCloser
-	h hash.Hash
-}
-
-func (w *seMDCWriter) Write(buf []byte) (n int, err error) {
-	w.h.Write(buf)
-	return w.w.Write(buf)
-}
-
-func (w *seMDCWriter) Close() (err error) {
-	var buf [mdcTrailerSize]byte
-
-	buf[0] = mdcPacketTagByte
-	buf[1] = sha1.Size
-	w.h.Write(buf[:2])
-	digest := w.h.Sum(nil)
-	copy(buf[2:], digest)
-
-	_, err = w.w.Write(buf[:])
-	if err != nil {
-		return
-	}
-	return w.w.Close()
-}
-
-// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
-type noOpCloser struct {
-	w io.Writer
-}
-
-func (c noOpCloser) Write(data []byte) (n int, err error) {
-	return c.w.Write(data)
-}
-
-func (c noOpCloser) Close() error {
-	return nil
+	return se.decryptMdc(c, key)
 }
 
 // SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet
 // to w and returns a WriteCloser to which the to-be-encrypted packets can be
 // written.
 // If config is nil, sensible defaults will be used.
-func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (Contents io.WriteCloser, err error) {
-	if c.KeySize() != len(key) {
-		return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length")
-	}
+func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, aeadSupported bool, cipherSuite CipherSuite, key []byte, config *Config) (Contents io.WriteCloser, err error) {
 	writeCloser := noOpCloser{w}
-	ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC)
+	ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedIntegrityProtected)
 	if err != nil {
 		return
 	}
 
-	_, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion})
-	if err != nil {
-		return
+	if aeadSupported {
+		return serializeSymmetricallyEncryptedAead(ciphertext, cipherSuite, config.AEADConfig.ChunkSizeByte(), config.Random(), key)
 	}
 
-	block := c.new(key)
-	blockSize := block.BlockSize()
-	iv := make([]byte, blockSize)
-	_, err = config.Random().Read(iv)
-	if err != nil {
-		return
-	}
-	s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync)
-	_, err = ciphertext.Write(prefix)
-	if err != nil {
-		return
-	}
-	plaintext := cipher.StreamWriter{S: s, W: ciphertext}
-
-	h := sha1.New()
-	h.Write(iv)
-	h.Write(iv[blockSize-2:])
-	Contents = &seMDCWriter{w: plaintext, h: h}
-	return
+	return serializeSymmetricallyEncryptedMdc(ciphertext, c, key, config)
 }
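
SerializeSymmetricallyEncrypted now takes an explicit aeadSupported flag and CipherSuite instead of inferring AEAD from the config. A hedged usage sketch against the post-upgrade go-crypto API (assuming packet.CipherAES256, packet.CipherSuite, and the Config.DefaultCipher field behave as in the vendored version):

    package main

    import (
        "bytes"
        "fmt"

        "github.com/ProtonMail/go-crypto/openpgp/packet"
    )

    func main() {
        var buf bytes.Buffer
        config := &packet.Config{DefaultCipher: packet.CipherAES256}

        // Write a passphrase-protected session key (SKESK) and get the key back.
        key, err := packet.SerializeSymmetricKeyEncrypted(&buf, []byte("passphrase"), config)
        if err != nil {
            panic(err)
        }

        // aeadSupported=false selects the v1 (MDC) path; the CipherSuite is then unused.
        w, err := packet.SerializeSymmetricallyEncrypted(&buf, packet.CipherAES256, false, packet.CipherSuite{}, key, config)
        if err != nil {
            panic(err)
        }
        if _, err := w.Write([]byte("inner packets would go here")); err != nil {
            panic(err)
        }
        if err := w.Close(); err != nil {
            panic(err)
        }
        fmt.Println("packet bytes:", buf.Len())
    }
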
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go
new file mode 100644
index 00000000..e96252c1
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go
@@ -0,0 +1,156 @@
+// Copyright 2023 Proton AG. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"crypto/cipher"
+	"crypto/sha256"
+	"io"
+
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+	"golang.org/x/crypto/hkdf"
+)
+
+// parseAead parses a V2 SEIPD packet (AEAD) as specified in
+// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
+func (se *SymmetricallyEncrypted) parseAead(r io.Reader) error {
+	headerData := make([]byte, 3)
+	if n, err := io.ReadFull(r, headerData); n < 3 {
+		return errors.StructuralError("could not read aead header: " + err.Error())
+	}
+
+	// Cipher
+	se.Cipher = CipherFunction(headerData[0])
+	// cipherFunc must have block size 16 to use AEAD
+	if se.Cipher.blockSize() != 16 {
+		return errors.UnsupportedError("invalid aead cipher: " + string(se.Cipher))
+	}
+
+	// Mode
+	se.Mode = AEADMode(headerData[1])
+	if se.Mode.TagLength() == 0 {
+		return errors.UnsupportedError("unknown aead mode: " + string(se.Mode))
+	}
+
+	// Chunk size
+	se.ChunkSizeByte = headerData[2]
+	if se.ChunkSizeByte > 16 {
+		return errors.UnsupportedError("invalid aead chunk size byte: " + string(se.ChunkSizeByte))
+	}
+
+	// Salt
+	if n, err := io.ReadFull(r, se.Salt[:]); n < aeadSaltSize {
+		return errors.StructuralError("could not read aead salt: " + err.Error())
+	}
+
+	return nil
+}
+
+// associatedData for chunks: tag, version, cipher, mode, chunk size byte
+func (se *SymmetricallyEncrypted) associatedData() []byte {
+	return []byte{
+		0xD2,
+		symmetricallyEncryptedVersionAead,
+		byte(se.Cipher),
+		byte(se.Mode),
+		se.ChunkSizeByte,
+	}
+}
+
+// decryptAead decrypts a V2 SEIPD packet (AEAD) as specified in
+// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
+func (se *SymmetricallyEncrypted) decryptAead(inputKey []byte) (io.ReadCloser, error) {
+	aead, nonce := getSymmetricallyEncryptedAeadInstance(se.Cipher, se.Mode, inputKey, se.Salt[:], se.associatedData())
+
+	// Carry the first tagLen bytes
+	tagLen := se.Mode.TagLength()
+	peekedBytes := make([]byte, tagLen)
+	n, err := io.ReadFull(se.Contents, peekedBytes)
+	if n < tagLen || (err != nil && err != io.EOF) {
+		return nil, errors.StructuralError("not enough data to decrypt:" + err.Error())
+	}
+
+	return &aeadDecrypter{
+		aeadCrypter: aeadCrypter{
+			aead:           aead,
+			chunkSize:      decodeAEADChunkSize(se.ChunkSizeByte),
+			initialNonce:   nonce,
+			associatedData: se.associatedData(),
+			chunkIndex:     make([]byte, 8),
+			packetTag:      packetTypeSymmetricallyEncryptedIntegrityProtected,
+		},
+		reader:      se.Contents,
+		peekedBytes: peekedBytes,
+	}, nil
+}
+
+// serializeSymmetricallyEncryptedAead encrypts to a writer a V2 SEIPD packet (AEAD) as specified in
+// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
+func serializeSymmetricallyEncryptedAead(ciphertext io.WriteCloser, cipherSuite CipherSuite, chunkSizeByte byte, rand io.Reader, inputKey []byte) (Contents io.WriteCloser, err error) {
+	// cipherFunc must have block size 16 to use AEAD
+	if cipherSuite.Cipher.blockSize() != 16 {
+		return nil, errors.InvalidArgumentError("invalid aead cipher function")
+	}
+
+	if cipherSuite.Cipher.KeySize() != len(inputKey) {
+		return nil, errors.InvalidArgumentError("error in aead serialization: bad key length")
+	}
+
+	// Data for en/decryption: tag, version, cipher, aead mode, chunk size
+	prefix := []byte{
+		0xD2,
+		symmetricallyEncryptedVersionAead,
+		byte(cipherSuite.Cipher),
+		byte(cipherSuite.Mode),
+		chunkSizeByte,
+	}
+
+	// Write header (that corresponds to prefix except first byte)
+	n, err := ciphertext.Write(prefix[1:])
+	if err != nil || n < 4 {
+		return nil, err
+	}
+
+	// Random salt
+	salt := make([]byte, aeadSaltSize)
+	if _, err := rand.Read(salt); err != nil {
+		return nil, err
+	}
+
+	if _, err := ciphertext.Write(salt); err != nil {
+		return nil, err
+	}
+
+	aead, nonce := getSymmetricallyEncryptedAeadInstance(cipherSuite.Cipher, cipherSuite.Mode, inputKey, salt, prefix)
+
+	return &aeadEncrypter{
+		aeadCrypter: aeadCrypter{
+			aead:           aead,
+			chunkSize:      decodeAEADChunkSize(chunkSizeByte),
+			associatedData: prefix,
+			chunkIndex:     make([]byte, 8),
+			initialNonce:   nonce,
+			packetTag:      packetTypeSymmetricallyEncryptedIntegrityProtected,
+		},
+		writer: ciphertext,
+	}, nil
+}
+
+func getSymmetricallyEncryptedAeadInstance(c CipherFunction, mode AEADMode, inputKey, salt, associatedData []byte) (aead cipher.AEAD, nonce []byte) {
+	hkdfReader := hkdf.New(sha256.New, inputKey, salt, associatedData)
+
+	encryptionKey := make([]byte, c.KeySize())
+	_, _ = readFull(hkdfReader, encryptionKey)
+
+	// Last 64 bits of nonce are the counter
+	nonce = make([]byte, mode.IvLength()-8)
+
+	_, _ = readFull(hkdfReader, nonce)
+
+	blockCipher := c.new(encryptionKey)
+	aead = mode.new(blockCipher)
+
+	return
+}
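
getSymmetricallyEncryptedAeadInstance derives both the message key and the nonce prefix from a single HKDF-SHA256 stream keyed by the session key, salted with the packet's 32-byte salt, and bound to the five-octet packet prefix via the info parameter; the last eight nonce octets are left for the chunk counter. A standalone sketch of that derivation:

    package main

    import (
        "crypto/rand"
        "crypto/sha256"
        "fmt"
        "io"

        "golang.org/x/crypto/hkdf"
    )

    func main() {
        sessionKey := make([]byte, 32)
        salt := make([]byte, 32)
        io.ReadFull(rand.Reader, sessionKey)
        io.ReadFull(rand.Reader, salt)

        // info mirrors the v2 SEIPD prefix: tag, version, cipher, mode, chunk size.
        info := []byte{0xd2, 2, 9 /* AES-256 */, 2 /* OCB */, 6}

        r := hkdf.New(sha256.New, sessionKey, salt, info)

        messageKey := make([]byte, 32)    // AES-256 key
        noncePrefix := make([]byte, 15-8) // OCB nonce is 15 bytes; last 8 are the chunk counter
        io.ReadFull(r, messageKey)
        io.ReadFull(r, noncePrefix)

        fmt.Printf("key=%x prefix=%x\n", messageKey, noncePrefix)
    }
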
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go
new file mode 100644
index 00000000..fa26bebe
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go
@@ -0,0 +1,256 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"crypto/cipher"
+	"crypto/sha1"
+	"crypto/subtle"
+	"hash"
+	"io"
+	"strconv"
+
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+)
+
+// seMdcReader wraps an io.Reader with a no-op Close method.
+type seMdcReader struct {
+	in io.Reader
+}
+
+func (ser seMdcReader) Read(buf []byte) (int, error) {
+	return ser.in.Read(buf)
+}
+
+func (ser seMdcReader) Close() error {
+	return nil
+}
+
+func (se *SymmetricallyEncrypted) decryptMdc(c CipherFunction, key []byte) (io.ReadCloser, error) {
+	if !c.IsSupported() {
+		return nil, errors.UnsupportedError("unsupported cipher: " + strconv.Itoa(int(c)))
+	}
+
+	if len(key) != c.KeySize() {
+		return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length")
+	}
+
+	if se.prefix == nil {
+		se.prefix = make([]byte, c.blockSize()+2)
+		_, err := readFull(se.Contents, se.prefix)
+		if err != nil {
+			return nil, err
+		}
+	} else if len(se.prefix) != c.blockSize()+2 {
+		return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths")
+	}
+
+	ocfbResync := OCFBResync
+	if se.IntegrityProtected {
+		// MDC packets use a different form of OCFB mode.
+		ocfbResync = OCFBNoResync
+	}
+
+	s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync)
+
+	plaintext := cipher.StreamReader{S: s, R: se.Contents}
+
+	if se.IntegrityProtected {
+		// IntegrityProtected packets have an embedded hash that we need to check.
+		h := sha1.New()
+		h.Write(se.prefix)
+		return &seMDCReader{in: plaintext, h: h}, nil
+	}
+
+	// Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser.
+	return seMdcReader{plaintext}, nil
+}
+
+const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size
+
+// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold
+// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an
+// MDC packet containing a hash of the previous Contents which is checked
+// against the running hash. See RFC 4880, section 5.13.
+type seMDCReader struct {
+	in          io.Reader
+	h           hash.Hash
+	trailer     [mdcTrailerSize]byte
+	scratch     [mdcTrailerSize]byte
+	trailerUsed int
+	error       bool
+	eof         bool
+}
+
+func (ser *seMDCReader) Read(buf []byte) (n int, err error) {
+	if ser.error {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	if ser.eof {
+		err = io.EOF
+		return
+	}
+
+	// If we haven't yet filled the trailer buffer then we must do that
+	// first.
+	for ser.trailerUsed < mdcTrailerSize {
+		n, err = ser.in.Read(ser.trailer[ser.trailerUsed:])
+		ser.trailerUsed += n
+		if err == io.EOF {
+			if ser.trailerUsed != mdcTrailerSize {
+				n = 0
+				err = io.ErrUnexpectedEOF
+				ser.error = true
+				return
+			}
+			ser.eof = true
+			n = 0
+			return
+		}
+
+		if err != nil {
+			n = 0
+			return
+		}
+	}
+
+	// If it's a short read then we read into a temporary buffer and shift
+	// the data into the caller's buffer.
+	if len(buf) <= mdcTrailerSize {
+		n, err = readFull(ser.in, ser.scratch[:len(buf)])
+		copy(buf, ser.trailer[:n])
+		ser.h.Write(buf[:n])
+		copy(ser.trailer[:], ser.trailer[n:])
+		copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:])
+		if n < len(buf) {
+			ser.eof = true
+			err = io.EOF
+		}
+		return
+	}
+
+	n, err = ser.in.Read(buf[mdcTrailerSize:])
+	copy(buf, ser.trailer[:])
+	ser.h.Write(buf[:n])
+	copy(ser.trailer[:], buf[n:])
+
+	if err == io.EOF {
+		ser.eof = true
+	}
+	return
+}
+
+// This is a new-format packet tag byte for a type 19 (Integrity Protected) packet.
+const mdcPacketTagByte = byte(0x80) | 0x40 | 19
+
+func (ser *seMDCReader) Close() error {
+	if ser.error {
+		return errors.ErrMDCMissing
+	}
+
+	for !ser.eof {
+		// We haven't seen EOF so we need to read to the end
+		var buf [1024]byte
+		_, err := ser.Read(buf[:])
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return errors.ErrMDCMissing
+		}
+	}
+
+	ser.h.Write(ser.trailer[:2])
+
+	final := ser.h.Sum(nil)
+	if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 {
+		return errors.ErrMDCHashMismatch
+	}
+	// The hash already includes the MDC header, but we still check its value
+	// to confirm encryption correctness
+	if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size {
+		return errors.ErrMDCMissing
+	}
+	return nil
+}
+
+// An seMDCWriter writes through to an io.WriteCloser while maintaining a running
+// hash of the data written. On close, it emits an MDC packet containing the
+// running hash.
+type seMDCWriter struct {
+	w io.WriteCloser
+	h hash.Hash
+}
+
+func (w *seMDCWriter) Write(buf []byte) (n int, err error) {
+	w.h.Write(buf)
+	return w.w.Write(buf)
+}
+
+func (w *seMDCWriter) Close() (err error) {
+	var buf [mdcTrailerSize]byte
+
+	buf[0] = mdcPacketTagByte
+	buf[1] = sha1.Size
+	w.h.Write(buf[:2])
+	digest := w.h.Sum(nil)
+	copy(buf[2:], digest)
+
+	_, err = w.w.Write(buf[:])
+	if err != nil {
+		return
+	}
+	return w.w.Close()
+}
+
+// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
+type noOpCloser struct {
+	w io.Writer
+}
+
+func (c noOpCloser) Write(data []byte) (n int, err error) {
+	return c.w.Write(data)
+}
+
+func (c noOpCloser) Close() error {
+	return nil
+}
+
+func serializeSymmetricallyEncryptedMdc(ciphertext io.WriteCloser, c CipherFunction, key []byte, config *Config) (Contents io.WriteCloser, err error) {
+	// Disallow old cipher suites
+	if !c.IsSupported() || c < CipherAES128 {
+		return nil, errors.InvalidArgumentError("invalid mdc cipher function")
+	}
+
+	if c.KeySize() != len(key) {
+		return nil, errors.InvalidArgumentError("error in mdc serialization: bad key length")
+	}
+
+	_, err = ciphertext.Write([]byte{symmetricallyEncryptedVersionMdc})
+	if err != nil {
+		return
+	}
+
+	block := c.new(key)
+	blockSize := block.BlockSize()
+	iv := make([]byte, blockSize)
+	_, err = config.Random().Read(iv)
+	if err != nil {
+		return
+	}
+	s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync)
+	_, err = ciphertext.Write(prefix)
+	if err != nil {
+		return
+	}
+	plaintext := cipher.StreamWriter{S: s, W: ciphertext}
+
+	h := sha1.New()
+	h.Write(iv)
+	h.Write(iv[blockSize-2:])
+	Contents = &seMDCWriter{w: plaintext, h: h}
+	return
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go
index 0f760ade..88ec72c6 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go
@@ -42,9 +42,16 @@ func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error
 		if err = jpeg.Encode(&buf, photo, nil); err != nil {
 			return
 		}
+
+		lengthBuf := make([]byte, 5)
+		n := serializeSubpacketLength(lengthBuf, len(buf.Bytes())+1)
+		lengthBuf = lengthBuf[:n]
+
 		uat.Contents = append(uat.Contents, &OpaqueSubpacket{
-			SubType:  UserAttrImageSubpacket,
-			Contents: buf.Bytes()})
+			SubType:       UserAttrImageSubpacket,
+			EncodedLength: lengthBuf,
+			Contents:      buf.Bytes(),
+		})
 	}
 	return
 }
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go
index d6bea7d4..614fbafd 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go
@@ -156,5 +156,12 @@ func parseUserId(id string) (name, comment, email string) {
 	name = strings.TrimSpace(id[n.start:n.end])
 	comment = strings.TrimSpace(id[c.start:c.end])
 	email = strings.TrimSpace(id[e.start:e.end])
+
+	// RFC 2822 3.4: alternate simple form of a mailbox
+	if email == "" && strings.ContainsRune(name, '@') {
+		email = name
+		name = ""
+	}
+
 	return
 }
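
The userid.go change implements RFC 2822's bare-mailbox form: a User ID consisting only of an address now lands in the email slot instead of the name slot. A simplified standalone version of that fallback (splitUserId is illustrative and much less complete than the library's parser):

    package main

    import (
        "fmt"
        "strings"
    )

    // splitUserId mirrors the fallback added above: a User ID that is just a
    // bare address ("alice@example.com") is treated as the email, not the name.
    func splitUserId(id string) (name, email string) {
        if i := strings.IndexByte(id, '<'); i >= 0 {
            if j := strings.IndexByte(id[i:], '>'); j > 0 {
                return strings.TrimSpace(id[:i]), id[i+1 : i+j]
            }
        }
        // RFC 2822 3.4: alternate simple form of a mailbox.
        if strings.ContainsRune(id, '@') {
            return "", id
        }
        return id, ""
    }

    func main() {
        fmt.Println(splitUserId("Alice <alice@example.com>")) // Alice alice@example.com
        fmt.Println(splitUserId("alice@example.com"))         // "" alice@example.com
    }
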
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go
index acb7cc6e..8499c737 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go
@@ -8,13 +8,16 @@ package openpgp // import "github.com/ProtonMail/go-crypto/openpgp"
 import (
 	"crypto"
 	_ "crypto/sha256"
+	_ "crypto/sha512"
 	"hash"
 	"io"
 	"strconv"
 
 	"github.com/ProtonMail/go-crypto/openpgp/armor"
 	"github.com/ProtonMail/go-crypto/openpgp/errors"
+	"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
 	"github.com/ProtonMail/go-crypto/openpgp/packet"
+	_ "golang.org/x/crypto/sha3"
 )
 
 // SignatureType is the armor type for a PGP signature.
@@ -119,17 +122,25 @@ ParsePackets:
 		default:
 			continue
 		}
-		var keys []Key
-		if p.KeyId == 0 {
-			keys = keyring.DecryptionKeys()
-		} else {
-			keys = keyring.KeysById(p.KeyId)
+		if keyring != nil {
+			var keys []Key
+			if p.KeyId == 0 {
+				keys = keyring.DecryptionKeys()
+			} else {
+				keys = keyring.KeysById(p.KeyId)
+			}
+			for _, k := range keys {
+				pubKeys = append(pubKeys, keyEnvelopePair{k, p})
+			}
 		}
-		for _, k := range keys {
-			pubKeys = append(pubKeys, keyEnvelopePair{k, p})
+	case *packet.SymmetricallyEncrypted:
+		if !p.IntegrityProtected && !config.AllowUnauthenticatedMessages() {
+			return nil, errors.UnsupportedError("message is not integrity protected")
 		}
-	case *packet.SymmetricallyEncrypted, *packet.AEADEncrypted:
-		edp = p.(packet.EncryptedDataPacket)
+		edp = p
+		break ParsePackets
+	case *packet.AEADEncrypted:
+		edp = p
 		break ParsePackets
 	case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature:
 		// This message isn't encrypted.
@@ -200,13 +211,11 @@ FindKey:
 	if len(symKeys) != 0 && passphrase != nil {
 		for _, s := range symKeys {
 			key, cipherFunc, err := s.Decrypt(passphrase)
-			// On wrong passphrase, session key decryption is very likely to result in an invalid cipherFunc:
+			// In v4, on wrong passphrase, session key decryption is very likely to result in an invalid cipherFunc:
 			// only for < 5% of cases we will proceed to decrypt the data
 			if err == nil {
 				decrypted, err = edp.Decrypt(cipherFunc, key)
-				// TODO: ErrKeyIncorrect is no longer thrown on SEIP decryption,
-				// but it might still be relevant for when we implement AEAD decryption (otherwise, remove?)
-				if err != nil && err != errors.ErrKeyIncorrect {
+				if err != nil {
 					return nil, err
 				}
 				if decrypted != nil {
@@ -268,9 +277,11 @@ FindLiteralData:
 			md.IsSigned = true
 			md.SignedByKeyId = p.KeyId
-			keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign)
-			if len(keys) > 0 {
-				md.SignedBy = &keys[0]
+			if keyring != nil {
+				keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign)
+				if len(keys) > 0 {
+					md.SignedBy = &keys[0]
+				}
 			}
 		case *packet.LiteralData:
 			md.LiteralData = p
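
hashForSignature (rewritten in the next hunk) returns two hashes because text-mode signatures are computed over CRLF-canonicalized input; the wrapped hash performs that preprocessing before feeding the real one. A simplified sketch of the idea, assuming input with bare LFs (the library's writer also handles pre-existing CRLFs, which this toy version does not):

    package main

    import (
        "crypto/sha256"
        "fmt"
        "hash"
    )

    // canonicalTextHash feeds data to h with lone LFs rewritten as CRLF,
    // approximating the preprocessing applied for text signatures.
    type canonicalTextHash struct {
        h hash.Hash
    }

    func (c canonicalTextHash) Write(p []byte) (int, error) {
        for _, b := range p {
            if b == '\n' {
                c.h.Write([]byte("\r\n"))
            } else {
                c.h.Write([]byte{b})
            }
        }
        return len(p), nil
    }

    func main() {
        h1, h2 := sha256.New(), sha256.New()
        canonicalTextHash{h1}.Write([]byte("line 1\nline 2\n"))
        h2.Write([]byte("line 1\r\nline 2\r\n"))
        fmt.Printf("%x\n%x\n", h1.Sum(nil), h2.Sum(nil)) // identical digests
    }
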
@@ -294,14 +305,14 @@ FindLiteralData:
 // should be preprocessed (i.e. to normalize line endings). Thus this function
 // returns two hashes. The second should be used to hash the message itself and
 // performs any needed preprocessing.
-func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) {
-	if hashId == crypto.MD5 {
-		return nil, nil, errors.UnsupportedError("insecure hash algorithm: MD5")
+func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) {
+	if _, ok := algorithm.HashToHashIdWithSha1(hashFunc); !ok {
+		return nil, nil, errors.UnsupportedError("unsupported hash function")
 	}
-	if !hashId.Available() {
-		return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId)))
+	if !hashFunc.Available() {
+		return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashFunc)))
 	}
-	h := hashId.New()
+	h := hashFunc.New()
 
 	switch sigType {
 	case packet.SigTypeBinary:
@@ -373,19 +384,7 @@ func (scr *signatureCheckReader) Read(buf []byte) (int, error) {
 			key := scr.md.SignedBy
 			signatureError := key.PublicKey.VerifySignature(scr.h, sig)
 			if signatureError == nil {
-				now := scr.config.Now()
-				if key.Revoked(now) ||
-					key.Entity.Revoked(now) || // primary key is revoked (redundant if key is the primary key)
-					key.Entity.PrimaryIdentity().Revoked(now) {
-					signatureError = errors.ErrKeyRevoked
-				}
-				if sig.SigExpired(now) {
-					signatureError = errors.ErrSignatureExpired
-				}
-				if key.PublicKey.KeyExpired(key.SelfSignature, now) ||
-					key.SelfSignature.SigExpired(now) {
-					signatureError = errors.ErrKeyExpired
-				}
+				signatureError = checkSignatureDetails(key, sig, scr.config)
 			}
 			scr.md.Signature = sig
 			scr.md.SignatureError = signatureError
@@ -424,8 +423,24 @@ func (scr *signatureCheckReader) Read(buf []byte) (int, error) {
 	return n, nil
 }
 
+// VerifyDetachedSignature takes a signed file and a detached signature and
+// returns the signature packet and the entity the signature was signed by,
+// if any, and a possible signature verification error.
+// If the signer isn't known, ErrUnknownIssuer is returned.
+func VerifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) {
+	var expectedHashes []crypto.Hash
+	return verifyDetachedSignature(keyring, signed, signature, expectedHashes, config)
+}
+
+// VerifyDetachedSignatureAndHash performs the same actions as
+// VerifyDetachedSignature and checks that the expected hash functions were used.
+func VerifyDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) {
+	return verifyDetachedSignature(keyring, signed, signature, expectedHashes, config)
+}
+
 // CheckDetachedSignature takes a signed file and a detached signature and
-// returns the signer if the signature is valid. If the signer isn't known,
+// returns the entity the signature was signed by, if any, and a possible
+// signature verification error. If the signer isn't known,
 // ErrUnknownIssuer is returned.
 func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) {
 	var expectedHashes []crypto.Hash
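
The new VerifyDetachedSignature entry point returns the signature packet itself alongside the signer, so callers can inspect the creation time or notations after a successful check. A usage sketch (file names are hypothetical; the signature is assumed to be a binary, non-armored packet):

    package main

    import (
        "bytes"
        "fmt"
        "os"

        "github.com/ProtonMail/go-crypto/openpgp"
        "github.com/ProtonMail/go-crypto/openpgp/packet"
    )

    func main() {
        keyFile, err := os.Open("signer.pub") // hypothetical armored public key
        if err != nil {
            panic(err)
        }
        keyring, err := openpgp.ReadArmoredKeyRing(keyFile)
        if err != nil {
            panic(err)
        }

        signed := bytes.NewReader([]byte("Signed message\n"))
        sigFile, err := os.Open("message.sig") // hypothetical binary signature
        if err != nil {
            panic(err)
        }

        var config *packet.Config // nil: defaults, time.Now for expiry checks
        sig, signer, err := openpgp.VerifyDetachedSignature(keyring, signed, sigFile, config)
        if err != nil {
            panic(err)
        }
        fmt.Printf("signed by %X at %v\n", *sig.IssuerKeyId, sig.CreationTime)
        _ = signer
    }
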
@@ -435,6 +450,11 @@ func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader, config
 // CheckDetachedSignatureAndHash performs the same actions as
 // CheckDetachedSignature and checks that the expected hash functions were used.
 func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (signer *Entity, err error) {
+	_, signer, err = verifyDetachedSignature(keyring, signed, signature, expectedHashes, config)
+	return
+}
+
+func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) {
 	var issuerKeyId uint64
 	var hashFunc crypto.Hash
 	var sigType packet.SignatureType
@@ -443,23 +463,22 @@ func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader,
 	expectedHashesLen := len(expectedHashes)
 
 	packets := packet.NewReader(signature)
-	var sig *packet.Signature
 	for {
 		p, err = packets.Next()
 		if err == io.EOF {
-			return nil, errors.ErrUnknownIssuer
+			return nil, nil, errors.ErrUnknownIssuer
 		}
 		if err != nil {
-			return nil, err
+			return nil, nil, err
 		}
 
 		var ok bool
 		sig, ok = p.(*packet.Signature)
 		if !ok {
-			return nil, errors.StructuralError("non signature packet found")
+			return nil, nil, errors.StructuralError("non signature packet found")
 		}
 		if sig.IssuerKeyId == nil {
-			return nil, errors.StructuralError("signature doesn't have an issuer")
+			return nil, nil, errors.StructuralError("signature doesn't have an issuer")
 		}
 		issuerKeyId = *sig.IssuerKeyId
 		hashFunc = sig.Hash
@@ -470,7 +489,7 @@ func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader,
 				break
 			}
 			if i+1 == expectedHashesLen {
-				return nil, errors.StructuralError("hash algorithm mismatch with cleartext message headers")
+				return nil, nil, errors.StructuralError("hash algorithm mismatch with cleartext message headers")
 			}
 		}
 
@@ -486,34 +505,21 @@ func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader,
 
 	h, wrappedHash, err := hashForSignature(hashFunc, sigType)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 
 	if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF {
-		return nil, err
+		return nil, nil, err
 	}
 
 	for _, key := range keys {
 		err = key.PublicKey.VerifySignature(h, sig)
 		if err == nil {
-			now := config.Now()
-			if key.Revoked(now) ||
-				key.Entity.Revoked(now) || // primary key is revoked (redundant if key is the primary key)
-				key.Entity.PrimaryIdentity().Revoked(now) {
-				return key.Entity, errors.ErrKeyRevoked
-			}
-			if sig.SigExpired(now) {
-				return key.Entity, errors.ErrSignatureExpired
-			}
-			if key.PublicKey.KeyExpired(key.SelfSignature, now) ||
-				key.SelfSignature.SigExpired(now) {
-				return key.Entity, errors.ErrKeyExpired
-			}
-			return key.Entity, nil
+			return sig, key.Entity, checkSignatureDetails(&key, sig, config)
 		}
 	}
 
-	return nil, err
+	return nil, nil, err
 }
 
 // CheckArmoredDetachedSignature performs the same actions as
@@ -526,3 +532,61 @@ func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader,
 
 	return CheckDetachedSignature(keyring, signed, body, config)
 }
+
+// checkSignatureDetails returns an error if:
+//   - The signature (or one of the binding signatures mentioned below)
+//     has an unknown critical notation data subpacket
+//   - The primary key of the signing entity is revoked
+//   - The primary identity is revoked
+//   - The signature is expired
+//   - The primary key of the signing entity is expired according to the
+//     primary identity binding signature
+//
+// ... or, if the signature was signed by a subkey and:
+//   - The signing subkey is revoked
+//   - The signing subkey is expired according to the subkey binding signature
+//   - The signing subkey binding signature is expired
+//   - The signing subkey cross-signature is expired
+//
+// NOTE: The order of these checks is important, as the caller may choose to
+// ignore ErrSignatureExpired or ErrKeyExpired errors, but should never
+// ignore any other errors.
+//
+// TODO: Also return an error if:
+//   - The primary key is expired according to a direct-key signature
+//   - (For V5 keys only:) The direct-key signature (exists and) is expired
+func checkSignatureDetails(key *Key, signature *packet.Signature, config *packet.Config) error {
+	now := config.Now()
+	primaryIdentity := key.Entity.PrimaryIdentity()
+	signedBySubKey := key.PublicKey != key.Entity.PrimaryKey
+	sigsToCheck := []*packet.Signature{signature, primaryIdentity.SelfSignature}
+	if signedBySubKey {
+		sigsToCheck = append(sigsToCheck, key.SelfSignature, key.SelfSignature.EmbeddedSignature)
+	}
+	for _, sig := range sigsToCheck {
+		for _, notation := range sig.Notations {
+			if notation.IsCritical && !config.KnownNotation(notation.Name) {
+				return errors.SignatureError("unknown critical notation: " + notation.Name)
+			}
+		}
+	}
+	if key.Entity.Revoked(now) || // primary key is revoked
+		(signedBySubKey && key.Revoked(now)) || // subkey is revoked
+		primaryIdentity.Revoked(now) { // primary identity is revoked
+		return errors.ErrKeyRevoked
+	}
+	if key.Entity.PrimaryKey.KeyExpired(primaryIdentity.SelfSignature, now) { // primary key is expired
+		return errors.ErrKeyExpired
+	}
+	if signedBySubKey {
+		if key.PublicKey.KeyExpired(key.SelfSignature, now) { // subkey is expired
+			return errors.ErrKeyExpired
+		}
+	}
+	for _, sig := range sigsToCheck {
+		if sig.SigExpired(now) { // any of the relevant signatures are expired
+			return errors.ErrSignatureExpired
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go
index 8caed36e..db6dad5c 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go
@@ -1,8 +1,8 @@
 package openpgp
 
-const testKey1KeyId = 0xA34D7E18C20C31BB
-const testKey3KeyId = 0x338934250CCC0360
-const testKeyP256KeyId = 0xd44a2c495918513e
+const testKey1KeyId uint64 = 0xA34D7E18C20C31BB
+const testKey3KeyId uint64 = 0x338934250CCC0360
+const testKeyP256KeyId uint64 = 0xd44a2c495918513e
 
 const signedInput = "Signed message\nline 2\nline 3\n"
 const signedTextInput = "Signed message\r\nline 2\r\nline 3\r\n"
@@ -102,11 +102,11 @@ ex7En5r7rHR5xwX82Msc+Rq9dSyO
 
 const dsaKeyWithSHA512 = `9901a2044f04b07f110400db244efecc7316553ee08d179972aab87bb1214de7692593fcf5b6feb1c80fba268722dd464748539b85b81d574cd2d7ad0ca2444de4d849b8756bad7768c486c83a824f9bba4af773d11742bdfb4ac3b89ef8cc9452d4aad31a37e4b630d33927bff68e879284a1672659b8b298222fc68f370f3e24dccacc4a862442b9438b00a0ea444a24088dc23e26df7daf8f43cba3bffc4fe703fe3d6cd7fdca199d54ed8ae501c30e3ec7871ea9cdd4cf63cfe6fc82281d70a5b8bb493f922cd99fba5f088935596af087c8d818d5ec4d0b9afa7f070b3d7c1dd32a84fca08d8280b4890c8da1dde334de8e3cad8450eed2a4a4fcc2db7b8e5528b869a74a7f0189e11ef097ef1253582348de072bb07a9fa8ab838e993cef0ee203ff49298723e2d1f549b00559f886cd417a41692ce58d0ac1307dc71d85a8af21b0cf6eaa14baf2922d3a70389bedf17cc514ba0febbd107675a372fe84b90162a9e88b14d4b1c6be855b96b33fb198c46f058568817780435b6936167ebb3724b680f32bf27382ada2e37a879b3d9de2abe0c3f399350afd1ad438883f4791e2e3b4184453412068617368207472756e636174696f6e207465737488620413110a002205024f04b07f021b03060b090807030206150802090a0b0416020301021e01021780000a0910ef20e0cefca131581318009e2bf3bf047a44d75a9bacd00161ee04d435522397009a03a60d51bd8a568c6c021c8d7cf1be8d990d6417b0020003`
 
-const unknownHashFunctionHex = `8a00000040040001990006050253863c24000a09103b4fe6acc0b21f32ffff01010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101`
+const unknownHashFunctionHex = `8a00000040040001990006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101`
 
 const rsaSignatureBadMPIlength = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101`
 
-const missingHashFunctionHex = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101`
+const missingHashFunctionHex = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101`
 
 const campbellQuine = `a0b001000300fcffa0b001000d00f2ff000300fcffa0b001000d00f2ff8270a01c00000500faff8270a01c00000500faff000500faff001400ebff8270a01c00000500faff000500faff001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400000000ffff000000ffff000b00f4ff428821c400000000ffff000000ffff000b00f4ff0233214c40000100feff000233214c40000100feff0000`
@@ -171,3 +171,104 @@ y29VPonFXqi2zKkpZrvyvZxg+n5e8Nt9wNbuxeCd3QD/TtO2s+JvjrE4Siwv
 UQdl5MlBka1QSNbMq2Bz7XwNPg4=
 =6lbM
 -----END PGP MESSAGE-----`
+
+const keyWithExpiredCrossSig = `-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+xsDNBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv
+/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz +/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/ +5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3 +X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv +9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0 +qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb +SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb +vLIwa3T4CyshfT0AEQEAAc0hQm9iIEJhYmJhZ2UgPGJvYkBvcGVucGdwLmV4YW1w +bGU+wsEABBMBCgATBYJeO2eVAgsJAxUICgKbAQIeAQAhCRD7/MgqAV5zMBYhBNGm +bhojsYLJmA94jPv8yCoBXnMwKWUMAJ3FKZfJ2mXvh+GFqgymvK4NoKkDRPB0CbUN +aDdG7ZOizQrWXo7Da2MYIZ6eZUDqBKLdhZ5gZfVnisDfu/yeCgpENaKib1MPHpA8 +nZQjnPejbBDomNqY8HRzr5jvXNlwywBpjWGtegCKUY9xbSynjbfzIlMrWL4S+Rfl ++bOOQKRyYJWXmECmVyqY8cz2VUYmETjNcwC8VCDUxQnhtcCJ7Aej22hfYwVEPb/J +BsJBPq8WECCiGfJ9Y2y6TF+62KzG9Kfs5hqUeHhQy8V4TSi479ewwL7DH86XmIIK +chSANBS+7iyMtctjNZfmF9zYdGJFvjI/mbBR/lK66E515Inuf75XnL8hqlXuwqvG +ni+i03Aet1DzULZEIio4uIU6ioc1lGO9h7K2Xn4S7QQH1QoISNMWqXibUR0RCGjw +FsEDTt2QwJl8XXxoJCooM7BCcCQo+rMNVUHDjIwrdoQjPld3YZsUQQRcqH6bLuln +cfn5ufl8zTGWKydoj/iTz8KcjZ7w187AzQRdpZzyAQwA1jC/XGxjK6ddgrRfW9j+ +s/U00++EvIsgTs2kr3Rg0GP7FLWV0YNtR1mpl55/bEl7yAxCDTkOgPUMXcaKlnQh +6zrlt6H53mF6Bvs3inOHQvOsGtU0dqvb1vkTF0juLiJgPlM7pWv+pNQ6IA39vKoQ +sTMBv4v5vYNXP9GgKbg8inUNT17BxzZYHfw5+q63ectgDm2on1e8CIRCZ76oBVwz +dkVxoy3gjh1eENlk2D4P0uJNZzF1Q8GV67yLANGMCDICE/OkWn6daipYDzW4iJQt +YPUWP4hWhjdm+CK+hg6IQUEn2Vtvi16D2blRP8BpUNNa4fNuylWVuJV76rIHvsLZ +1pbM3LHpRgE8s6jivS3Rz3WRs0TmWCNnvHPqWizQ3VTy+r3UQVJ5AmhJDrZdZq9i +aUIuZ01PoE1+CHiJwuxPtWvVAxf2POcm1M/F1fK1J0e+lKlQuyonTXqXR22Y41wr +fP2aPk3nPSTW2DUAf3vRMZg57ZpRxLEhEMxcM4/LMR+PABEBAAHCwrIEGAEKAAkF +gl8sAVYCmwIB3QkQ+/zIKgFeczDA+qAEGQEKAAwFgl47Z5UFgwB4TOAAIQkQfC+q +Tfk8N7IWIQQd3OFfCSF87i87N2B8L6pN+Tw3st58C/0exp0X2U4LqicSHEOSqHZj +jiysdqIELHGyo5DSPv92UFPp36aqjF9OFgtNNwSa56fmAVCD4+hor/fKARRIeIjF +qdIC5Y/9a4B10NQFJa5lsvB38x/d39LI2kEoglZnqWgdJskROo3vNQF4KlIcm6FH +dn4WI8UkC5oUUcrpZVMSKoacIaxLwqnXT42nIVgYYuqrd/ZagZZjG5WlrTOd5+NI +zi/l0fWProcPHGLjmAh4Thu8i7omtVw1nQaMnq9I77ffg3cPDgXknYrLL+q8xXh/ +0mEJyIhnmPwllWCSZuLv9DrD5pOexFfdlwXhf6cLzNpW6QhXD/Tf5KrqIPr9aOv8 +9xaEEXWh0vEby2kIsI2++ft+vfdIyxYw/wKqx0awTSnuBV1rG3z1dswX4BfoY66x +Bz3KOVqlz9+mG/FTRQwrgPvR+qgLCHbuotxoGN7fzW+PI75hQG5JQAqhsC9sHjQH +UrI21/VUNwzfw3v5pYsWuFb5bdQ3ASJetICQiMy7IW8WIQTRpm4aI7GCyZgPeIz7 +/MgqAV5zMG6/C/wLpPl/9e6Hf5wmXIUwpZNQbNZvpiCcyx9sXsHXaycOQVxn3McZ +nYOUP9/mobl1tIeDQyTNbkxWjU0zzJl8XQsDZerb5098pg+x7oGIL7M1vn5s5JMl +owROourqF88JEtOBxLMxlAM7X4hB48xKQ3Hu9hS1GdnqLKki4MqRGl4l5FUwyGOM +GjyS3TzkfiDJNwQxybQiC9n57ij20ieNyLfuWCMLcNNnZUgZtnF6wCctoq/0ZIWu +a7nvuA/XC2WW9YjEJJiWdy5109pqac+qWiY11HWy/nms4gpMdxVpT0RhrKGWq4o0 +M5q3ZElOoeN70UO3OSbU5EVrG7gB1GuwF9mTHUVlV0veSTw0axkta3FGT//XfSpD +lRrCkyLzwq0M+UUHQAuYpAfobDlDdnxxOD2jm5GyTzak3GSVFfjW09QFVO6HlGp5 +01/jtzkUiS6nwoHHkfnyn0beZuR8X6KlcrzLB0VFgQFLmkSM9cSOgYhD0PTu9aHb +hW1Hj9AO8lzggBQ= +=Nt+N +-----END PGP PUBLIC KEY BLOCK----- +` + +const sigFromKeyWithExpiredCrossSig = `-----BEGIN PGP SIGNATURE----- + +wsDzBAABCgAGBYJfLAFsACEJEHwvqk35PDeyFiEEHdzhXwkhfO4vOzdgfC+qTfk8 +N7KiqwwAts4QGB7v9bABCC2qkTxJhmStC0wQMcHRcjL/qAiVnmasQWmvE9KVsdm3 +AaXd8mIx4a37/RRvr9dYrY2eE4uw72cMqPxNja2tvVXkHQvk1oEUqfkvbXs4ypKI +NyeTWjXNOTZEbg0hbm3nMy+Wv7zgB1CEvAsEboLDJlhGqPcD+X8a6CJGrBGUBUrv +KVmZr3U6vEzClz3DBLpoddCQseJRhT4YM1nKmBlZ5quh2LFgTSpajv5OsZheqt9y +EZAPbqmLhDmWRQwGzkWHKceKS7nZ/ox2WK6OS7Ob8ZGZkM64iPo6/EGj5Yc19vQN +AGiIaPEGszBBWlOpHTPhNm0LB0nMWqqaT87oNYwP8CQuuxDb6rKJ2lffCmZH27Lb 
+UbQZcH8J+0UhpeaiadPZxH5ATJAcenmVtVVMLVOFnm+eIlxzov9ntpgGYt8hLdXB +ITEG9mMgp3TGS9ZzSifMZ8UGtHdp9QdBg8NEVPFzDOMGxpc/Bftav7RRRuPiAER+ +7A5CBid5 +=aQkm +-----END PGP SIGNATURE----- +` + +const signedMessageWithCriticalNotation = `-----BEGIN PGP MESSAGE----- + +owGbwMvMwMH4oOW7S46CznTG09xJDDE3Wl1KUotLuDousDAwcjBYiSmyXL+48d6x +U1PSGUxcj8IUszKBVMpMaWAAAgEGZpAeh9SKxNyCnFS95PzcytRiBi5OAZjyXXzM +f8WYLqv7TXP61Sa4rqT12CI3xaN73YS2pt089f96odCKaEPnWJ3iSGmzJaW/ug10 +2Zo8Wj2k4s7t8wt4H3HtTu+y5UZfV3VOO+l//sdE/o+Lsub8FZH7/eOq7OnbNp4n +vwjE8mqJXetNMfj8r2SCyvkEnlVRYR+/mnge+ib56FdJ8uKtqSxyvgA= +=fRXs +-----END PGP MESSAGE-----` + +const criticalNotationSigner = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mI0EUmEvTgEEANyWtQQMOybQ9JltDqmaX0WnNPJeLILIM36sw6zL0nfTQ5zXSS3+ +fIF6P29lJFxpblWk02PSID5zX/DYU9/zjM2xPO8Oa4xo0cVTOTLj++Ri5mtr//f5 +GLsIXxFrBJhD/ghFsL3Op0GXOeLJ9A5bsOn8th7x6JucNKuaRB6bQbSPABEBAAG0 +JFRlc3QgTWNUZXN0aW5ndG9uIDx0ZXN0QGV4YW1wbGUuY29tPoi5BBMBAgAjBQJS +YS9OAhsvBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQSmNhOk1uQJQwDAP6 +AgrTyqkRlJVqz2pb46TfbDM2TDF7o9CBnBzIGoxBhlRwpqALz7z2kxBDmwpQa+ki +Bq3jZN/UosY9y8bhwMAlnrDY9jP1gdCo+H0sD48CdXybblNwaYpwqC8VSpDdTndf +9j2wE/weihGp/DAdy/2kyBCaiOY1sjhUfJ1GogF49rC4jQRSYS9OAQQA6R/PtBFa +JaT4jq10yqASk4sqwVMsc6HcifM5lSdxzExFP74naUMMyEsKHP53QxTF0Grqusag +Qg/ZtgT0CN1HUM152y7ACOdp1giKjpMzOTQClqCoclyvWOFB+L/SwGEIJf7LSCEr +woBuJifJc8xAVr0XX0JthoW+uP91eTQ3XpsAEQEAAYkBPQQYAQIACQUCUmEvTgIb +LgCoCRBKY2E6TW5AlJ0gBBkBAgAGBQJSYS9OAAoJEOCE90RsICyXuqIEANmmiRCA +SF7YK7PvFkieJNwzeK0V3F2lGX+uu6Y3Q/Zxdtwc4xR+me/CSBmsURyXTO29OWhP +GLszPH9zSJU9BdDi6v0yNprmFPX/1Ng0Abn/sCkwetvjxC1YIvTLFwtUL/7v6NS2 +bZpsUxRTg9+cSrMWWSNjiY9qUKajm1tuzPDZXAUEAMNmAN3xXN/Kjyvj2OK2ck0X +W748sl/tc3qiKPMJ+0AkMF7Pjhmh9nxqE9+QCEl7qinFqqBLjuzgUhBU4QlwX1GD +AtNTq6ihLMD5v1d82ZC7tNatdlDMGWnIdvEMCv2GZcuIqDQ9rXWs49e7tq1NncLY +hz3tYjKhoFTKEIq3y3Pp +=h/aX +-----END PGP PUBLIC KEY BLOCK-----` diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go index 14f58548..a4369596 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go @@ -3,7 +3,8 @@ // license that can be found in the LICENSE file. // Package s2k implements the various OpenPGP string-to-key transforms as -// specified in RFC 4800 section 3.7.1. +// specified in RFC 4800 section 3.7.1, and Argon2 specified in +// draft-ietf-openpgp-crypto-refresh-08 section 3.7.1.4. package s2k // import "github.com/ProtonMail/go-crypto/openpgp/s2k" import ( @@ -14,70 +15,47 @@ import ( "github.com/ProtonMail/go-crypto/openpgp/errors" "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "golang.org/x/crypto/argon2" ) -// Config collects configuration parameters for s2k key-stretching -// transformations. A nil *Config is valid and results in all default -// values. Currently, Config is used only by the Serialize function in -// this package. -type Config struct { - // S2KMode is the mode of s2k function. - // It can be 0 (simple), 1(salted), 3(iterated) - // 2(reserved) 100-110(private/experimental). - S2KMode uint8 - // Hash is the default hash function to be used. If - // nil, SHA256 is used. - Hash crypto.Hash - // S2KCount is only used for symmetric encryption. It - // determines the strength of the passphrase stretching when - // the said passphrase is hashed to produce a key. S2KCount - // should be between 65536 and 65011712, inclusive. If Config - // is nil or S2KCount is 0, the value 16777216 used. 
Not all
-	// values in the above range can be represented. S2KCount will
-	// be rounded up to the next representable value if it cannot
-	// be encoded exactly. See RFC 4880 Section 3.7.1.3.
-	S2KCount int
-}
+type Mode uint8
+
+// Defines the default S2KMode constants
+//
+// 0 (simple), 1(salted), 3(iterated), 4(argon2)
+const (
+	SimpleS2K         Mode = 0
+	SaltedS2K         Mode = 1
+	IteratedSaltedS2K Mode = 3
+	Argon2S2K         Mode = 4
+	GnuS2K            Mode = 101
+)
+
+const Argon2SaltSize int = 16

 // Params contains all the parameters of the s2k packet
 type Params struct {
 	// mode is the mode of s2k function.
 	// It can be 0 (simple), 1(salted), 3(iterated)
 	// 2(reserved) 100-110(private/experimental).
-	mode uint8
+	mode Mode
 	// hashId is the ID of the hash function used in any of the modes
 	hashId byte
-	// salt is a byte array to use as a salt in hashing process
-	salt []byte
+	// salt is a byte array to use as a salt in hashing process or argon2
+	saltBytes [Argon2SaltSize]byte
 	// countByte is used to determine how many rounds of hashing are to
 	// be performed in s2k mode 3. See RFC 4880 Section 3.7.1.3.
 	countByte byte
-}
-
-func (c *Config) hash() crypto.Hash {
-	if c == nil || uint(c.Hash) == 0 {
-		return crypto.SHA256
-	}
-
-	return c.Hash
-}
-
-// EncodedCount get encoded count
-func (c *Config) EncodedCount() uint8 {
-	if c == nil || c.S2KCount == 0 {
-		return 224 // The common case. Corresponding to 16777216
-	}
-
-	i := c.S2KCount
-
-	switch {
-	case i < 65536:
-		i = 65536
-	case i > 65011712:
-		i = 65011712
-	}
-
-	return encodeCount(i)
+	// passes is a parameter in Argon2 to determine the number of iterations
+	// See the crypto refresh, Section 3.7.1.4.
+	passes byte
+	// parallelism is a parameter in Argon2 to determine the degree of parallelism
+	// See the crypto refresh, Section 3.7.1.4.
+	parallelism byte
+	// memoryExp is a parameter in Argon2 to determine the memory usage
+	// i.e., 2 ** memoryExp kibibytes
+	// See the crypto refresh, Section 3.7.1.4.
+	memoryExp byte
 }

 // encodeCount converts an iterative "count" in the range 1024 to
@@ -106,6 +84,31 @@ func decodeCount(c uint8) int {
 	return (16 + int(c&15)) << (uint32(c>>4) + 6)
 }

+// encodeMemory converts the Argon2 "memory" in the range parallelism*8 to
+// 2**31, inclusive, to an encoded memory. The return value is the
+// octet that is actually stored in the GPG file. encodeMemory panics
+// if memory is not in the above range.
+// See OpenPGP crypto refresh Section 3.7.1.4.
+func encodeMemory(memory uint32, parallelism uint8) uint8 {
+	if memory < (8 * uint32(parallelism)) || memory > uint32(2147483648) {
+		panic("Memory argument memory is outside the required range")
+	}
+
+	for exp := 3; exp < 31; exp++ {
+		compare := decodeMemory(uint8(exp))
+		if compare >= memory {
+			return uint8(exp)
+		}
+	}
+
+	return 31
+}
+
+// decodeMemory computes the decoded memory in kibibytes as 2**memoryExponent
+func decodeMemory(memoryExponent uint8) uint32 {
+	return uint32(1) << memoryExponent
+}
+
 // Simple writes to out the result of computing the Simple S2K function (RFC
 // 4880, section 3.7.1.1) using the given hash and input passphrase.
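Since the Argon2 memory cost is serialized as a single exponent octet, only power-of-two amounts of kibibytes are representable, and a requested value is rounded up to the next exponent. A minimal standalone sketch of that round-trip, with the two helpers restated so it compiles on its own (the range check is elided and the `main` wrapper is purely illustrative):

```go
package main

import "fmt"

// decodeMemory mirrors the vendored helper: the stored octet is an
// exponent, and the decoded value is 2**exp kibibytes.
func decodeMemory(exp uint8) uint32 { return uint32(1) << exp }

// encodeMemory mirrors the loop in the vendored helper: pick the
// smallest exponent whose decoded value covers the requested memory
// (range checking omitted here for brevity).
func encodeMemory(memory uint32) uint8 {
	for exp := uint8(3); exp < 31; exp++ {
		if decodeMemory(exp) >= memory {
			return exp
		}
	}
	return 31
}

func main() {
	fmt.Println(decodeMemory(16))        // 65536 KiB, i.e. 64 MiB (the library default)
	fmt.Println(encodeMemory(64 * 1024)) // 16
}
```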
func Simple(out []byte, h hash.Hash, in []byte) { @@ -169,25 +172,53 @@ func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) { } } +// Argon2 writes to out the key derived from the password (in) with the Argon2 +// function (the crypto refresh, section 3.7.1.4) +func Argon2(out []byte, in []byte, salt []byte, passes uint8, paralellism uint8, memoryExp uint8) { + key := argon2.IDKey(in, salt, uint32(passes), decodeMemory(memoryExp), paralellism, uint32(len(out))) + copy(out[:], key) +} + // Generate generates valid parameters from given configuration. -// It will enforce salted + hashed s2k method +// It will enforce the Iterated and Salted or Argon2 S2K method. func Generate(rand io.Reader, c *Config) (*Params, error) { - hashId, ok := HashToHashId(c.Hash) - if !ok { - return nil, errors.UnsupportedError("no such hash") - } + var params *Params + if c != nil && c.Mode() == Argon2S2K { + // handle Argon2 case + argonConfig := c.Argon2() + params = &Params{ + mode: Argon2S2K, + passes: argonConfig.Passes(), + parallelism: argonConfig.Parallelism(), + memoryExp: argonConfig.EncodedMemory(), + } + } else if c != nil && c.PassphraseIsHighEntropy && c.Mode() == SaltedS2K { // Allow SaltedS2K if PassphraseIsHighEntropy + hashId, ok := algorithm.HashToHashId(c.hash()) + if !ok { + return nil, errors.UnsupportedError("no such hash") + } - params := &Params{ - mode: 3, // Enforce iterared + salted method - hashId: hashId, - salt: make([]byte, 8), - countByte: c.EncodedCount(), + params = &Params{ + mode: SaltedS2K, + hashId: hashId, + } + } else { // Enforce IteratedSaltedS2K method otherwise + hashId, ok := algorithm.HashToHashId(c.hash()) + if !ok { + return nil, errors.UnsupportedError("no such hash") + } + if c != nil { + c.S2KMode = IteratedSaltedS2K + } + params = &Params{ + mode: IteratedSaltedS2K, + hashId: hashId, + countByte: c.EncodedCount(), + } } - - if _, err := io.ReadFull(rand, params.salt); err != nil { + if _, err := io.ReadFull(rand, params.salt()); err != nil { return nil, err } - return params, nil } @@ -207,45 +238,60 @@ func Parse(r io.Reader) (f func(out, in []byte), err error) { // ParseIntoParams reads a binary specification for a string-to-key // transformation from r and returns a struct describing the s2k parameters. func ParseIntoParams(r io.Reader) (params *Params, err error) { - var buf [9]byte + var buf [Argon2SaltSize + 3]byte - _, err = io.ReadFull(r, buf[:2]) + _, err = io.ReadFull(r, buf[:1]) if err != nil { return } params = &Params{ - mode: buf[0], - hashId: buf[1], + mode: Mode(buf[0]), } switch params.mode { - case 0: - return params, nil - case 1: - _, err = io.ReadFull(r, buf[:8]) + case SimpleS2K: + _, err = io.ReadFull(r, buf[:1]) if err != nil { return nil, err } - - params.salt = buf[:8] + params.hashId = buf[0] return params, nil - case 3: + case SaltedS2K: _, err = io.ReadFull(r, buf[:9]) if err != nil { return nil, err } - - params.salt = buf[:8] - params.countByte = buf[8] + params.hashId = buf[0] + copy(params.salt(), buf[1:9]) return params, nil - case 101: - // This is a GNU extension. 
See - // https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109 - if _, err = io.ReadFull(r, buf[:4]); err != nil { + case IteratedSaltedS2K: + _, err = io.ReadFull(r, buf[:10]) + if err != nil { return nil, err } - if buf[0] == 'G' && buf[1] == 'N' && buf[2] == 'U' && buf[3] == 1 { + params.hashId = buf[0] + copy(params.salt(), buf[1:9]) + params.countByte = buf[9] + return params, nil + case Argon2S2K: + _, err = io.ReadFull(r, buf[:Argon2SaltSize+3]) + if err != nil { + return nil, err + } + copy(params.salt(), buf[:Argon2SaltSize]) + params.passes = buf[Argon2SaltSize] + params.parallelism = buf[Argon2SaltSize+1] + params.memoryExp = buf[Argon2SaltSize+2] + return params, nil + case GnuS2K: + // This is a GNU extension. See + // https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109 + if _, err = io.ReadFull(r, buf[:5]); err != nil { + return nil, err + } + params.hashId = buf[0] + if buf[1] == 'G' && buf[2] == 'N' && buf[3] == 'U' && buf[4] == 1 { return params, nil } return nil, errors.UnsupportedError("GNU S2K extension") @@ -255,39 +301,56 @@ func ParseIntoParams(r io.Reader) (params *Params, err error) { } func (params *Params) Dummy() bool { - return params != nil && params.mode == 101 + return params != nil && params.mode == GnuS2K +} + +func (params *Params) salt() []byte { + switch params.mode { + case SaltedS2K, IteratedSaltedS2K: return params.saltBytes[:8] + case Argon2S2K: return params.saltBytes[:Argon2SaltSize] + default: return nil + } } func (params *Params) Function() (f func(out, in []byte), err error) { if params.Dummy() { return nil, errors.ErrDummyPrivateKey("dummy key found") } - hashObj, ok := HashIdToHash(params.hashId) - if !ok { - return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(params.hashId))) - } - if !hashObj.Available() { - return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashObj))) + var hashObj crypto.Hash + if params.mode != Argon2S2K { + var ok bool + hashObj, ok = algorithm.HashIdToHashWithSha1(params.hashId) + if !ok { + return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(params.hashId))) + } + if !hashObj.Available() { + return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashObj))) + } } switch params.mode { - case 0: + case SimpleS2K: f := func(out, in []byte) { Simple(out, hashObj.New(), in) } return f, nil - case 1: + case SaltedS2K: f := func(out, in []byte) { - Salted(out, hashObj.New(), in, params.salt) + Salted(out, hashObj.New(), in, params.salt()) } return f, nil - case 3: + case IteratedSaltedS2K: f := func(out, in []byte) { - Iterated(out, hashObj.New(), in, params.salt, decodeCount(params.countByte)) + Iterated(out, hashObj.New(), in, params.salt(), decodeCount(params.countByte)) } + return f, nil + case Argon2S2K: + f := func(out, in []byte) { + Argon2(out, in, params.salt(), params.passes, params.parallelism, params.memoryExp) + } return f, nil } @@ -295,23 +358,28 @@ func (params *Params) Function() (f func(out, in []byte), err error) { } func (params *Params) Serialize(w io.Writer) (err error) { - if _, err = w.Write([]byte{params.mode}); err != nil { + if _, err = w.Write([]byte{uint8(params.mode)}); err != nil { return } - if _, err = w.Write([]byte{params.hashId}); err != nil { - return + if 
params.mode != Argon2S2K {
+		if _, err = w.Write([]byte{params.hashId}); err != nil {
+			return
+		}
 	}
 	if params.Dummy() {
 		_, err = w.Write(append([]byte("GNU"), 1))
 		return
 	}
 	if params.mode > 0 {
-		if _, err = w.Write(params.salt); err != nil {
+		if _, err = w.Write(params.salt()); err != nil {
 			return
 		}
-		if params.mode == 3 {
+		if params.mode == IteratedSaltedS2K {
 			_, err = w.Write([]byte{params.countByte})
 		}
+		if params.mode == Argon2S2K {
+			_, err = w.Write([]byte{params.passes, params.parallelism, params.memoryExp})
+		}
 	}
 	return
 }
@@ -337,31 +405,3 @@ func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Co
 	f(key, passphrase)
 	return nil
 }
-
-// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP
-// hash id.
-func HashIdToHash(id byte) (h crypto.Hash, ok bool) {
-	if hash, ok := algorithm.HashById[id]; ok {
-		return hash.HashFunc(), true
-	}
-	return 0, false
-}
-
-// HashIdToString returns the name of the hash function corresponding to the
-// given OpenPGP hash id.
-func HashIdToString(id byte) (name string, ok bool) {
-	if hash, ok := algorithm.HashById[id]; ok {
-		return hash.String(), true
-	}
-	return "", false
-}
-
-// HashIdToHash returns an OpenPGP hash id which corresponds the given Hash.
-func HashToHashId(h crypto.Hash) (id byte, ok bool) {
-	for id, hash := range algorithm.HashById {
-		if hash.HashFunc() == h {
-			return id, true
-		}
-	}
-	return 0, false
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go
new file mode 100644
index 00000000..25a4442d
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go
@@ -0,0 +1,26 @@
+package s2k
+
+// Cache stores keys derived with s2k functions from one passphrase
+// to avoid recomputation if multiple items are encrypted with
+// the same parameters.
+type Cache map[Params][]byte
+
+// GetOrComputeDerivedKey tries to retrieve the key
+// for the given s2k parameters from the cache.
+// If there is no hit, it derives the key with the s2k function from the passphrase,
+// updates the cache, and returns the key.
+func (c *Cache) GetOrComputeDerivedKey(passphrase []byte, params *Params, expectedKeySize int) ([]byte, error) {
+	key, found := (*c)[*params]
+	if !found || len(key) != expectedKeySize {
+		var err error
+		derivedKey := make([]byte, expectedKeySize)
+		s2k, err := params.Function()
+		if err != nil {
+			return nil, err
+		}
+		s2k(derivedKey, passphrase)
+		(*c)[*params] = derivedKey
+		return derivedKey, nil
+	}
+	return key, nil
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go
new file mode 100644
index 00000000..b40be522
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go
@@ -0,0 +1,129 @@
+package s2k
+
+import "crypto"
+
+// Config collects configuration parameters for s2k key-stretching
+// transformations. A nil *Config is valid and results in all default
+// values.
+type Config struct {
+	// S2K (String to Key) mode, used for key derivation in the context of secret key encryption
+	// and passphrase-encrypted data. Either s2k.Argon2S2K or s2k.IteratedSaltedS2K may be used.
+	// If the passphrase is a high-entropy key, indicated by setting PassphraseIsHighEntropy to true,
+	// s2k.SaltedS2K can also be used.
+	// Note: Argon2 is the strongest option but not all OpenPGP implementations are compatible with it
+	// (pending standardisation).
+	// 0 (simple), 1(salted), 3(iterated), 4(argon2)
+	// 2(reserved) 100-110(private/experimental).
+	S2KMode Mode
+	// Only relevant if S2KMode is not set to s2k.Argon2S2K.
+	// Hash is the default hash function to be used. If
+	// nil, SHA256 is used.
+	Hash crypto.Hash
+	// Argon2 parameters for S2K (String to Key).
+	// Only relevant if S2KMode is set to s2k.Argon2S2K.
+	// If nil, default parameters are used.
+	// For more details on the choice of parameters, see https://tools.ietf.org/html/rfc9106#section-4.
+	Argon2Config *Argon2Config
+	// Only relevant if S2KMode is set to s2k.IteratedSaltedS2K.
+	// Iteration count for Iterated S2K (String to Key). It
+	// determines the strength of the passphrase stretching when
+	// the said passphrase is hashed to produce a key. S2KCount
+	// should be between 65536 and 65011712, inclusive. If Config
+	// is nil or S2KCount is 0, the value 16777216 is used. Not all
+	// values in the above range can be represented. S2KCount will
+	// be rounded up to the next representable value if it cannot
+	// be encoded exactly. When set, it is strongly encouraged to
+	// use a value that is at least 65536. See RFC 4880 Section
+	// 3.7.1.3.
+	S2KCount int
+	// Indicates whether the passphrase passed by the application is a
+	// high-entropy key (e.g. it's randomly generated or derived from
+	// another passphrase using a strong key derivation function).
+	// When true, allows the S2KMode to be s2k.SaltedS2K.
+	// When the passphrase is not a high-entropy key, using SaltedS2K is
+	// insecure, and not allowed by draft-ietf-openpgp-crypto-refresh-08.
+	PassphraseIsHighEntropy bool
+}
+
+// Argon2Config stores the Argon2 parameters.
+// A nil *Argon2Config is valid and results in all default values.
+type Argon2Config struct {
+	NumberOfPasses      uint8
+	DegreeOfParallelism uint8
+	// The memory parameter for Argon2 specifies desired memory usage in kibibytes.
+	// For example memory=64*1024 sets the memory cost to ~64 MiB.
+	Memory uint32
+}
+
+func (c *Config) Mode() Mode {
+	if c == nil {
+		return IteratedSaltedS2K
+	}
+	return c.S2KMode
+}
+
+func (c *Config) hash() crypto.Hash {
+	if c == nil || uint(c.Hash) == 0 {
+		return crypto.SHA256
+	}
+
+	return c.Hash
+}
+
+func (c *Config) Argon2() *Argon2Config {
+	if c == nil || c.Argon2Config == nil {
+		return nil
+	}
+	return c.Argon2Config
+}
+
+// EncodedCount returns the encoded count.
+func (c *Config) EncodedCount() uint8 {
+	if c == nil || c.S2KCount == 0 {
+		return 224 // The common case.
Corresponding to 16777216 + } + + i := c.S2KCount + + switch { + case i < 65536: + i = 65536 + case i > 65011712: + i = 65011712 + } + + return encodeCount(i) +} + +func (c *Argon2Config) Passes() uint8 { + if c == nil || c.NumberOfPasses == 0 { + return 3 + } + return c.NumberOfPasses +} + +func (c *Argon2Config) Parallelism() uint8 { + if c == nil || c.DegreeOfParallelism == 0 { + return 4 + } + return c.DegreeOfParallelism +} + +func (c *Argon2Config) EncodedMemory() uint8 { + if c == nil || c.Memory == 0 { + return 16 // 64 MiB of RAM + } + + memory := c.Memory + lowerBound := uint32(c.Parallelism())*8 + upperBound := uint32(2147483648) + + switch { + case memory < lowerBound: + memory = lowerBound + case memory > upperBound: + memory = upperBound + } + + return encodeMemory(memory, c.Parallelism()) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go index 23a24f20..7fdd13a3 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go @@ -13,8 +13,8 @@ import ( "github.com/ProtonMail/go-crypto/openpgp/armor" "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" "github.com/ProtonMail/go-crypto/openpgp/packet" - "github.com/ProtonMail/go-crypto/openpgp/s2k" ) // DetachSign signs message with the private key from signer (which must @@ -70,15 +70,11 @@ func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.S if signingKey.PrivateKey.Encrypted { return errors.InvalidArgumentError("signing key is encrypted") } + if _, ok := algorithm.HashToHashId(config.Hash()); !ok { + return errors.InvalidArgumentError("invalid hash function") + } - sig := new(packet.Signature) - sig.SigType = sigType - sig.PubKeyAlgo = signingKey.PrivateKey.PubKeyAlgo - sig.Hash = config.Hash() - sig.CreationTime = config.Now() - sigLifetimeSecs := config.SigLifetime() - sig.SigLifetimeSecs = &sigLifetimeSecs - sig.IssuerKeyId = &signingKey.PrivateKey.KeyId + sig := createSignaturePacket(signingKey.PublicKey, sigType, config) h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) if err != nil { @@ -125,16 +121,13 @@ func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHi } var w io.WriteCloser - if config.AEAD() != nil { - w, err = packet.SerializeAEADEncrypted(ciphertext, key, config.Cipher(), config.AEAD().Mode(), config) - if err != nil { - return - } - } else { - w, err = packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config) - if err != nil { - return - } + cipherSuite := packet.CipherSuite{ + Cipher: config.Cipher(), + Mode: config.AEAD().Mode(), + } + w, err = packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), config.AEAD() != nil, cipherSuite, key, config) + if err != nil { + return } literalData := w @@ -173,8 +166,25 @@ func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) { return a[:j] } +// intersectPreferences mutates and returns a prefix of a that contains only +// the values in the intersection of a and b. The order of a is preserved. 
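These intersection helpers drive the algorithm negotiation in `encrypt`: the sender's candidate list is filtered by each recipient's advertised preferences, and the sender's ordering is kept. A standalone sketch of the single-byte variant follows (the cipher-suite variant defined next does the same over `[2]uint8` pairs); the preference lists in `main` are hypothetical, using the OpenPGP symmetric-cipher IDs:

```go
package main

import "fmt"

// intersectPreferences mirrors the unexported helper in write.go: it keeps,
// in order, the entries of a that also occur in b, reusing a's backing array.
func intersectPreferences(a, b []uint8) []uint8 {
	var j int
	for _, v := range a {
		for _, v2 := range b {
			if v == v2 {
				a[j] = v
				j++
				break
			}
		}
	}
	return a[:j]
}

func main() {
	// OpenPGP symmetric algorithm IDs: 7 = AES-128, 8 = AES-192, 9 = AES-256.
	ours := []uint8{9, 8, 7}
	theirs := []uint8{7, 9}
	fmt.Println(intersectPreferences(ours, theirs)) // [9 7]: sender order wins
}
```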
+func intersectCipherSuites(a [][2]uint8, b [][2]uint8) (intersection [][2]uint8) { + var j int + for _, v := range a { + for _, v2 := range b { + if v[0] == v2[0] && v[1] == v2[1] { + a[j] = v + j++ + break + } + } + } + + return a[:j] +} + func hashToHashId(h crypto.Hash) uint8 { - v, ok := s2k.HashToHashId(h) + v, ok := algorithm.HashToHashId(h) if !ok { panic("tried to convert unknown hash") } @@ -240,7 +250,7 @@ func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entit var hash crypto.Hash for _, hashId := range candidateHashes { - if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() { + if h, ok := algorithm.HashIdToHash(hashId); ok && h.Available() { hash = h break } @@ -249,7 +259,7 @@ func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entit // If the hash specified by config is a candidate, we'll use that. if configuredHash := config.Hash(); configuredHash.Available() { for _, hashId := range candidateHashes { - if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash { + if h, ok := algorithm.HashIdToHash(hashId); ok && h == configuredHash { hash = h break } @@ -258,7 +268,7 @@ func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entit if hash == 0 { hashId := candidateHashes[0] - name, ok := s2k.HashIdToString(hashId) + name, ok := algorithm.HashIdToString(hashId) if !ok { name = "#" + strconv.Itoa(int(hashId)) } @@ -329,39 +339,39 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En // These are the possible ciphers that we'll use for the message. candidateCiphers := []uint8{ - uint8(packet.CipherAES128), uint8(packet.CipherAES256), - uint8(packet.CipherCAST5), + uint8(packet.CipherAES128), } + // These are the possible hash functions that we'll use for the signature. candidateHashes := []uint8{ hashToHashId(crypto.SHA256), hashToHashId(crypto.SHA384), hashToHashId(crypto.SHA512), - hashToHashId(crypto.SHA1), - hashToHashId(crypto.RIPEMD160), + hashToHashId(crypto.SHA3_256), + hashToHashId(crypto.SHA3_512), } - candidateAeadModes := []uint8{ - uint8(packet.AEADModeEAX), - uint8(packet.AEADModeOCB), - uint8(packet.AEADModeExperimentalGCM), + + // Prefer GCM if everyone supports it + candidateCipherSuites := [][2]uint8{ + {uint8(packet.CipherAES256), uint8(packet.AEADModeGCM)}, + {uint8(packet.CipherAES256), uint8(packet.AEADModeEAX)}, + {uint8(packet.CipherAES256), uint8(packet.AEADModeOCB)}, + {uint8(packet.CipherAES128), uint8(packet.AEADModeGCM)}, + {uint8(packet.CipherAES128), uint8(packet.AEADModeEAX)}, + {uint8(packet.CipherAES128), uint8(packet.AEADModeOCB)}, } + candidateCompression := []uint8{ uint8(packet.CompressionNone), uint8(packet.CompressionZIP), uint8(packet.CompressionZLIB), } - // In the event that a recipient doesn't specify any supported ciphers - // or hash functions, these are the ones that we assume that every - // implementation supports. - defaultCiphers := candidateCiphers[0:1] - defaultHashes := candidateHashes[0:1] - defaultAeadModes := candidateAeadModes[0:1] - defaultCompression := candidateCompression[0:1] encryptKeys := make([]Key, len(to)) - // AEAD is used only if every key supports it. 
- aeadSupported := true + + // AEAD is used only if config enables it and every key supports it + aeadSupported := config.AEAD() != nil for i := range to { var ok bool @@ -371,38 +381,37 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En } sig := to[i].PrimaryIdentity().SelfSignature - if sig.AEAD == false { + if !sig.SEIPDv2 { aeadSupported = false } - preferredSymmetric := sig.PreferredSymmetric - if len(preferredSymmetric) == 0 { - preferredSymmetric = defaultCiphers - } - preferredHashes := sig.PreferredHash - if len(preferredHashes) == 0 { - preferredHashes = defaultHashes - } - preferredAeadModes := sig.PreferredAEAD - if len(preferredAeadModes) == 0 { - preferredAeadModes = defaultAeadModes - } - preferredCompression := sig.PreferredCompression - if len(preferredCompression) == 0 { - preferredCompression = defaultCompression - } - candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric) - candidateHashes = intersectPreferences(candidateHashes, preferredHashes) - candidateAeadModes = intersectPreferences(candidateAeadModes, preferredAeadModes) - candidateCompression = intersectPreferences(candidateCompression, preferredCompression) + candidateCiphers = intersectPreferences(candidateCiphers, sig.PreferredSymmetric) + candidateHashes = intersectPreferences(candidateHashes, sig.PreferredHash) + candidateCipherSuites = intersectCipherSuites(candidateCipherSuites, sig.PreferredCipherSuites) + candidateCompression = intersectPreferences(candidateCompression, sig.PreferredCompression) } - if len(candidateCiphers) == 0 || len(candidateHashes) == 0 || len(candidateAeadModes) == 0 { - return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms") + // In the event that the intersection of supported algorithms is empty we use the ones + // labelled as MUST that every implementation supports. + if len(candidateCiphers) == 0 { + // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.3 + candidateCiphers = []uint8{uint8(packet.CipherAES128)} + } + if len(candidateHashes) == 0 { + // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#hash-algos + candidateHashes = []uint8{hashToHashId(crypto.SHA256)} + } + if len(candidateCipherSuites) == 0 { + // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.6 + candidateCipherSuites = [][2]uint8{{uint8(packet.CipherAES128), uint8(packet.AEADModeOCB)}} } cipher := packet.CipherFunction(candidateCiphers[0]) - mode := packet.AEADMode(candidateAeadModes[0]) + aeadCipherSuite := packet.CipherSuite{ + Cipher: packet.CipherFunction(candidateCipherSuites[0][0]), + Mode: packet.AEADMode(candidateCipherSuites[0][1]), + } + // If the cipher specified by config is a candidate, we'll use that. 
configuredCipher := config.Cipher() for _, c := range candidateCiphers { @@ -425,17 +434,11 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En } var payload io.WriteCloser - if config.AEAD() != nil && aeadSupported { - payload, err = packet.SerializeAEADEncrypted(dataWriter, symKey, cipher, mode, config) - if err != nil { - return - } - } else { - payload, err = packet.SerializeSymmetricallyEncrypted(dataWriter, cipher, symKey, config) - if err != nil { - return - } + payload, err = packet.SerializeSymmetricallyEncrypted(dataWriter, cipher, aeadSupported, aeadCipherSuite, symKey, config) + if err != nil { + return } + payload, err = handleCompression(payload, candidateCompression, config) if err != nil { return nil, err @@ -458,8 +461,8 @@ func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Con hashToHashId(crypto.SHA256), hashToHashId(crypto.SHA384), hashToHashId(crypto.SHA512), - hashToHashId(crypto.SHA1), - hashToHashId(crypto.RIPEMD160), + hashToHashId(crypto.SHA3_256), + hashToHashId(crypto.SHA3_512), } defaultHashes := candidateHashes[0:1] preferredHashes := signed.PrimaryIdentity().SelfSignature.PreferredHash @@ -502,15 +505,9 @@ func (s signatureWriter) Write(data []byte) (int, error) { } func (s signatureWriter) Close() error { - sig := &packet.Signature{ - Version: s.signer.Version, - SigType: s.sigType, - PubKeyAlgo: s.signer.PubKeyAlgo, - Hash: s.hashType, - CreationTime: s.config.Now(), - IssuerKeyId: &s.signer.KeyId, - Metadata: s.metadata, - } + sig := createSignaturePacket(&s.signer.PublicKey, s.sigType, s.config) + sig.Hash = s.hashType + sig.Metadata = s.metadata if err := sig.Sign(s.h, s.signer, s.config); err != nil { return err @@ -524,6 +521,21 @@ func (s signatureWriter) Close() error { return s.encryptedData.Close() } +func createSignaturePacket(signer *packet.PublicKey, sigType packet.SignatureType, config *packet.Config) *packet.Signature { + sigLifetimeSecs := config.SigLifetime() + return &packet.Signature{ + Version: signer.Version, + SigType: sigType, + PubKeyAlgo: signer.PubKeyAlgo, + Hash: config.Hash(), + CreationTime: config.Now(), + IssuerKeyId: &signer.KeyId, + IssuerFingerprint: signer.Fingerprint, + Notations: config.Notations(), + SigLifetimeSecs: &sigLifetimeSecs, + } +} + // noOpCloser is like an ioutil.NopCloser, but for an io.Writer. // TODO: we have two of these in OpenPGP packages alone. This probably needs // to be promoted somewhere more common. 
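With signature construction now centralized in `createSignaturePacket`, every signing path, detached signatures included, picks up the issuer key ID, fingerprint, and any configured notations in one place. A quick caller-level sketch using the public go-crypto API (a throwaway generated key and nil config, so all defaults apply; the message text is illustrative):

```go
package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/ProtonMail/go-crypto/openpgp"
)

func main() {
	// Throwaway key with default settings; real callers load existing keys.
	entity, err := openpgp.NewEntity("Alice", "", "alice@example.com", nil)
	if err != nil {
		panic(err)
	}

	// DetachSign builds its signature via createSignaturePacket, so the
	// output carries the issuer key ID and fingerprint automatically.
	var sig bytes.Buffer
	if err := openpgp.DetachSign(&sig, entity, strings.NewReader("hello"), nil); err != nil {
		panic(err)
	}
	fmt.Println(sig.Len() > 0) // true: binary signature packet written
}
```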
@@ -545,6 +557,9 @@ func handleCompression(compressed io.WriteCloser, candidateCompression []uint8,
 	if confAlgo == packet.CompressionNone {
 		return
 	}
+
+	// Set algorithm labelled as MUST as fallback
+	// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.4
 	finalAlgo := packet.CompressionNone
 	// if compression specified by config available we will use it
 	for _, c := range candidateCompression {
diff --git a/vendor/github.com/ProtonMail/go-mime/BUILD.bazel b/vendor/github.com/ProtonMail/go-mime/BUILD.bazel
index 96df1c97..cc3a1a71 100644
--- a/vendor/github.com/ProtonMail/go-mime/BUILD.bazel
+++ b/vendor/github.com/ProtonMail/go-mime/BUILD.bazel
@@ -11,7 +11,6 @@ go_library(
     importpath = "github.com/ProtonMail/go-mime",
     visibility = ["//visibility:public"],
     deps = [
-        "//vendor/github.com/sirupsen/logrus",
         "//vendor/golang.org/x/text/encoding",
         "//vendor/golang.org/x/text/encoding/htmlindex",
         "//vendor/golang.org/x/text/transform",
diff --git a/vendor/github.com/ProtonMail/go-mime/README.md b/vendor/github.com/ProtonMail/go-mime/README.md
index a78b7476..efee623a 100644
--- a/vendor/github.com/ProtonMail/go-mime/README.md
+++ b/vendor/github.com/ProtonMail/go-mime/README.md
@@ -1,6 +1,25 @@
 # Go Mime Wrapper Library

+Provides a parser for MIME messages.
+
 ## Download/Install

 Run `go get -u github.com/ProtonMail/go-mime`, or manually `git clone` the repository into `$GOPATH/src/github.com/ProtonMail/go-mime`.
+
+
+## Usage
+
+The library can be used to extract the body and attachments from a MIME message.
+
+Example:
+```go
+printAccepter := gomime.NewMIMEPrinter()
+bodyCollector := gomime.NewBodyCollector(printAccepter)
+attachmentsCollector := gomime.NewAttachmentsCollector(bodyCollector)
+mimeVisitor := gomime.NewMimeVisitor(attachmentsCollector)
+err := gomime.VisitAll(bytes.NewReader(mmBodyData), h, mimeVisitor)
+attachments := attachmentsCollector.GetAttachments()
+attachmentsHeaders := attachmentsCollector.GetAttHeaders()
+bodyContent, bodyMimeType := bodyCollector.GetBody()
+```
diff --git a/vendor/github.com/ProtonMail/go-mime/encoding.go b/vendor/github.com/ProtonMail/go-mime/encoding.go
index cf237e6a..fced2149 100644
--- a/vendor/github.com/ProtonMail/go-mime/encoding.go
+++ b/vendor/github.com/ProtonMail/go-mime/encoding.go
@@ -1,7 +1,6 @@
 package gomime

 import (
-	"bytes"
 	"fmt"
 	"io"
 	"mime"
@@ -11,9 +10,9 @@ import (
 	"unicode/utf8"

 	"encoding/base64"
+
 	"golang.org/x/text/encoding"
 	"golang.org/x/text/encoding/htmlindex"
-	"golang.org/x/text/transform"
 )

 var wordDec = &mime.WordDecoder{
@@ -189,21 +188,13 @@ func DecodeCharset(original []byte, mediaType string, contentTypeParams map[stri
 		}
 		err = fmt.Errorf("non-utf8 content without charset specification")
 	}
-
 	if err != nil {
 		return original, err
 	}
-
-	utf8 := make([]byte, len(original))
-	nDst, nSrc, err := decoder.Transform(utf8, original, false)
-	for err == transform.ErrShortDst {
-		utf8 = make([]byte, (nDst/nSrc+1)*len(original))
-		nDst, nSrc, err = decoder.Transform(utf8, original, false)
-	}
+	utf8, err := decoder.Bytes(original)
 	if err != nil {
 		return original, err
 	}
-	utf8 = bytes.Trim(utf8, "\x00")

 	return utf8, nil
 }
diff --git a/vendor/github.com/ProtonMail/go-mime/parser.go b/vendor/github.com/ProtonMail/go-mime/parser.go
index 4bbc0d73..e36cd879 100644
--- a/vendor/github.com/ProtonMail/go-mime/parser.go
+++ b/vendor/github.com/ProtonMail/go-mime/parser.go
@@ -5,6 +5,7 @@ import (
 	"bytes"
 	"io"
 	"io/ioutil"
+	"log"
 	"mime"
 	"mime/multipart"
 	"net/http"
@@ -12,8 +13,6 @@ import (
 	"net/textproto"
"regexp" "strings" - - log "github.com/sirupsen/logrus" ) // VisitAcceptor decidest what to do with part which is processed @@ -181,7 +180,7 @@ func GetMultipartParts(r io.Reader, params map[string]string) (parts []io.Reader headers = []textproto.MIMEHeader{} var p *multipart.Part for { - p, err = mr.NextPart() + p, err = mr.NextRawPart() if err == io.EOF { err = nil break @@ -275,7 +274,7 @@ func checkHeaders(headers []textproto.MIMEHeader) bool { func decodePart(partReader io.Reader, header textproto.MIMEHeader) (decodedPart io.Reader) { decodedPart = DecodeContentEncoding(partReader, header.Get("Content-Transfer-Encoding")) if decodedPart == nil { - log.Warnf("Unsupported Content-Transfer-Encoding '%v'", header.Get("Content-Transfer-Encoding")) + log.Printf("Unsupported Content-Transfer-Encoding '%v'", header.Get("Content-Transfer-Encoding")) decodedPart = partReader } return @@ -376,7 +375,7 @@ func (ptc *PlainTextCollector) Accept(partReader io.Reader, header textproto.MIM if buffer, err := ioutil.ReadAll(decodedPart); err == nil { buffer, err = DecodeCharset(buffer, mediaType, params) if err != nil { - log.Warnln("Decode charset error:", err) + log.Println("Decode charset error:", err) err = nil // Don't fail parsing on decoding errors, use original } ptc.plainTextContents.Write(buffer) @@ -431,7 +430,7 @@ func (bc *BodyCollector) Accept(partReader io.Reader, header textproto.MIMEHeade if buffer, err := ioutil.ReadAll(decodedPart); err == nil { buffer, err = DecodeCharset(buffer, mediaType, params) if err != nil { - log.Warnln("Decode charset error:", err) + log.Println("Decode charset error:", err) err = nil // Don't fail parsing on decoding errors, use original } if mediaType == "text/html" { @@ -500,7 +499,7 @@ func (ac *AttachmentsCollector) Accept(partReader io.Reader, header textproto.MI if buffer, err := ioutil.ReadAll(decodedPart); err == nil { buffer, err = DecodeCharset(buffer, mediaType, params) if err != nil { - log.Warnln("Decode charset error:", err) + log.Println("Decode charset error:", err) err = nil // Don't fail parsing on decoding errors, use original } headerBuf := new(bytes.Buffer) diff --git a/vendor/github.com/ProtonMail/gopenpgp/v2/constants/BUILD.bazel b/vendor/github.com/ProtonMail/gopenpgp/v2/constants/BUILD.bazel index 93e6a86c..093d54b4 100644 --- a/vendor/github.com/ProtonMail/gopenpgp/v2/constants/BUILD.bazel +++ b/vendor/github.com/ProtonMail/gopenpgp/v2/constants/BUILD.bazel @@ -5,6 +5,7 @@ go_library( srcs = [ "armor.go", "cipher.go", + "context.go", "version.go", ], importmap = "peridot.resf.org/vendor/github.com/ProtonMail/gopenpgp/v2/constants", diff --git a/vendor/github.com/ProtonMail/gopenpgp/v2/constants/armor.go b/vendor/github.com/ProtonMail/gopenpgp/v2/constants/armor.go index 2c755774..0d2c1925 100644 --- a/vendor/github.com/ProtonMail/gopenpgp/v2/constants/armor.go +++ b/vendor/github.com/ProtonMail/gopenpgp/v2/constants/armor.go @@ -3,7 +3,7 @@ package constants // Constants for armored data. 
const ( - ArmorHeaderVersion = "GopenPGP 2.4.7" + ArmorHeaderVersion = "GopenPGP 2.7.5" ArmorHeaderComment = "https://gopenpgp.org" PGPMessageHeader = "PGP MESSAGE" PGPSignatureHeader = "PGP SIGNATURE" diff --git a/vendor/github.com/ProtonMail/gopenpgp/v2/constants/cipher.go b/vendor/github.com/ProtonMail/gopenpgp/v2/constants/cipher.go index 970960ce..ab8c72ee 100644 --- a/vendor/github.com/ProtonMail/gopenpgp/v2/constants/cipher.go +++ b/vendor/github.com/ProtonMail/gopenpgp/v2/constants/cipher.go @@ -15,6 +15,7 @@ const ( SIGNATURE_NOT_SIGNED int = 1 SIGNATURE_NO_VERIFIER int = 2 SIGNATURE_FAILED int = 3 + SIGNATURE_BAD_CONTEXT int = 4 ) const DefaultCompression = 2 // ZLIB diff --git a/vendor/github.com/ProtonMail/gopenpgp/v2/constants/context.go b/vendor/github.com/ProtonMail/gopenpgp/v2/constants/context.go new file mode 100644 index 00000000..ca001526 --- /dev/null +++ b/vendor/github.com/ProtonMail/gopenpgp/v2/constants/context.go @@ -0,0 +1,3 @@ +package constants + +const SignatureContextName = "context@proton.ch" diff --git a/vendor/github.com/ProtonMail/gopenpgp/v2/constants/version.go b/vendor/github.com/ProtonMail/gopenpgp/v2/constants/version.go index 29df3e3d..cca8c39c 100644 --- a/vendor/github.com/ProtonMail/gopenpgp/v2/constants/version.go +++ b/vendor/github.com/ProtonMail/gopenpgp/v2/constants/version.go @@ -1,3 +1,3 @@ package constants -const Version = "2.4.7" +const Version = "2.7.5" diff --git a/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/BUILD.bazel b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/BUILD.bazel index e8b536a2..25ca3191 100644 --- a/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/BUILD.bazel +++ b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/BUILD.bazel @@ -16,6 +16,7 @@ go_library( "message_getters.go", "mime.go", "password.go", + "sanitize_string.go", "sessionkey.go", "sessionkey_streaming.go", "signature.go", @@ -34,6 +35,8 @@ go_library( "@com_github_protonmail_go_crypto//openpgp", "@com_github_protonmail_go_crypto//openpgp/clearsign", "@com_github_protonmail_go_crypto//openpgp/ecdh", + "@com_github_protonmail_go_crypto//openpgp/ecdsa", + "@com_github_protonmail_go_crypto//openpgp/eddsa", "@com_github_protonmail_go_crypto//openpgp/elgamal", "@com_github_protonmail_go_crypto//openpgp/errors", "@com_github_protonmail_go_crypto//openpgp/packet", diff --git a/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/attachment_manual.go b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/attachment_manual.go index 6e33c1f1..39a2957a 100644 --- a/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/attachment_manual.go +++ b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/attachment_manual.go @@ -75,7 +75,7 @@ func (keyRing *KeyRing) NewManualAttachmentProcessor( estimatedSize int, filename string, dataBuffer []byte, ) (*ManualAttachmentProcessor, error) { if len(dataBuffer) == 0 { - return nil, errors.New("gopenpgp: can't give a nil or empty buffer to process the attachement") + return nil, errors.New("gopenpgp: can't give a nil or empty buffer to process the attachment") } // forces the gc to be called often @@ -148,7 +148,7 @@ func (keyRing *KeyRing) NewManualAttachmentProcessor( return attachmentProc, nil } -// readAll works a bit like io.ReadAll +// readAll works a bit like ioutil.ReadAll // but we can choose the buffer to write to // and we don't grow the slice in case of overflow. 
func readAll(buffer []byte, reader io.Reader) (int, error) { diff --git a/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/key_clear.go b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/key_clear.go index 0f2e8952..c628f363 100644 --- a/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/key_clear.go +++ b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/key_clear.go @@ -2,13 +2,13 @@ package crypto import ( "crypto/dsa" //nolint:staticcheck - "crypto/ecdsa" - "crypto/ed25519" "crypto/rsa" "errors" "math/big" "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/ecdsa" + "github.com/ProtonMail/go-crypto/openpgp/eddsa" "github.com/ProtonMail/go-crypto/openpgp/elgamal" ) @@ -57,7 +57,7 @@ func clearPrivateKey(privateKey interface{}) error { return clearElGamalPrivateKey(priv) case *ecdsa.PrivateKey: return clearECDSAPrivateKey(priv) - case *ed25519.PrivateKey: + case *eddsa.PrivateKey: return clearEdDSAPrivateKey(priv) case *ecdh.PrivateKey: return clearECDHPrivateKey(priv) @@ -115,8 +115,8 @@ func clearECDSAPrivateKey(priv *ecdsa.PrivateKey) error { return nil } -func clearEdDSAPrivateKey(priv *ed25519.PrivateKey) error { - clearMem(*priv) +func clearEdDSAPrivateKey(priv *eddsa.PrivateKey) error { + clearMem(priv.D) return nil } diff --git a/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/keyring_message.go b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/keyring_message.go index 4c2c9cd4..f0fc16e0 100644 --- a/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/keyring_message.go +++ b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/keyring_message.go @@ -2,7 +2,6 @@ package crypto import ( "bytes" - "crypto" "io" "io/ioutil" "time" @@ -18,13 +17,16 @@ import ( // * message : The plaintext input as a PlainMessage. // * privateKey : (optional) an unlocked private keyring to include signature in the message. func (keyRing *KeyRing) Encrypt(message *PlainMessage, privateKey *KeyRing) (*PGPMessage, error) { - config := &packet.Config{DefaultCipher: packet.CipherAES256, Time: getTimeGenerator()} - encrypted, err := asymmetricEncrypt(message, keyRing, privateKey, config) - if err != nil { - return nil, err - } + return asymmetricEncrypt(message, keyRing, privateKey, false, nil) +} - return NewPGPMessage(encrypted), nil +// EncryptWithContext encrypts a PlainMessage, outputs a PGPMessage. +// If an unlocked private key is also provided it will also sign the message. +// * message : The plaintext input as a PlainMessage. +// * privateKey : (optional) an unlocked private keyring to include signature in the message. +// * signingContext : (optional) the context for the signature. +func (keyRing *KeyRing) EncryptWithContext(message *PlainMessage, privateKey *KeyRing, signingContext *SigningContext) (*PGPMessage, error) { + return asymmetricEncrypt(message, keyRing, privateKey, false, signingContext) } // EncryptWithCompression encrypts with compression support a PlainMessage to PGPMessage using public/private keys. @@ -32,60 +34,92 @@ func (keyRing *KeyRing) Encrypt(message *PlainMessage, privateKey *KeyRing) (*PG // * privateKey : (optional) an unlocked private keyring to include signature in the message. // * output : The encrypted data as PGPMessage. 
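The new `*WithContext` variants thread a signature notation through encryption and require the same context to be declared again at decryption. A sketch of the round trip, assuming gopenpgp's public helpers from this release (`GenerateKey`, `NewKeyRing`, `NewSigningContext`, `NewVerificationContext`); the key parameters and context string are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/ProtonMail/gopenpgp/v2/crypto"
)

func main() {
	// Illustrative throwaway key; any unlocked keyring works the same way.
	key, err := crypto.GenerateKey("alice", "alice@example.com", "x25519", 0)
	if err != nil {
		panic(err)
	}
	keyRing, err := crypto.NewKeyRing(key)
	if err != nil {
		panic(err)
	}

	// Encrypt and sign, binding the signature to an application context.
	msg := crypto.NewPlainMessageFromString("top secret")
	signingCtx := crypto.NewSigningContext("example-context", true) // critical notation
	encrypted, err := keyRing.EncryptWithContext(msg, keyRing, signingCtx)
	if err != nil {
		panic(err)
	}

	// Verification fails unless the expected context is declared as required.
	verifyCtx := crypto.NewVerificationContext("example-context", true, 0)
	decrypted, err := keyRing.DecryptWithContext(encrypted, keyRing, crypto.GetUnixTime(), verifyCtx)
	if err != nil {
		panic(err)
	}
	fmt.Println(decrypted.GetString()) // top secret
}
```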
func (keyRing *KeyRing) EncryptWithCompression(message *PlainMessage, privateKey *KeyRing) (*PGPMessage, error) { - config := &packet.Config{ - DefaultCipher: packet.CipherAES256, - Time: getTimeGenerator(), - DefaultCompressionAlgo: constants.DefaultCompression, - CompressionConfig: &packet.CompressionConfig{Level: constants.DefaultCompressionLevel}, - } + return asymmetricEncrypt(message, keyRing, privateKey, true, nil) +} - encrypted, err := asymmetricEncrypt(message, keyRing, privateKey, config) - if err != nil { - return nil, err - } - - return NewPGPMessage(encrypted), nil +// EncryptWithContextAndCompression encrypts with compression support a PlainMessage to PGPMessage using public/private keys. +// * message : The plain data as a PlainMessage. +// * privateKey : (optional) an unlocked private keyring to include signature in the message. +// * signingContext : (optional) the context for the signature. +// * output : The encrypted data as PGPMessage. +func (keyRing *KeyRing) EncryptWithContextAndCompression(message *PlainMessage, privateKey *KeyRing, signingContext *SigningContext) (*PGPMessage, error) { + return asymmetricEncrypt(message, keyRing, privateKey, true, signingContext) } // Decrypt decrypts encrypted string using pgp keys, returning a PlainMessage // * message : The encrypted input as a PGPMessage // * verifyKey : Public key for signature verification (optional) // * verifyTime : Time at verification (necessary only if verifyKey is not nil) +// * verificationContext : (optional) the context for the signature verification. // // When verifyKey is not provided, then verifyTime should be zero, and // signature verification will be ignored. func (keyRing *KeyRing) Decrypt( message *PGPMessage, verifyKey *KeyRing, verifyTime int64, ) (*PlainMessage, error) { - return asymmetricDecrypt(message.NewReader(), keyRing, verifyKey, verifyTime) + return asymmetricDecrypt(message.NewReader(), keyRing, verifyKey, verifyTime, nil) +} + +// DecryptWithContext decrypts encrypted string using pgp keys, returning a PlainMessage +// * message : The encrypted input as a PGPMessage +// * verifyKey : Public key for signature verification (optional) +// * verifyTime : Time at verification (necessary only if verifyKey is not nil) +// * verificationContext : (optional) the context for the signature verification. +// +// When verifyKey is not provided, then verifyTime should be zero, and +// signature verification will be ignored. +func (keyRing *KeyRing) DecryptWithContext( + message *PGPMessage, + verifyKey *KeyRing, + verifyTime int64, + verificationContext *VerificationContext, +) (*PlainMessage, error) { + return asymmetricDecrypt(message.NewReader(), keyRing, verifyKey, verifyTime, verificationContext) } // SignDetached generates and returns a PGPSignature for a given PlainMessage. func (keyRing *KeyRing) SignDetached(message *PlainMessage) (*PGPSignature, error) { - signEntity, err := keyRing.getSigningEntity() - if err != nil { - return nil, err - } + return keyRing.SignDetachedWithContext(message, nil) +} - config := &packet.Config{DefaultHash: crypto.SHA512, Time: getTimeGenerator()} - var outBuf bytes.Buffer - // sign bin - if err := openpgp.DetachSign(&outBuf, signEntity, message.NewReader(), config); err != nil { - return nil, errors.Wrap(err, "gopenpgp: error in signing") - } - - return NewPGPSignature(outBuf.Bytes()), nil +// SignDetachedWithContext generates and returns a PGPSignature for a given PlainMessage. 
+// If a context is provided, it is added to the signature as notation data
+// with the name set in `constants.SignatureContextName`.
+func (keyRing *KeyRing) SignDetachedWithContext(message *PlainMessage, context *SigningContext) (*PGPSignature, error) {
+	return signMessageDetached(
+		keyRing,
+		message.NewReader(),
+		message.IsBinary(),
+		context,
+	)
 }

 // VerifyDetached verifies a PlainMessage with a detached PGPSignature
 // and returns a SignatureVerificationError if fails.
 func (keyRing *KeyRing) VerifyDetached(message *PlainMessage, signature *PGPSignature, verifyTime int64) error {
-	return verifySignature(
+	_, err := verifySignature(
 		keyRing.entities,
 		message.NewReader(),
 		signature.GetBinary(),
 		verifyTime,
+		nil,
 	)
+	return err
+}
+
+// VerifyDetachedWithContext verifies a PlainMessage with a detached PGPSignature
+// and returns a SignatureVerificationError if it fails.
+// If a context is provided, it verifies that the signature is valid in the given context, using
+// the signature notation with the name set in `constants.SignatureContextName`.
+func (keyRing *KeyRing) VerifyDetachedWithContext(message *PlainMessage, signature *PGPSignature, verifyTime int64, verificationContext *VerificationContext) error {
+	_, err := verifySignature(
+		keyRing.entities,
+		message.NewReader(),
+		signature.GetBinary(),
+		verifyTime,
+		verificationContext,
+	)
+	return err
 }

 // SignDetachedEncrypted generates and returns a PGPMessage
@@ -122,38 +156,41 @@ func (keyRing *KeyRing) VerifyDetachedEncrypted(message *PlainMessage, encrypted
 // returns the creation time of the signature if it succeeds
 // and returns a SignatureVerificationError if fails.
 func (keyRing *KeyRing) GetVerifiedSignatureTimestamp(message *PlainMessage, signature *PGPSignature, verifyTime int64) (int64, error) {
-	packets := packet.NewReader(bytes.NewReader(signature.Data))
-	var err error
-	var p packet.Packet
-	for {
-		p, err = packets.Next()
-		if errors.Is(err, io.EOF) {
-			break
-		}
-		if err != nil {
-			continue
-		}
-		sigPacket, ok := p.(*packet.Signature)
-		if !ok {
-			continue
-		}
-		var outBuf bytes.Buffer
-		err = sigPacket.Serialize(&outBuf)
-		if err != nil {
-			continue
-		}
-		err = verifySignature(
-			keyRing.entities,
-			message.NewReader(),
-			outBuf.Bytes(),
-			verifyTime,
-		)
-		if err != nil {
-			continue
-		}
-		return sigPacket.CreationTime.Unix(), nil
+	sigPacket, err := verifySignature(
+		keyRing.entities,
+		message.NewReader(),
+		signature.GetBinary(),
+		verifyTime,
+		nil,
+	)
+	if err != nil {
+		return 0, err
 	}
-	return 0, errors.Wrap(err, "gopenpgp: can't verify any signature packets")
+	return sigPacket.CreationTime.Unix(), nil
+}
+
+// GetVerifiedSignatureTimestampWithContext verifies a PlainMessage with a detached PGPSignature,
+// returns the creation time of the signature if it succeeds
+// and returns a SignatureVerificationError if it fails.
+// If a context is provided, it verifies that the signature is valid in the given context, using
+// the signature notation with the name set in `constants.SignatureContextName`.
+func (keyRing *KeyRing) GetVerifiedSignatureTimestampWithContext( + message *PlainMessage, + signature *PGPSignature, + verifyTime int64, + verificationContext *VerificationContext, +) (int64, error) { + sigPacket, err := verifySignature( + keyRing.entities, + message.NewReader(), + signature.GetBinary(), + verifyTime, + verificationContext, + ) + if err != nil { + return 0, err + } + return sigPacket.CreationTime.Unix(), nil } // ------ INTERNAL FUNCTIONS ------- @@ -162,8 +199,9 @@ func (keyRing *KeyRing) GetVerifiedSignatureTimestamp(message *PlainMessage, sig func asymmetricEncrypt( plainMessage *PlainMessage, publicKey, privateKey *KeyRing, - config *packet.Config, -) ([]byte, error) { + compress bool, + signingContext *SigningContext, +) (*PGPMessage, error) { var outBuf bytes.Buffer var encryptWriter io.WriteCloser var err error @@ -174,7 +212,7 @@ func asymmetricEncrypt( ModTime: plainMessage.getFormattedTime(), } - encryptWriter, err = asymmetricEncryptStream(hints, &outBuf, &outBuf, publicKey, privateKey, config) + encryptWriter, err = asymmetricEncryptStream(hints, &outBuf, &outBuf, publicKey, privateKey, compress, signingContext) if err != nil { return nil, err } @@ -189,7 +227,7 @@ func asymmetricEncrypt( return nil, errors.Wrap(err, "gopenpgp: error in closing message") } - return outBuf.Bytes(), nil + return &PGPMessage{outBuf.Bytes()}, nil } // Core for encryption+signature (all) functions. @@ -198,10 +236,24 @@ func asymmetricEncryptStream( keyPacketWriter io.Writer, dataPacketWriter io.Writer, publicKey, privateKey *KeyRing, - config *packet.Config, + compress bool, + signingContext *SigningContext, ) (encryptWriter io.WriteCloser, err error) { - var signEntity *openpgp.Entity + config := &packet.Config{ + DefaultCipher: packet.CipherAES256, + Time: getTimeGenerator(), + } + if compress { + config.DefaultCompressionAlgo = constants.DefaultCompression + config.CompressionConfig = &packet.CompressionConfig{Level: constants.DefaultCompressionLevel} + } + + if signingContext != nil { + config.SignatureNotations = append(config.SignatureNotations, signingContext.getNotation()) + } + + var signEntity *openpgp.Entity if privateKey != nil && len(privateKey.entities) > 0 { var err error signEntity, err = privateKey.getSigningEntity() @@ -223,13 +275,18 @@ func asymmetricEncryptStream( // Core for decryption+verification (non streaming) functions. 
func asymmetricDecrypt( - encryptedIO io.Reader, privateKey *KeyRing, verifyKey *KeyRing, verifyTime int64, + encryptedIO io.Reader, + privateKey *KeyRing, + verifyKey *KeyRing, + verifyTime int64, + verificationContext *VerificationContext, ) (message *PlainMessage, err error) { messageDetails, err := asymmetricDecryptStream( encryptedIO, privateKey, verifyKey, verifyTime, + verificationContext, ) if err != nil { return nil, err @@ -242,7 +299,7 @@ func asymmetricDecrypt( if verifyKey != nil { processSignatureExpiration(messageDetails, verifyTime) - err = verifyDetailsSignature(messageDetails, verifyKey) + err = verifyDetailsSignature(messageDetails, verifyKey, verificationContext) } return &PlainMessage{ @@ -259,6 +316,7 @@ func asymmetricDecryptStream( privateKey *KeyRing, verifyKey *KeyRing, verifyTime int64, + verificationContext *VerificationContext, ) (messageDetails *openpgp.MessageDetails, err error) { privKeyEntries := privateKey.entities var additionalEntries openpgp.EntityList @@ -285,6 +343,10 @@ func asymmetricDecryptStream( }, } + if verificationContext != nil { + config.KnownNotations = map[string]bool{constants.SignatureContextName: true} + } + messageDetails, err = openpgp.ReadMessage(encryptedIO, privKeyEntries, nil, config) if err != nil { return nil, errors.Wrap(err, "gopenpgp: error in reading message") diff --git a/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/keyring_session.go b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/keyring_session.go index ca2a94f9..638c1f78 100644 --- a/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/keyring_session.go +++ b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/keyring_session.go @@ -55,7 +55,11 @@ Loop: } if !hasPacket { - return nil, errors.Wrap(err, "gopenpgp: couldn't find a session key packet") + if err != nil { + return nil, errors.Wrap(err, "gopenpgp: couldn't find a session key packet") + } else { + return nil, errors.New("gopenpgp: couldn't find a session key packet") + } } if decryptErr != nil { diff --git a/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/keyring_streaming.go b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/keyring_streaming.go index 215212a7..6d99402d 100644 --- a/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/keyring_streaming.go +++ b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/keyring_streaming.go @@ -2,12 +2,10 @@ package crypto import ( "bytes" - "crypto" "io" "time" "github.com/ProtonMail/go-crypto/openpgp" - "github.com/ProtonMail/go-crypto/openpgp/packet" "github.com/pkg/errors" ) @@ -42,8 +40,89 @@ func (keyRing *KeyRing) EncryptStream( plainMessageMetadata *PlainMessageMetadata, signKeyRing *KeyRing, ) (plainMessageWriter WriteCloser, err error) { - config := &packet.Config{DefaultCipher: packet.CipherAES256, Time: getTimeGenerator()} + return encryptStream( + keyRing, + pgpMessageWriter, + pgpMessageWriter, + plainMessageMetadata, + signKeyRing, + false, + nil, + ) +} +// EncryptStreamWithContext is used to encrypt data as a Writer. +// It takes a writer for the encrypted data and returns a WriteCloser for the plaintext data +// If signKeyRing is not nil, it is used to do an embedded signature. +// * signingContext : (optional) a context for the embedded signature. 
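The streaming entry points all follow the same pattern: the caller gets back a plaintext `WriteCloser` and must close it so the trailing OpenPGP packets are flushed. A sketch of the simplest case, the pre-existing non-split `EncryptStream` with no embedded signature (throwaway key for illustration):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ProtonMail/gopenpgp/v2/crypto"
)

func main() {
	key, err := crypto.GenerateKey("bob", "bob@example.com", "x25519", 0)
	if err != nil {
		panic(err)
	}
	keyRing, err := crypto.NewKeyRing(key)
	if err != nil {
		panic(err)
	}

	var ciphertext bytes.Buffer
	// nil metadata falls back to the defaults set in encryptStream below
	// (binary, empty filename, current time); nil signKeyRing skips signing.
	plaintext, err := keyRing.EncryptStream(&ciphertext, nil, nil)
	if err != nil {
		panic(err)
	}
	if _, err := plaintext.Write([]byte("streamed data")); err != nil {
		panic(err)
	}
	// Close flushes the trailing packets; skipping it truncates the message.
	if err := plaintext.Close(); err != nil {
		panic(err)
	}
	fmt.Println(ciphertext.Len() > 0) // true
}
```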
+func (keyRing *KeyRing) EncryptStreamWithContext( + pgpMessageWriter Writer, + plainMessageMetadata *PlainMessageMetadata, + signKeyRing *KeyRing, + signingContext *SigningContext, +) (plainMessageWriter WriteCloser, err error) { + return encryptStream( + keyRing, + pgpMessageWriter, + pgpMessageWriter, + plainMessageMetadata, + signKeyRing, + false, + signingContext, + ) +} + +// EncryptStreamWithCompression is used to encrypt data as a Writer. +// The plaintext data is compressed before being encrypted. +// It takes a writer for the encrypted data and returns a WriteCloser for the plaintext data +// If signKeyRing is not nil, it is used to do an embedded signature. +func (keyRing *KeyRing) EncryptStreamWithCompression( + pgpMessageWriter Writer, + plainMessageMetadata *PlainMessageMetadata, + signKeyRing *KeyRing, +) (plainMessageWriter WriteCloser, err error) { + return encryptStream( + keyRing, + pgpMessageWriter, + pgpMessageWriter, + plainMessageMetadata, + signKeyRing, + true, + nil, + ) +} + +// EncryptStreamWithContextAndCompression is used to encrypt data as a Writer. +// The plaintext data is compressed before being encrypted. +// It takes a writer for the encrypted data and returns a WriteCloser for the plaintext data +// If signKeyRing is not nil, it is used to do an embedded signature. +// * signingContext : (optional) a context for the embedded signature. +func (keyRing *KeyRing) EncryptStreamWithContextAndCompression( + pgpMessageWriter Writer, + plainMessageMetadata *PlainMessageMetadata, + signKeyRing *KeyRing, + signingContext *SigningContext, +) (plainMessageWriter WriteCloser, err error) { + return encryptStream( + keyRing, + pgpMessageWriter, + pgpMessageWriter, + plainMessageMetadata, + signKeyRing, + true, + signingContext, + ) +} + +func encryptStream( + encryptionKeyRing *KeyRing, + keyPacketWriter Writer, + dataPacketWriter Writer, + plainMessageMetadata *PlainMessageMetadata, + signKeyRing *KeyRing, + compress bool, + signingContext *SigningContext, +) (plainMessageWriter WriteCloser, err error) { if plainMessageMetadata == nil { // Use sensible default metadata plainMessageMetadata = &PlainMessageMetadata{ @@ -59,7 +138,7 @@ func (keyRing *KeyRing) EncryptStream( ModTime: time.Unix(plainMessageMetadata.ModTime, 0), } - plainMessageWriter, err = asymmetricEncryptStream(hints, pgpMessageWriter, pgpMessageWriter, keyRing, signKeyRing, config) + plainMessageWriter, err = asymmetricEncryptStream(hints, keyPacketWriter, dataPacketWriter, encryptionKeyRing, signKeyRing, compress, signingContext) if err != nil { return nil, err } @@ -107,28 +186,102 @@ func (keyRing *KeyRing) EncryptSplitStream( plainMessageMetadata *PlainMessageMetadata, signKeyRing *KeyRing, ) (*EncryptSplitResult, error) { - config := &packet.Config{DefaultCipher: packet.CipherAES256, Time: getTimeGenerator()} + return encryptSplitStream( + keyRing, + dataPacketWriter, + plainMessageMetadata, + signKeyRing, + false, + nil, + ) +} - if plainMessageMetadata == nil { - // Use sensible default metadata - plainMessageMetadata = &PlainMessageMetadata{ - IsBinary: true, - Filename: "", - ModTime: GetUnixTime(), - } - } +// EncryptSplitStreamWithContext is used to encrypt data as a stream. +// It takes a writer for the Symmetrically Encrypted Data Packet +// (https://datatracker.ietf.org/doc/html/rfc4880#section-5.7) +// and returns a writer for the plaintext data and the key packet. +// If signKeyRing is not nil, it is used to do an embedded signature. 
+// * signingContext : (optional) a context for the embedded signature. +func (keyRing *KeyRing) EncryptSplitStreamWithContext( + dataPacketWriter Writer, + plainMessageMetadata *PlainMessageMetadata, + signKeyRing *KeyRing, + signingContext *SigningContext, +) (*EncryptSplitResult, error) { + return encryptSplitStream( + keyRing, + dataPacketWriter, + plainMessageMetadata, + signKeyRing, + false, + signingContext, + ) +} - hints := &openpgp.FileHints{ - FileName: plainMessageMetadata.Filename, - IsBinary: plainMessageMetadata.IsBinary, - ModTime: time.Unix(plainMessageMetadata.ModTime, 0), - } +// EncryptSplitStreamWithCompression is used to encrypt data as a stream. +// It takes a writer for the Symmetrically Encrypted Data Packet +// (https://datatracker.ietf.org/doc/html/rfc4880#section-5.7) +// and returns a writer for the plaintext data and the key packet. +// If signKeyRing is not nil, it is used to do an embedded signature. +func (keyRing *KeyRing) EncryptSplitStreamWithCompression( + dataPacketWriter Writer, + plainMessageMetadata *PlainMessageMetadata, + signKeyRing *KeyRing, +) (*EncryptSplitResult, error) { + return encryptSplitStream( + keyRing, + dataPacketWriter, + plainMessageMetadata, + signKeyRing, + true, + nil, + ) +} +// EncryptSplitStreamWithContextAndCompression is used to encrypt data as a stream. +// It takes a writer for the Symmetrically Encrypted Data Packet +// (https://datatracker.ietf.org/doc/html/rfc4880#section-5.7) +// and returns a writer for the plaintext data and the key packet. +// If signKeyRing is not nil, it is used to do an embedded signature. +// * signingContext : (optional) a context for the embedded signature. +func (keyRing *KeyRing) EncryptSplitStreamWithContextAndCompression( + dataPacketWriter Writer, + plainMessageMetadata *PlainMessageMetadata, + signKeyRing *KeyRing, + signingContext *SigningContext, +) (*EncryptSplitResult, error) { + return encryptSplitStream( + keyRing, + dataPacketWriter, + plainMessageMetadata, + signKeyRing, + true, + signingContext, + ) +} + +func encryptSplitStream( + encryptionKeyRing *KeyRing, + dataPacketWriter Writer, + plainMessageMetadata *PlainMessageMetadata, + signKeyRing *KeyRing, + compress bool, + signingContext *SigningContext, +) (*EncryptSplitResult, error) { var keyPacketBuf bytes.Buffer - plainMessageWriter, err := asymmetricEncryptStream(hints, &keyPacketBuf, dataPacketWriter, keyRing, signKeyRing, config) + plainMessageWriter, err := encryptStream( + encryptionKeyRing, + &keyPacketBuf, + dataPacketWriter, + plainMessageMetadata, + signKeyRing, + compress, + signingContext, + ) if err != nil { return nil, err } + return &EncryptSplitResult{ keyPacketBuf: &keyPacketBuf, plainMessageWriter: plainMessageWriter, @@ -138,10 +291,11 @@ func (keyRing *KeyRing) EncryptSplitStream( // PlainMessageReader is used to wrap the data of the decrypted plain message. // It can be used to read the decrypted data and verify the embedded signature. type PlainMessageReader struct { - details *openpgp.MessageDetails - verifyKeyRing *KeyRing - verifyTime int64 - readAll bool + details *openpgp.MessageDetails + verifyKeyRing *KeyRing + verifyTime int64 + readAll bool + verificationContext *VerificationContext } // GetMetadata returns the metadata of the decrypted message. 
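A sketch of how the refactored split-stream entry points compose, assuming the EncryptSplitResult writer surface (Write/Close/GetKeyPacket) of this gopenpgp version; the key rings, filename, and "example-app" context value are illustrative.

package example

import (
	"bytes"

	"github.com/ProtonMail/gopenpgp/v2/crypto"
)

// encryptSplit streams plaintext into a split message: the data packet goes
// to dataBuf while the key packet is buffered inside the EncryptSplitResult.
func encryptSplit(recipients, signer *crypto.KeyRing, plaintext []byte) (keyPacket, dataPacket []byte, err error) {
	var dataBuf bytes.Buffer
	meta := crypto.NewPlainMessageMetadata(true, "data.bin", crypto.GetUnixTime())
	res, err := recipients.EncryptSplitStreamWithContextAndCompression(
		&dataBuf,
		meta,
		signer, // embedded signature; pass nil to skip signing
		crypto.NewSigningContext("example-app", true),
	)
	if err != nil {
		return nil, nil, err
	}
	if _, err := res.Write(plaintext); err != nil {
		return nil, nil, err
	}
	// Close flushes the encryption writer and finalizes the key packet.
	if err := res.Close(); err != nil {
		return nil, nil, err
	}
	keyPacket, err = res.GetKeyPacket()
	if err != nil {
		return nil, nil, err
	}
	return keyPacket, dataBuf.Bytes(), nil
}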
@@ -173,7 +327,7 @@ func (msg *PlainMessageReader) VerifySignature() (err error) { } if msg.verifyKeyRing != nil { processSignatureExpiration(msg.details, msg.verifyTime) - err = verifyDetailsSignature(msg.details, msg.verifyKeyRing) + err = verifyDetailsSignature(msg.details, msg.verifyKeyRing, msg.verificationContext) } else { err = errors.New("gopenpgp: no verify keyring was provided before decryption") } @@ -190,11 +344,49 @@ func (keyRing *KeyRing) DecryptStream( verifyKeyRing *KeyRing, verifyTime int64, ) (plainMessage *PlainMessageReader, err error) { - messageDetails, err := asymmetricDecryptStream( - message, + return decryptStream( keyRing, + message, verifyKeyRing, verifyTime, + nil, + ) +} + +// DecryptStreamWithContext is used to decrypt a pgp message as a Reader. +// It takes a reader for the message data +// and returns a PlainMessageReader for the plaintext data. +// If verifyKeyRing is not nil, PlainMessageReader.VerifySignature() will +// verify the embedded signature with the given key ring and verification time. +// * verificationContext (optional): context for the signature verification. +func (keyRing *KeyRing) DecryptStreamWithContext( + message Reader, + verifyKeyRing *KeyRing, + verifyTime int64, + verificationContext *VerificationContext, +) (plainMessage *PlainMessageReader, err error) { + return decryptStream( + keyRing, + message, + verifyKeyRing, + verifyTime, + verificationContext, + ) +} + +func decryptStream( + decryptionKeyRing *KeyRing, + message Reader, + verifyKeyRing *KeyRing, + verifyTime int64, + verificationContext *VerificationContext, +) (plainMessage *PlainMessageReader, err error) { + messageDetails, err := asymmetricDecryptStream( + message, + decryptionKeyRing, + verifyKeyRing, + verifyTime, + verificationContext, ) if err != nil { return nil, err @@ -205,6 +397,7 @@ func (keyRing *KeyRing) DecryptStream( verifyKeyRing, verifyTime, false, + verificationContext, }, err } @@ -229,21 +422,45 @@ func (keyRing *KeyRing) DecryptSplitStream( ) } +// DecryptSplitStreamWithContext is used to decrypt a split pgp message as a Reader. +// It takes a key packet and a reader for the data packet +// and returns a PlainMessageReader for the plaintext data. +// If verifyKeyRing is not nil, PlainMessageReader.VerifySignature() will +// verify the embedded signature with the given key ring and verification time. +// * verificationContext (optional): context for the signature verification. +func (keyRing *KeyRing) DecryptSplitStreamWithContext( + keypacket []byte, + dataPacketReader Reader, + verifyKeyRing *KeyRing, verifyTime int64, + verificationContext *VerificationContext, +) (plainMessage *PlainMessageReader, err error) { + messageReader := io.MultiReader( + bytes.NewReader(keypacket), + dataPacketReader, + ) + return keyRing.DecryptStreamWithContext( + messageReader, + verifyKeyRing, + verifyTime, + verificationContext, + ) +} + // SignDetachedStream generates and returns a PGPSignature for a given message Reader. 
func (keyRing *KeyRing) SignDetachedStream(message Reader) (*PGPSignature, error) { - signEntity, err := keyRing.getSigningEntity() - if err != nil { - return nil, err - } + return keyRing.SignDetachedStreamWithContext(message, nil) +} - config := &packet.Config{DefaultHash: crypto.SHA512, Time: getTimeGenerator()} - var outBuf bytes.Buffer - // sign bin - if err := openpgp.DetachSign(&outBuf, signEntity, message, config); err != nil { - return nil, errors.Wrap(err, "gopenpgp: error in signing") - } - - return NewPGPSignature(outBuf.Bytes()), nil +// SignDetachedStreamWithContext generates and returns a PGPSignature for a given message Reader. +// If a context is provided, it is added to the signature as notation data +// with the name set in `constants.SignatureContextName`. +func (keyRing *KeyRing) SignDetachedStreamWithContext(message Reader, context *SigningContext) (*PGPSignature, error) { + return signMessageDetached( + keyRing, + message, + true, + context, + ) } // VerifyDetachedStream verifies a message reader with a detached PGPSignature @@ -253,12 +470,34 @@ func (keyRing *KeyRing) VerifyDetachedStream( signature *PGPSignature, verifyTime int64, ) error { - return verifySignature( + _, err := verifySignature( keyRing.entities, message, signature.GetBinary(), verifyTime, + nil, ) + return err +} + +// VerifyDetachedStreamWithContext verifies a message reader with a detached PGPSignature +// and returns a SignatureVerificationError if fails. +// If a context is provided, it verifies that the signature is valid in the given context, using +// the signature notations. +func (keyRing *KeyRing) VerifyDetachedStreamWithContext( + message Reader, + signature *PGPSignature, + verifyTime int64, + verificationContext *VerificationContext, +) error { + _, err := verifySignature( + keyRing.entities, + message, + signature.GetBinary(), + verifyTime, + verificationContext, + ) + return err } // SignDetachedEncryptedStream generates and returns a PGPMessage diff --git a/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/message.go b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/message.go index ccdb5d33..284098fe 100644 --- a/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/message.go +++ b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/message.go @@ -3,6 +3,7 @@ package crypto import ( "bytes" "encoding/base64" + "encoding/json" goerrors "errors" "io" "io/ioutil" @@ -92,7 +93,7 @@ func NewPlainMessageFromFile(data []byte, filename string, time uint32) *PlainMe // This allows seamless conversion to clear text signed messages (see RFC 4880 5.2.1 and 7.1). func NewPlainMessageFromString(text string) *PlainMessage { return &PlainMessage{ - Data: []byte(internal.CanonicalizeAndTrim(text)), + Data: []byte(internal.Canonicalize(text)), TextType: true, Filename: "", Time: uint32(GetUnixTime()), @@ -202,7 +203,7 @@ func (msg *PlainMessage) GetBinary() []byte { // GetString returns the content of the message as a string. func (msg *PlainMessage) GetString() string { - return strings.ReplaceAll(string(msg.Data), "\r\n", "\n") + return sanitizeString(strings.ReplaceAll(string(msg.Data), "\r\n", "\n")) } // GetBase64 returns the base-64 encoded binary content of the message as a @@ -287,6 +288,21 @@ func (msg *PGPMessage) GetHexEncryptionKeyIDs() ([]string, bool) { return getHexKeyIDs(msg.GetEncryptionKeyIDs()) } +// GetHexEncryptionKeyIDsJson returns the key IDs of the keys to which the session key is encrypted as a JSON array. +// If an error occurs it returns nil. 
+// Helper function for go-mobile clients. +func (msg *PGPMessage) GetHexEncryptionKeyIDsJson() []byte { + hexIds, ok := msg.GetHexEncryptionKeyIDs() + if !ok { + return nil + } + hexIdsJson, err := json.Marshal(hexIds) + if err != nil { + return nil + } + return hexIdsJson +} + // GetSignatureKeyIDs Returns the key IDs of the keys to which the (readable) signature packets are encrypted to. func (msg *PGPMessage) GetSignatureKeyIDs() ([]uint64, bool) { return getSignatureKeyIDs(msg.Data) @@ -297,6 +313,20 @@ func (msg *PGPMessage) GetHexSignatureKeyIDs() ([]string, bool) { return getHexKeyIDs(msg.GetSignatureKeyIDs()) } +// GetHexSignatureKeyIDsJson returns the key IDs of the keys to which the (readable) signature packets +// are encrypted to as a JSON array. Helper function for go-mobile clients. +func (msg *PGPMessage) GetHexSignatureKeyIDsJson() []byte { + sigHexSigIds, ok := msg.GetHexSignatureKeyIDs() + if !ok { + return nil + } + sigHexKeyIdsJSON, err := json.Marshal(sigHexSigIds) + if err != nil { + return nil + } + return sigHexKeyIdsJSON +} + // GetBinaryDataPacket returns the unarmored binary datapacket as a []byte. func (msg *PGPSplitMessage) GetBinaryDataPacket() []byte { return msg.DataPacket @@ -324,6 +354,27 @@ func (msg *PGPSplitMessage) GetPGPMessage() *PGPMessage { return NewPGPMessage(append(msg.KeyPacket, msg.DataPacket...)) } +// GetNumberOfKeyPackets returns the number of keys packets in this message. +func (msg *PGPSplitMessage) GetNumberOfKeyPackets() (int, error) { + bytesReader := bytes.NewReader(msg.KeyPacket) + packets := packet.NewReader(bytesReader) + var keyPacketCount int + for { + p, err := packets.Next() + if goerrors.Is(err, io.EOF) { + break + } + if err != nil { + return 0, err + } + switch p.(type) { + case *packet.SymmetricKeyEncrypted, *packet.EncryptedKey: + keyPacketCount += 1 + } + } + return keyPacketCount, nil +} + // SplitMessage splits the message into key and data packet(s). // Parameters are for backwards compatibility and are unused. 
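The two JSON helpers and GetNumberOfKeyPackets above target go-mobile callers; a short sketch of inspecting a split message with them (error handling kept minimal):

package example

import (
	"fmt"

	"github.com/ProtonMail/gopenpgp/v2/crypto"
)

// inspect reports how many key packets a split message carries and which
// recipient key IDs its session key is encrypted to.
func inspect(msg *crypto.PGPSplitMessage) error {
	count, err := msg.GetNumberOfKeyPackets()
	if err != nil {
		return err
	}
	// A nil result means the IDs were unreadable or JSON marshalling failed.
	ids := msg.GetPGPMessage().GetHexEncryptionKeyIDsJson()
	fmt.Printf("key packets: %d, recipient key IDs: %s\n", count, ids)
	return nil
}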
func (msg *PGPMessage) SplitMessage() (*PGPSplitMessage, error) { diff --git a/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/mime.go b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/mime.go index 4d55cd00..d756dfcc 100644 --- a/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/mime.go +++ b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/mime.go @@ -49,7 +49,8 @@ func (keyRing *KeyRing) DecryptMIMEMessage( callbacks.OnVerified(constants.SIGNATURE_OK) } bodyContent, bodyMimeType := body.GetBody() - callbacks.OnBody(bodyContent, bodyMimeType) + bodyContentSanitized := sanitizeString(bodyContent) + callbacks.OnBody(bodyContentSanitized, bodyMimeType) for i := 0; i < len(attachments); i++ { callbacks.OnAttachment(attachmentHeaders[i], []byte(attachments[i])) } diff --git a/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/sanitize_string.go b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/sanitize_string.go new file mode 100644 index 00000000..854e11f3 --- /dev/null +++ b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/sanitize_string.go @@ -0,0 +1,7 @@ +package crypto + +import "strings" + +func sanitizeString(input string) string { + return strings.ToValidUTF8(input, "\ufffd") +} diff --git a/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/sessionkey.go b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/sessionkey.go index 481e505b..a523fce8 100644 --- a/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/sessionkey.go +++ b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/sessionkey.go @@ -138,17 +138,7 @@ func newSessionKeyFromEncrypted(ek *packet.EncryptedKey) (*SessionKey, error) { // * message : The plain data as a PlainMessage. // * output : The encrypted data as PGPMessage. func (sk *SessionKey) Encrypt(message *PlainMessage) ([]byte, error) { - dc, err := sk.GetCipherFunc() - if err != nil { - return nil, errors.Wrap(err, "gopenpgp: unable to encrypt with session key") - } - - config := &packet.Config{ - Time: getTimeGenerator(), - DefaultCipher: dc, - } - - return encryptWithSessionKey(message, sk, nil, config) + return encryptWithSessionKey(message, sk, nil, false, nil) } // EncryptAndSign encrypts a PlainMessage to PGPMessage with a SessionKey and signs it with a Private key. @@ -156,59 +146,50 @@ func (sk *SessionKey) Encrypt(message *PlainMessage) ([]byte, error) { // * signKeyRing: The KeyRing to sign the message // * output : The encrypted data as PGPMessage. func (sk *SessionKey) EncryptAndSign(message *PlainMessage, signKeyRing *KeyRing) ([]byte, error) { - dc, err := sk.GetCipherFunc() - if err != nil { - return nil, errors.Wrap(err, "gopenpgp: unable to encrypt with session key") - } + return encryptWithSessionKey(message, sk, signKeyRing, false, nil) +} - config := &packet.Config{ - Time: getTimeGenerator(), - DefaultCipher: dc, - } - - signEntity, err := signKeyRing.getSigningEntity() - if err != nil { - return nil, errors.Wrap(err, "gopenpgp: unable to sign") - } - - return encryptWithSessionKey(message, sk, signEntity, config) +// EncryptAndSignWithContext encrypts a PlainMessage to PGPMessage with a SessionKey and signs it with a Private key. +// * message : The plain data as a PlainMessage. +// * signKeyRing: The KeyRing to sign the message +// * output : The encrypted data as PGPMessage. +// * signingContext : (optional) the context for the signature. 
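A usage sketch for the context-aware session-key call declared next; the session key, signing key ring, and "example-app" value are assumed or illustrative.

package example

import "github.com/ProtonMail/gopenpgp/v2/crypto"

// encryptDataPacket encrypts and signs plaintext under an existing session
// key, stamping the signature with an application context. The returned
// bytes are the raw encrypted data packet (no key packets included).
func encryptDataPacket(sk *crypto.SessionKey, signer *crypto.KeyRing, plaintext []byte) ([]byte, error) {
	message := crypto.NewPlainMessage(plaintext)
	signingContext := crypto.NewSigningContext("example-app", true) // illustrative context value
	return sk.EncryptAndSignWithContext(message, signer, signingContext)
}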
+func (sk *SessionKey) EncryptAndSignWithContext(message *PlainMessage, signKeyRing *KeyRing, signingContext *SigningContext) ([]byte, error) { + return encryptWithSessionKey(message, sk, signKeyRing, false, signingContext) } // EncryptWithCompression encrypts with compression support a PlainMessage to PGPMessage with a SessionKey. // * message : The plain data as a PlainMessage. // * output : The encrypted data as PGPMessage. func (sk *SessionKey) EncryptWithCompression(message *PlainMessage) ([]byte, error) { - dc, err := sk.GetCipherFunc() - if err != nil { - return nil, errors.Wrap(err, "gopenpgp: unable to encrypt with session key") - } - - config := &packet.Config{ - Time: getTimeGenerator(), - DefaultCipher: dc, - DefaultCompressionAlgo: constants.DefaultCompression, - CompressionConfig: &packet.CompressionConfig{Level: constants.DefaultCompressionLevel}, - } - - return encryptWithSessionKey(message, sk, nil, config) + return encryptWithSessionKey(message, sk, nil, true, nil) } -func encryptWithSessionKey(message *PlainMessage, sk *SessionKey, signEntity *openpgp.Entity, config *packet.Config) ([]byte, error) { +func encryptWithSessionKey( + message *PlainMessage, + sk *SessionKey, + signKeyRing *KeyRing, + compress bool, + signingContext *SigningContext, +) ([]byte, error) { var encBuf = new(bytes.Buffer) encryptWriter, signWriter, err := encryptStreamWithSessionKey( - message.IsBinary(), - message.Filename, - message.Time, + NewPlainMessageMetadata( + message.IsBinary(), + message.Filename, + int64(message.Time), + ), encBuf, sk, - signEntity, - config, + signKeyRing, + compress, + signingContext, ) if err != nil { return nil, err } - if signEntity != nil { + if signKeyRing != nil { _, err = signWriter.Write(message.GetBinary()) if err != nil { return nil, errors.Wrap(err, "gopenpgp: error in writing signed message") @@ -231,6 +212,61 @@ func encryptWithSessionKey(message *PlainMessage, sk *SessionKey, signEntity *op } func encryptStreamWithSessionKey( + plainMessageMetadata *PlainMessageMetadata, + dataPacketWriter io.Writer, + sk *SessionKey, + signKeyRing *KeyRing, + compress bool, + signingContext *SigningContext, +) (encryptWriter, signWriter io.WriteCloser, err error) { + dc, err := sk.GetCipherFunc() + if err != nil { + return nil, nil, errors.Wrap(err, "gopenpgp: unable to encrypt with session key") + } + + config := &packet.Config{ + Time: getTimeGenerator(), + DefaultCipher: dc, + } + + var signEntity *openpgp.Entity + if signKeyRing != nil { + signEntity, err = signKeyRing.getSigningEntity() + if err != nil { + return nil, nil, errors.Wrap(err, "gopenpgp: unable to sign") + } + } + + if compress { + config.DefaultCompressionAlgo = constants.DefaultCompression + config.CompressionConfig = &packet.CompressionConfig{Level: constants.DefaultCompressionLevel} + } + + if signingContext != nil { + config.SignatureNotations = append(config.SignatureNotations, signingContext.getNotation()) + } + + if plainMessageMetadata == nil { + // Use sensible default metadata + plainMessageMetadata = &PlainMessageMetadata{ + IsBinary: true, + Filename: "", + ModTime: GetUnixTime(), + } + } + + return encryptStreamWithSessionKeyAndConfig( + plainMessageMetadata.IsBinary, + plainMessageMetadata.Filename, + uint32(plainMessageMetadata.ModTime), + dataPacketWriter, + sk, + signEntity, + config, + ) +} + +func encryptStreamWithSessionKeyAndConfig( isBinary bool, filename string, modTime uint32, @@ -239,7 +275,15 @@ func encryptStreamWithSessionKey( signEntity *openpgp.Entity, config 
*packet.Config, ) (encryptWriter, signWriter io.WriteCloser, err error) { - encryptWriter, err = packet.SerializeSymmetricallyEncrypted(dataPacketWriter, config.Cipher(), sk.Key, config) + encryptWriter, err = packet.SerializeSymmetricallyEncrypted( + dataPacketWriter, + config.Cipher(), + config.AEAD() != nil, + packet.CipherSuite{Cipher: config.Cipher(), Mode: config.AEAD().Mode()}, + sk.Key, + config, + ) + if err != nil { return nil, nil, errors.Wrap(err, "gopenpgp: unable to encrypt") } @@ -289,9 +333,41 @@ func (sk *SessionKey) Decrypt(dataPacket []byte) (*PlainMessage, error) { // * verifyTime: when should the signature be valid, as timestamp. If 0 time verification is disabled. // * output: PlainMessage. func (sk *SessionKey) DecryptAndVerify(dataPacket []byte, verifyKeyRing *KeyRing, verifyTime int64) (*PlainMessage, error) { + return decryptWithSessionKeyAndContext( + sk, + dataPacket, + verifyKeyRing, + verifyTime, + nil, + ) +} + +// DecryptAndVerifyWithContext decrypts pgp data packets using directly a session key and verifies embedded signatures. +// * encrypted: PGPMessage. +// * verifyKeyRing: KeyRing with verification public keys +// * verifyTime: when should the signature be valid, as timestamp. If 0 time verification is disabled. +// * output: PlainMessage. +// * verificationContext (optional): context for the signature verification. +func (sk *SessionKey) DecryptAndVerifyWithContext(dataPacket []byte, verifyKeyRing *KeyRing, verifyTime int64, verificationContext *VerificationContext) (*PlainMessage, error) { + return decryptWithSessionKeyAndContext( + sk, + dataPacket, + verifyKeyRing, + verifyTime, + verificationContext, + ) +} + +func decryptWithSessionKeyAndContext( + sk *SessionKey, + dataPacket []byte, + verifyKeyRing *KeyRing, + verifyTime int64, + verificationContext *VerificationContext, +) (*PlainMessage, error) { var messageReader = bytes.NewReader(dataPacket) - md, err := decryptStreamWithSessionKey(sk, messageReader, verifyKeyRing) + md, err := decryptStreamWithSessionKey(sk, messageReader, verifyKeyRing, verificationContext) if err != nil { return nil, err } @@ -303,7 +379,7 @@ func (sk *SessionKey) DecryptAndVerify(dataPacket []byte, verifyKeyRing *KeyRing if verifyKeyRing != nil { processSignatureExpiration(md, verifyTime) - err = verifyDetailsSignature(md, verifyKeyRing) + err = verifyDetailsSignature(md, verifyKeyRing, verificationContext) } return &PlainMessage{ @@ -314,7 +390,12 @@ func (sk *SessionKey) DecryptAndVerify(dataPacket []byte, verifyKeyRing *KeyRing }, err } -func decryptStreamWithSessionKey(sk *SessionKey, messageReader io.Reader, verifyKeyRing *KeyRing) (*openpgp.MessageDetails, error) { +func decryptStreamWithSessionKey( + sk *SessionKey, + messageReader io.Reader, + verifyKeyRing *KeyRing, + verificationContext *VerificationContext, +) (*openpgp.MessageDetails, error) { var decrypted io.ReadCloser var keyring openpgp.EntityList @@ -327,17 +408,24 @@ func decryptStreamWithSessionKey(sk *SessionKey, messageReader io.Reader, verify // Decrypt data packet switch p := p.(type) { - case *packet.SymmetricallyEncrypted: + case *packet.SymmetricallyEncrypted, *packet.AEADEncrypted: + if symPacket, ok := p.(*packet.SymmetricallyEncrypted); ok { + if !symPacket.IntegrityProtected { + return nil, errors.New("gopenpgp: message is not authenticated") + } + } dc, err := sk.GetCipherFunc() if err != nil { return nil, errors.Wrap(err, "gopenpgp: unable to decrypt with session key") } - - decrypted, err = p.Decrypt(dc, sk.Key) + encryptedDataPacket, 
isDataPacket := p.(packet.EncryptedDataPacket) + if !isDataPacket { + return nil, errors.Wrap(err, "gopenpgp: unknown data packet") + } + decrypted, err = encryptedDataPacket.Decrypt(dc, sk.Key) if err != nil { return nil, errors.Wrap(err, "gopenpgp: unable to decrypt symmetric packet") } - default: return nil, errors.New("gopenpgp: invalid packet type") } @@ -346,6 +434,10 @@ func decryptStreamWithSessionKey(sk *SessionKey, messageReader io.Reader, verify Time: getTimeGenerator(), } + if verificationContext != nil { + config.KnownNotations = map[string]bool{constants.SignatureContextName: true} + } + // Push decrypted packet as literal packet and use openpgp's reader if verifyKeyRing != nil { keyring = verifyKeyRing.entities diff --git a/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/sessionkey_streaming.go b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/sessionkey_streaming.go index 02e80ab4..200ca639 100644 --- a/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/sessionkey_streaming.go +++ b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/sessionkey_streaming.go @@ -1,8 +1,6 @@ package crypto import ( - "github.com/ProtonMail/go-crypto/openpgp" - "github.com/ProtonMail/go-crypto/openpgp/packet" "github.com/pkg/errors" ) @@ -30,40 +28,87 @@ func (sk *SessionKey) EncryptStream( plainMessageMetadata *PlainMessageMetadata, signKeyRing *KeyRing, ) (plainMessageWriter WriteCloser, err error) { - dc, err := sk.GetCipherFunc() - if err != nil { - return nil, errors.Wrap(err, "gopenpgp: unable to encrypt with session key") - } + return sk.encryptStream( + dataPacketWriter, + plainMessageMetadata, + signKeyRing, + false, + nil, + ) +} - config := &packet.Config{ - Time: getTimeGenerator(), - DefaultCipher: dc, - } - var signEntity *openpgp.Entity - if signKeyRing != nil { - signEntity, err = signKeyRing.getSigningEntity() - if err != nil { - return nil, errors.Wrap(err, "gopenpgp: unable to sign") - } - } +// EncryptStreamWithContext is used to encrypt data as a Writer. +// It takes a writer for the encrypted data packet and returns a writer for the plaintext data. +// If signKeyRing is not nil, it is used to do an embedded signature. +// * signingContext : (optional) the context for the signature. +func (sk *SessionKey) EncryptStreamWithContext( + dataPacketWriter Writer, + plainMessageMetadata *PlainMessageMetadata, + signKeyRing *KeyRing, + signingContext *SigningContext, +) (plainMessageWriter WriteCloser, err error) { + return sk.encryptStream( + dataPacketWriter, + plainMessageMetadata, + signKeyRing, + false, + signingContext, + ) +} - if plainMessageMetadata == nil { - // Use sensible default metadata - plainMessageMetadata = &PlainMessageMetadata{ - IsBinary: true, - Filename: "", - ModTime: GetUnixTime(), - } - } +// EncryptStreamWithCompression is used to encrypt data as a Writer. +// The plaintext data is compressed before being encrypted. +// It takes a writer for the encrypted data packet and returns a writer for the plaintext data. +// If signKeyRing is not nil, it is used to do an embedded signature. +// * signingContext : (optional) the context for the signature. +func (sk *SessionKey) EncryptStreamWithCompression( + dataPacketWriter Writer, + plainMessageMetadata *PlainMessageMetadata, + signKeyRing *KeyRing, +) (plainMessageWriter WriteCloser, err error) { + return sk.encryptStream( + dataPacketWriter, + plainMessageMetadata, + signKeyRing, + true, + nil, + ) +} +// EncryptStreamWithContextAndCompression is used to encrypt data as a Writer. 
+// The plaintext data is compressed before being encrypted. +// It takes a writer for the encrypted data packet and returns a writer for the plaintext data. +// If signKeyRing is not nil, it is used to do an embedded signature. +// * signingContext : (optional) the context for the signature. +func (sk *SessionKey) EncryptStreamWithContextAndCompression( + dataPacketWriter Writer, + plainMessageMetadata *PlainMessageMetadata, + signKeyRing *KeyRing, + signingContext *SigningContext, +) (plainMessageWriter WriteCloser, err error) { + return sk.encryptStream( + dataPacketWriter, + plainMessageMetadata, + signKeyRing, + true, + signingContext, + ) +} + +func (sk *SessionKey) encryptStream( + dataPacketWriter Writer, + plainMessageMetadata *PlainMessageMetadata, + signKeyRing *KeyRing, + compress bool, + signingContext *SigningContext, +) (plainMessageWriter WriteCloser, err error) { encryptWriter, signWriter, err := encryptStreamWithSessionKey( - plainMessageMetadata.IsBinary, - plainMessageMetadata.Filename, - uint32(plainMessageMetadata.ModTime), + plainMessageMetadata, dataPacketWriter, sk, - signEntity, - config, + signKeyRing, + compress, + signingContext, ) if err != nil { @@ -87,10 +132,48 @@ func (sk *SessionKey) DecryptStream( verifyKeyRing *KeyRing, verifyTime int64, ) (plainMessage *PlainMessageReader, err error) { - messageDetails, err := decryptStreamWithSessionKey( + return decryptStreamWithSessionKeyAndContext( sk, dataPacketReader, verifyKeyRing, + verifyTime, + nil, + ) +} + +// DecryptStreamWithContext is used to decrypt a data packet as a Reader. +// It takes a reader for the data packet +// and returns a PlainMessageReader for the plaintext data. +// If verifyKeyRing is not nil, PlainMessageReader.VerifySignature() will +// verify the embedded signature with the given key ring and verification time. +// * verificationContext (optional): context for the signature verification. 
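A sketch of the streaming decrypt path declared below, assuming the data packet is already in memory; note the embedded signature can only be checked after the reader has been fully drained.

package example

import (
	"bytes"
	"io"

	"github.com/ProtonMail/gopenpgp/v2/crypto"
)

// decryptDataPacket decrypts a session-key data packet and verifies the
// embedded signature in the "example-app" context (illustrative value).
func decryptDataPacket(sk *crypto.SessionKey, dataPacket []byte, verify *crypto.KeyRing) ([]byte, error) {
	ctx := crypto.NewVerificationContext("example-app", true, 0)
	reader, err := sk.DecryptStreamWithContext(bytes.NewReader(dataPacket), verify, crypto.GetUnixTime(), ctx)
	if err != nil {
		return nil, err
	}
	plaintext, err := io.ReadAll(reader)
	if err != nil {
		return nil, err
	}
	// The signature covers the whole stream, so verify only after EOF.
	if err := reader.VerifySignature(); err != nil {
		return nil, err
	}
	return plaintext, nil
}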
+func (sk *SessionKey) DecryptStreamWithContext( + dataPacketReader Reader, + verifyKeyRing *KeyRing, + verifyTime int64, + verificationContext *VerificationContext, +) (plainMessage *PlainMessageReader, err error) { + return decryptStreamWithSessionKeyAndContext( + sk, + dataPacketReader, + verifyKeyRing, + verifyTime, + verificationContext, + ) +} + +func decryptStreamWithSessionKeyAndContext( + sessionKey *SessionKey, + dataPacketReader Reader, + verifyKeyRing *KeyRing, + verifyTime int64, + verificationContext *VerificationContext, +) (plainMessage *PlainMessageReader, err error) { + messageDetails, err := decryptStreamWithSessionKey( + sessionKey, + dataPacketReader, + verifyKeyRing, + verificationContext, ) if err != nil { return nil, errors.Wrap(err, "gopenpgp: error in reading message") @@ -101,5 +184,6 @@ func (sk *SessionKey) DecryptStream( verifyKeyRing, verifyTime, false, + verificationContext, }, err } diff --git a/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/signature.go b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/signature.go index 67c1d7c2..f5c58ab3 100644 --- a/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/signature.go +++ b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/signature.go @@ -3,7 +3,6 @@ package crypto import ( "bytes" "crypto" - "errors" "fmt" "io" "math" @@ -12,6 +11,7 @@ import ( "github.com/ProtonMail/go-crypto/openpgp" pgpErrors "github.com/ProtonMail/go-crypto/openpgp/errors" "github.com/ProtonMail/go-crypto/openpgp/packet" + "github.com/pkg/errors" "github.com/ProtonMail/gopenpgp/v2/constants" "github.com/ProtonMail/gopenpgp/v2/internal" @@ -29,23 +29,41 @@ var allowedHashes = []crypto.Hash{ type SignatureVerificationError struct { Status int Message string + Cause error } // Error is the base method for all errors. func (e SignatureVerificationError) Error() string { + if e.Cause != nil { + return fmt.Sprintf("Signature Verification Error: %v caused by %v", e.Message, e.Cause) + } return fmt.Sprintf("Signature Verification Error: %v", e.Message) } +// Unwrap returns the cause of failure. +func (e SignatureVerificationError) Unwrap() error { + return e.Cause +} + // ------------------ // Internal functions // ------------------ // newSignatureFailed creates a new SignatureVerificationError, type // SignatureFailed. -func newSignatureFailed() SignatureVerificationError { +func newSignatureBadContext(cause error) SignatureVerificationError { + return SignatureVerificationError{ + Status: constants.SIGNATURE_BAD_CONTEXT, + Message: "Invalid signature context", + Cause: cause, + } +} + +func newSignatureFailed(cause error) SignatureVerificationError { return SignatureVerificationError{ Status: constants.SIGNATURE_FAILED, Message: "Invalid signature", + Cause: cause, } } @@ -98,7 +116,7 @@ func processSignatureExpiration(md *openpgp.MessageDetails, verifyTime int64) { } // verifyDetailsSignature verifies signature from message details. 
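With the new Cause field and Unwrap method on SignatureVerificationError, callers can distinguish context failures from ordinary verification failures through the standard errors package; a sketch (the status handling shown is illustrative):

package example

import (
	"errors"

	"github.com/ProtonMail/gopenpgp/v2/constants"
	"github.com/ProtonMail/gopenpgp/v2/crypto"
)

// classifyVerifyError distinguishes a bad signature context from other
// verification failures and surfaces the underlying cause when present.
func classifyVerifyError(err error) string {
	var sigErr crypto.SignatureVerificationError
	if !errors.As(err, &sigErr) {
		return "not a verification error: " + err.Error()
	}
	if sigErr.Status == constants.SIGNATURE_BAD_CONTEXT {
		return "rejected: wrong or missing signature context"
	}
	if cause := errors.Unwrap(sigErr); cause != nil {
		return "verification failed: " + cause.Error()
	}
	return "verification failed: " + sigErr.Message
}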
-func verifyDetailsSignature(md *openpgp.MessageDetails, verifierKey *KeyRing) error { +func verifyDetailsSignature(md *openpgp.MessageDetails, verifierKey *KeyRing, verificationContext *VerificationContext) error { if !md.IsSigned { return newSignatureNotSigned() } @@ -108,18 +126,113 @@ func verifyDetailsSignature(md *openpgp.MessageDetails, verifierKey *KeyRing) er return newSignatureNoVerifier() } if md.SignatureError != nil { - return newSignatureFailed() + return newSignatureFailed(md.SignatureError) } if md.Signature == nil || md.Signature.Hash < allowedHashes[0] || md.Signature.Hash > allowedHashes[len(allowedHashes)-1] { return newSignatureInsecure() } + if verificationContext != nil { + err := verificationContext.verifyContext(md.Signature) + if err != nil { + return newSignatureBadContext(err) + } + } + + return nil +} + +// SigningContext gives the context that will be +// included in the signature's notation data. +type SigningContext struct { + Value string + IsCritical bool +} + +// NewSigningContext creates a new signing context. +// The value is set to the notation data. +// isCritical controls whether the notation is flagged as a critical packet. +func NewSigningContext(value string, isCritical bool) *SigningContext { + return &SigningContext{Value: value, IsCritical: isCritical} +} + +func (context *SigningContext) getNotation() *packet.Notation { + return &packet.Notation{ + Name: constants.SignatureContextName, + Value: []byte(context.Value), + IsCritical: context.IsCritical, + IsHumanReadable: true, + } +} + +// VerificationContext gives the context that will be +// used to verify the signature. +type VerificationContext struct { + Value string + IsRequired bool + RequiredAfter int64 +} + +// NewVerificationContext creates a new verification context. +// The value is checked against the signature's notation data. +// If isRequired is false, the signature is allowed to have no context set. +// If requiredAfter is != 0, the signature is allowed to have no context set if it +// was created before the unix time set in requiredAfter. 
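The interaction of IsRequired and RequiredAfter can be summed up with three illustrative configurations (the context value and cutoff timestamp are made up):

package example

import "github.com/ProtonMail/gopenpgp/v2/crypto"

var (
	// Every signature must carry the "example-app" context.
	strictCtx = crypto.NewVerificationContext("example-app", true, 0)

	// The context is required only for signatures created after
	// 2024-01-01T00:00:00Z; older context-less signatures still verify.
	migrationCtx = crypto.NewVerificationContext("example-app", true, 1704067200)

	// A missing context is tolerated, but a mismatching one is rejected.
	lenientCtx = crypto.NewVerificationContext("example-app", false, 0)
)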
+func NewVerificationContext(value string, isRequired bool, requiredAfter int64) *VerificationContext { + return &VerificationContext{ + Value: value, + IsRequired: isRequired, + RequiredAfter: requiredAfter, + } +} + +func (context *VerificationContext) isRequiredAtTime(signatureTime time.Time) bool { + return context.IsRequired && + (context.RequiredAfter == 0 || signatureTime.After(time.Unix(context.RequiredAfter, 0))) +} + +func findContext(notations []*packet.Notation) (string, error) { + context := "" + for _, notation := range notations { + if notation.Name == constants.SignatureContextName { + if context != "" { + return "", errors.New("gopenpgp: signature has multiple context notations") + } + if !notation.IsHumanReadable { + return "", errors.New("gopenpgp: context notation was not set as human-readable") + } + context = string(notation.Value) + } + } + return context, nil +} + +func (context *VerificationContext) verifyContext(sig *packet.Signature) error { + signatureContext, err := findContext(sig.Notations) + if err != nil { + return err + } + if signatureContext != context.Value { + contextRequired := context.isRequiredAtTime(sig.CreationTime) + if contextRequired { + return errors.New("gopenpgp: signature did not have the required context") + } else if signatureContext != "" { + return errors.New("gopenpgp: signature had a wrong context") + } + } + return nil } // verifySignature verifies if a signature is valid with the entity list. -func verifySignature(pubKeyEntries openpgp.EntityList, origText io.Reader, signature []byte, verifyTime int64) error { +func verifySignature( + pubKeyEntries openpgp.EntityList, + origText io.Reader, + signature []byte, + verifyTime int64, + verificationContext *VerificationContext, +) (*packet.Signature, error) { config := &packet.Config{} if verifyTime == 0 { config.Time = func() time.Time { @@ -130,32 +243,90 @@ func verifySignature(pubKeyEntries openpgp.EntityList, origText io.Reader, signa return time.Unix(verifyTime+internal.CreationTimeOffset, 0) } } + + if verificationContext != nil { + config.KnownNotations = map[string]bool{constants.SignatureContextName: true} + } signatureReader := bytes.NewReader(signature) - signer, err := openpgp.CheckDetachedSignatureAndHash(pubKeyEntries, origText, signatureReader, allowedHashes, config) + sig, signer, err := openpgp.VerifyDetachedSignatureAndHash(pubKeyEntries, origText, signatureReader, allowedHashes, config) - if errors.Is(err, pgpErrors.ErrSignatureExpired) && signer != nil && verifyTime > 0 { - // if verifyTime = 0: time check disabled, everything is okay - // Maybe the creation time offset pushed it over the edge - // Retry with the actual verification time - config.Time = func() time.Time { - return time.Unix(verifyTime, 0) - } + if sig != nil && signer != nil && (errors.Is(err, pgpErrors.ErrSignatureExpired) || errors.Is(err, pgpErrors.ErrKeyExpired)) { //nolint:nestif + if verifyTime == 0 { // Expiration check disabled + err = nil + } else { + // Maybe the creation time offset pushed it over the edge + // Retry with the actual verification time + config.Time = func() time.Time { + return time.Unix(verifyTime, 0) + } - _, err = signatureReader.Seek(0, io.SeekStart) - if err != nil { - return newSignatureFailed() - } + seeker, ok := origText.(io.ReadSeeker) + if !ok { + return nil, errors.Wrap(err, "gopenpgp: message reader do not support seeking, cannot retry signature verification") + } - signer, err = openpgp.CheckDetachedSignatureAndHash(pubKeyEntries, origText, signatureReader, 
allowedHashes, config) - if err != nil { - return newSignatureFailed() + _, err = seeker.Seek(0, io.SeekStart) + if err != nil { + return nil, newSignatureFailed(errors.Wrap(err, "gopenpgp: could not rewind the data reader.")) + } + + _, err = signatureReader.Seek(0, io.SeekStart) + if err != nil { + return nil, newSignatureFailed(err) + } + + sig, signer, err = openpgp.VerifyDetachedSignatureAndHash(pubKeyEntries, seeker, signatureReader, allowedHashes, config) } } - if signer == nil { - return newSignatureFailed() + if err != nil { + return nil, newSignatureFailed(err) } - return nil + if sig == nil || signer == nil { + return nil, newSignatureFailed(errors.New("gopenpgp: no signer or valid signature")) + } + + if verificationContext != nil { + err := verificationContext.verifyContext(sig) + if err != nil { + return nil, newSignatureBadContext(err) + } + } + + return sig, nil +} + +func signMessageDetached( + signKeyRing *KeyRing, + messageReader io.Reader, + isBinary bool, + context *SigningContext, +) (*PGPSignature, error) { + config := &packet.Config{ + DefaultHash: crypto.SHA512, + Time: getTimeGenerator(), + } + + signEntity, err := signKeyRing.getSigningEntity() + if err != nil { + return nil, err + } + + if context != nil { + config.SignatureNotations = append(config.SignatureNotations, context.getNotation()) + } + + var outBuf bytes.Buffer + if isBinary { + err = openpgp.DetachSign(&outBuf, signEntity, messageReader, config) + } else { + err = openpgp.DetachSignText(&outBuf, signEntity, messageReader, config) + } + if err != nil { + return nil, errors.Wrap(err, "gopenpgp: error in signing") + } + + return NewPGPSignature(outBuf.Bytes()), nil } diff --git a/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/signature_collector.go b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/signature_collector.go index b5aefa62..fb769a91 100644 --- a/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/signature_collector.go +++ b/vendor/github.com/ProtonMail/gopenpgp/v2/crypto/signature_collector.go @@ -99,7 +99,7 @@ func (sc *SignatureCollector) Accept( } sc.signature = string(buffer) str, _ := ioutil.ReadAll(rawBody) - canonicalizedBody := internal.CanonicalizeAndTrim(string(str)) + canonicalizedBody := internal.Canonicalize(internal.TrimEachLine(string(str))) rawBody = bytes.NewReader([]byte(canonicalizedBody)) if sc.keyring != nil { _, err = openpgp.CheckArmoredDetachedSignature(sc.keyring, rawBody, bytes.NewReader(buffer), sc.config) @@ -110,7 +110,7 @@ func (sc *SignatureCollector) Accept( case errors.Is(err, pgpErrors.ErrUnknownIssuer): sc.verified = newSignatureNoVerifier() default: - sc.verified = newSignatureFailed() + sc.verified = newSignatureFailed(err) } } else { sc.verified = newSignatureNoVerifier() diff --git a/vendor/github.com/ProtonMail/gopenpgp/v2/helper/BUILD.bazel b/vendor/github.com/ProtonMail/gopenpgp/v2/helper/BUILD.bazel index e5b0719a..3a558f88 100644 --- a/vendor/github.com/ProtonMail/gopenpgp/v2/helper/BUILD.bazel +++ b/vendor/github.com/ProtonMail/gopenpgp/v2/helper/BUILD.bazel @@ -4,8 +4,10 @@ go_library( name = "helper", srcs = [ "cleartext.go", + "decrypt_check.go", "helper.go", "key.go", + "message.go", "mobile.go", "mobile_stream.go", "sign_detached.go", @@ -15,6 +17,8 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/ProtonMail/gopenpgp/v2/crypto", + "//vendor/github.com/ProtonMail/gopenpgp/v2/internal", "//vendor/github.com/pkg/errors", + "@com_github_protonmail_go_crypto//openpgp/packet", ], ) diff --git 
a/vendor/github.com/ProtonMail/gopenpgp/v2/helper/cleartext.go b/vendor/github.com/ProtonMail/gopenpgp/v2/helper/cleartext.go index c3187391..d5517b16 100644 --- a/vendor/github.com/ProtonMail/gopenpgp/v2/helper/cleartext.go +++ b/vendor/github.com/ProtonMail/gopenpgp/v2/helper/cleartext.go @@ -2,6 +2,7 @@ package helper import ( "github.com/ProtonMail/gopenpgp/v2/crypto" + "github.com/ProtonMail/gopenpgp/v2/internal" "github.com/pkg/errors" ) @@ -48,7 +49,7 @@ func VerifyCleartextMessageArmored(publicKey, armored string, verifyTime int64) // SignCleartextMessage signs text given a private keyring, canonicalizes and // trims the newlines, and returns the PGP-compliant special armoring. func SignCleartextMessage(keyRing *crypto.KeyRing, text string) (string, error) { - message := crypto.NewPlainMessageFromString(text) + message := crypto.NewPlainMessageFromString(internal.TrimEachLine(text)) signature, err := keyRing.SignDetached(message) if err != nil { @@ -67,7 +68,7 @@ func VerifyCleartextMessage(keyRing *crypto.KeyRing, armored string, verifyTime return "", errors.Wrap(err, "gopengpp: unable to unarmor cleartext message") } - message := crypto.NewPlainMessageFromString(clearTextMessage.GetString()) + message := crypto.NewPlainMessageFromString(internal.TrimEachLine(clearTextMessage.GetString())) signature := crypto.NewPGPSignature(clearTextMessage.GetBinarySignature()) err = keyRing.VerifyDetached(message, signature, verifyTime) if err != nil { diff --git a/vendor/github.com/ProtonMail/gopenpgp/v2/helper/decrypt_check.go b/vendor/github.com/ProtonMail/gopenpgp/v2/helper/decrypt_check.go new file mode 100644 index 00000000..eeff1089 --- /dev/null +++ b/vendor/github.com/ProtonMail/gopenpgp/v2/helper/decrypt_check.go @@ -0,0 +1,86 @@ +package helper + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/packet" + "github.com/ProtonMail/gopenpgp/v2/crypto" + "github.com/pkg/errors" +) + +const AES_BLOCK_SIZE = 16 + +func supported(cipher packet.CipherFunction) bool { + switch cipher { + case packet.CipherAES128, packet.CipherAES192, packet.CipherAES256: + return true + case packet.CipherCAST5, packet.Cipher3DES: + return false + } + return false +} + +func blockSize(cipher packet.CipherFunction) int { + switch cipher { + case packet.CipherAES128, packet.CipherAES192, packet.CipherAES256: + return AES_BLOCK_SIZE + case packet.CipherCAST5, packet.Cipher3DES: + return 0 + } + return 0 +} + +func blockCipher(cipher packet.CipherFunction, key []byte) (cipher.Block, error) { + switch cipher { + case packet.CipherAES128, packet.CipherAES192, packet.CipherAES256: + return aes.NewCipher(key) + case packet.CipherCAST5, packet.Cipher3DES: + return nil, errors.New("gopenpgp: cipher not supported for quick check") + } + return nil, errors.New("gopenpgp: unknown cipher") +} + +// QuickCheckDecryptReader checks with high probability if the provided session key +// can decrypt a data packet given its 24 byte long prefix. +// The method reads up to but not exactly 24 bytes from the prefixReader. +// NOTE: Only works for SEIPDv1 packets with AES. 
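Callers holding the whole data packet in memory can use the byte-slice wrapper defined a little further below; a sketch (names illustrative):

package example

import (
	"github.com/ProtonMail/gopenpgp/v2/crypto"
	"github.com/ProtonMail/gopenpgp/v2/helper"
)

// keyMatchesPacket reports whether sk plausibly decrypts the data packet,
// reading only its first bytes. A false result is definitive; a true result
// is probabilistic, since the check inspects only the two quick-check
// redundancy bytes of the OpenPGP prefix.
func keyMatchesPacket(sk *crypto.SessionKey, dataPacket []byte) (bool, error) {
	if len(dataPacket) > 24 {
		dataPacket = dataPacket[:24] // only the prefix is needed
	}
	return helper.QuickCheckDecrypt(sk, dataPacket)
}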
+func QuickCheckDecryptReader(sessionKey *crypto.SessionKey, prefixReader crypto.Reader) (bool, error) { + algo, err := sessionKey.GetCipherFunc() + if err != nil { + return false, errors.New("gopenpgp: cipher algorithm not found") + } + if !supported(algo) { + return false, errors.New("gopenpgp: cipher not supported for quick check") + } + packetParser := packet.NewReader(prefixReader) + _, err = packetParser.Next() + if err != nil { + return false, errors.New("gopenpgp: failed to parse packet prefix") + } + + blockSize := blockSize(algo) + encryptedData := make([]byte, blockSize+2) + _, err = io.ReadFull(prefixReader, encryptedData) + if err != nil { + return false, errors.New("gopenpgp: prefix is too short to check") + } + + blockCipher, err := blockCipher(algo, sessionKey.Key) + if err != nil { + return false, errors.New("gopenpgp: failed to initialize the cipher") + } + _ = packet.NewOCFBDecrypter(blockCipher, encryptedData, packet.OCFBNoResync) + return encryptedData[blockSize-2] == encryptedData[blockSize] && + encryptedData[blockSize-1] == encryptedData[blockSize+1], nil +} + +// QuickCheckDecrypt checks with high probability if the provided session key +// can decrypt the encrypted data packet given its 24 byte long prefix. +// The method only considers the first 24 bytes of the prefix slice (prefix[:24]). +// NOTE: Only works for SEIPDv1 packets with AES. +func QuickCheckDecrypt(sessionKey *crypto.SessionKey, prefix []byte) (bool, error) { + return QuickCheckDecryptReader(sessionKey, bytes.NewReader(prefix)) +} diff --git a/vendor/github.com/ProtonMail/gopenpgp/v2/helper/message.go b/vendor/github.com/ProtonMail/gopenpgp/v2/helper/message.go new file mode 100644 index 00000000..b4e58ff0 --- /dev/null +++ b/vendor/github.com/ProtonMail/gopenpgp/v2/helper/message.go @@ -0,0 +1,22 @@ +package helper + +import "github.com/ProtonMail/gopenpgp/v2/crypto" + +// EncryptPGPMessageToAdditionalKey decrypts the session key of the input PGPSplitMessage with a private key in keyRing +// and encrypts it towards the additionalKeys by adding the additional key packets to the input PGPSplitMessage. +// If successful, new key packets are added to message. +// * messageToModify : The encrypted pgp message that should be modified +// * keyRing : The private keys to decrypt the session key in the messageToModify. +// * additionalKey : The public keys the message should be additionally encrypted to. +func EncryptPGPMessageToAdditionalKey(messageToModify *crypto.PGPSplitMessage, keyRing *crypto.KeyRing, additionalKey *crypto.KeyRing) error { + sessionKey, err := keyRing.DecryptSessionKey(messageToModify.KeyPacket) + if err != nil { + return err + } + additionalKeyPacket, err := additionalKey.EncryptSessionKey(sessionKey) + if err != nil { + return err + } + messageToModify.KeyPacket = append(messageToModify.KeyPacket, additionalKeyPacket...) + return nil +} diff --git a/vendor/github.com/ProtonMail/gopenpgp/v2/helper/mobile.go b/vendor/github.com/ProtonMail/gopenpgp/v2/helper/mobile.go index d4cd9ada..a08d81d9 100644 --- a/vendor/github.com/ProtonMail/gopenpgp/v2/helper/mobile.go +++ b/vendor/github.com/ProtonMail/gopenpgp/v2/helper/mobile.go @@ -26,6 +26,20 @@ func DecryptExplicitVerify( return newExplicitVerifyMessage(message, err) } +// DecryptExplicitVerifyWithContext decrypts a PGP message given a private keyring +// and a public keyring to verify the embedded signature. Returns the plain +// data and an error on signature verification failure. 
+// The caller can provide a context that will be used to verify the signature. +func DecryptExplicitVerifyWithContext( + pgpMessage *crypto.PGPMessage, + privateKeyRing, publicKeyRing *crypto.KeyRing, + verifyTime int64, + verificationContext *crypto.VerificationContext, +) (*ExplicitVerifyMessage, error) { + message, err := privateKeyRing.DecryptWithContext(pgpMessage, publicKeyRing, verifyTime, verificationContext) + return newExplicitVerifyMessage(message, err) +} + // DecryptSessionKeyExplicitVerify decrypts a PGP data packet given a session key // and a public keyring to verify the embedded signature. Returns the plain data and // an error on signature verification failure. @@ -39,6 +53,21 @@ func DecryptSessionKeyExplicitVerify( return newExplicitVerifyMessage(message, err) } +// DecryptSessionKeyExplicitVerifyWithContext decrypts a PGP data packet given a session key +// and a public keyring to verify the embedded signature. Returns the plain data and +// an error on signature verification failure. +// The caller can provide a context that will be used to verify the signature. +func DecryptSessionKeyExplicitVerifyWithContext( + dataPacket []byte, + sessionKey *crypto.SessionKey, + publicKeyRing *crypto.KeyRing, + verifyTime int64, + verificationContext *crypto.VerificationContext, +) (*ExplicitVerifyMessage, error) { + message, err := sessionKey.DecryptAndVerifyWithContext(dataPacket, publicKeyRing, verifyTime, verificationContext) + return newExplicitVerifyMessage(message, err) +} + func newExplicitVerifyMessage(message *crypto.PlainMessage, err error) (*ExplicitVerifyMessage, error) { var explicitVerify *ExplicitVerifyMessage if err != nil { diff --git a/vendor/github.com/ProtonMail/gopenpgp/v2/internal/common.go b/vendor/github.com/ProtonMail/gopenpgp/v2/internal/common.go index 9704cd01..534b33d2 100644 --- a/vendor/github.com/ProtonMail/gopenpgp/v2/internal/common.go +++ b/vendor/github.com/ProtonMail/gopenpgp/v2/internal/common.go @@ -7,14 +7,18 @@ import ( "github.com/ProtonMail/gopenpgp/v2/constants" ) -func CanonicalizeAndTrim(text string) string { +func Canonicalize(text string) string { + return strings.ReplaceAll(strings.ReplaceAll(text, "\r\n", "\n"), "\n", "\r\n") +} + +func TrimEachLine(text string) string { lines := strings.Split(text, "\n") for i := range lines { lines[i] = strings.TrimRight(lines[i], " \t\r") } - return strings.Join(lines, "\r\n") + return strings.Join(lines, "\n") } // CreationTimeOffset stores the amount of seconds that a signature may be diff --git a/vendor/github.com/authzed/authzed-go/proto/authzed/api/v1/BUILD.bazel b/vendor/github.com/authzed/authzed-go/proto/authzed/api/v1/BUILD.bazel index 1ed2e560..ad91a44c 100644 --- a/vendor/github.com/authzed/authzed-go/proto/authzed/api/v1/BUILD.bazel +++ b/vendor/github.com/authzed/authzed-go/proto/authzed/api/v1/BUILD.bazel @@ -24,7 +24,7 @@ go_library( "@com_envoyproxy_protoc_gen_validate//validate:validate_go_proto", "@com_github_grpc_ecosystem_grpc_gateway_v2//protoc-gen-openapiv2/options:options_go_proto", "@org_golang_google_genproto_googleapis_api//annotations", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//status", "@org_golang_google_protobuf//reflect/protoreflect", diff --git a/vendor/github.com/authzed/authzed-go/v1/BUILD.bazel b/vendor/github.com/authzed/authzed-go/v1/BUILD.bazel index cf8b21fe..e4ff472d 100644 --- a/vendor/github.com/authzed/authzed-go/v1/BUILD.bazel +++ 
b/vendor/github.com/authzed/authzed-go/v1/BUILD.bazel @@ -9,6 +9,6 @@ go_library( deps = [ "//vendor/github.com/authzed/authzed-go/proto/authzed/api/v1:api", "//vendor/github.com/jzelinskie/stringz", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", ], ) diff --git a/vendor/github.com/authzed/grpcutil/BUILD.bazel b/vendor/github.com/authzed/grpcutil/BUILD.bazel index 5964a84b..f559ee04 100644 --- a/vendor/github.com/authzed/grpcutil/BUILD.bazel +++ b/vendor/github.com/authzed/grpcutil/BUILD.bazel @@ -17,7 +17,7 @@ go_library( "//vendor/github.com/grpc-ecosystem/go-grpc-middleware/auth", "//vendor/github.com/grpc-ecosystem/go-grpc-middleware/validator", "//vendor/github.com/stretchr/testify/require", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//credentials", "@org_golang_google_grpc//health", diff --git a/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/BUILD.bazel new file mode 100644 index 00000000..f66a7e98 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/BUILD.bazel @@ -0,0 +1,10 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "bearer", + srcs = ["token.go"], + importmap = "peridot.resf.org/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer", + importpath = "github.com/aws/aws-sdk-go/aws/auth/bearer", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/aws/aws-sdk-go/aws"], +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go b/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go new file mode 100644 index 00000000..dd950a28 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go @@ -0,0 +1,50 @@ +package bearer + +import ( + "github.com/aws/aws-sdk-go/aws" + "time" +) + +// Token provides a type wrapping a bearer token and expiration metadata. +type Token struct { + Value string + + CanExpire bool + Expires time.Time +} + +// Expired returns if the token's Expires time is before or equal to the time +// provided. If CanExpire is false, Expired will always return false. +func (t Token) Expired(now time.Time) bool { + if !t.CanExpire { + return false + } + now = now.Round(0) + return now.Equal(t.Expires) || now.After(t.Expires) +} + +// TokenProvider provides interface for retrieving bearer tokens. +type TokenProvider interface { + RetrieveBearerToken(aws.Context) (Token, error) +} + +// TokenProviderFunc provides a helper utility to wrap a function as a type +// that implements the TokenProvider interface. +type TokenProviderFunc func(aws.Context) (Token, error) + +// RetrieveBearerToken calls the wrapped function, returning the Token or +// error. +func (fn TokenProviderFunc) RetrieveBearerToken(ctx aws.Context) (Token, error) { + return fn(ctx) +} + +// StaticTokenProvider provides a utility for wrapping a static bearer token +// value within an implementation of a token provider. +type StaticTokenProvider struct { + Token Token +} + +// RetrieveBearerToken returns the static token specified. 
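The new bearer package is self-contained; two illustrative ways to satisfy TokenProvider (token values are placeholders):

package example

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/auth/bearer"
)

// staticProvider wraps a fixed token that expires 15 minutes from now.
func staticProvider() bearer.TokenProvider {
	return bearer.StaticTokenProvider{Token: bearer.Token{
		Value:     "placeholder-token",
		CanExpire: true,
		Expires:   time.Now().Add(15 * time.Minute),
	}}
}

// dynamicProvider adapts a plain function, e.g. one that re-reads a token
// file or calls a token service on every retrieval.
func dynamicProvider() bearer.TokenProvider {
	return bearer.TokenProviderFunc(func(ctx aws.Context) (bearer.Token, error) {
		return bearer.Token{Value: "placeholder-token"}, nil
	})
}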
+func (s StaticTokenProvider) RetrieveBearerToken(aws.Context) (Token, error) { + return s.Token, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index 4818ea42..c483e0cb 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -20,16 +20,16 @@ type RequestRetryer interface{} // A Config provides service configuration for service clients. By default, // all clients will use the defaults.DefaultConfig structure. // -// // Create Session with MaxRetries configuration to be shared by multiple -// // service clients. -// sess := session.Must(session.NewSession(&aws.Config{ -// MaxRetries: aws.Int(3), -// })) +// // Create Session with MaxRetries configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(&aws.Config{ +// MaxRetries: aws.Int(3), +// })) // -// // Create S3 service client with a specific Region. -// svc := s3.New(sess, &aws.Config{ -// Region: aws.String("us-west-2"), -// }) +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, &aws.Config{ +// Region: aws.String("us-west-2"), +// }) type Config struct { // Enables verbose error printing of all credential chain errors. // Should be used when wanting to see all errors while attempting to @@ -192,6 +192,23 @@ type Config struct { // EC2MetadataDisableTimeoutOverride *bool + // Set this to `false` to disable EC2Metadata client from falling back to IMDSv1. + // By default, EC2 role credentials will fall back to IMDSv1 as needed for backwards compatibility. + // You can disable this behavior by explicitly setting this flag to `false`. When false, the EC2Metadata + // client will return any errors encountered from attempting to fetch a token instead of silently + // using the insecure data flow of IMDSv1. + // + // Example: + // sess := session.Must(session.NewSession(aws.NewConfig() + // .WithEC2MetadataEnableFallback(false))) + // + // svc := s3.New(sess) + // + // See [configuring IMDS] for more information. + // + // [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html + EC2MetadataEnableFallback *bool + // Instructs the endpoint to be generated for a service client to // be the dual stack endpoint. The dual stack endpoint will support // both IPv4 and IPv6 addressing. @@ -283,16 +300,16 @@ type Config struct { // NewConfig returns a new Config pointer that can be chained with builder // methods to set multiple configuration values inline without using pointers. // -// // Create Session with MaxRetries configuration to be shared by multiple -// // service clients. -// sess := session.Must(session.NewSession(aws.NewConfig(). -// WithMaxRetries(3), -// )) +// // Create Session with MaxRetries configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(aws.NewConfig(). +// WithMaxRetries(3), +// )) // -// // Create S3 service client with a specific Region. -// svc := s3.New(sess, aws.NewConfig(). -// WithRegion("us-west-2"), -// ) +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, aws.NewConfig(). +// WithRegion("us-west-2"), +// ) func NewConfig() *Config { return &Config{} } @@ -425,6 +442,17 @@ func (c *Config) WithUseDualStack(enable bool) *Config { return c } +// WithUseFIPSEndpoint sets a config UseFIPSEndpoint value returning a Config +// pointer for chaining. 
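The new setters chain like the existing ones; a sketch of a session that disables the IMDSv1 fallback and opts into FIPS endpoints (the region is illustrative):

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

// newHardenedSession returns a session that surfaces IMDSv2 token errors
// instead of silently downgrading to IMDSv1, and prefers FIPS endpoints.
func newHardenedSession() *session.Session {
	return session.Must(session.NewSession(aws.NewConfig().
		WithRegion("us-east-1").
		WithEC2MetadataEnableFallback(false).
		WithUseFIPSEndpoint(true)))
}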
+func (c *Config) WithUseFIPSEndpoint(enable bool) *Config { + if enable { + c.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + } else { + c.UseFIPSEndpoint = endpoints.FIPSEndpointStateDisabled + } + return c +} + // WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value // returning a Config pointer for chaining. func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { @@ -432,6 +460,13 @@ func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { return c } +// WithEC2MetadataEnableFallback sets a config EC2MetadataEnableFallback value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataEnableFallback(v bool) *Config { + c.EC2MetadataEnableFallback = &v + return c +} + // WithSleepDelay overrides the function used to sleep while waiting for the // next retry. Defaults to time.Sleep. func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { @@ -576,6 +611,10 @@ func mergeInConfig(dst *Config, other *Config) { dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride } + if other.EC2MetadataEnableFallback != nil { + dst.EC2MetadataEnableFallback = other.EC2MetadataEnableFallback + } + if other.SleepDelay != nil { dst.SleepDelay = other.SleepDelay } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/BUILD.bazel index 48f7b19a..4dad76ee 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/BUILD.bazel +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/BUILD.bazel @@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "corehandlers", srcs = [ + "awsinternal.go", "handlers.go", "param_validator.go", "user_agent.go", diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go new file mode 100644 index 00000000..140242dd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go @@ -0,0 +1,4 @@ +// DO NOT EDIT +package corehandlers + +const isAwsInternal = "" \ No newline at end of file diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go index ab69c7a6..ac842c55 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go @@ -35,3 +35,13 @@ var AddHostExecEnvUserAgentHander = request.NamedHandler{ request.AddToUserAgent(r, execEnvUAKey+"/"+v) }, } + +var AddAwsInternal = request.NamedHandler{ + Name: "core.AddAwsInternal", + Fn: func(r *request.Request) { + if len(isAwsInternal) == 0 { + return + } + request.AddToUserAgent(r, isAwsInternal) + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go index 785f30d8..329f788a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -31,6 +31,8 @@ package endpointcreds import ( "encoding/json" + "fmt" + "strings" "time" "github.com/aws/aws-sdk-go/aws" @@ -69,7 +71,37 @@ type Provider struct { // Optional authorization token value if set will be used as the value of // the Authorization header of the endpoint credential request. 
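// A minimal sketch, assuming code running on EC2 with IMDS reachable, of the
// two Config setters added above chained on a session-wide Config. A per-client
// Config passed to a service constructor is merged over the session Config (see
// mergeInConfig above), so EC2MetadataEnableFallback can be relaxed for a
// single client.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Session-wide: require IMDSv2 (no IMDSv1 fallback) and prefer FIPS endpoints.
	sess := session.Must(session.NewSession(aws.NewConfig().
		WithEC2MetadataEnableFallback(false).
		WithUseFIPSEndpoint(true)))

	// Per-client override: this metadata client may still fall back to IMDSv1.
	svc := ec2metadata.New(sess, aws.NewConfig().WithEC2MetadataEnableFallback(true))

	if id, err := svc.GetMetadata("instance-id"); err != nil {
		fmt.Println("metadata error:", err)
	} else {
		fmt.Println("instance-id:", id)
	}
}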
+ // + // When constructed from environment, the provider will use the value of + // AWS_CONTAINER_AUTHORIZATION_TOKEN environment variable as the token + // + // Will be overridden if AuthorizationTokenProvider is configured AuthorizationToken string + + // Optional auth provider func to dynamically load the auth token from a file + // every time a credential is retrieved + // + // When constructed from environment, the provider will read and use the content + // of the file pointed to by AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE environment variable + // as the auth token every time credentials are retrieved + // + // Will override AuthorizationToken if configured + AuthorizationTokenProvider AuthTokenProvider +} + +// AuthTokenProvider defines an interface to dynamically load a value to be passed +// for the Authorization header of a credentials request. +type AuthTokenProvider interface { + GetToken() (string, error) +} + +// TokenProviderFunc is a func type implementing the AuthTokenProvider interface +// and enables customizing token provider behavior +type TokenProviderFunc func() (string, error) + +// GetToken retrieves the auth token according to the TokenProviderFunc implementation +func (p TokenProviderFunc) GetToken() (string, error) { + return p() } // NewProviderClient returns a credentials Provider for retrieving AWS credentials @@ -164,7 +196,20 @@ func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error req := p.Client.NewRequest(op, nil, out) req.SetContext(ctx) req.HTTPRequest.Header.Set("Accept", "application/json") - if authToken := p.AuthorizationToken; len(authToken) != 0 { + + authToken := p.AuthorizationToken + var err error + if p.AuthorizationTokenProvider != nil { + authToken, err = p.AuthorizationTokenProvider.GetToken() + if err != nil { + return nil, fmt.Errorf("get authorization token: %v", err) + } + } + + if strings.ContainsAny(authToken, "\r\n") { + return nil, fmt.Errorf("authorization token contains invalid newline sequence") + } + if len(authToken) != 0 { req.HTTPRequest.Header.Set("Authorization", authToken) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go index e6248360..18694f07 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go @@ -226,12 +226,24 @@ func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) return credentials.NewCredentials(p) } -type credentialProcessResponse struct { - Version int - AccessKeyID string `json:"AccessKeyId"` +// A CredentialProcessResponse is the AWS credentials format that must be +// returned when executing an external credential_process. +type CredentialProcessResponse struct { + // As of this writing, the Version key must be set to 1. This might + // increment over time as the structure evolves. + Version int + + // The access key ID that identifies the temporary security credentials. + AccessKeyID string `json:"AccessKeyId"` + + // The secret access key that can be used to sign requests. SecretAccessKey string - SessionToken string - Expiration *time.Time + + // The token that users must pass to the service API to use the temporary credentials. + SessionToken string + + // The date on which the current credentials expire. + Expiration *time.Time } // Retrieve executes the 'credential_process' and returns the credentials.
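// A minimal sketch of the new AuthTokenProvider hook: a TokenProviderFunc that
// re-reads a token file on every credential fetch, mirroring what the SDK
// defaults now do for AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE. The file path is
// a hypothetical example.
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
)

// fileTokenProvider returns a provider that loads the Authorization header
// value from path each time credentials are retrieved.
func fileTokenProvider(path string) endpointcreds.AuthTokenProvider {
	return endpointcreds.TokenProviderFunc(func() (string, error) {
		contents, err := ioutil.ReadFile(path)
		if err != nil {
			return "", fmt.Errorf("read authorization token: %v", err)
		}
		return string(contents), nil
	})
}

func main() {
	p := fileTokenProvider("/var/run/secrets/token") // hypothetical path
	tok, err := p.GetToken()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("token length:", len(tok))
}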
@@ -242,7 +254,7 @@ func (p *ProcessProvider) Retrieve() (credentials.Value, error) { } // Serialize and validate response - resp := &credentialProcessResponse{} + resp := &CredentialProcessResponse{} if err = json.Unmarshal(out, resp); err != nil { return credentials.Value{ProviderName: ProviderName}, awserr.New( ErrCodeProcessProviderParse, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/BUILD.bazel index 01f7a43d..7202d4c8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/BUILD.bazel +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/BUILD.bazel @@ -7,16 +7,21 @@ go_library( "os.go", "os_windows.go", "provider.go", + "sso_cached_token.go", + "token_provider.go", ], importmap = "peridot.resf.org/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds", importpath = "github.com/aws/aws-sdk-go/aws/credentials/ssocreds", visibility = ["//visibility:public"], deps = [ "//vendor/github.com/aws/aws-sdk-go/aws", + "//vendor/github.com/aws/aws-sdk-go/aws/auth/bearer", "//vendor/github.com/aws/aws-sdk-go/aws/awserr", "//vendor/github.com/aws/aws-sdk-go/aws/client", "//vendor/github.com/aws/aws-sdk-go/aws/credentials", + "//vendor/github.com/aws/aws-sdk-go/internal/shareddefaults", "//vendor/github.com/aws/aws-sdk-go/service/sso", "//vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface", + "//vendor/github.com/aws/aws-sdk-go/service/ssooidc", ], ) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go index 6eda2a55..4138e725 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go @@ -4,13 +4,13 @@ import ( "crypto/sha1" "encoding/hex" "encoding/json" - "fmt" "io/ioutil" "path/filepath" "strings" "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/auth/bearer" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" @@ -55,6 +55,19 @@ type Provider struct { // The URL that points to the organization's AWS Single Sign-On (AWS SSO) user portal. StartURL string + + // The filepath the cached token will be retrieved from. If unset, Provider will + // use the startURL to determine the filepath at: + // + // ~/.aws/sso/cache/<sha1 of startURL>.json + // + // If a custom cached token filepath is used, the Provider's startUrl + // parameter will be ignored. + CachedTokenFilepath string + + // Used by the SSOCredentialProvider if a token configuration + // profile is used in the shared config + TokenProvider bearer.TokenProvider } // NewCredentials returns a new AWS Single Sign-On (AWS SSO) credential provider. The ConfigProvider is expected to be configured @@ -89,13 +102,31 @@ func (p *Provider) Retrieve() (credentials.Value, error) { // RetrieveWithContext retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal // by exchanging the accessToken present in ~/.aws/sso/cache.
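// A minimal sketch of an external credential_process program whose stdout
// matches the now-exported CredentialProcessResponse shape; the credential
// values are placeholders. A shared-config profile would point at it with
// credential_process, and the SDK parses its output in Retrieve below.
package main

import (
	"encoding/json"
	"os"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials/processcreds"
)

func main() {
	exp := time.Now().Add(time.Hour).UTC()
	resp := processcreds.CredentialProcessResponse{
		Version:         1, // must be 1, per the field docs above
		AccessKeyID:     "AKIDEXAMPLE",
		SecretAccessKey: "EXAMPLESECRETKEY",
		SessionToken:    "EXAMPLESESSIONTOKEN",
		Expiration:      &exp,
	}
	_ = json.NewEncoder(os.Stdout).Encode(resp)
}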
func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { - tokenFile, err := loadTokenFile(p.StartURL) - if err != nil { - return credentials.Value{}, err + var accessToken *string + if p.TokenProvider != nil { + token, err := p.TokenProvider.RetrieveBearerToken(ctx) + if err != nil { + return credentials.Value{}, err + } + accessToken = &token.Value + } else { + if p.CachedTokenFilepath == "" { + cachedTokenFilePath, err := getCachedFilePath(p.StartURL) + if err != nil { + return credentials.Value{}, err + } + p.CachedTokenFilepath = cachedTokenFilePath + } + + tokenFile, err := loadTokenFile(p.CachedTokenFilepath) + if err != nil { + return credentials.Value{}, err + } + accessToken = &tokenFile.AccessToken } output, err := p.Client.GetRoleCredentialsWithContext(ctx, &sso.GetRoleCredentialsInput{ - AccessToken: &tokenFile.AccessToken, + AccessToken: accessToken, AccountId: &p.AccountID, RoleName: &p.RoleName, }) @@ -114,32 +145,13 @@ func (p *Provider) RetrieveWithContext(ctx credentials.Val }, nil } -func getCacheFileName(url string) (string, error) { +func getCachedFilePath(startUrl string) (string, error) { hash := sha1.New() - _, err := hash.Write([]byte(url)) + _, err := hash.Write([]byte(startUrl)) if err != nil { return "", err } - return strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json", nil -} - -type rfc3339 time.Time - -func (r *rfc3339) UnmarshalJSON(bytes []byte) error { - var value string - - if err := json.Unmarshal(bytes, &value); err != nil { - return err - } - - parse, err := time.Parse(time.RFC3339, value) - if err != nil { - return fmt.Errorf("expected RFC3339 timestamp: %v", err) - } - - *r = rfc3339(parse) - - return nil + return filepath.Join(defaultCacheLocation(), strings.ToLower(hex.EncodeToString(hash.Sum(nil)))+".json"), nil } type token struct { @@ -153,13 +165,8 @@ func (t token) Expired() bool { return nowTime().Round(0).After(time.Time(t.ExpiresAt)) } -func loadTokenFile(startURL string) (t token, err error) { - key, err := getCacheFileName(startURL) - if err != nil { - return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) - } - - fileBytes, err := ioutil.ReadFile(filepath.Join(defaultCacheLocation(), key)) +func loadTokenFile(cachedTokenPath string) (t token, err error) { + fileBytes, err := ioutil.ReadFile(cachedTokenPath) if err != nil { return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go new file mode 100644 index 00000000..f6fa8845 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go @@ -0,0 +1,237 @@ +package ssocreds + +import ( + "crypto/sha1" + "encoding/hex" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go/internal/shareddefaults" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "time" +) + +var resolvedOsUserHomeDir = shareddefaults.UserHomeDir + +// StandardCachedTokenFilepath returns the filepath for the cached SSO token file, or +// error if unable to derive the path. The key will be used to compute a SHA1 +// value that is hex encoded.
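// A minimal sketch, assuming an SSO login already performed with the AWS CLI,
// of the new CachedTokenFilepath override; when set, the startURL-derived
// cache path computed by getCachedFilePath above is skipped. Account, role,
// start URL, and file path are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials/ssocreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())

	creds := ssocreds.NewCredentials(sess, "123456789012", "ReadOnly",
		"https://example.awsapps.com/start",
		func(p *ssocreds.Provider) {
			p.CachedTokenFilepath = "/tmp/custom-sso-cache.json" // assumed path
		})

	v, err := creds.Get()
	if err != nil {
		fmt.Println("sso credentials error:", err)
		return
	}
	fmt.Println("retrieved via", v.ProviderName)
}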
+// +// Derives the filepath using the Key as: +// +// ~/.aws/sso/cache/<hex-encoded-sha1-of-key>.json +func StandardCachedTokenFilepath(key string) (string, error) { + homeDir := resolvedOsUserHomeDir() + if len(homeDir) == 0 { + return "", fmt.Errorf("unable to get USER's home directory for cached token") + } + hash := sha1.New() + if _, err := hash.Write([]byte(key)); err != nil { + return "", fmt.Errorf("unable to compute cached token filepath key SHA1 hash, %v", err) + } + + cacheFilename := strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json" + + return filepath.Join(homeDir, ".aws", "sso", "cache", cacheFilename), nil +} + +type tokenKnownFields struct { + AccessToken string `json:"accessToken,omitempty"` + ExpiresAt *rfc3339 `json:"expiresAt,omitempty"` + + RefreshToken string `json:"refreshToken,omitempty"` + ClientID string `json:"clientId,omitempty"` + ClientSecret string `json:"clientSecret,omitempty"` +} + +type cachedToken struct { + tokenKnownFields + UnknownFields map[string]interface{} `json:"-"` +} + +// MarshalJSON provides custom marshalling because the standard library Go marshaller ignores unknown/unspecified fields +// when marshalling from a struct: https://pkg.go.dev/encoding/json#Marshal +// This function adds some extra validation to the known fields and captures unknown fields. +func (t cachedToken) MarshalJSON() ([]byte, error) { + fields := map[string]interface{}{} + + setTokenFieldString(fields, "accessToken", t.AccessToken) + setTokenFieldRFC3339(fields, "expiresAt", t.ExpiresAt) + + setTokenFieldString(fields, "refreshToken", t.RefreshToken) + setTokenFieldString(fields, "clientId", t.ClientID) + setTokenFieldString(fields, "clientSecret", t.ClientSecret) + + for k, v := range t.UnknownFields { + if _, ok := fields[k]; ok { + return nil, fmt.Errorf("unknown token field %v, duplicates known field", k) + } + fields[k] = v + } + + return json.Marshal(fields) +} + +func setTokenFieldString(fields map[string]interface{}, key, value string) { + if value == "" { + return + } + fields[key] = value +} +func setTokenFieldRFC3339(fields map[string]interface{}, key string, value *rfc3339) { + if value == nil { + return + } + fields[key] = value +} + +// UnmarshalJSON provides custom unmarshalling because the standard library Go unmarshaller ignores unknown/unspecified +// fields when unmarshalling from a struct: https://pkg.go.dev/encoding/json#Unmarshal +// This function adds some extra validation to the known fields and captures unknown fields.
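// A standalone sketch of the marshal/unmarshal pattern used by cachedToken
// above: known fields are decoded explicitly and everything else is parked in
// an UnknownFields map, then written back on marshal, so a round-trip does not
// drop fields this code does not know about. The record type is invented for
// illustration.
package main

import (
	"encoding/json"
	"fmt"
)

type record struct {
	Name          string
	UnknownFields map[string]interface{}
}

func (r *record) UnmarshalJSON(b []byte) error {
	var fields map[string]interface{}
	if err := json.Unmarshal(b, &fields); err != nil {
		return err
	}
	r.UnknownFields = map[string]interface{}{}
	for k, v := range fields {
		if k == "name" {
			s, ok := v.(string)
			if !ok {
				return fmt.Errorf("expect value to be string, got %T", v)
			}
			r.Name = s
			continue
		}
		r.UnknownFields[k] = v // preserved verbatim
	}
	return nil
}

func (r record) MarshalJSON() ([]byte, error) {
	fields := map[string]interface{}{"name": r.Name}
	for k, v := range r.UnknownFields {
		fields[k] = v
	}
	return json.Marshal(fields)
}

func main() {
	var r record
	if err := json.Unmarshal([]byte(`{"name":"a","extra":42}`), &r); err != nil {
		panic(err)
	}
	out, _ := json.Marshal(r)
	fmt.Println(string(out)) // {"extra":42,"name":"a"}; the unknown field survives
}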
+func (t *cachedToken) UnmarshalJSON(b []byte) error { + var fields map[string]interface{} + if err := json.Unmarshal(b, &fields); err != nil { + return err + } + + t.UnknownFields = map[string]interface{}{} + + for k, v := range fields { + var err error + switch k { + case "accessToken": + err = getTokenFieldString(v, &t.AccessToken) + case "expiresAt": + err = getTokenFieldRFC3339(v, &t.ExpiresAt) + case "refreshToken": + err = getTokenFieldString(v, &t.RefreshToken) + case "clientId": + err = getTokenFieldString(v, &t.ClientID) + case "clientSecret": + err = getTokenFieldString(v, &t.ClientSecret) + default: + t.UnknownFields[k] = v + } + + if err != nil { + return fmt.Errorf("field %q, %v", k, err) + } + } + + return nil +} + +func getTokenFieldString(v interface{}, value *string) error { + var ok bool + *value, ok = v.(string) + if !ok { + return fmt.Errorf("expect value to be string, got %T", v) + } + return nil +} + +func getTokenFieldRFC3339(v interface{}, value **rfc3339) error { + var stringValue string + if err := getTokenFieldString(v, &stringValue); err != nil { + return err + } + + timeValue, err := parseRFC3339(stringValue) + if err != nil { + return err + } + + *value = &timeValue + return nil +} + +func loadCachedToken(filename string) (cachedToken, error) { + fileBytes, err := ioutil.ReadFile(filename) + if err != nil { + return cachedToken{}, fmt.Errorf("failed to read cached SSO token file, %v", err) + } + + var t cachedToken + if err := json.Unmarshal(fileBytes, &t); err != nil { + return cachedToken{}, fmt.Errorf("failed to parse cached SSO token file, %v", err) + } + + if len(t.AccessToken) == 0 || t.ExpiresAt == nil || time.Time(*t.ExpiresAt).IsZero() { + return cachedToken{}, fmt.Errorf( + "cached SSO token must contain accessToken and expiresAt fields") + } + + return t, nil +} + +func storeCachedToken(filename string, t cachedToken, fileMode os.FileMode) (err error) { + tmpFilename := filename + ".tmp-" + strconv.FormatInt(nowTime().UnixNano(), 10) + if err := writeCacheFile(tmpFilename, fileMode, t); err != nil { + return err + } + + if err := os.Rename(tmpFilename, filename); err != nil { + return fmt.Errorf("failed to replace old cached SSO token file, %v", err) + } + + return nil +} + +func writeCacheFile(filename string, fileMode os.FileMode, t cachedToken) (err error) { + var f *os.File + f, err = os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_RDWR, fileMode) + if err != nil { + return fmt.Errorf("failed to create cached SSO token file %v", err) + } + + defer func() { + closeErr := f.Close() + if err == nil && closeErr != nil { + err = fmt.Errorf("failed to close cached SSO token file, %v", closeErr) + } + }() + + encoder := json.NewEncoder(f) + + if err = encoder.Encode(t); err != nil { + return fmt.Errorf("failed to serialize cached SSO token, %v", err) + } + + return nil +} + +type rfc3339 time.Time + +// UnmarshalJSON decodes rfc3339 from JSON format +func (r *rfc3339) UnmarshalJSON(bytes []byte) error { + var value string + var err error + + if err = json.Unmarshal(bytes, &value); err != nil { + return err + } + + *r, err = parseRFC3339(value) + return err +} + +func parseRFC3339(v string) (rfc3339, error) { + parsed, err := time.Parse(time.RFC3339, v) + if err != nil { + return rfc3339{}, fmt.Errorf("expected RFC3339 timestamp: %v", err) + } + + return rfc3339(parsed), nil +} + +// MarshalJSON encodes rfc3339 to JSON format time +func (r *rfc3339) MarshalJSON() ([]byte, error) { + value := time.Time(*r).Format(time.RFC3339) + + // Use JSON marshal
to quote the value, making use of JSON's +// quoting rules. + return json.Marshal(value) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go new file mode 100644 index 00000000..3388b78b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go @@ -0,0 +1,148 @@ +package ssocreds + +import ( + "fmt" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/auth/bearer" + "github.com/aws/aws-sdk-go/service/ssooidc" +) + +// CreateTokenAPIClient provides the interface for the SSOTokenProvider's API +// client for calling the CreateToken operation to refresh the SSO token. +type CreateTokenAPIClient interface { + CreateToken(input *ssooidc.CreateTokenInput) (*ssooidc.CreateTokenOutput, error) +} + +// SSOTokenProviderOptions provides the options for configuring the +// SSOTokenProvider. +type SSOTokenProviderOptions struct { + // Client that can be overridden + Client CreateTokenAPIClient + + // The path the file containing the cached SSO token will be read from. + // Initialized by the NewSSOTokenProvider's cachedTokenFilepath parameter. + CachedTokenFilepath string +} + +// SSOTokenProvider provides a utility for refreshing SSO AccessTokens for +// Bearer Authentication. The SSOTokenProvider can only be used to refresh +// already cached SSO Tokens. This utility cannot perform the initial SSO +// create token. +// +// The initial SSO create token should be performed with the AWS CLI before the +// Go application using the SSOTokenProvider will need to retrieve the SSO +// token. If the AWS CLI has not created the token cache file, this provider +// will return an error when attempting to retrieve the cached token. +// +// This provider will attempt to refresh the cached SSO token periodically if +// needed when RetrieveBearerToken is called. +// +// A utility such as the AWS CLI must be used to initially create the SSO +// session and cached token file. +// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html +type SSOTokenProvider struct { + options SSOTokenProviderOptions +} + +// NewSSOTokenProvider returns an initialized SSOTokenProvider that will +// periodically refresh the cached SSO token stored in the cachedTokenFilepath. +// The cachedTokenFilepath file's content will be rewritten by the token +// provider when the token is refreshed. +// +// The client must be configured for the AWS region the SSO token was created for. +func NewSSOTokenProvider(client CreateTokenAPIClient, cachedTokenFilepath string, optFns ...func(o *SSOTokenProviderOptions)) *SSOTokenProvider { + options := SSOTokenProviderOptions{ + Client: client, + CachedTokenFilepath: cachedTokenFilepath, + } + for _, fn := range optFns { + fn(&options) + } + + provider := &SSOTokenProvider{ + options: options, + } + + return provider +} + +// RetrieveBearerToken returns the SSO token stored in the cachedTokenFilepath +// the SSOTokenProvider was created with. If the token has expired +// RetrieveBearerToken will attempt to refresh it. If the token cannot be +// refreshed or is not present an error will be returned. +// +// A utility such as the AWS CLI must be used to initially create the SSO +// session and cached token file.
https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html +func (p *SSOTokenProvider) RetrieveBearerToken(ctx aws.Context) (bearer.Token, error) { + cachedToken, err := loadCachedToken(p.options.CachedTokenFilepath) + if err != nil { + return bearer.Token{}, err + } + + if cachedToken.ExpiresAt != nil && nowTime().After(time.Time(*cachedToken.ExpiresAt)) { + cachedToken, err = p.refreshToken(cachedToken) + if err != nil { + return bearer.Token{}, fmt.Errorf("refresh cached SSO token failed, %v", err) + } + } + + expiresAt := toTime((*time.Time)(cachedToken.ExpiresAt)) + return bearer.Token{ + Value: cachedToken.AccessToken, + CanExpire: !expiresAt.IsZero(), + Expires: expiresAt, + }, nil +} + +func (p *SSOTokenProvider) refreshToken(token cachedToken) (cachedToken, error) { + if token.ClientSecret == "" || token.ClientID == "" || token.RefreshToken == "" { + return cachedToken{}, fmt.Errorf("cached SSO token is expired, or not present, and cannot be refreshed") + } + + createResult, err := p.options.Client.CreateToken(&ssooidc.CreateTokenInput{ + ClientId: &token.ClientID, + ClientSecret: &token.ClientSecret, + RefreshToken: &token.RefreshToken, + GrantType: aws.String("refresh_token"), + }) + if err != nil { + return cachedToken{}, fmt.Errorf("unable to refresh SSO token, %v", err) + } + if createResult.ExpiresIn == nil { + return cachedToken{}, fmt.Errorf("missing required field ExpiresIn") + } + if createResult.AccessToken == nil { + return cachedToken{}, fmt.Errorf("missing required field AccessToken") + } + if createResult.RefreshToken == nil { + return cachedToken{}, fmt.Errorf("missing required field RefreshToken") + } + + expiresAt := nowTime().Add(time.Duration(*createResult.ExpiresIn) * time.Second) + + token.AccessToken = *createResult.AccessToken + token.ExpiresAt = (*rfc3339)(&expiresAt) + token.RefreshToken = *createResult.RefreshToken + + fileInfo, err := os.Stat(p.options.CachedTokenFilepath) + if err != nil { + return cachedToken{}, fmt.Errorf("failed to stat cached SSO token file %v", err) + } + + if err = storeCachedToken(p.options.CachedTokenFilepath, token, fileInfo.Mode()); err != nil { + return cachedToken{}, fmt.Errorf("unable to cache refreshed SSO token, %v", err) + } + + return token, nil +} + +func toTime(p *time.Time) (v time.Time) { + if p == nil { + return v + } + + return *p +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go index 260a37cb..86db488d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -9,7 +9,7 @@ to refresh the credentials will be synchronized. But, the SDK is unable to ensure synchronous usage of the AssumeRoleProvider if the value is shared between multiple Credentials, Sessions or service clients. -Assume Role +# Assume Role To assume an IAM role using STS with the SDK you can create a new Credentials with the SDKs's stscreds package. @@ -27,7 +27,7 @@ with the SDKs's stscreds package. // from assumed role. svc := s3.New(sess, &aws.Config{Credentials: creds}) -Assume Role with static MFA Token +# Assume Role with static MFA Token To assume an IAM role with a MFA token you can either specify a MFA token code directly or provide a function to prompt the user each time the credentials @@ -49,7 +49,7 @@ credentials. // from assumed role. 
svc := s3.New(sess, &aws.Config{Credentials: creds}) -Assume Role with MFA Token Provider +# Assume Role with MFA Token Provider To assume an IAM role with MFA for longer running tasks where the credentials may need to be refreshed setting the TokenProvider field of AssumeRoleProvider @@ -74,7 +74,6 @@ single Credentials with an AssumeRoleProvider can be shared safely. // Create service client value configured for credentials // from assumed role. svc := s3.New(sess, &aws.Config{Credentials: creds}) - */ package stscreds @@ -199,6 +198,10 @@ type AssumeRoleProvider struct { // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). SerialNumber *string + // The SourceIdentity which is used to identify a persistent identity through the whole session. + // For more details see https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html + SourceIdentity *string + // The value provided by the MFA device, if the trust policy of the role being // assumed requires MFA (that is, if the policy includes a condition that tests // for MFA). If the role being assumed requires MFA and if the TokenCode value @@ -320,6 +323,7 @@ func (p *AssumeRoleProvider) RetrieveWithContext(ctx credentials.Context) (crede Tags: p.Tags, PolicyArns: p.PolicyArns, TransitiveTagKeys: p.TransitiveTagKeys, + SourceIdentity: p.SourceIdentity, } if p.Policy != nil { input.Policy = p.Policy diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go b/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go index c07f6731..eeb3bc0c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go @@ -8,12 +8,12 @@ import ( // based on some key. The datastructure makes use of a read write // mutex to enable asynchronous use. type EndpointCache struct { - endpoints syncMap - endpointLimit int64 // size is used to count the number elements in the cache. // The atomic package is used to ensure this size is accurate when // using multiple goroutines.
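// A minimal sketch of the new SourceIdentity field: the value is passed
// through to sts:AssumeRole (see RetrieveWithContext above) and persists
// across the session. Role ARN and identity value are placeholders.
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())

	creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/example",
		func(p *stscreds.AssumeRoleProvider) {
			p.SourceIdentity = aws.String("build-pipeline") // example identity
		})

	// Service client using the assumed-role credentials.
	svc := s3.New(sess, &aws.Config{Credentials: creds})
	_ = svc
}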
- size int64 + size int64 + endpoints syncMap + endpointLimit int64 } // NewEndpointCache will return a newly initialized cache with a limit diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go index 23bb639e..1ba80b57 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -9,6 +9,7 @@ package defaults import ( "fmt" + "io/ioutil" "net" "net/http" "net/url" @@ -74,6 +75,7 @@ func Handlers() request.Handlers { handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) handlers.Validate.AfterEachFn = request.HandlerListStopOnError handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) + handlers.Build.PushBackNamed(corehandlers.AddAwsInternal) handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander) handlers.Build.AfterEachFn = request.HandlerListStopOnError handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) @@ -114,9 +116,31 @@ func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Pro const ( httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN" + httpProviderAuthFileEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE" httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" ) +// direct representation of the IPv4 address for the ECS container +// "169.254.170.2" +var ecsContainerIPv4 net.IP = []byte{ + 169, 254, 170, 2, +} + +// direct representation of the IPv4 address for the EKS container +// "169.254.170.23" +var eksContainerIPv4 net.IP = []byte{ + 169, 254, 170, 23, +} + +// direct representation of the IPv6 address for the EKS container +// "fd00:ec2::23" +var eksContainerIPv6 net.IP = []byte{ + 0xFD, 0, 0xE, 0xC2, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0x23, +} + // RemoteCredProvider returns a credentials provider for the default remote // endpoints such as EC2 or ECS Roles. 
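// A minimal sketch of how the allow-list above is exercised: with the
// full-URI and token-file variables set, RemoteCredProvider (below) builds an
// endpointcreds provider that re-reads the token file on each fetch. The
// endpoint address and token path follow the EKS Pod Identity conventions and
// are used here as assumptions.
package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	os.Setenv("AWS_CONTAINER_CREDENTIALS_FULL_URI", "http://169.254.170.23/v1/credentials")
	os.Setenv("AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE", "/var/run/secrets/eks/token")

	provider := defaults.RemoteCredProvider(*defaults.Config(), defaults.Handlers())

	creds, err := provider.Retrieve()
	if err != nil {
		fmt.Println("credential fetch failed:", err)
		return
	}
	fmt.Println("provider:", creds.ProviderName)
}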
func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { @@ -134,19 +158,22 @@ func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.P var lookupHostFn = net.LookupHost -func isLoopbackHost(host string) (bool, error) { - ip := net.ParseIP(host) - if ip != nil { - return ip.IsLoopback(), nil +// isAllowedHost allows host to be loopback or known ECS/EKS container IPs +// +// host can either be an IP address OR an unresolved hostname - resolution will +// be automatically performed in the latter case +func isAllowedHost(host string) (bool, error) { + if ip := net.ParseIP(host); ip != nil { + return isIPAllowed(ip), nil } - // Host is not an ip, perform lookup addrs, err := lookupHostFn(host) if err != nil { return false, err } + for _, addr := range addrs { - if !net.ParseIP(addr).IsLoopback() { + if ip := net.ParseIP(addr); ip == nil || !isIPAllowed(ip) { return false, nil } } @@ -154,6 +181,13 @@ func isLoopbackHost(host string) (bool, error) { return true, nil } +func isIPAllowed(ip net.IP) bool { + return ip.IsLoopback() || + ip.Equal(ecsContainerIPv4) || + ip.Equal(eksContainerIPv4) || + ip.Equal(eksContainerIPv6) +} + func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { var errMsg string @@ -164,10 +198,12 @@ func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) host := aws.URLHostname(parsed) if len(host) == 0 { errMsg = "unable to parse host from local HTTP cred provider URL" - } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil { - errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr) - } else if !isLoopback { - errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host) + } else if parsed.Scheme == "http" { + if isAllowedHost, allowHostErr := isAllowedHost(host); allowHostErr != nil { + errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, allowHostErr) + } else if !isAllowedHost { + errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback/ecs/eks hosts are allowed.", host) + } } } @@ -189,6 +225,15 @@ func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) crede func(p *endpointcreds.Provider) { p.ExpiryWindow = 5 * time.Minute p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar) + if authFilePath := os.Getenv(httpProviderAuthFileEnvVar); authFilePath != "" { + p.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) { + if contents, err := ioutil.ReadFile(authFilePath); err != nil { + return "", fmt.Errorf("failed to read authorization token from %v: %v", authFilePath, err) + } else { + return string(contents), nil + } + }) + } }, ) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go index df63bade..f4cc8751 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -57,13 +57,13 @@ type EC2Metadata struct { // New creates a new instance of the EC2Metadata client with a session. // This client is safe to use across multiple goroutines. // -// // Example: -// // Create a EC2Metadata client from just a session. 
-// svc := ec2metadata.New(mySession) // -// // Create a EC2Metadata client with additional configuration -// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) +// // Create a EC2Metadata client from just a session. +// svc := ec2metadata.New(mySession) +// +// // Create a EC2Metadata client with additional configuration +// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { c := p.ClientConfig(ServiceName, cfgs...) return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go index 4b29f190..f1f9ba4e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go @@ -1,6 +1,8 @@ package ec2metadata import ( + "fmt" + "github.com/aws/aws-sdk-go/aws" "net/http" "sync/atomic" "time" @@ -33,11 +35,15 @@ func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider { return &tokenProvider{client: c, configuredTTL: duration} } +// check if fallback is enabled +func (t *tokenProvider) fallbackEnabled() bool { + return t.client.Config.EC2MetadataEnableFallback == nil || *t.client.Config.EC2MetadataEnableFallback +} + // fetchTokenHandler fetches token for EC2Metadata service client by default. func (t *tokenProvider) fetchTokenHandler(r *request.Request) { - // short-circuits to insecure data flow if tokenProvider is disabled. - if v := atomic.LoadUint32(&t.disabled); v == 1 { + if v := atomic.LoadUint32(&t.disabled); v == 1 && t.fallbackEnabled() { return } @@ -49,23 +55,23 @@ func (t *tokenProvider) fetchTokenHandler(r *request.Request) { output, err := t.client.getToken(r.Context(), t.configuredTTL) if err != nil { + // only attempt fallback to insecure data flow if IMDSv1 is enabled + if !t.fallbackEnabled() { + r.Error = awserr.New("EC2MetadataError", "failed to get IMDSv2 token and fallback to IMDSv1 is disabled", err) + return + } - // change the disabled flag on token provider to true, - // when error is request timeout error. + // change the disabled flag on token provider to true and fallback if requestFailureError, ok := err.(awserr.RequestFailure); ok { switch requestFailureError.StatusCode() { case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed: atomic.StoreUint32(&t.disabled, 1) + if t.client.Config.LogLevel.Matches(aws.LogDebugWithDeprecated) { + t.client.Config.Logger.Log(fmt.Sprintf("WARN: failed to get session token, falling back to IMDSv1: %v", requestFailureError)) + } case http.StatusBadRequest: r.Error = requestFailureError } - - // Check if request timed out while waiting for response - if e, ok := requestFailureError.OrigErr().(awserr.Error); ok { - if e.Code() == request.ErrCodeRequestError { - atomic.StoreUint32(&t.disabled, 1) - } - } } return } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index bdf2ad74..84dc7dc0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -13,6 +13,8 @@ const ( AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition. AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition. 
+ AwsIsoEPartitionID = "aws-iso-e" // AWS ISOE (Europe) partition. + AwsIsoFPartitionID = "aws-iso-f" // AWS ISOF partition. ) // AWS Standard partition's regions. @@ -23,16 +25,22 @@ const ( ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). ApNortheast3RegionID = "ap-northeast-3" // Asia Pacific (Osaka). ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). + ApSouth2RegionID = "ap-south-2" // Asia Pacific (Hyderabad). ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). ApSoutheast3RegionID = "ap-southeast-3" // Asia Pacific (Jakarta). + ApSoutheast4RegionID = "ap-southeast-4" // Asia Pacific (Melbourne). CaCentral1RegionID = "ca-central-1" // Canada (Central). + CaWest1RegionID = "ca-west-1" // Canada West (Calgary). EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt). + EuCentral2RegionID = "eu-central-2" // Europe (Zurich). EuNorth1RegionID = "eu-north-1" // Europe (Stockholm). EuSouth1RegionID = "eu-south-1" // Europe (Milan). + EuSouth2RegionID = "eu-south-2" // Europe (Spain). EuWest1RegionID = "eu-west-1" // Europe (Ireland). EuWest2RegionID = "eu-west-2" // Europe (London). EuWest3RegionID = "eu-west-3" // Europe (Paris). + IlCentral1RegionID = "il-central-1" // Israel (Tel Aviv). MeCentral1RegionID = "me-central-1" // Middle East (UAE). MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). @@ -65,8 +73,16 @@ const ( UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio). ) +// AWS ISOE (Europe) partition's regions. +const ( + EuIsoeWest1RegionID = "eu-isoe-west-1" // EU ISOE West. +) + +// AWS ISOF partition's regions. +const () + // DefaultResolver returns an Endpoint resolver that will be able -// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). +// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), AWS ISOB (US), AWS ISOE (Europe), and AWS ISOF. // // Use DefaultPartitions() to get the list of the default partitions. func DefaultResolver() Resolver { @@ -74,7 +90,7 @@ func DefaultResolver() Resolver { } // DefaultPartitions returns a list of the partitions the SDK is bundled -// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). +// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), AWS ISOB (US), AWS ISOE (Europe), and AWS ISOF. // // partitions := endpoints.DefaultPartitions // for _, p := range partitions { @@ -90,6 +106,8 @@ var defaultPartitions = partitions{ awsusgovPartition, awsisoPartition, awsisobPartition, + awsisoePartition, + awsisofPartition, } // AwsPartition returns the Resolver for AWS Standard. 
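// A minimal sketch resolving an endpoint in one of the regions added above
// (il-central-1, Israel (Tel Aviv)) through the default resolver; "s3" is an
// arbitrary example service.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	ep, err := endpoints.DefaultResolver().EndpointFor("s3", "il-central-1")
	if err != nil {
		fmt.Println("resolve failed:", err)
		return
	}
	fmt.Println(ep.URL) // expected: https://s3.il-central-1.amazonaws.com
}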
@@ -103,7 +121,7 @@ var awsPartition = partition{ DNSSuffix: "amazonaws.com", RegionRegex: regionRegex{ Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$") + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$") return reg }(), }, @@ -157,6 +175,9 @@ var awsPartition = partition{ "ap-south-1": region{ Description: "Asia Pacific (Mumbai)", }, + "ap-south-2": region{ + Description: "Asia Pacific (Hyderabad)", + }, "ap-southeast-1": region{ Description: "Asia Pacific (Singapore)", }, @@ -166,18 +187,30 @@ var awsPartition = partition{ "ap-southeast-3": region{ Description: "Asia Pacific (Jakarta)", }, + "ap-southeast-4": region{ + Description: "Asia Pacific (Melbourne)", + }, "ca-central-1": region{ Description: "Canada (Central)", }, + "ca-west-1": region{ + Description: "Canada West (Calgary)", + }, "eu-central-1": region{ Description: "Europe (Frankfurt)", }, + "eu-central-2": region{ + Description: "Europe (Zurich)", + }, "eu-north-1": region{ Description: "Europe (Stockholm)", }, "eu-south-1": region{ Description: "Europe (Milan)", }, + "eu-south-2": region{ + Description: "Europe (Spain)", + }, "eu-west-1": region{ Description: "Europe (Ireland)", }, @@ -187,6 +220,9 @@ var awsPartition = partition{ "eu-west-3": region{ Description: "Europe (Paris)", }, + "il-central-1": region{ + Description: "Israel (Tel Aviv)", + }, "me-central-1": region{ Description: "Middle East (UAE)", }, @@ -210,13 +246,6 @@ var awsPartition = partition{ }, }, Services: services{ - "a4b": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, "access-analyzer": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -237,6 +266,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -246,6 +278,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -255,15 +290,30 @@ var awsPartition = partition{ }: endpoint{ Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -282,6 +332,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "access-analyzer-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -318,6 +377,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -399,6 +461,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -408,6 
+473,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -426,15 +494,39 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "acm-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -444,6 +536,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -552,6 +647,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -561,6 +659,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -570,15 +671,30 @@ var awsPartition = partition{ }: endpoint{ Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -597,6 +713,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "acm-pca-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -633,6 +758,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -677,32 +808,69 @@ var awsPartition = partition{ }, }, }, + "agreement-marketplace": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, "airflow": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: 
endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -712,6 +880,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -721,6 +898,9 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -737,6 +917,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -795,6 +978,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -813,6 +999,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -850,6 +1039,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -868,6 +1060,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -897,6 +1092,49 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "aoss": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "api.detective": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -928,6 +1166,21 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "api.detective-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, 
endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -946,6 +1199,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -1084,6 +1340,14 @@ var awsPartition = partition{ Region: "ap-south-1", }, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "api.ecr.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -1108,6 +1372,14 @@ var awsPartition = partition{ Region: "ap-southeast-3", }, }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "api.ecr.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -1116,6 +1388,14 @@ var awsPartition = partition{ Region: "ca-central-1", }, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "api.ecr.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, endpointKey{ Region: "dkr-us-east-1", }: endpoint{ @@ -1196,6 +1476,14 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "api.ecr.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, endpointKey{ Region: "eu-north-1", }: endpoint{ @@ -1212,6 +1500,14 @@ var awsPartition = partition{ Region: "eu-south-1", }, }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "api.ecr.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -1308,6 +1604,14 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "api.ecr.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, endpointKey{ Region: "me-central-1", }: endpoint{ @@ -1402,6 +1706,26 @@ var awsPartition = partition{ }, }, }, + "api.ecr-public": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.ecr-public.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.ecr-public.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "api.elastic-inference": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -1637,24 +1961,60 @@ var awsPartition = partition{ }, "api.mediatailor": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: 
endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -1672,6 +2032,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -1705,6 +2068,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -1714,18 +2080,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -1735,6 +2113,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -1912,6 +2296,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -1976,6 +2363,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -1985,6 +2375,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -1994,15 +2387,30 @@ var awsPartition = partition{ }: endpoint{ Hostname: "apigateway-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -2021,6 +2429,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "apigateway-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -2057,6 +2474,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -2158,6 +2578,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, 
@@ -2167,18 +2590,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -2188,6 +2623,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -2231,24 +2669,42 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -2258,6 +2714,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -2313,21 +2775,81 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "appflow-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "appflow-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "appflow-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "appflow-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"appflow-fips.us-west-2.amazonaws.com", + }, }, }, "application-autoscaling": service{ @@ -2355,6 +2877,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -2364,18 +2889,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -2385,6 +2922,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -2428,24 +2968,36 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -2455,6 +3007,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -2642,6 +3197,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "appmesh.eu-west-3.api.aws", }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.il-central-1.api.aws", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -2787,9 +3351,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -2892,6 +3474,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -2953,6 +3538,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -2962,18 +3550,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: 
"eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -2983,6 +3580,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -3013,6 +3616,12 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -3031,6 +3640,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -3042,7 +3657,7 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "athena": service{ + "arc-zonal-shift": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", @@ -3062,6 +3677,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -3071,18 +3689,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -3092,6 +3722,223 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "athena": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: 
"ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-west-3.api.aws", + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -3128,48 +3975,126 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + 
Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.me-central-1.api.aws", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.me-south-1.api.aws", + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.sa-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "athena-fips.us-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ Hostname: "athena-fips.us-east-2.amazonaws.com", }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "athena-fips.us-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{ Hostname: "athena-fips.us-west-2.amazonaws.com", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-west-2.api.aws", + }, }, }, "auditmanager": service{ @@ -3201,15 +4126,75 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "auditmanager-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "auditmanager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "auditmanager-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "auditmanager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "auditmanager-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "auditmanager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + 
endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "auditmanager-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "auditmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, }, }, "autoscaling": service{ @@ -3237,6 +4222,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -3246,18 +4234,42 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -3267,6 +4279,63 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "autoscaling-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "autoscaling-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "autoscaling-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "autoscaling-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "autoscaling-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "autoscaling-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -3279,15 +4348,39 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.us-west-1.amazonaws.com", + }, 
endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.us-west-2.amazonaws.com", + }, }, }, "autoscaling-plans": service{ @@ -3385,6 +4478,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -3394,18 +4490,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -3415,6 +4523,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -3502,22 +4616,6 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "backupstorage": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, "batch": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -3546,6 +4644,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -3555,18 +4656,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -3612,6 +4725,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -3656,6 +4775,286 @@ var awsPartition = partition{ }, }, }, + "bedrock": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "bedrock-ap-northeast-1", + }: endpoint{ + Hostname: "bedrock.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "bedrock-ap-south-1", + }: endpoint{ + Hostname: "bedrock.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "bedrock-ap-southeast-1", + 
}: endpoint{ + Hostname: "bedrock.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "bedrock-ap-southeast-2", + }: endpoint{ + Hostname: "bedrock.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "bedrock-ca-central-1", + }: endpoint{ + Hostname: "bedrock.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "bedrock-eu-central-1", + }: endpoint{ + Hostname: "bedrock.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "bedrock-eu-west-1", + }: endpoint{ + Hostname: "bedrock.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "bedrock-eu-west-2", + }: endpoint{ + Hostname: "bedrock.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "bedrock-eu-west-3", + }: endpoint{ + Hostname: "bedrock.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "bedrock-fips-ca-central-1", + }: endpoint{ + Hostname: "bedrock-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "bedrock-fips-us-east-1", + }: endpoint{ + Hostname: "bedrock-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "bedrock-fips-us-west-2", + }: endpoint{ + Hostname: "bedrock-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "bedrock-runtime-ap-northeast-1", + }: endpoint{ + Hostname: "bedrock-runtime.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-ap-south-1", + }: endpoint{ + Hostname: "bedrock-runtime.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-ap-southeast-1", + }: endpoint{ + Hostname: "bedrock-runtime.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-ap-southeast-2", + }: endpoint{ + Hostname: "bedrock-runtime.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "bedrock-runtime-ca-central-1", + }: endpoint{ + Hostname: "bedrock-runtime.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-eu-central-1", + }: endpoint{ + Hostname: "bedrock-runtime.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-eu-west-1", + }: endpoint{ + Hostname: "bedrock-runtime.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-eu-west-2", + }: endpoint{ + Hostname: "bedrock-runtime.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "bedrock-runtime-eu-west-3", + }: endpoint{ + Hostname: "bedrock-runtime.eu-west-3.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "bedrock-runtime-fips-ca-central-1", + }: endpoint{ + Hostname: "bedrock-runtime-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-fips-us-east-1", + }: endpoint{ + Hostname: "bedrock-runtime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-fips-us-west-2", + }: endpoint{ + Hostname: "bedrock-runtime-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "bedrock-runtime-sa-east-1", + }: endpoint{ + Hostname: "bedrock-runtime.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-us-east-1", + }: endpoint{ + Hostname: "bedrock-runtime.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-us-west-2", + }: endpoint{ + Hostname: "bedrock-runtime.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "bedrock-sa-east-1", + }: endpoint{ + Hostname: "bedrock.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "bedrock-us-east-1", + }: endpoint{ + Hostname: "bedrock.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "bedrock-us-west-2", + }: endpoint{ + Hostname: "bedrock.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "billingconductor": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -3672,6 +5071,9 @@ var awsPartition = partition{ }, "braket": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, @@ -3702,12 +5104,53 @@ var awsPartition = partition{ }, "cases": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{}, }, }, "cassandra": service{ @@ -3839,71 +5282,304 @@ 
var awsPartition = partition{ }, }, }, - "cloud9": service{ + "cleanrooms": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "cloud9": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.ca-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: 
endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "cloud9-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "cloud9-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "cloud9-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "cloud9-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "cloud9-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-west-2.api.aws", + }, }, }, "cloudcontrolapi": service{ @@ -3911,57 +5587,216 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.af-south-1.api.aws", + }, endpointKey{ 
Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-east-1.api.aws", + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-northeast-1.api.aws", + }, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-northeast-2.api.aws", + }, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-northeast-3.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-south-2.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-southeast-1.api.aws", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-southeast-2.api.aws", + }, endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-southeast-4.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ca-central-1.api.aws", + }, endpointKey{ Region: "ca-central-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ca-west-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-central-2.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-north-1.api.aws", + }, endpointKey{ Region: 
"eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-south-2.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-west-1.api.aws", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-west-2.api.aws", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-west-3.api.aws", + }, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -3971,6 +5806,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -4007,51 +5851,126 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.me-central-1.api.aws", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.me-south-1.api.aws", + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.sa-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com", }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: 
"cloudcontrolapi-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-west-2.api.aws", + }, }, }, "clouddirectory": service{ @@ -4105,6 +6024,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -4114,18 +6036,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -4135,6 +6069,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -4267,6 +6204,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -4282,6 +6222,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -4297,6 +6240,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -4374,6 +6320,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -4383,18 +6332,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -4440,6 +6401,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -4487,6 +6451,79 @@ var awsPartition = partition{ }, }, }, + "cloudtrail-data": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + 
endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "codeartifact": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -4550,6 +6587,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -4559,18 +6599,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -4580,6 +6629,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -4660,6 +6715,17 @@ var awsPartition = partition{ }, }, }, + "codecatalyst": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "codecatalyst.global.api.aws", + }, + }, + }, "codecommit": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -4680,12 +6746,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -4731,6 +6803,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -4831,6 +6909,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -4840,18 +6921,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ 
Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -4861,6 +6954,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -4980,6 +7076,9 @@ var awsPartition = partition{ }, "codepipeline": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, @@ -4989,15 +7088,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -5010,12 +7121,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -5070,6 +7187,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -5180,6 +7306,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -5266,30 +7395,60 @@ var awsPartition = partition{ }, "cognito-identity": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -5317,6 +7476,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-west-1", + }: 
endpoint{ + Hostname: "cognito-identity-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ @@ -5326,6 +7494,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -5353,6 +7527,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -5366,30 +7546,60 @@ var awsPartition = partition{ }, "cognito-idp": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -5435,6 +7645,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -5614,12 +7830,27 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehendmedical-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "comprehendmedical-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -5726,6 +7957,14 @@ var awsPartition = partition{ Region: "ap-south-1", }, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "compute-optimizer.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -5742,6 +7981,22 @@ var awsPartition = partition{ Region: "ap-southeast-2", }, }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "compute-optimizer.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "compute-optimizer.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"ap-southeast-4", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -5758,6 +8013,14 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "compute-optimizer.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, endpointKey{ Region: "eu-north-1", }: endpoint{ @@ -5774,6 +8037,14 @@ var awsPartition = partition{ Region: "eu-south-1", }, }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "compute-optimizer.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -5798,6 +8069,22 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "compute-optimizer.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "compute-optimizer.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, endpointKey{ Region: "me-south-1", }: endpoint{ @@ -5868,6 +8155,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -5877,18 +8167,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -5934,6 +8236,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -6007,12 +8312,42 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "connect-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "connect-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "connect-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "connect-fips.us-west-2.amazonaws.com", + }, }, }, "connect-campaigns": service{ @@ -6020,6 +8355,12 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, @@ -6069,6 +8410,12 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + 
endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, @@ -6091,21 +8438,39 @@ var awsPartition = partition{ }, "controltower": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -6124,12 +8489,39 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "controltower-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -6139,6 +8531,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -6178,6 +8579,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "controltower-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -6198,6 +8617,18 @@ var awsPartition = partition{ }, }, }, + "cost-optimization-hub": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "cost-optimization-hub.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, "cur": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -6710,6 +9141,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -6719,6 +9153,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -6728,15 +9165,30 @@ var awsPartition = partition{ }: endpoint{ Hostname: "datasync-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + 
endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -6755,6 +9207,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "datasync-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -6791,6 +9252,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -6835,6 +9302,190 @@ var awsPartition = partition{ }, }, }, + "datazone": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "datazone.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "datazone.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "datazone.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "datazone.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "datazone.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "datazone.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "datazone.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "datazone.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "datazone.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "datazone.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "datazone.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "datazone.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "datazone.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "datazone.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "datazone.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "datazone.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "datazone.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "datazone.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "datazone.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", 
+ }: endpoint{ + Hostname: "datazone.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "datazone.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "datazone.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "datazone.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "datazone.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "datazone.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "datazone.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "datazone.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "datazone.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "datazone.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.us-west-2.amazonaws.com", + }, + }, + }, "dax": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -6910,6 +9561,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -6925,6 +9582,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "devops-guru-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -6943,6 +9609,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "devops-guru-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ @@ -6976,6 +9651,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -7007,6 +9688,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -7016,18 +9700,42 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: 
"eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -7037,6 +9745,24 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "directconnect-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "directconnect-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -7073,6 +9799,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -7165,6 +9894,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -7174,18 +9906,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -7195,6 +9939,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -7235,6 +9985,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -7244,9 +9997,15 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "dms", }: endpoint{ @@ -7277,12 +10036,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -7292,6 +10057,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -7511,6 +10279,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -7520,18 +10291,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", 
}: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -7541,6 +10321,48 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "drs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "drs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "drs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "drs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -7550,15 +10372,39 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-west-2.amazonaws.com", + }, }, }, "ds": service{ @@ -7581,6 +10427,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -7590,6 +10439,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -7599,15 +10451,30 @@ var awsPartition = partition{ }: endpoint{ Hostname: "ds-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -7626,6 +10493,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ds-fips.ca-west-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -7662,6 +10538,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -7731,6 +10613,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -7740,6 +10625,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -7758,15 +10646,39 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "dynamodb-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -7776,6 +10688,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "local", }: endpoint{ @@ -7888,6 +10803,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -7897,6 +10815,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -7906,15 +10827,30 @@ var awsPartition = partition{ }: endpoint{ Hostname: "ebs-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -7933,6 +10869,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ebs-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -7969,6 +10914,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -8044,6 +10995,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: 
"ec2.ap-south-1.api.aws", }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -8053,6 +11007,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -8062,15 +11019,30 @@ var awsPartition = partition{ }: endpoint{ Hostname: "ec2-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -8095,6 +11067,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ec2-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -8131,6 +11112,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -8222,6 +11206,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -8231,18 +11218,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -8288,6 +11287,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -8388,6 +11390,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -8397,18 +11402,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -8454,6 +11471,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, 
@@ -8498,6 +11521,166 @@ var awsPartition = partition{ }, }, }, + "eks-auth": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "eks-auth.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "eks-auth.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "eks-auth.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "eks-auth.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "eks-auth.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "eks-auth.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "eks-auth.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "eks-auth.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "eks-auth.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "eks-auth.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "eks-auth.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "eks-auth.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "eks-auth.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "eks-auth.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "eks-auth.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "eks-auth.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "eks-auth.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "eks-auth.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "eks-auth.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "eks-auth.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "eks-auth.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "eks-auth.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "eks-auth.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "eks-auth.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "eks-auth.us-west-2.api.aws", + }, + }, + }, "elasticache": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -8518,6 +11701,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + 
Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -8527,18 +11713,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -8557,6 +11755,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -8726,6 +11927,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -8826,6 +12030,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-2.amazonaws.com", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -8853,6 +12066,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com", }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-4.amazonaws.com", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -8862,6 +12084,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -8871,6 +12102,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-2.amazonaws.com", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -8889,6 +12129,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-2.amazonaws.com", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -8970,6 +12219,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ap-south-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-ap-southeast-1", }: endpoint{ @@ -8997,6 +12255,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ap-southeast-4", + }: endpoint{ + Hostname: 
"elasticfilesystem-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -9006,6 +12273,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-central-1", }: endpoint{ @@ -9015,6 +12291,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-eu-central-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-north-1", }: endpoint{ @@ -9033,6 +12318,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-eu-south-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-west-1", }: endpoint{ @@ -9060,6 +12354,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-il-central-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-me-central-1", }: endpoint{ @@ -9123,6 +12426,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.il-central-1.amazonaws.com", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -9213,6 +12525,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -9222,18 +12537,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -9279,6 +12606,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -9352,6 +12682,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -9361,6 +12694,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -9370,17 +12706,32 @@ var awsPartition = partition{ }: endpoint{ Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + 
Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{ SSLCommonName: "{service}.{region}.{dnsSuffix}", }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -9399,6 +12750,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "elasticmapreduce-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -9435,6 +12795,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -9459,6 +12822,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, @@ -9536,9 +12905,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -9557,6 +12935,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "email-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -9566,6 +12953,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "email-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "email-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ @@ -9575,6 +12980,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -9593,9 +13001,21 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -9609,12 +13029,21 @@ var awsPartition = partition{ }, "emr-containers": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: 
"ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -9624,6 +13053,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -9639,6 +13071,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -9693,6 +13131,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -9736,12 +13180,21 @@ var awsPartition = partition{ }, "emr-serverless": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -9751,6 +13204,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -9766,6 +13222,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -9820,6 +13282,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -9880,51 +13348,192 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.af-south-1.api.aws", + }, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-east-1.api.aws", + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-northeast-1.api.aws", + }, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-northeast-2.api.aws", + }, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-northeast-3.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-south-2.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + 
Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-1.api.aws", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-2.api.aws", + }, endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-4.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-central-2.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-north-1.api.aws", + }, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-south-2.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-west-1.api.aws", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-west-2.api.aws", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-west-3.api.aws", + }, endpointKey{ Region: "fips", }: endpoint{ @@ -9934,18 +13543,51 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.me-central-1.api.aws", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.me-south-1.api.aws", + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.sa-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, @@ 
-9964,6 +13606,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, @@ -9982,6 +13630,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, @@ -10000,6 +13654,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, @@ -10037,6 +13697,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -10046,18 +13709,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -10103,6 +13778,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -10201,12 +13879,27 @@ var awsPartition = partition{ }, "finspace": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -10257,6 +13950,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -10266,18 +13962,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -10323,6 +14031,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -10422,6 +14136,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: 
"fms-fips.ap-south-1.amazonaws.com", }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -10440,6 +14157,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "fms-fips.ap-southeast-2.amazonaws.com", }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -10449,6 +14172,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "fms-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -10458,6 +14190,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: "fms-fips.eu-central-1.amazonaws.com", }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -10470,6 +14205,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: "fms-fips.eu-south-1.amazonaws.com", }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -10569,6 +14307,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "fms-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-central-1", }: endpoint{ @@ -10668,6 +14415,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -10924,6 +14677,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -10933,6 +14689,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -10942,15 +14701,30 @@ var awsPartition = partition{ }: endpoint{ Hostname: "fsx-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -10969,6 +14743,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "fsx-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-prod-ca-central-1", }: endpoint{ @@ -10978,6 +14761,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-prod-ca-west-1", + }: endpoint{ + Hostname: "fsx-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: 
"fips-prod-us-east-1", }: endpoint{ @@ -11050,6 +14842,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -11071,6 +14869,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "prod-ca-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "prod-us-east-1", }: endpoint{ @@ -11251,24 +15067,23 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "gamesparks": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, "geo": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -11278,6 +15093,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -11439,6 +15260,18 @@ var awsPartition = partition{ }, }, }, + "globalaccelerator": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "globalaccelerator-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "glue": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -11459,6 +15292,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -11468,18 +15304,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -11525,6 +15373,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -11679,6 +15533,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -11688,15 +15548,69 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: 
"greengrass-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "greengrass-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "greengrass-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "greengrass-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.us-west-2.amazonaws.com", + }, }, }, "groundstation": service{ @@ -11810,6 +15724,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -11819,18 +15736,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -11840,6 +15769,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -11977,6 +15909,9 @@ var awsPartition = partition{ }, }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -11988,13 +15923,6 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "honeycode": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, "iam": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -12081,42 +16009,90 @@ var awsPartition = partition{ }, "identitystore": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: 
endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -12159,6 +16135,7 @@ var awsPartition = partition{ CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, endpointKey{ Region: "ingest-fips-us-east-2", @@ -12167,6 +16144,7 @@ var awsPartition = partition{ CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, endpointKey{ Region: "ingest-fips-us-west-2", @@ -12175,6 +16153,61 @@ var awsPartition = partition{ CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, }, endpointKey{ Region: "us-east-1", @@ -12289,6 +16322,9 @@ var awsPartition = partition{ }, "inspector2": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, @@ -12298,6 +16334,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -12307,12 +16346,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -12328,6 +16373,42 @@ var 
awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "inspector2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "inspector2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "inspector2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "inspector2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -12337,25 +16418,232 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-west-2.amazonaws.com", + }, + }, + }, + "internetmonitor": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "internetmonitor.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "internetmonitor.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "internetmonitor.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "internetmonitor.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "internetmonitor.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "internetmonitor.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "internetmonitor.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "internetmonitor.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "internetmonitor.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "internetmonitor.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "internetmonitor.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "internetmonitor.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"internetmonitor-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "internetmonitor.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "internetmonitor.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "internetmonitor.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "internetmonitor.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "internetmonitor.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "internetmonitor.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "internetmonitor.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "internetmonitor.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "internetmonitor.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "internetmonitor.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "internetmonitor.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "internetmonitor.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "internetmonitor.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "internetmonitor.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "internetmonitor.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "internetmonitor.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "internetmonitor.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-west-2.amazonaws.com", + }, }, }, "iot": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - }, Endpoints: serviceEndpoints{ endpointKey{ Region: "ap-east-1", @@ -12403,45 +16691,35 @@ var awsPartition = partition{ Region: "fips-ca-central-1", }: endpoint{ Hostname: "iot-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ Hostname: "iot-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "fips-us-east-2", }: endpoint{ Hostname: "iot-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "fips-us-west-1", }: endpoint{ Hostname: "iot-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "fips-us-west-2", 
}: endpoint{ Hostname: "iot-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Deprecated: boxedTrue, }, endpointKey{ @@ -13070,24 +17348,239 @@ var awsPartition = partition{ }, "iottwinmaker": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "api-ap-northeast-1", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "api-ap-northeast-2", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "api-ap-south-1", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "api-ap-southeast-1", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "api-ap-southeast-2", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "api-eu-central-1", + }: endpoint{ + Hostname: "api.iottwinmaker.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "api-eu-west-1", + }: endpoint{ + Hostname: "api.iottwinmaker.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "api-us-east-1", + }: endpoint{ + Hostname: "api.iottwinmaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "api-us-west-2", + }: endpoint{ + Hostname: "api.iottwinmaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "data-ap-northeast-1", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "data-ap-northeast-2", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "data-ap-south-1", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "data-ap-southeast-1", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "data-ap-southeast-2", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "data-eu-central-1", + }: endpoint{ + Hostname: "data.iottwinmaker.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "data-eu-west-1", + }: endpoint{ + Hostname: 
"data.iottwinmaker.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "data-us-east-1", + }: endpoint{ + Hostname: "data.iottwinmaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "data-us-west-2", + }: endpoint{ + Hostname: "data.iottwinmaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "fips-api-us-east-1", + }: endpoint{ + Hostname: "api.iottwinmaker-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-api-us-west-2", + }: endpoint{ + Hostname: "api.iottwinmaker-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "fips-data-us-east-1", + }: endpoint{ + Hostname: "data.iottwinmaker-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-data-us-west-2", + }: endpoint{ + Hostname: "data.iottwinmaker-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "iottwinmaker-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "iottwinmaker-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iottwinmaker-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iottwinmaker-fips.us-west-2.amazonaws.com", + }, }, }, "iotwireless": service{ @@ -13184,6 +17677,31 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "ivsrealtime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "kafka": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -13204,6 +17722,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -13213,18 +17734,42 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.ca-west-1.amazonaws.com", + }, endpointKey{ 
Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -13234,6 +17779,66 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "kafka-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "kafka-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "kafka-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "kafka-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "kafka-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "kafka-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -13243,15 +17848,39 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-west-2.amazonaws.com", + }, }, }, "kafkaconnect": service{ @@ -13308,6 +17937,12 @@ var awsPartition = partition{ }, "kendra": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -13317,9 +17952,27 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "kendra-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -13376,6 +18029,180 @@ var awsPartition = partition{ }, 
}, }, + "kendra-ranking": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "kendra-ranking.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "kendra-ranking.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "kendra-ranking.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "kendra-ranking.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "kendra-ranking.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "kendra-ranking.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "kendra-ranking.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "kendra-ranking.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "kendra-ranking.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "kendra-ranking.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "kendra-ranking.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "kendra-ranking.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "kendra-ranking.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "kendra-ranking.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "kendra-ranking.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "kendra-ranking.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "kendra-ranking.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "kendra-ranking.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "kendra-ranking.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "kendra-ranking.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "kendra-ranking.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "kendra-ranking.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "kendra-ranking.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "kendra-ranking.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "kendra-ranking.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: 
"kendra-ranking.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "kendra-ranking.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.us-west-2.api.aws", + }, + }, + }, "kinesis": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -13396,6 +18223,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -13405,18 +18235,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -13462,6 +18304,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -13529,6 +18374,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -13538,18 +18386,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -13559,6 +18419,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -13750,6 +18616,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-south-2.amazonaws.com", + }, endpointKey{ Region: "ap-south-2-fips", }: endpoint{ @@ -13813,6 +18688,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-southeast-4.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-4-fips", + }: endpoint{ + Hostname: "kms-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -13831,6 +18724,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"kms-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "kms-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -13849,6 +18760,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-central-2.amazonaws.com", + }, endpointKey{ Region: "eu-central-2-fips", }: endpoint{ @@ -13894,6 +18814,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-south-2.amazonaws.com", + }, endpointKey{ Region: "eu-south-2-fips", }: endpoint{ @@ -13957,6 +18886,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.il-central-1.amazonaws.com", + }, + endpointKey{ + Region: "il-central-1-fips", + }: endpoint{ + Hostname: "kms-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -14105,24 +19052,42 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -14168,6 +19133,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -14268,6 +19239,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "lambda.ap-south-1.api.aws", }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-south-2.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -14295,6 +19275,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "lambda.ap-southeast-3.api.aws", }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-southeast-4.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -14304,6 +19293,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "lambda.ca-central-1.api.aws", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ca-west-1.api.aws", + 
}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -14313,6 +19311,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "lambda.eu-central-1.api.aws", }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-central-2.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -14331,6 +19338,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "lambda.eu-south-1.api.aws", }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-south-2.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -14394,6 +19410,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -14503,6 +19528,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -14512,18 +19540,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -14569,6 +19609,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -14613,6 +19659,157 @@ var awsPartition = partition{ }, }, }, + "license-manager-linux-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + 
Hostname: "license-manager-linux-subscriptions-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-west-2.amazonaws.com", + }, + }, + }, "license-manager-user-subscriptions": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -14633,24 +19830,42 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -14696,6 +19911,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -14791,51 +20012,222 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.af-south-1.api.aws", + }, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-east-1.api.aws", + }, endpointKey{ Region: 
"ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-northeast-1.api.aws", + }, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-northeast-2.api.aws", + }, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-northeast-3.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-south-2.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-southeast-1.api.aws", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-southeast-2.api.aws", + }, endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-southeast-4.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ca-west-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-central-2.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-north-1.api.aws", + }, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-south-2.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-west-1.api.aws", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-west-2.api.aws", + 
}, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-west-3.api.aws", + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "logs-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "logs-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -14872,18 +20264,51 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.me-central-1.api.aws", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.me-south-1.api.aws", + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.sa-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, @@ -14893,6 +20318,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, @@ -14902,6 +20333,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, @@ -14911,6 +20348,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, @@ -14990,27 +20433,119 @@ var awsPartition = partition{ }, "m2": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: 
endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{}, }, }, "machinelearning": service{ @@ -15023,46 +20558,6 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "macie": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "macie-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "macie-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "macie-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "macie-fips.us-west-2.amazonaws.com", - }, - }, - }, "macie2": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -15146,6 +20641,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -15212,6 +20710,13 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "managedblockchain-query": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, "marketplacecommerceanalytics": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -15221,12 +20726,30 @@ var awsPartition = partition{ }, "media-pipelines-chime": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -15267,6 +20790,9 @@ var awsPartition = partition{ }, "mediaconnect": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, @@ -15276,15 +20802,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + 
[vendored diff elided: auto-generated AWS endpoint metadata ("var awsPartition" in the vendored aws-sdk-go "endpoints" package), flattened beyond line-by-line readability in the source. The hunks in this span add rows for newer regions (ap-northeast-3, ap-south-2, ap-southeast-3, ap-southeast-4, ca-central-1, ca-west-1, eu-central-2, eu-south-2, il-central-1, me-central-1, us-west-1) and FIPS/dual-stack variant hostnames (for example "pi-fips.us-east-1.amazonaws.com" and "pi.us-east-1.api.aws") to existing service tables, among them mediaconvert, meetings-chime, mgn, networkmanager, nimble, oidc, outposts, pi, pinpoint, polly, portal.sso, profile, quicksight, ram, rbin, rds, redshift, redshift-serverless, rekognition, resource-groups, rolesanywhere, rum, s3, and s3-control. They also introduce entirely new service tables (mediapackagev2, metrics.sagemaker, oam, omics, osis, pipes, private-networks, qbusiness, resource-explorer-2). Legacy "fips-<region>" keys are marked Deprecated: boxedTrue, and qbusiness defines endpointDefaults with DNSSuffix "api.aws" plus a "{service}-fips.{region}.{dnsSuffix}" FIPS hostname template.]
Region: "il-central-1", + }: endpoint{ + Hostname: "s3-control.il-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.il-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "s3-control.me-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.me-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "s3-control.me-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.me-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, endpointKey{ Region: "sa-east-1", }: endpoint{ @@ -20671,55 +28684,123 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ca-central-1", Variant: fipsVariant, }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + 
endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -20750,40 +28831,94 @@ var awsPartition = partition{ Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, + }, + }, + "sagemaker-geospatial": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "savingsplans": service{ @@ -20800,8 +28935,11 @@ var awsPartition = partition{ }, }, }, - "schemas": service{ + "scheduler": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, @@ -20811,6 +28949,94 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ 
+ Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "schemas": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -20820,15 +29046,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -20838,6 +29076,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -20896,145 +29140,288 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ca-central-1", Variant: fipsVariant, - }: endpoint{ - Hostname: "secretsmanager-fips.ca-central-1.amazonaws.com", - }, + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: 
fipsVariant | dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ca-central-1-fips", }: endpoint{ - Hostname: "secretsmanager-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Deprecated: boxedTrue, }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-east-1", Variant: fipsVariant, - }: endpoint{ - Hostname: "secretsmanager-fips.us-east-1.amazonaws.com", - }, + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-east-1-fips", }: endpoint{ - Hostname: "secretsmanager-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-east-2", Variant: fipsVariant, - }: endpoint{ - Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", - }, + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-east-2-fips", }: endpoint{ - Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + 
Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-west-1", Variant: fipsVariant, - }: endpoint{ - Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", - }, + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-west-1-fips", }: endpoint{ - Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-west-2", Variant: fipsVariant, - }: endpoint{ - Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", - }, + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-west-2-fips", }: endpoint{ - Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, + Deprecated: boxedTrue, }, }, @@ -21059,6 +29446,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -21068,18 +29458,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -21125,6 +29527,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -21169,6 +29577,121 @@ var awsPartition = partition{ }, }, }, + "securitylake": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "securitylake-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "securitylake-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "securitylake-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "securitylake-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securitylake-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securitylake-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securitylake-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securitylake-fips.us-west-2.amazonaws.com", + }, + }, + }, "serverlessrepo": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -21251,21 +29774,85 @@ var awsPartition = partition{ }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo-fips.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "serverlessrepo-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-2", }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo-fips.us-east-2.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "serverlessrepo-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-1", }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo-fips.us-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "serverlessrepo-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-2", }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo-fips.us-west-2.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "serverlessrepo-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, }, }, "servicecatalog": service{ @@ -21288,6 +29875,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -21297,18 +29887,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: 
"eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -21318,6 +29917,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -21418,6 +30023,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -21427,6 +30035,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -21436,15 +30047,24 @@ var awsPartition = partition{ }: endpoint{ Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -21499,6 +30119,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -21548,39 +30174,123 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.af-south-1.api.aws", + }, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-east-1.api.aws", + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-northeast-1.api.aws", + }, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-northeast-2.api.aws", + }, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-northeast-3.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-south-2.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-southeast-1.api.aws", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: 
"servicediscovery.ap-southeast-2.api.aws", + }, endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-southeast-4.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ca-central-1.api.aws", + }, endpointKey{ Region: "ca-central-1", Variant: fipsVariant, }: endpoint{ Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.ca-central-1.api.aws", + }, endpointKey{ Region: "ca-central-1-fips", }: endpoint{ @@ -21590,69 +30300,165 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ca-west-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.ca-west-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-central-2.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-north-1.api.aws", + }, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-south-2.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-west-1.api.aws", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-west-2.api.aws", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.il-central-1.api.aws", + }, endpointKey{ Region: 
"me-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.me-central-1.api.aws", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.me-south-1.api.aws", + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, endpointKey{ - Region: "servicediscovery", + Region: "sa-east-1", + Variant: dualStackVariant, }: endpoint{ - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "servicediscovery", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "servicediscovery-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, + Hostname: "servicediscovery.sa-east-1.api.aws", }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "servicediscovery-fips.us-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1-fips", }: endpoint{ @@ -21665,12 +30471,24 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ Hostname: "servicediscovery-fips.us-east-2.amazonaws.com", }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2-fips", }: endpoint{ @@ -21683,12 +30501,24 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "servicediscovery-fips.us-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1-fips", }: endpoint{ @@ -21701,12 +30531,24 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{ Hostname: "servicediscovery-fips.us-west-2.amazonaws.com", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2-fips", }: endpoint{ @@ -21743,6 +30585,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: 
endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -21752,18 +30597,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -21773,6 +30630,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -21913,7 +30776,7 @@ var awsPartition = partition{ }, }, }, - "sms": service{ + "signer": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", @@ -21960,7 +30823,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-east-1", }: endpoint{ - Hostname: "sms-fips.us-east-1.amazonaws.com", + Hostname: "signer-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, @@ -21969,7 +30832,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-east-2", }: endpoint{ - Hostname: "sms-fips.us-east-2.amazonaws.com", + Hostname: "signer-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, @@ -21978,7 +30841,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-west-1", }: endpoint{ - Hostname: "sms-fips.us-west-1.amazonaws.com", + Hostname: "signer-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, @@ -21987,12 +30850,44 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-west-2", }: endpoint{ - Hostname: "sms-fips.us-west-2.amazonaws.com", + Hostname: "signer-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-verification-us-east-1", + }: endpoint{ + Hostname: "verification.signer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-verification-us-east-2", + }: endpoint{ + Hostname: "verification.signer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "fips-verification-us-west-1", + }: endpoint{ + Hostname: "verification.signer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "fips-verification-us-west-2", + }: endpoint{ + Hostname: "verification.signer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -22006,7 +30901,7 @@ var awsPartition = partition{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "sms-fips.us-east-1.amazonaws.com", + Hostname: "signer-fips.us-east-1.amazonaws.com", }, endpointKey{ Region: "us-east-2", @@ -22015,7 +30910,7 @@ var awsPartition = partition{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ - Hostname: "sms-fips.us-east-2.amazonaws.com", + Hostname: "signer-fips.us-east-2.amazonaws.com", }, endpointKey{ Region: "us-west-1", @@ -22024,7 +30919,217 
@@ var awsPartition = partition{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ - Hostname: "sms-fips.us-west-1.amazonaws.com", + Hostname: "signer-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "verification-af-south-1", + }: endpoint{ + Hostname: "verification.signer.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "verification-ap-east-1", + }: endpoint{ + Hostname: "verification.signer.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "verification-ap-northeast-1", + }: endpoint{ + Hostname: "verification.signer.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "verification-ap-northeast-2", + }: endpoint{ + Hostname: "verification.signer.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "verification-ap-south-1", + }: endpoint{ + Hostname: "verification.signer.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "verification-ap-southeast-1", + }: endpoint{ + Hostname: "verification.signer.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "verification-ap-southeast-2", + }: endpoint{ + Hostname: "verification.signer.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "verification-ca-central-1", + }: endpoint{ + Hostname: "verification.signer.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "verification-eu-central-1", + }: endpoint{ + Hostname: "verification.signer.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "verification-eu-north-1", + }: endpoint{ + Hostname: "verification.signer.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "verification-eu-south-1", + }: endpoint{ + Hostname: "verification.signer.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "verification-eu-west-1", + }: endpoint{ + Hostname: "verification.signer.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "verification-eu-west-2", + }: endpoint{ + Hostname: "verification.signer.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "verification-eu-west-3", + }: endpoint{ + Hostname: "verification.signer.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "verification-me-south-1", + }: endpoint{ + Hostname: "verification.signer.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "verification-sa-east-1", + }: endpoint{ + Hostname: "verification.signer.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"sa-east-1", + }, + }, + endpointKey{ + Region: "verification-us-east-1", + }: endpoint{ + Hostname: "verification.signer.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "verification-us-east-2", + }: endpoint{ + Hostname: "verification.signer.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "verification-us-west-1", + }: endpoint{ + Hostname: "verification.signer.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "verification-us-west-2", + }: endpoint{ + Hostname: "verification.signer.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "simspaceweaver": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "sms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "sms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, }, endpointKey{ Region: "us-west-2", @@ -22039,36 +31144,165 @@ var awsPartition = partition{ }, "sms-voice": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "sms-voice-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "sms-voice-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "sms-voice-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "sms-voice-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "sms-voice-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-west-2.amazonaws.com", + }, }, }, "snowball": service{ @@ -22331,6 +31565,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -22403,6 +31643,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -22412,18 +31655,36 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -22433,6 +31694,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "sns-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -22469,6 +31739,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -22542,6 +31815,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -22551,18 +31827,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: 
"ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -22608,6 +31896,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -22678,6 +31969,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -22687,6 +31981,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -22696,15 +31993,30 @@ var awsPartition = partition{ }: endpoint{ Hostname: "ssm-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -22723,6 +32035,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ssm-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -22759,6 +32080,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -22806,6 +32130,118 @@ var awsPartition = partition{ }, }, }, + "ssm-contacts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ssm-contacts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ssm-contacts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ssm-contacts-fips.us-west-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ssm-contacts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-contacts-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-contacts-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-contacts-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-contacts-fips.us-west-2.amazonaws.com", + }, + }, + }, "ssm-incidents": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -22826,6 +32262,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -22841,85 +32283,93 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ssm-incidents-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ssm-incidents-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ssm-incidents-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ssm-incidents-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ssm-incidents-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "sso": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - 
endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.us-west-2.amazonaws.com", + }, }, }, - "states": service{ + "ssm-sap": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", @@ -22951,6 +32401,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -22969,6 +32425,248 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ssm-sap-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ssm-sap-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ssm-sap-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ssm-sap-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ssm-sap-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.us-west-2.amazonaws.com", + }, + }, + }, + "sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: 
"af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "states": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -23005,6 +32703,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -23072,6 +32773,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -23081,6 +32785,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -23099,15 +32806,39 @@ var 
awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -23118,14 +32849,11 @@ var awsPartition = partition{ Region: "eu-west-3", }: endpoint{}, endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -23234,6 +32962,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -23243,18 +32974,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -23264,6 +33007,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "local", }: endpoint{ @@ -23317,6 +33063,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -23326,6 +33075,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "aws-global", }: endpoint{ @@ -23337,15 +33089,24 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -23355,6 +33116,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -23484,6 +33248,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", 
}: endpoint{}, @@ -23493,18 +33260,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -23550,6 +33329,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -23617,6 +33399,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -23626,18 +33411,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -23683,6 +33480,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -23750,6 +33550,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -23759,18 +33562,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -23780,6 +33595,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -23808,36 +33626,96 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-northeast-2.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-south-1.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-southeast-1.api.aws", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: 
dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-southeast-2.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ca-central-1.api.aws", + }, endpointKey{ Region: "ca-central-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.ca-central-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-central-1.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-west-1.api.aws", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-west-2.api.aws", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-west-3.api.aws", + }, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -23886,39 +33764,146 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-east-2.amazonaws.com", }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-west-2.amazonaws.com", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-west-2.api.aws", + }, + }, + }, + "thinclient": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + 
Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "tnb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "transcribe": service{ @@ -24070,12 +34055,21 @@ var awsPartition = partition{ }, "transcribestreaming": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, @@ -24233,12 +34227,21 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -24248,15 +34251,30 @@ var awsPartition = partition{ }: endpoint{ Hostname: "transfer-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -24275,6 +34293,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "transfer-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -24311,6 +34338,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -24437,6 +34470,21 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "translate-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -24457,6 +34505,266 @@ var awsPartition = partition{ }, }, }, + "verifiedpermissions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + 
endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"verifiedpermissions-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-2.amazonaws.com", + }, + }, + }, + "voice-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voice-chime-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "voice-chime-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voice-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "voice-chime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voice-chime-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "voice-chime-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, "voiceid": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -24468,22 +34776,46 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voiceid-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "voiceid-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ - + Hostname: "voiceid-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, Deprecated: boxedTrue, }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ - + Hostname: "voiceid-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, Deprecated: boxedTrue, }, endpointKey{ @@ -24492,13 +34824,69 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", Variant: fipsVariant, - }: endpoint{}, + }: endpoint{ + Hostname: "voiceid-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, endpointKey{ Region: "us-west-2", Variant: fipsVariant, + }: endpoint{ + Hostname: "voiceid-fips.us-west-2.amazonaws.com", + }, + }, + }, + "vpc-lattice": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: 
endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", }: endpoint{}, }, }, @@ -24665,6 +35053,23 @@ var awsPartition = partition{ Region: "ap-south-1", }, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "waf-regional.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -24716,6 +35121,23 @@ var awsPartition = partition{ Region: "ap-southeast-3", }, }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "waf-regional.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -24750,6 +35172,23 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "waf-regional.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-central-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, endpointKey{ Region: "eu-north-1", }: endpoint{ @@ -24784,6 +35223,23 @@ var awsPartition = partition{ Region: "eu-south-1", }, }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "waf-regional.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -24889,6 +35345,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ap-south-2", + }: endpoint{ + Hostname: "waf-regional-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-ap-southeast-1", }: endpoint{ @@ -24916,6 +35381,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ap-southeast-4", + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-4.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "ap-southeast-4", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -24934,6 +35408,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-eu-central-2", + }: endpoint{ + Hostname: "waf-regional-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-north-1", }: endpoint{ @@ -24952,6 +35435,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-eu-south-2", + }: endpoint{ + Hostname: "waf-regional-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-west-1", }: endpoint{ @@ -24979,6 +35471,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-il-central-1", + }: endpoint{ + Hostname: "waf-regional-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-me-central-1", + }: endpoint{ + Hostname: "waf-regional-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-me-south-1", }: endpoint{ @@ -25033,6 +35543,40 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "waf-regional.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "waf-regional.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, endpointKey{ Region: "me-south-1", }: endpoint{ @@ -25241,6 +35785,23 @@ var awsPartition = partition{ Region: "ap-south-1", }, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "wafv2.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -25292,6 +35853,23 @@ var awsPartition = partition{ Region: "ap-southeast-3", }, }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "wafv2.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -25309,6 +35887,23 @@ var awsPartition = partition{ Region: "ca-central-1", }, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "wafv2.ca-west-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -25326,6 +35921,23 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "wafv2.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-central-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, endpointKey{ Region: "eu-north-1", }: endpoint{ @@ -25360,6 +35972,23 @@ var awsPartition = partition{ Region: "eu-south-1", }, }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "wafv2.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -25465,6 +36094,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ap-south-2", + }: endpoint{ + Hostname: "wafv2-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-ap-southeast-1", }: endpoint{ @@ -25492,6 +36130,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ap-southeast-4", + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -25501,6 +36148,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "wafv2-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-central-1", }: endpoint{ @@ -25510,6 +36166,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-eu-central-2", + }: endpoint{ + Hostname: "wafv2-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-north-1", }: endpoint{ @@ -25528,6 +36193,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-eu-south-2", + }: endpoint{ + Hostname: "wafv2-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-west-1", }: endpoint{ @@ -25555,6 +36229,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-il-central-1", + }: endpoint{ + Hostname: "wafv2-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-me-central-1", + }: endpoint{ + Hostname: "wafv2-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + Deprecated: 
boxedTrue, + }, endpointKey{ Region: "fips-me-south-1", }: endpoint{ @@ -25609,6 +36301,40 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "wafv2.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "wafv2.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, endpointKey{ Region: "me-south-1", }: endpoint{ @@ -25776,9 +36502,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -25800,9 +36535,18 @@ var awsPartition = partition{ endpointKey{ Region: "ui-ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ui-ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ui-ap-southeast-1", + }: endpoint{}, endpointKey{ Region: "ui-ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ui-ca-central-1", + }: endpoint{}, endpointKey{ Region: "ui-eu-central-1", }: endpoint{}, @@ -25951,6 +36695,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -26028,6 +36775,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -26037,18 +36787,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -26094,6 +36856,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -26233,6 +36998,31 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "acm-pca": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "airflow": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "api.ecr": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ 
-26253,6 +37043,20 @@ var awscnPartition = partition{ }, }, }, + "api.pricing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "api.sagemaker": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -26360,7 +37164,7 @@ var awscnPartition = partition{ }: endpoint{}, }, }, - "athena": service{ + "arc-zonal-shift": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "cn-north-1", @@ -26370,6 +37174,28 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "athena": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, "autoscaling": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -26463,9 +37289,21 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.cn-north-1.api.amazonwebservices.com.cn", + }, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.cn-northwest-1.api.amazonwebservices.com.cn", + }, }, }, "cloudformation": service{ @@ -26599,7 +37437,10 @@ var awscnPartition = partition{ Endpoints: serviceEndpoints{ endpointKey{ Region: "cn-north-1", - }: endpoint{}, + }: endpoint{ + Hostname: "data.ats.iot.cn-north-1.amazonaws.com.cn", + Protocols: []string{"https"}, + }, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, @@ -26625,6 +37466,41 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "datasync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "datazone": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "datazone.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "datazone.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, "dax": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -26752,6 +37628,31 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "eks-auth": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "eks-auth.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: 
"eks-auth.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, "elasticache": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -26837,9 +37738,21 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.cn-north-1.api.amazonwebservices.com.cn", + }, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.cn-northwest-1.api.amazonwebservices.com.cn", + }, }, }, "emr-containers": service{ @@ -26852,7 +37765,7 @@ var awscnPartition = partition{ }: endpoint{}, }, }, - "es": service{ + "emr-serverless": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "cn-north-1", @@ -26862,6 +37775,41 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "entitlement.marketplace": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "entitlement-marketplace.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "es": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, "events": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -27017,14 +37965,52 @@ var awscnPartition = partition{ }, }, }, - "iot": service{ + "identitystore": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "inspector2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "internetmonitor": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", }, }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "internetmonitor.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "internetmonitor.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "iot": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "cn-north-1", @@ -27077,6 +38063,29 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "iottwinmaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "api-cn-north-1", + }: endpoint{ + Hostname: "api.iottwinmaker.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "data-cn-north-1", + }: endpoint{ + Hostname: "data.iottwinmaker.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, 
"kafka": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -27087,6 +38096,31 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "kendra-ranking": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "kendra-ranking.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "kendra-ranking.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, "kinesis": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -27107,6 +38141,13 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "kinesisvideo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, "kms": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -27159,6 +38200,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "license-manager-linux-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "logs": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -27174,7 +38225,7 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-northwest-1", }: endpoint{ - Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", + Hostname: "mediaconvert.cn-northwest-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-northwest-1", }, @@ -27191,6 +38242,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "monitoring": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -27236,6 +38297,46 @@ var awscnPartition = partition{ }, }, }, + "network-firewall": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "oam": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "oidc": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "oidc.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "oidc.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "organizations": service{ PartitionEndpoint: "aws-cn-global", IsRegionalized: boxedFalse, @@ -27258,6 +38359,34 @@ var awscnPartition = partition{ }, }, "pi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.cn-north-1.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: 
"pi.cn-northwest-1.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + }, + }, + }, + "pipes": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "cn-north-1", @@ -27274,6 +38403,58 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "portal.sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "portal.sso.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "portal.sso.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "qbusiness": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "qbusiness.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "qbusiness.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "quicksight": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, "ram": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -27314,6 +38495,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "redshift-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "resource-groups": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -27324,6 +38515,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "rolesanywhere": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "route53": service{ PartitionEndpoint: "aws-cn-global", IsRegionalized: boxedFalse, @@ -27455,7 +38656,28 @@ var awscnPartition = partition{ }, }, }, - "secretsmanager": service{ + "savingsplans": service{ + IsRegionalized: boxedTrue, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "savingsplans.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "savingsplans.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "schemas": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "cn-north-1", @@ -27465,6 +38687,24 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "secretsmanager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{}, + }, + }, "securityhub": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -27509,12 +38749,29 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: 
"servicediscovery.cn-north-1.api.amazonwebservices.com.cn", + }, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.cn-northwest-1.api.amazonwebservices.com.cn", + }, }, }, - "sms": service{ + "servicequotas": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, Endpoints: serviceEndpoints{ endpointKey{ Region: "cn-north-1", @@ -27524,6 +38781,39 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "signer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "verification-cn-north-1", + }: endpoint{ + Hostname: "verification.signer.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "verification-cn-northwest-1", + }: endpoint{ + Hostname: "verification.signer.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "sms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, "snowball": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -27605,7 +38895,7 @@ var awscnPartition = partition{ }: endpoint{}, }, }, - "states": service{ + "sso": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "cn-north-1", @@ -27615,6 +38905,28 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "states": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "states.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "states.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, "storagegateway": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -27938,6 +39250,24 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -27946,6 +39276,24 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "acm": service{ @@ -28200,6 +39548,9 @@ var awsusgovPartition = partition{ }, }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -28347,12 +39698,42 @@ var awsusgovPartition = partition{ }, 
"appconfigdata": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "appconfigdata.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "appconfigdata.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appconfigdata.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appconfigdata.us-gov-west-1.amazonaws.com", + }, }, }, "application-autoscaling": service{ @@ -28369,13 +39750,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{ + Hostname: "application-autoscaling.us-gov-east-1.amazonaws.com", Protocols: []string{"http", "https"}, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "application-autoscaling.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "application-autoscaling.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ + Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com", Protocols: []string{"http", "https"}, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + + Deprecated: boxedTrue, + }, }, }, "applicationinsights": service{ @@ -28417,6 +39830,24 @@ var awsusgovPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appstream2-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "appstream2-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -28437,6 +39868,16 @@ var awsusgovPartition = partition{ }, }, }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "athena": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -28460,21 +39901,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "athena-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-gov-east-1.api.aws", + }, 
endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "athena-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-gov-west-1.api.aws", + }, }, }, "autoscaling": service{ @@ -28509,13 +39974,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{ + Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com", Protocols: []string{"http", "https"}, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ + Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com", Protocols: []string{"http", "https"}, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + + Deprecated: boxedTrue, + }, }, }, "backup": service{ @@ -28586,6 +40083,45 @@ var awsusgovPartition = partition{ }, }, }, + "bedrock": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "bedrock-fips-us-gov-west-1", + }: endpoint{ + Hostname: "bedrock-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-fips-us-gov-west-1", + }: endpoint{ + Hostname: "bedrock-runtime-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-us-gov-west-1", + }: endpoint{ + Hostname: "bedrock-runtime.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "bedrock-us-gov-west-1", + }: endpoint{ + Hostname: "bedrock.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "cassandra": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -28596,6 +40132,24 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cassandra.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "cassandra.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -28604,6 +40158,24 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cassandra.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "cassandra.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "cloudcontrolapi": service{ @@ -28629,21 +40201,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-west-1.api.aws", + }, }, }, "clouddirectory": service{ @@ -28651,6 +40247,21 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "clouddirectory.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "clouddirectory.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "cloudformation": service{ @@ -28663,6 +40274,24 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -28671,6 +40300,24 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "cloudhsm": service{ @@ -28876,6 +40523,15 @@ var awsusgovPartition = partition{ }, "codepipeline": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "codepipeline-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ @@ -28885,6 +40541,15 @@ var awsusgovPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + 
Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codepipeline-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -28896,6 +40561,13 @@ var awsusgovPartition = partition{ }, }, }, + "codestar-connections": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + }, + }, "cognito-identity": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -28989,6 +40661,26 @@ var awsusgovPartition = partition{ }, }, }, + "compute-optimizer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "compute-optimizer-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "compute-optimizer-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "config": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -29039,9 +40731,24 @@ var awsusgovPartition = partition{ }, "connect": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "connect.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "connect.us-gov-west-1.amazonaws.com", + }, }, }, "controltower": service{ @@ -29049,9 +40756,39 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "controltower-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "controltower-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "data-ats.iot": service{ @@ -29144,9 +40881,24 @@ var awsusgovPartition = partition{ }, "databrew": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "databrew.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew.us-gov-west-1.amazonaws.com", + }, }, }, "datasync": service{ @@ -29189,6 +40941,31 @@ var awsusgovPartition = partition{ }, }, }, + "datazone": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "datazone.us-gov-east-1.api.aws", + }, + endpointKey{ 
+ Region: "us-gov-west-1", + }: endpoint{ + Hostname: "datazone.us-gov-west-1.api.aws", + }, + }, + }, "directconnect": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -29214,9 +40991,39 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dlm.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "dlm.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dlm.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "dlm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "dms": service{ @@ -29306,6 +41113,46 @@ var awsusgovPartition = partition{ }, }, }, + "drs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "drs-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "drs-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "ds": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -29541,6 +41388,31 @@ var awsusgovPartition = partition{ }, }, }, + "eks-auth": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "eks-auth.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "eks-auth.us-gov-west-1.api.aws", + }, + }, + }, "elasticache": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -29593,6 +41465,24 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -29601,6 +41491,24 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "elasticfilesystem": service{ @@ -29725,6 +41633,12 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, @@ -29736,6 +41650,13 @@ var awsusgovPartition = partition{ }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-west-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, @@ -29747,6 +41668,15 @@ var awsusgovPartition = partition{ }, "email": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "email-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ @@ -29756,6 +41686,15 @@ var awsusgovPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -29767,6 +41706,86 @@ var awsusgovPartition = partition{ }, }, }, + "emr-containers": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "emr-containers.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "emr-containers.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers.us-gov-west-1.amazonaws.com", + }, + }, + }, + "emr-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "emr-serverless.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "emr-serverless.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless.us-gov-west-1.amazonaws.com", + }, + }, + 
}, "es": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -29781,6 +41800,12 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, @@ -29799,6 +41824,12 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, @@ -30035,24 +42066,68 @@ var awsusgovPartition = partition{ }, }, }, + "geo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "geo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "geo-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "glacier": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "us-gov-east-1", + Region: "fips-us-gov-east-1", }: endpoint{ Hostname: "glacier.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "glacier.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier.us-gov-east-1.amazonaws.com", }, endpointKey{ Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, }: endpoint{ Hostname: "glacier.us-gov-west-1.amazonaws.com", Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, }, }, }, @@ -30079,21 +42154,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "glue.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "glue-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "glue.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "glue-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-west-1.api.aws", + }, }, }, "greengrass": service{ @@ -30123,36 +42222,38 @@ var awsusgovPartition = partition{ endpointKey{ Region: "fips-us-gov-east-1", }: endpoint{ - Hostname: "greengrass-fips.us-gov-east-1.amazonaws.com", + Hostname: "greengrass.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, Deprecated: 
boxedTrue, }, endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "greengrass.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "greengrass-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", + Region: "fips-us-gov-west-1", }: endpoint{ Hostname: "greengrass.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass.us-gov-west-1.amazonaws.com", }, }, }, @@ -30209,7 +42310,21 @@ var awsusgovPartition = partition{ }, }, "health": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "health.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "global.health.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ @@ -30346,6 +42461,28 @@ var awsusgovPartition = partition{ }, }, }, + "ingest.timestream": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "ingest.timestream.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, "inspector": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -30386,36 +42523,90 @@ var awsusgovPartition = partition{ }, }, }, - "iot": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - }, + "inspector2": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "fips-us-gov-east-1", }: endpoint{ - Hostname: "iot-fips.us-gov-east-1.amazonaws.com", + Hostname: "inspector2-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Service: "execute-api", + Region: "us-gov-east-1", }, Deprecated: boxedTrue, }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ - Hostname: "iot-fips.us-gov-west-1.amazonaws.com", + Hostname: "inspector2-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Service: "execute-api", + Region: "us-gov-west-1", }, Deprecated: boxedTrue, }, endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "internetmonitor": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + 
defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "internetmonitor.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "internetmonitor.us-gov-west-1.api.aws", + }, + }, + }, + "iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "iot-fips.us-gov-east-1.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "iot-fips.us-gov-west-1.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, @@ -30555,14 +42746,114 @@ var awsusgovPartition = partition{ }, }, }, + "iottwinmaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "api-us-gov-west-1", + }: endpoint{ + Hostname: "api.iottwinmaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "data-us-gov-west-1", + }: endpoint{ + Hostname: "data.iottwinmaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-api-us-gov-west-1", + }: endpoint{ + Hostname: "api.iottwinmaker-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-data-us-gov-west-1", + }: endpoint{ + Hostname: "data.iottwinmaker-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "iottwinmaker-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iottwinmaker-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "kafka": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "us-gov-east-1", - }: endpoint{}, + }: endpoint{ + Hostname: "kafka.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "kafka.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", - }: endpoint{}, + }: endpoint{ + Hostname: "kafka.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "kafka.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "kendra": service{ @@ -30587,8 +42878,51 @@ var 
awsusgovPartition = partition{ }, }, }, + "kendra-ranking": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "kendra-ranking.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "kendra-ranking.us-gov-west-1.api.aws", + }, + }, + }, "kinesis": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "kinesis.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "kinesis.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-east-1", }: endpoint{ @@ -30597,6 +42931,15 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -30605,6 +42948,15 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "kinesisanalytics": service{ @@ -30617,6 +42969,62 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "kinesisvideo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "kms": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -30689,21 +43097,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lakeformation.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ 
Hostname: "lakeformation-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lakeformation.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-west-1.api.aws", + }, }, }, "lambda": service{ @@ -30798,6 +43230,26 @@ var awsusgovPartition = partition{ }, }, }, + "license-manager-linux-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "license-manager-user-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "logs": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -30838,6 +43290,36 @@ var awsusgovPartition = partition{ }, }, }, + "m2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{}, + }, + }, "managedblockchain": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -30848,12 +43330,22 @@ var awsusgovPartition = partition{ "mediaconvert": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "us-gov-west-1", + Region: "fips-us-gov-west-1", }: endpoint{ Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", }, }, }, @@ -30914,6 +43406,63 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "mgn": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "mgn-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "mgn-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + 
Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "models-v2-lex": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "models.lex": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -31111,6 +43660,24 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "aws-us-gov-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "networkmanager.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-aws-us-gov-global", + }: endpoint{ + Hostname: "networkmanager.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "oidc": service{ @@ -31168,32 +43735,136 @@ var awsusgovPartition = partition{ "outposts": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "us-gov-east-1", + Region: "fips-us-gov-east-1", }: endpoint{ Hostname: "outposts.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "us-gov-west-1", + Region: "fips-us-gov-west-1", }: endpoint{ Hostname: "outposts.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts.us-gov-west-1.amazonaws.com", }, }, }, "participant.connect": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "us-gov-west-1", + Region: "fips-us-gov-west-1", }: endpoint{ Hostname: "participant.connect.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "participant.connect.us-gov-west-1.amazonaws.com", + }, + }, + }, + "pi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "pi-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "pi-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-gov-east-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-east-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + 
Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-gov-west-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-west-1.api.aws", + Protocols: []string{"https"}, }, }, }, @@ -31276,6 +43947,31 @@ var awsusgovPartition = partition{ }, }, }, + "qbusiness": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "qbusiness.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "qbusiness.us-gov-west-1.api.aws", + }, + }, + }, "quicksight": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -31296,6 +43992,24 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "ram.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -31304,6 +44018,24 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "ram.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "rbin": service{ @@ -31481,6 +44213,46 @@ var awsusgovPartition = partition{ }, }, }, + "resiliencehub": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "resource-groups": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -31536,6 +44308,46 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "rolesanywhere": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-east-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "route53": service{ PartitionEndpoint: "aws-us-gov-global", IsRegionalized: boxedFalse, @@ -31573,6 +44385,39 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "route53resolver.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "route53resolver.us-gov-east-1.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "route53resolver.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "route53resolver.us-gov-west-1.amazonaws.com", + + Deprecated: boxedTrue, + }, + }, + }, + "runtime-v2-lex": service{ + Endpoints: serviceEndpoints{ endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -31625,6 +44470,9 @@ var awsusgovPartition = partition{ }, }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -31867,17 +44715,33 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, }, }, "secretsmanager": service{ @@ -31885,37 +44749,43 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, - }: endpoint{ - Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", - }, + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-gov-east-1-fips", }: endpoint{ - Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, + Deprecated: boxedTrue, }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, - }: endpoint{ - Hostname: 
"secretsmanager-fips.us-gov-west-1.amazonaws.com", - }, + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-gov-west-1-fips", }: endpoint{ - Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, + Deprecated: boxedTrue, }, }, @@ -31960,6 +44830,46 @@ var awsusgovPartition = partition{ }, }, }, + "securitylake": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securitylake.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "securitylake.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securitylake.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "securitylake.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, "serverlessrepo": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -31969,21 +44879,45 @@ var awsusgovPartition = partition{ Endpoints: serviceEndpoints{ endpointKey{ Region: "us-gov-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, }: endpoint{ Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, endpointKey{ Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, }: endpoint{ Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, @@ -32077,12 +45011,24 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1-fips", }: endpoint{ @@ -32095,12 +45041,24 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: 
fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1-fips", }: endpoint{ @@ -32163,12 +45121,12 @@ var awsusgovPartition = partition{ }, }, }, - "sms": service{ + "signer": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "fips-us-gov-east-1", }: endpoint{ - Hostname: "sms-fips.us-gov-east-1.amazonaws.com", + Hostname: "signer-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, @@ -32177,7 +45135,79 @@ var awsusgovPartition = partition{ endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ - Hostname: "sms-fips.us-gov-west-1.amazonaws.com", + Hostname: "signer-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-verification-us-gov-east-1", + }: endpoint{ + Hostname: "verification.signer-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "fips-verification-us-gov-west-1", + }: endpoint{ + Hostname: "verification.signer-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "verification-us-gov-east-1", + }: endpoint{ + Hostname: "verification.signer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "verification-us-gov-west-1", + }: endpoint{ + Hostname: "verification.signer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "simspaceweaver": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "simspaceweaver.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "simspaceweaver.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, @@ -32190,7 +45220,29 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "sms-fips.us-gov-east-1.amazonaws.com", + Hostname: "simspaceweaver.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "simspaceweaver.us-gov-west-1.amazonaws.com", + }, + }, + }, + "sms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "sms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, }, endpointKey{ Region: "us-gov-west-1", @@ -32205,9 +45257,42 @@ var awsusgovPartition = partition{ }, "sms-voice": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-west-1.amazonaws.com", + }, }, }, "snowball": service{ @@ -32282,14 +45367,14 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-west-1", }: endpoint{ - Protocols: []string{"http", "https"}, + Protocols: []string{"https"}, }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "sns.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, + Protocols: []string{"https"}, }, }, }, @@ -32381,6 +45466,24 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sso.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "sso.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -32389,6 +45492,24 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sso.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "sso.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "states": service{ @@ -32633,6 +45754,24 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "swf.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -32641,6 +45780,24 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "swf.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "synthetics": service{ @@ -32716,21 +45873,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-gov-east-1.api.aws", + }, endpointKey{ Region: 
"us-gov-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-gov-west-1.api.aws", + }, }, }, "transcribe": service{ @@ -32861,6 +46042,46 @@ var awsusgovPartition = partition{ }, }, }, + "verifiedpermissions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "waf-regional": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -32985,6 +46206,15 @@ var awsusgovPartition = partition{ }, "workspaces": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "workspaces-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ @@ -32994,6 +46224,15 @@ var awsusgovPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workspaces-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -33107,6 +46346,20 @@ var awsisoPartition = partition{ }, }, }, + "api.pricing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, "api.sagemaker": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -33156,6 +46409,23 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "athena": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, "autoscaling": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -33168,6 +46438,16 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "cloudcontrolapi": service{ 
+ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, "cloudformation": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -33227,6 +46507,46 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "datasync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "datasync-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "datasync-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-iso-west-1.c2s.ic.gov", + }, + }, + }, "directconnect": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -33237,6 +46557,16 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, "dms": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -33339,6 +46669,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "ec2": service{ @@ -33371,6 +46704,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "elasticache": service{ @@ -33394,6 +46730,15 @@ var awsisoPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-iso-east-1", }: endpoint{}, @@ -33403,6 +46748,15 @@ var awsisoPartition = partition{ }: endpoint{ Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov", }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-iso-west-1.c2s.ic.gov", + }, }, }, "elasticloadbalancing": service{ @@ -33419,14 +46773,45 @@ var awsisoPartition = partition{ }, "elasticmapreduce": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-iso-east-1", }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov", + Protocols: []string{"https"}, + }, endpointKey{ 
Region: "us-iso-west-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov", + }, }, }, "es": service{ @@ -33454,6 +46839,58 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "fsx": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-prod-us-iso-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-iso-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov", + }, }, }, "glacier": service{ @@ -33468,6 +46905,26 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "glue": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, "health": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -33563,6 +47020,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "logs": service{ @@ -33589,6 +47049,13 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, "monitoring": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -33616,17 +47083,125 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, - "rds": service{ + "rbin": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "rbin-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov", + }, endpointKey{ Region: "us-iso-west-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-iso-west-1.c2s.ic.gov", + }, + }, + }, + "rds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "rds.us-iso-east-1", + }: endpoint{ + Hostname: 
"rds.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-iso-west-1", + }: endpoint{ + Hostname: "rds.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-east-1-fips", + }: endpoint{ + Hostname: "rds.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds.us-iso-west-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1-fips", + }: endpoint{ + Hostname: "rds.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "redshift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Hostname: "redshift.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{ + Hostname: "redshift.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + }, + }, + "resource-groups": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "us-iso-east-1", @@ -33655,6 +47230,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "runtime.sagemaker": service{ @@ -33671,15 +47249,186 @@ var awsisoPartition = partition{ }, }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-iso-east-1", }: endpoint{ Protocols: []string{"http", "https"}, SignatureVersions: []string{"s3v4"}, }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-iso-east-1.c2s.ic.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, endpointKey{ Region: "us-iso-west-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-iso-west-1.c2s.ic.gov", + }, + }, + }, + "s3-control": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + 
Hostname: "s3-control.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-east-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{ + Hostname: "s3-control.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "s3-outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{}, }, }, "secretsmanager": service{ @@ -33687,6 +47436,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "snowball": service{ @@ -33694,6 +47446,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "sns": service{ @@ -33801,6 +47556,16 @@ var awsisoPartition = partition{ }, }, "tagging": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "textract": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "us-iso-east-1", @@ -33843,6 +47608,9 @@ var 
awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, }, @@ -33896,6 +47664,27 @@ var awsisobPartition = partition{ }, }, }, + "api.pricing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "api.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "appconfig": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -33922,6 +47711,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "autoscaling": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -33934,6 +47730,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "cloudcontrolapi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "cloudformation": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -33969,6 +47772,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "dms": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -34123,9 +47933,24 @@ var awsisobPartition = partition{ }, "elasticmapreduce": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-isob-east-1", }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov", + }, }, }, "es": service{ @@ -34142,6 +47967,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "firehose": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "glacier": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -34229,6 +48061,20 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "medialive": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "mediapackage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "metering.marketplace": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -34243,6 +48089,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "monitoring": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -34250,6 +48103,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "ram": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -34257,18 +48117,69 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, - "rds": service{ + "rbin": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: 
"rbin-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-isob-east-1", }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-isob-east-1.sc2s.sgov.gov", + }, + }, + }, + "rds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "rds.us-isob-east-1", + }: endpoint{ + Hostname: "rds.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "rds.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, }, }, "redshift": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "us-isob-east-1", - }: endpoint{}, + }: endpoint{ + Hostname: "redshift.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, }, }, "resource-groups": service{ @@ -34299,6 +48210,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "runtime.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "s3": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -34306,6 +48224,110 @@ var awsisobPartition = partition{ SignatureVersions: []string{"s3v4"}, }, }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-isob-east-1.sc2s.sgov.gov", + }, + }, + }, + "s3-control": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{ + Hostname: "s3-control.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + endpointKey{ + Region: "us-isob-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + endpointKey{ + Region: 
"us-isob-east-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "s3-outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{}, + }, + }, + "secretsmanager": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "us-isob-east-1", @@ -34358,6 +48380,37 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "storagegateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, "streams.dynamodb": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -34423,3 +48476,75 @@ var awsisobPartition = partition{ }, }, } + +// AwsIsoEPartition returns the Resolver for AWS ISOE (Europe). +func AwsIsoEPartition() Partition { + return awsisoePartition.Partition() +} + +var awsisoePartition = partition{ + ID: "aws-iso-e", + Name: "AWS ISOE (Europe)", + DNSSuffix: "cloud.adc-e.uk", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^eu\\-isoe\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{ + "eu-isoe-west-1": region{ + Description: "EU ISOE West", + }, + }, + Services: services{}, +} + +// AwsIsoFPartition returns the Resolver for AWS ISOF. 
+func AwsIsoFPartition() Partition { + return awsisofPartition.Partition() +} + +var awsisofPartition = partition{ + ID: "aws-iso-f", + Name: "AWS ISOF", + DNSSuffix: "csp.hci.ic.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-isof\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{}, + Services: services{}, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/aws/session/BUILD.bazel index cd24c347..bb295719 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/BUILD.bazel +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/BUILD.bazel @@ -31,6 +31,7 @@ go_library( "//vendor/github.com/aws/aws-sdk-go/aws/request", "//vendor/github.com/aws/aws-sdk-go/internal/ini", "//vendor/github.com/aws/aws-sdk-go/internal/shareddefaults", + "//vendor/github.com/aws/aws-sdk-go/service/ssooidc", "//vendor/github.com/aws/aws-sdk-go/service/sts", ], ) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go index 1d3f4c3a..ea8e3537 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -14,6 +14,7 @@ import ( "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/shareddefaults" + "github.com/aws/aws-sdk-go/service/ssooidc" "github.com/aws/aws-sdk-go/service/sts" ) @@ -23,6 +24,10 @@ type CredentialsProviderOptions struct { // WebIdentityRoleProviderOptions configures a WebIdentityRoleProvider, // such as setting its ExpiryWindow. WebIdentityRoleProviderOptions func(*stscreds.WebIdentityRoleProvider) + + // ProcessProviderOptions configures a ProcessProvider, + // such as setting its Timeout. + ProcessProviderOptions func(*processcreds.ProcessProvider) } func resolveCredentials(cfg *aws.Config, @@ -33,7 +38,7 @@ func resolveCredentials(cfg *aws.Config, switch { case len(sessOpts.Profile) != 0: - // User explicitly provided an Profile in the session's configuration + // User explicitly provided a Profile in the session's configuration // so load that profile from shared config first. // Github(aws/aws-sdk-go#2727) return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) @@ -134,7 +139,11 @@ func resolveCredsFromProfile(cfg *aws.Config, case len(sharedCfg.CredentialProcess) != 0: // Get credentials from CredentialProcess - creds = processcreds.NewCredentials(sharedCfg.CredentialProcess) + var optFns []func(*processcreds.ProcessProvider) + if sessOpts.CredentialsProviderOptions != nil && sessOpts.CredentialsProviderOptions.ProcessProviderOptions != nil { + optFns = append(optFns, sessOpts.CredentialsProviderOptions.ProcessProviderOptions) + } + creds = processcreds.NewCredentials(sharedCfg.CredentialProcess, optFns...) 
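// ---------------------------------------------------------------------------
// Illustrative sketch: wiring the ProcessProviderOptions hook introduced in
// the hunk above. A minimal, hedged example — the profile name and the
// 2-minute timeout are placeholders, and it assumes a shared config profile
// that sets credential_process.
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials/processcreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSessionWithOptions(session.Options{
		Profile:           "process-profile", // placeholder profile name
		SharedConfigState: session.SharedConfigEnable,
		CredentialsProviderOptions: &session.CredentialsProviderOptions{
			// Give a slow credential_process more time than the default.
			ProcessProviderOptions: func(p *processcreds.ProcessProvider) {
				p.Timeout = 2 * time.Minute
			},
		},
	}))
	creds, err := sess.Config.Credentials.Get()
	fmt.Println(creds.ProviderName, err)
}
// ---------------------------------------------------------------------------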
default: // Fallback to default credentials provider, include mock errors for @@ -173,8 +182,28 @@ func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers req return nil, err } + var optFns []func(provider *ssocreds.Provider) cfgCopy := cfg.Copy() - cfgCopy.Region = &sharedCfg.SSORegion + + if sharedCfg.SSOSession != nil { + cfgCopy.Region = &sharedCfg.SSOSession.SSORegion + cachedPath, err := ssocreds.StandardCachedTokenFilepath(sharedCfg.SSOSession.Name) + if err != nil { + return nil, err + } + // create oidcClient with AnonymousCredentials to avoid recursively resolving credentials + mySession := Must(NewSession(&aws.Config{ + Credentials: credentials.AnonymousCredentials, + })) + oidcClient := ssooidc.New(mySession, cfgCopy) + tokenProvider := ssocreds.NewSSOTokenProvider(oidcClient, cachedPath) + optFns = append(optFns, func(p *ssocreds.Provider) { + p.TokenProvider = tokenProvider + p.CachedTokenFilepath = cachedPath + }) + } else { + cfgCopy.Region = &sharedCfg.SSORegion + } return ssocreds.NewCredentials( &Session{ @@ -184,6 +213,7 @@ func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers req sharedCfg.SSOAccountID, sharedCfg.SSORoleName, sharedCfg.SSOStartURL, + optFns..., ), nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index d6fa2477..93bb5de6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -171,6 +171,12 @@ type envConfig struct { // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6 EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState + // Specifies that IMDS clients should not fallback to IMDSv1 if token + // requests fail. + // + // AWS_EC2_METADATA_V1_DISABLED=true + EC2IMDSv1Disabled *bool + // Specifies that SDK clients must resolve a dual-stack endpoint for // services. 
// @@ -251,6 +257,9 @@ var ( ec2IMDSEndpointModeEnvKey = []string{ "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE", } + ec2MetadataV1DisabledEnvKey = []string{ + "AWS_EC2_METADATA_V1_DISABLED", + } useCABundleKey = []string{ "AWS_CA_BUNDLE", } @@ -393,6 +402,7 @@ func envConfigLoad(enableSharedConfig bool) (envConfig, error) { if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, ec2IMDSEndpointModeEnvKey); err != nil { return envConfig{}, err } + setBoolPtrFromEnvVal(&cfg.EC2IMDSv1Disabled, ec2MetadataV1DisabledEnvKey) if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, awsUseDualStackEndpoint); err != nil { return cfg, err @@ -414,6 +424,24 @@ func setFromEnvVal(dst *string, keys []string) { } } +func setBoolPtrFromEnvVal(dst **bool, keys []string) { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + + switch { + case strings.EqualFold(value, "false"): + *dst = new(bool) + **dst = false + case strings.EqualFold(value, "true"): + *dst = new(bool) + **dst = true + } + } +} + func setEC2IMDSEndpointMode(mode *endpoints.EC2IMDSEndpointModeState, keys []string) error { for _, k := range keys { value := os.Getenv(k) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 4293dbe1..3c88dee5 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -37,7 +37,7 @@ const ( // ErrSharedConfigSourceCollision will be returned if a section contains both // source_profile and credential_source -var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token, or sso", nil) +var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token", nil) // ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment // variables are empty and Environment was set as the credential source @@ -174,7 +174,6 @@ const ( // Options provides the means to control how a Session is created and what // configuration values will be loaded. -// type Options struct { // Provides config values for the SDK to use when creating service clients // and making API requests to services. Any value set in with this field @@ -224,7 +223,7 @@ type Options struct { // from stdin for the MFA token code. // // This field is only used if the shared configuration is enabled, and - // the config enables assume role wit MFA via the mfa_serial field. + // the config enables assume role with MFA via the mfa_serial field. AssumeRoleTokenProvider func() (string, error) // When the SDK's shared config is configured to assume a role this option @@ -322,24 +321,24 @@ type Options struct { // credentials file. Enabling the Shared Config will also allow the Session // to be built with retrieving credentials with AssumeRole set in the config. 
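// ---------------------------------------------------------------------------
// Illustrative sketch: the effect of the new AWS_EC2_METADATA_V1_DISABLED
// environment variable parsed above. Per the mergeConfigSrcs hunk that
// follows, the value is mapped onto aws.Config.EC2MetadataEnableFallback
// with inverted polarity.
package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	os.Setenv("AWS_EC2_METADATA_V1_DISABLED", "true")
	sess := session.Must(session.NewSession())
	// EC2MetadataEnableFallback resolves to false: the IMDS client must use
	// IMDSv2 tokens and will not fall back to IMDSv1 when a token request fails.
	fmt.Println(aws.BoolValue(sess.Config.EC2MetadataEnableFallback)) // false
}
// ---------------------------------------------------------------------------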
// -// // Equivalent to session.New -// sess := session.Must(session.NewSessionWithOptions(session.Options{})) +// // Equivalent to session.New +// sess := session.Must(session.NewSessionWithOptions(session.Options{})) // -// // Specify profile to load for the session's config -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// Profile: "profile_name", -// })) +// // Specify profile to load for the session's config +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Profile: "profile_name", +// })) // -// // Specify profile for config and region for requests -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// Config: aws.Config{Region: aws.String("us-east-1")}, -// Profile: "profile_name", -// })) +// // Specify profile for config and region for requests +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Config: aws.Config{Region: aws.String("us-east-1")}, +// Profile: "profile_name", +// })) // -// // Force enable Shared Config support -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// SharedConfigState: session.SharedConfigEnable, -// })) +// // Force enable Shared Config support +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// SharedConfigState: session.SharedConfigEnable, +// })) func NewSessionWithOptions(opts Options) (*Session, error) { var envCfg envConfig var err error @@ -375,7 +374,7 @@ func NewSessionWithOptions(opts Options) (*Session, error) { // This helper is intended to be used in variable initialization to load the // Session and configuration at startup. Such as: // -// var sess = session.Must(session.NewSession()) +// var sess = session.Must(session.NewSession()) func Must(sess *Session, err error) *Session { if err != nil { panic(err) @@ -780,14 +779,12 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint, endpointMode) } - // Configure credentials if not already set by the user when creating the - // Session. - if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { - creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) - if err != nil { - return err - } - cfg.Credentials = creds + cfg.EC2MetadataEnableFallback = userCfg.EC2MetadataEnableFallback + if cfg.EC2MetadataEnableFallback == nil && envCfg.EC2IMDSv1Disabled != nil { + cfg.EC2MetadataEnableFallback = aws.Bool(!*envCfg.EC2IMDSv1Disabled) + } + if cfg.EC2MetadataEnableFallback == nil && sharedCfg.EC2IMDSv1Disabled != nil { + cfg.EC2MetadataEnableFallback = aws.Bool(!*sharedCfg.EC2IMDSv1Disabled) } cfg.S3UseARNRegion = userCfg.S3UseARNRegion @@ -812,6 +809,17 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, } } + // Configure credentials if not already set by the user when creating the Session. + // Credentials are resolved last such that all _resolved_ config values are propagated to credential providers. + // ticket: P83606045 + if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { + creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) + if err != nil { + return err + } + cfg.Credentials = creds + } + return nil } @@ -845,8 +853,8 @@ func initHandlers(s *Session) { // and handlers. If any additional configs are provided they will be merged // on top of the Session's copied config. // -// // Create a copy of the current Session, configured for the us-west-2 region. 
-// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) +// // Create a copy of the current Session, configured for the us-west-2 region. +// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) func (s *Session) Copy(cfgs ...*aws.Config) *Session { newSession := &Session{ Config: s.Config.Copy(cfgs...), diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index 424c82b4..f3ce8183 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -26,6 +26,13 @@ const ( roleSessionNameKey = `role_session_name` // optional roleDurationSecondsKey = "duration_seconds" // optional + // Prefix to be used for SSO sections. These are supposed to only exist in + // the shared config file, not the credentials file. + ssoSectionPrefix = `sso-session ` + + // AWS Single Sign-On (AWS SSO) group + ssoSessionNameKey = "sso_session" + // AWS Single Sign-On (AWS SSO) group ssoAccountIDKey = "sso_account_id" ssoRegionKey = "sso_region" @@ -73,6 +80,9 @@ const ( // EC2 IMDS Endpoint ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint" + // EC2 IMDSv1 disable fallback + ec2MetadataV1DisabledKey = "ec2_metadata_v1_disabled" + // Use DualStack Endpoint Resolution useDualStackEndpoint = "use_dualstack_endpoint" @@ -99,6 +109,10 @@ type sharedConfig struct { CredentialProcess string WebIdentityTokenFile string + // SSO session options + SSOSessionName string + SSOSession *ssoSession + SSOAccountID string SSORegion string SSORoleName string @@ -168,6 +182,12 @@ type sharedConfig struct { // ec2_metadata_service_endpoint=http://fd00:ec2::254 EC2IMDSEndpoint string + // Specifies that IMDS clients should not fallback to IMDSv1 if token + // requests fail. + // + // ec2_metadata_v1_disabled=true + EC2IMDSv1Disabled *bool + // Specifies that SDK clients must resolve a dual-stack endpoint for // services. // @@ -186,6 +206,20 @@ type sharedConfigFile struct { IniData ini.Sections } +// SSOSession provides the shared configuration parameters of the sso-session +// section. +type ssoSession struct { + Name string + SSORegion string + SSOStartURL string +} + +func (s *ssoSession) setFromIniSection(section ini.Section) { + updateString(&s.Name, section, ssoSessionNameKey) + updateString(&s.SSORegion, section, ssoRegionKey) + updateString(&s.SSOStartURL, section, ssoStartURL) +} + // loadSharedConfig retrieves the configuration from the list of files using // the profile provided. The order the files are listed will determine // precedence. Values in subsequent files will overwrite values defined in @@ -266,13 +300,13 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s // profile only have credential provider options. cfg.clearAssumeRoleOptions() } else { - // First time a profile has been seen, It must either be a assume role - // credentials, or SSO. Assert if the credential type requires a role ARN, - // the ARN is also set, or validate that the SSO configuration is complete. + // First time a profile has been seen.
Assert if the credential type + // requires a role ARN, the ARN is also set if err := cfg.validateCredentialsConfig(profile); err != nil { return err } } + profiles[profile] = struct{}{} if err := cfg.validateCredentialType(); err != nil { @@ -308,6 +342,30 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s cfg.SourceProfile = srcCfg } + // If the profile contains an SSO session parameter, the session MUST exist + // as a section in the config file. Load the SSO session using the name + // provided. If the session section is not found or incomplete an error + // will be returned. + if cfg.hasSSOTokenProviderConfiguration() { + skippedFiles = 0 + for _, f := range files { + section, ok := f.IniData.GetSection(fmt.Sprintf(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName))) + if ok { + var ssoSession ssoSession + ssoSession.setFromIniSection(section) + ssoSession.Name = cfg.SSOSessionName + cfg.SSOSession = &ssoSession + break + } + skippedFiles++ + } + if skippedFiles == len(files) { + // If all files were skipped because the sso session section is not found, return + // the sso section not found error. + return fmt.Errorf("failed to find SSO session section, %v", cfg.SSOSessionName) + } + } + return nil } @@ -340,8 +398,15 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e updateString(&cfg.Region, section, regionKey) updateString(&cfg.CustomCABundle, section, customCABundleKey) + // we're retaining a behavioral quirk with this field that existed before + // the removal of literal parsing for (aws-sdk-go-v2/#2276): + // - if the key is missing, the config field will not be set + // - if the key is set to a non-numeric, the config field will be set to 0 if section.Has(roleDurationSecondsKey) { - d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second + var d time.Duration + if v, ok := section.Int(roleDurationSecondsKey); ok { + d = time.Duration(v) * time.Second + } cfg.AssumeRoleDuration = &d } @@ -363,6 +428,10 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e cfg.S3UsEast1RegionalEndpoint = sre } + // AWS Single Sign-On (AWS SSO) + // SSO session options + updateString(&cfg.SSOSessionName, section, ssoSessionNameKey) + // AWS Single Sign-On (AWS SSO) updateString(&cfg.SSOAccountID, section, ssoAccountIDKey) updateString(&cfg.SSORegion, section, ssoRegionKey) @@ -374,6 +443,7 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e ec2MetadataServiceEndpointModeKey, file.Filename, err) } updateString(&cfg.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey) + updateBoolPtr(&cfg.EC2IMDSv1Disabled, section, ec2MetadataV1DisabledKey) updateUseDualStackEndpoint(&cfg.UseDualStackEndpoint, section, useDualStackEndpoint) @@ -461,32 +531,20 @@ func (cfg *sharedConfig) validateCredentialType() error { } func (cfg *sharedConfig) validateSSOConfiguration() error { - if !cfg.hasSSOConfiguration() { + if cfg.hasSSOTokenProviderConfiguration() { + err := cfg.validateSSOTokenProviderConfiguration() + if err != nil { + return err + } return nil } - var missing []string - if len(cfg.SSOAccountID) == 0 { - missing = append(missing, ssoAccountIDKey) + if cfg.hasLegacySSOConfiguration() { + err := cfg.validateLegacySSOConfiguration() + if err != nil { + return err + } } - - if len(cfg.SSORegion) == 0 { - missing = append(missing, ssoRegionKey) - } - - if len(cfg.SSORoleName) == 0 { - missing = append(missing, ssoRoleNameKey) - } - - if len(cfg.SSOStartURL) == 
0 { - missing = append(missing, ssoStartURL) - } - - if len(missing) > 0 { - return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", - cfg.Profile, strings.Join(missing, ", ")) - } - return nil } @@ -525,15 +583,76 @@ func (cfg *sharedConfig) clearAssumeRoleOptions() { } func (cfg *sharedConfig) hasSSOConfiguration() bool { - switch { - case len(cfg.SSOAccountID) != 0: - case len(cfg.SSORegion) != 0: - case len(cfg.SSORoleName) != 0: - case len(cfg.SSOStartURL) != 0: - default: - return false + return cfg.hasSSOTokenProviderConfiguration() || cfg.hasLegacySSOConfiguration() +} + +func (c *sharedConfig) hasSSOTokenProviderConfiguration() bool { + return len(c.SSOSessionName) > 0 +} + +func (c *sharedConfig) hasLegacySSOConfiguration() bool { + return len(c.SSORegion) > 0 || len(c.SSOAccountID) > 0 || len(c.SSOStartURL) > 0 || len(c.SSORoleName) > 0 +} + +func (c *sharedConfig) validateSSOTokenProviderConfiguration() error { + var missing []string + + if len(c.SSOSessionName) == 0 { + missing = append(missing, ssoSessionNameKey) } - return true + + if c.SSOSession == nil { + missing = append(missing, ssoSectionPrefix) + } else { + if len(c.SSOSession.SSORegion) == 0 { + missing = append(missing, ssoRegionKey) + } + + if len(c.SSOSession.SSOStartURL) == 0 { + missing = append(missing, ssoStartURL) + } + } + + if len(missing) > 0 { + return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", + c.Profile, strings.Join(missing, ", ")) + } + + if len(c.SSORegion) > 0 && c.SSORegion != c.SSOSession.SSORegion { + return fmt.Errorf("%s in profile %q must match %s in %s", ssoRegionKey, c.Profile, ssoRegionKey, ssoSectionPrefix) + } + + if len(c.SSOStartURL) > 0 && c.SSOStartURL != c.SSOSession.SSOStartURL { + return fmt.Errorf("%s in profile %q must match %s in %s", ssoStartURL, c.Profile, ssoStartURL, ssoSectionPrefix) + } + + return nil +} + +func (c *sharedConfig) validateLegacySSOConfiguration() error { + var missing []string + + if len(c.SSORegion) == 0 { + missing = append(missing, ssoRegionKey) + } + + if len(c.SSOStartURL) == 0 { + missing = append(missing, ssoStartURL) + } + + if len(c.SSOAccountID) == 0 { + missing = append(missing, ssoAccountIDKey) + } + + if len(c.SSORoleName) == 0 { + missing = append(missing, ssoRoleNameKey) + } + + if len(missing) > 0 { + return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", + c.Profile, strings.Join(missing, ", ")) + } + return nil } func oneOrNone(bs ...bool) bool { @@ -566,7 +685,10 @@ func updateBool(dst *bool, section ini.Section, key string) { if !section.Has(key) { return } - *dst = section.Bool(key) + + // retains pre-(aws-sdk-go-v2#2276) behavior where non-bool value would resolve to false + v, _ := section.Bool(key) + *dst = v } // updateBoolPtr will only update the dst with the value in the section key, @@ -575,8 +697,11 @@ func updateBoolPtr(dst **bool, section ini.Section, key string) { if !section.Has(key) { return } + + // retains pre-(aws-sdk-go-v2#2276) behavior where non-bool value would resolve to false + v, _ := section.Bool(key) *dst = new(bool) - **dst = section.Bool(key) + **dst = v } // SharedConfigLoadError is an error for the shared config file failed to load. 
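// ---------------------------------------------------------------------------
// Illustrative sketch: the shared-config shape the sso-session handling
// above expects. Profile name, account ID, role, and start URL are
// hypothetical values.
//
//	[profile dev]
//	sso_session = my-sso
//	sso_account_id = 123456789012
//	sso_role_name = ReadOnly
//	region = us-east-1
//
//	[sso-session my-sso]
//	sso_region = us-east-1
//	sso_start_url = https://my-sso-portal.awsapps.com/start
//
// A profile that instead sets sso_account_id, sso_region, sso_role_name, and
// sso_start_url directly (no sso_session key) takes the legacy path and is
// checked by validateLegacySSOConfiguration instead.
// ---------------------------------------------------------------------------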
@@ -703,7 +828,8 @@ func updateUseDualStackEndpoint(dst *endpoints.DualStackEndpointState, section i return } - if section.Bool(key) { + // retains pre-(aws-sdk-go-v2/#2276) behavior where non-bool value would resolve to false + if v, _ := section.Bool(key); v { *dst = endpoints.DualStackEndpointStateEnabled } else { *dst = endpoints.DualStackEndpointStateDisabled @@ -719,7 +845,8 @@ func updateUseFIPSEndpoint(dst *endpoints.FIPSEndpointState, section ini.Section return } - if section.Bool(key) { + // retains pre-(aws-sdk-go-v2/#2276) behavior where non-bool value would resolve to false + if v, _ := section.Bool(key); v { *dst = endpoints.FIPSEndpointStateEnabled } else { *dst = endpoints.FIPSEndpointStateDisabled diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index 4d78162c..b542df93 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -3,21 +3,21 @@ // Provides request signing for requests that need to be signed with // AWS V4 Signatures. // -// Standalone Signer +// # Standalone Signer // // Generally using the signer outside of the SDK should not require any additional // logic when using Go v1.5 or higher. The signer does this by taking advantage // of the URL.EscapedPath method. If your request URI requires additional escaping -// you many need to use the URL.Opaque to define what the raw URI should be sent +// you may need to use the URL.Opaque to define what the raw URI should be sent // to the service as. // // The signer will first check the URL.Opaque field, and use its value if set. // The signer does require the URL.Opaque field to be set in the form of: // -// "//<hostname>/<path>" +// "//<hostname>/<path>" // -// // e.g. -// "//example.com/some/path" +// // e.g. +// "//example.com/some/path" // // The leading "//" and hostname are required or the URL.Opaque escaping will // not work correctly.
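// ---------------------------------------------------------------------------
// Illustrative sketch: standalone SigV4 signing with URL.Opaque, following
// the doc comment above. Credentials, service name, and region are
// placeholders.
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
	v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
	req, _ := http.NewRequest("GET", "https://example.com/some/path", nil)
	// "//<hostname>/<path>": the signer sends this raw URI verbatim.
	req.URL.Opaque = "//example.com/some/path"

	signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))
	if _, err := signer.Sign(req, nil, "execute-api", "us-east-1", time.Now()); err != nil {
		fmt.Println("sign error:", err)
		return
	}
	// The Authorization header is now populated on req.
	fmt.Println(req.Header.Get("Authorization") != "")
}
// ---------------------------------------------------------------------------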
@@ -125,6 +125,7 @@ var requiredSignedHeaders = rules{ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Expected-Bucket-Owner": struct{}{}, "X-Amz-Grant-Full-control": struct{}{}, "X-Amz-Grant-Read": struct{}{}, "X-Amz-Grant-Read-Acp": struct{}{}, @@ -135,6 +136,7 @@ var requiredSignedHeaders = rules{ "X-Amz-Request-Payer": struct{}{}, "X-Amz-Server-Side-Encryption": struct{}{}, "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Context": struct{}{}, "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, @@ -695,7 +697,8 @@ func (ctx *signingCtx) buildBodyDigest() error { includeSHA256Header := ctx.unsignedPayload || ctx.ServiceName == "s3" || ctx.ServiceName == "s3-object-lambda" || - ctx.ServiceName == "glacier" + ctx.ServiceName == "glacier" || + ctx.ServiceName == "s3-outposts" s3Presign := ctx.isPresign && (ctx.ServiceName == "s3" || diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 88348fb1..b2040b05 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.129" +const SDKVersion = "1.54.19" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go index 34a481af..b1b68608 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go @@ -154,11 +154,11 @@ func (v ValueType) String() string { // ValueType enums const ( NoneType = ValueType(iota) - DecimalType - IntegerType + DecimalType // deprecated + IntegerType // deprecated StringType QuotedStringType - BoolType + BoolType // deprecated ) // Value is a union container @@ -166,9 +166,9 @@ type Value struct { Type ValueType raw []rune - integer int64 - decimal float64 - boolean bool + integer int64 // deprecated + decimal float64 // deprecated + boolean bool // deprecated str string } @@ -253,24 +253,6 @@ func newLitToken(b []rune) (Token, int, error) { } token = newToken(TokenLit, b[:n], QuotedStringType) - } else if isNumberValue(b) { - var base int - base, n, err = getNumericalValue(b) - if err != nil { - return token, 0, err - } - - value := b[:n] - vType := IntegerType - if contains(value, '.') || hasExponent(value) { - vType = DecimalType - } - token = newToken(TokenLit, value, vType) - token.base = base - } else if isBoolValue(b) { - n, err = getBoolValue(b) - - token = newToken(TokenLit, b[:n], BoolType) } else { n, err = getValue(b) token = newToken(TokenLit, b[:n], StringType) @@ -280,18 +262,33 @@ func newLitToken(b []rune) (Token, int, error) { } // IntValue returns an integer value -func (v Value) IntValue() int64 { - return v.integer +func (v Value) IntValue() (int64, bool) { + i, err := strconv.ParseInt(string(v.raw), 0, 64) + if err != nil { + return 0, false + } + return i, true } // FloatValue returns a float value -func (v Value) FloatValue() float64 { - return v.decimal +func (v Value) FloatValue() (float64, bool) { + f, err := strconv.ParseFloat(string(v.raw), 64) + 
if err != nil { + return 0, false + } + return f, true } // BoolValue returns a bool value -func (v Value) BoolValue() bool { - return v.boolean +func (v Value) BoolValue() (bool, bool) { + // we don't use ParseBool as it recognizes more than what we've + // historically supported + if isCaselessLitValue(runesTrue, v.raw) { + return true, true + } else if isCaselessLitValue(runesFalse, v.raw) { + return false, true + } + return false, false } func isTrimmable(r rune) bool { diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go index 081cf433..1d08e138 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go @@ -145,17 +145,17 @@ func (t Section) ValueType(k string) (ValueType, bool) { } // Bool returns a bool value at k -func (t Section) Bool(k string) bool { +func (t Section) Bool(k string) (bool, bool) { return t.values[k].BoolValue() } // Int returns an integer value at k -func (t Section) Int(k string) int64 { +func (t Section) Int(k string) (int64, bool) { return t.values[k].IntValue() } // Float64 returns a float value at k -func (t Section) Float64(k string) float64 { +func (t Section) Float64(k string) (float64, bool) { return t.values[k].FloatValue() } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go index c0c52e2d..9c1ccde5 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go @@ -13,17 +13,46 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" ) +const ( + awsQueryError = "x-amzn-query-error" + // A valid header example - "x-amzn-query-error": "<qualified error code>;<retryable>" + awsQueryErrorPartsCount = 2 +) + // UnmarshalTypedError provides unmarshaling of API response errors // for both typed and untyped errors. type UnmarshalTypedError struct { - exceptions map[string]func(protocol.ResponseMetadata) error + exceptions map[string]func(protocol.ResponseMetadata) error + queryExceptions map[string]func(protocol.ResponseMetadata, string) error } // NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the // set of exception names to the error unmarshalers func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError { return &UnmarshalTypedError{ - exceptions: exceptions, + exceptions: exceptions, + queryExceptions: map[string]func(protocol.ResponseMetadata, string) error{}, + } +} + +// NewUnmarshalTypedErrorWithOptions works similarly to NewUnmarshalTypedError, applying options to the UnmarshalTypedError +// before returning it +func NewUnmarshalTypedErrorWithOptions(exceptions map[string]func(protocol.ResponseMetadata) error, optFns ...func(*UnmarshalTypedError)) *UnmarshalTypedError { + unmarshaledError := NewUnmarshalTypedError(exceptions) + for _, fn := range optFns { + fn(unmarshaledError) + } + return unmarshaledError +} + +// WithQueryCompatibility is a helper function to construct a functional option for use with NewUnmarshalTypedErrorWithOptions. +// The queryExceptions given act as an override for unmarshalling errors when query compatible error codes are found.
+// See also [awsQueryCompatible trait] +// +// [awsQueryCompatible trait]: https://smithy.io/2.0/aws/protocols/aws-query-protocol.html#aws-protocols-awsquerycompatible-trait +func WithQueryCompatibility(queryExceptions map[string]func(protocol.ResponseMetadata, string) error) func(*UnmarshalTypedError) { + return func(typedError *UnmarshalTypedError) { + typedError.queryExceptions = queryExceptions } } @@ -50,18 +79,32 @@ func (u *UnmarshalTypedError) UnmarshalError( code := codeParts[len(codeParts)-1] msg := jsonErr.Message + queryCodeParts := queryCodeParts(resp, u) + if fn, ok := u.exceptions[code]; ok { - // If exception code is know, use associated constructor to get a value + // If query-compatible exceptions are found and query-error-header is found, + // then use associated constructor to get exception with query error code. + // + // If exception code is known, use associated constructor to get a value // for the exception that the JSON body can be unmarshaled into. - v := fn(respMeta) + var v error + queryErrFn, queryExceptionsFound := u.queryExceptions[code] + if len(queryCodeParts) == awsQueryErrorPartsCount && queryExceptionsFound { + v = queryErrFn(respMeta, queryCodeParts[0]) + } else { + v = fn(respMeta) + } err := jsonutil.UnmarshalJSONCaseInsensitive(v, body) if err != nil { return nil, err } - return v, nil } + if len(queryCodeParts) == awsQueryErrorPartsCount && len(u.queryExceptions) > 0 { + code = queryCodeParts[0] + } + // fallback to unmodeled generic exceptions return awserr.NewRequestFailure( awserr.New(code, msg, nil), @@ -70,6 +113,16 @@ func (u *UnmarshalTypedError) UnmarshalError( ), nil } +// A valid header example - "x-amzn-query-error": "<error code>;<error type>" +func queryCodeParts(resp *http.Response, u *UnmarshalTypedError) []string { + queryCodeHeader := resp.Header.Get(awsQueryError) + var queryCodeParts []string + if queryCodeHeader != "" && len(u.queryExceptions) > 0 { + queryCodeParts = strings.Split(queryCodeHeader, ";") + } + return queryCodeParts +} + // UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc // protocol request errors var UnmarshalErrorHandler = request.NamedHandler{ diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go index 05833405..2ca0b19d 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -122,8 +122,8 @@ func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix stri } func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - // If it's empty, generate an empty value - if !value.IsNil() && value.Len() == 0 { + // If it's empty, and not ec2, generate an empty value + if !value.IsNil() && value.Len() == 0 && !q.isEC2 { v.Set(prefix, "") return nil } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go index 1d273ff0..ecc521f8 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -287,6 +287,10 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) if tag.Get("location") != "header" || tag.Get("enum") == "" { return "", fmt.Errorf("%T is only supported with location header and enum shapes", value) } +
if len(value) == 0 { + return "", errValueNotSet + } + buff := &bytes.Buffer{} for i, sv := range value { if sv == nil || len(*sv) == 0 { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go index d756d8cc..5366a646 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go @@ -2,6 +2,7 @@ package restjson import ( "bytes" + "encoding/json" "io" "io/ioutil" "net/http" @@ -40,52 +41,30 @@ func (u *UnmarshalTypedError) UnmarshalError( resp *http.Response, respMeta protocol.ResponseMetadata, ) (error, error) { - - code := resp.Header.Get(errorTypeHeader) - msg := resp.Header.Get(errorMessageHeader) - - body := resp.Body - if len(code) == 0 { - // If unable to get code from HTTP headers have to parse JSON message - // to determine what kind of exception this will be. - var buf bytes.Buffer - var jsonErr jsonErrorResponse - teeReader := io.TeeReader(resp.Body, &buf) - err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader) - if err != nil { - return nil, err - } - - body = ioutil.NopCloser(&buf) - code = jsonErr.Code - msg = jsonErr.Message + code, msg, err := unmarshalErrorInfo(resp) + if err != nil { + return nil, err } - // If code has colon separators remove them so can compare against modeled - // exception names. - code = strings.SplitN(code, ":", 2)[0] - - if fn, ok := u.exceptions[code]; ok { - // If exception code is know, use associated constructor to get a value - // for the exception that the JSON body can be unmarshaled into. - v := fn(respMeta) - if err := jsonutil.UnmarshalJSONCaseInsensitive(v, body); err != nil { - return nil, err - } - - if err := rest.UnmarshalResponse(resp, v, true); err != nil { - return nil, err - } - - return v, nil + fn, ok := u.exceptions[code] + if !ok { + return awserr.NewRequestFailure( + awserr.New(code, msg, nil), + respMeta.StatusCode, + respMeta.RequestID, + ), nil } - // fallback to unmodeled generic exceptions - return awserr.NewRequestFailure( - awserr.New(code, msg, nil), - respMeta.StatusCode, - respMeta.RequestID, - ), nil + v := fn(respMeta) + if err := jsonutil.UnmarshalJSONCaseInsensitive(v, resp.Body); err != nil { + return nil, err + } + + if err := rest.UnmarshalResponse(resp, v, true); err != nil { + return nil, err + } + + return v, nil } // UnmarshalErrorHandler is a named request handler for unmarshaling restjson @@ -99,36 +78,80 @@ var UnmarshalErrorHandler = request.NamedHandler{ func UnmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() - var jsonErr jsonErrorResponse - err := jsonutil.UnmarshalJSONError(&jsonErr, r.HTTPResponse.Body) + code, msg, err := unmarshalErrorInfo(r.HTTPResponse) if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to unmarshal response error", err), + awserr.New(request.ErrCodeSerialization, "failed to unmarshal response error", err), r.HTTPResponse.StatusCode, r.RequestID, ) return } - code := r.HTTPResponse.Header.Get(errorTypeHeader) - if code == "" { - code = jsonErr.Code - } - msg := r.HTTPResponse.Header.Get(errorMessageHeader) - if msg == "" { - msg = jsonErr.Message - } - - code = strings.SplitN(code, ":", 2)[0] r.Error = awserr.NewRequestFailure( - awserr.New(code, jsonErr.Message, nil), + awserr.New(code, msg, nil), r.HTTPResponse.StatusCode, r.RequestID, ) } type jsonErrorResponse struct { + 
Type string `json:"__type"` Code string `json:"code"` Message string `json:"message"` } + +func (j *jsonErrorResponse) SanitizedCode() string { + code := j.Code + if len(j.Type) > 0 { + code = j.Type + } + return sanitizeCode(code) +} + +// Remove superfluous components from a restJson error code. +// - If a : character is present, then take only the contents before the +// first : character in the value. +// - If a # character is present, then take only the contents after the first +// # character in the value. +// +// All of the following error values resolve to FooError: +// - FooError +// - FooError:http://internal.amazon.com/coral/com.amazon.coral.validate/ +// - aws.protocoltests.restjson#FooError +// - aws.protocoltests.restjson#FooError:http://internal.amazon.com/coral/com.amazon.coral.validate/ +func sanitizeCode(code string) string { + noColon := strings.SplitN(code, ":", 2)[0] + hashSplit := strings.SplitN(noColon, "#", 2) + return hashSplit[len(hashSplit)-1] +} + +// attempt to garner error details from the response, preferring header values +// when present +func unmarshalErrorInfo(resp *http.Response) (code string, msg string, err error) { + code = sanitizeCode(resp.Header.Get(errorTypeHeader)) + msg = resp.Header.Get(errorMessageHeader) + if len(code) > 0 && len(msg) > 0 { + return + } + + // a modeled error will have to be re-deserialized later, so the body must + // be preserved + var buf bytes.Buffer + tee := io.TeeReader(resp.Body, &buf) + defer func() { resp.Body = ioutil.NopCloser(&buf) }() + + var jsonErr jsonErrorResponse + if decodeErr := json.NewDecoder(tee).Decode(&jsonErr); decodeErr != nil && decodeErr != io.EOF { + err = awserr.NewUnmarshalError(decodeErr, "failed to decode response body", buf.Bytes()) + return + } + + if len(code) == 0 { + code = jsonErr.SanitizedCode() + } + if len(msg) == 0 { + msg = jsonErr.Message + } + return +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go index 244e075f..7286d567 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go @@ -190,10 +190,11 @@ func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) (req *request.R // // A single operation can retrieve up to 16 MB of data, which can contain as // many as 100 items. BatchGetItem returns a partial result if the response -// size limit is exceeded, the table's provisioned throughput is exceeded, or -// an internal processing failure occurs. If a partial result is returned, the -// operation returns a value for UnprocessedKeys. You can use this value to -// retry the operation starting with the next item to get. +// size limit is exceeded, the table's provisioned throughput is exceeded, more +// than 1MB per partition is requested, or an internal processing failure occurs. +// If a partial result is returned, the operation returns a value for UnprocessedKeys. +// You can use this value to retry the operation starting with the next item +// to get. // // If you request more than 100 items, BatchGetItem returns a ValidationException // with the message "Too many items requested for the BatchGetItem call." @@ -223,7 +224,8 @@ func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) (req *request.R // in the request. If you want strongly consistent reads instead, you can set // ConsistentRead to true for any or all tables. // -// In order to minimize response latency, BatchGetItem retrieves items in parallel. 
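The sanitizeCode rules introduced above are easy to sanity-check in isolation. This throwaway reimplementation (the same two SplitN calls as the vendored function) replays the four inputs from its doc comment:

package main

import (
	"fmt"
	"strings"
)

// sanitize mirrors the vendored sanitizeCode: keep what precedes the first
// ':' and what follows the first '#'.
func sanitize(code string) string {
	noColon := strings.SplitN(code, ":", 2)[0]
	hashSplit := strings.SplitN(noColon, "#", 2)
	return hashSplit[len(hashSplit)-1]
}

func main() {
	for _, c := range []string{
		"FooError",
		"FooError:http://internal.amazon.com/coral/com.amazon.coral.validate/",
		"aws.protocoltests.restjson#FooError",
		"aws.protocoltests.restjson#FooError:http://internal.amazon.com/coral/com.amazon.coral.validate/",
	} {
		fmt.Println(sanitize(c)) // all four print "FooError"
	}
}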
+// In order to minimize response latency, BatchGetItem may retrieve items in +// parallel. // // When designing your application, keep in mind that DynamoDB does not return // items in any particular order. To help parse the response by item, include @@ -413,7 +415,10 @@ func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *reque // in DynamoDB's JSON format for the API call. For more details on this distinction, // see Naming Rules and Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html). // -// BatchWriteItem cannot update items. To update items, use the UpdateItem action. +// BatchWriteItem cannot update items. If you perform a BatchWriteItem operation +// on an existing item, that item's values will be overwritten by the operation +// and it will appear like it was updated. To update items, we recommend you +// use the UpdateItem action. // // The individual PutItem and DeleteItem operations specified in BatchWriteItem // are atomic; however BatchWriteItem as a whole is not. If any requested operations @@ -479,6 +484,10 @@ func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *reque // // - The total request size exceeds 16 MB. // +// - Any individual items with keys exceeding the key length limits. For +// a partition key, the limit is 2048 bytes and for a sort key, the limit +// is 1024 bytes. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -659,17 +668,26 @@ func (c *DynamoDB) CreateBackupRequest(input *CreateBackupInput) (req *request.R // - LimitExceededException // There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 500 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 250 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. // // There is a soft account quota of 2,500 tables. // +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same +// time. Exceeding this limit may result in request throttling. +// // - InternalServerError // An error occurred on the server side. // @@ -767,8 +785,16 @@ func (c *DynamoDB) CreateGlobalTableRequest(input *CreateGlobalTableInput) (req // relationship between two or more DynamoDB tables with the same table name // in the provided Regions. 
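Since the updated BatchGetItem doc adds one more reason for partial results (the per-partition 1 MB cap), callers of this vendored client should always drain UnprocessedKeys. A sketch of that loop; the table name and key are placeholders:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// batchGetAll re-issues UnprocessedKeys with a doubling delay until the
// batch is fully served, per the partial-result contract described above.
func batchGetAll(svc *dynamodb.DynamoDB, req map[string]*dynamodb.KeysAndAttributes) ([]map[string]*dynamodb.AttributeValue, error) {
	var items []map[string]*dynamodb.AttributeValue
	for delay := 100 * time.Millisecond; len(req) > 0; delay *= 2 {
		out, err := svc.BatchGetItem(&dynamodb.BatchGetItemInput{RequestItems: req})
		if err != nil {
			return nil, err
		}
		for _, page := range out.Responses {
			items = append(items, page...)
		}
		if req = out.UnprocessedKeys; len(req) > 0 {
			time.Sleep(delay) // back off before retrying the leftovers
		}
	}
	return items, nil
}

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))
	items, err := batchGetAll(svc, map[string]*dynamodb.KeysAndAttributes{
		"ExampleTable": {Keys: []map[string]*dynamodb.AttributeValue{
			{"PK": {S: aws.String("item-1")}},
		}},
	})
	fmt.Println(len(items), err)
}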
// -// This operation only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. +// This documentation is for version 2017.11.29 (Legacy) of global tables, which +// should be avoided for new global tables. Customers should use Global Tables +// version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html) +// when possible, because it provides greater flexibility, higher efficiency, +// and consumes less write capacity than 2017.11.29 (Legacy). +// +// To determine which version you're using, see Determining the global table +// version you are using (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html). +// To update existing global tables from version 2017.11.29 (Legacy) to version +// 2019.11.21 (Current), see Upgrading global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html). // // If you want to add a new replica table to a global table, each of the following // conditions must be true: @@ -820,17 +846,26 @@ func (c *DynamoDB) CreateGlobalTableRequest(input *CreateGlobalTableInput) (req // - LimitExceededException // There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 500 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 250 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. // // There is a soft account quota of 2,500 tables. // +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same +// time. Exceeding this limit may result in request throttling. +// // - InternalServerError // An error occurred on the server side. // @@ -966,17 +1001,26 @@ func (c *DynamoDB) CreateTableRequest(input *CreateTableInput) (req *request.Req // - LimitExceededException // There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 500 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. 
// -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 250 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. // // There is a soft account quota of 2,500 tables. // +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same +// time. Exceeding this limit may result in request throttling. +// // - InternalServerError // An error occurred on the server side. // @@ -1093,17 +1137,26 @@ func (c *DynamoDB) DeleteBackupRequest(input *DeleteBackupInput) (req *request.R // - LimitExceededException // There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 500 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 250 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. // // There is a soft account quota of 2,500 tables. // +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same +// time. Exceeding this limit may result in request throttling. +// // - InternalServerError // An error occurred on the server side. // @@ -1273,6 +1326,164 @@ func (c *DynamoDB) DeleteItemWithContext(ctx aws.Context, input *DeleteItemInput return out, req.Send() } +const opDeleteResourcePolicy = "DeleteResourcePolicy" + +// DeleteResourcePolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteResourcePolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteResourcePolicy for more information on using the DeleteResourcePolicy +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteResourcePolicyRequest method. +// req, resp := client.DeleteResourcePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteResourcePolicy +func (c *DynamoDB) DeleteResourcePolicyRequest(input *DeleteResourcePolicyInput) (req *request.Request, output *DeleteResourcePolicyOutput) { + op := &request.Operation{ + Name: opDeleteResourcePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteResourcePolicyInput{} + } + + output = &DeleteResourcePolicyOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// DeleteResourcePolicy API operation for Amazon DynamoDB. +// +// Deletes the resource-based policy attached to the resource, which can be +// a table or stream. +// +// DeleteResourcePolicy is an idempotent operation; running it multiple times +// on the same resource doesn't result in an error response, unless you specify +// an ExpectedRevisionId, which will then return a PolicyNotFoundException. +// +// To make sure that you don't inadvertently lock yourself out of your own resources, +// the root principal in your Amazon Web Services account can perform DeleteResourcePolicy +// requests, even if your resource-based policy explicitly denies the root principal's +// access. +// +// DeleteResourcePolicy is an asynchronous operation. If you issue a GetResourcePolicy +// request immediately after running the DeleteResourcePolicy request, DynamoDB +// might still return the deleted policy. This is because the policy for your +// resource might not have been deleted yet. Wait for a few seconds, and then +// try the GetResourcePolicy request again. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DeleteResourcePolicy for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. +// +// - PolicyNotFoundException +// The operation tried to access a nonexistent resource-based policy. +// +// If you specified an ExpectedRevisionId, it's possible that a policy is present +// for the resource but its revision ID didn't match the expected value. 
+// +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same +// time. Exceeding this limit may result in request throttling. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteResourcePolicy +func (c *DynamoDB) DeleteResourcePolicy(input *DeleteResourcePolicyInput) (*DeleteResourcePolicyOutput, error) { + req, out := c.DeleteResourcePolicyRequest(input) + return out, req.Send() +} + +// DeleteResourcePolicyWithContext is the same as DeleteResourcePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteResourcePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DeleteResourcePolicyWithContext(ctx aws.Context, input *DeleteResourcePolicyInput, opts ...request.Option) (*DeleteResourcePolicyOutput, error) { + req, out := c.DeleteResourcePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteTable = "DeleteTable" // DeleteTableRequest generates a "aws/request.Request" representing the @@ -1348,6 +1559,9 @@ func (c *DynamoDB) DeleteTableRequest(input *DeleteTableInput) (req *request.Req // If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. // If table is already in the DELETING state, no error is returned. // +// For global tables, this operation only applies to global tables using Version +// 2019.11.21 (Current version). +// // DynamoDB might continue to accept data read and write operations, such as // GetItem and PutItem, on a table in the DELETING state until the table deletion // is complete. @@ -1381,17 +1595,26 @@ func (c *DynamoDB) DeleteTableRequest(input *DeleteTableInput) (req *request.Req // - LimitExceededException // There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 500 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. 
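The ExpectedRevisionId semantics documented for DeleteResourcePolicy above make conditional cleanup straightforward: a revision mismatch surfaces as PolicyNotFoundException instead of silently deleting a newer policy. A sketch; the ARN and revision ID are placeholders, and ErrCodePolicyNotFoundException is assumed to be the generated constant for the error type listed above:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Passing ExpectedRevisionId turns the otherwise idempotent delete into
	// a conditional one: a mismatch (or a missing policy) is reported as
	// PolicyNotFoundException rather than succeeding.
	_, err := svc.DeleteResourcePolicy(&dynamodb.DeleteResourcePolicyInput{
		ResourceArn:        aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/ExampleTable"),
		ExpectedRevisionId: aws.String("11111111111111111111111111111111"),
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == dynamodb.ErrCodePolicyNotFoundException {
		fmt.Println("no policy at that revision:", aerr.Message())
	}
}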
These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 250 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. // // There is a soft account quota of 2,500 tables. // +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same +// time. Exceeding this limit may result in request throttling. +// // - InternalServerError // An error occurred on the server side. // @@ -1690,7 +1913,7 @@ func (c *DynamoDB) DescribeContributorInsightsRequest(input *DescribeContributor // DescribeContributorInsights API operation for Amazon DynamoDB. // -// Returns information about contributor insights, for a given table or global +// Returns information about contributor insights for a given table or global // secondary index. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1774,7 +1997,8 @@ func (c *DynamoDB) DescribeEndpointsRequest(input *DescribeEndpointsInput) (req // DescribeEndpoints API operation for Amazon DynamoDB. // -// Returns the regional endpoint information. +// Returns the regional endpoint information. For more information on policy +// permissions, please see Internetwork traffic privacy (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/inter-network-traffic-privacy.html#inter-network-traffic-DescribeEndpoints). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1936,17 +2160,26 @@ func (c *DynamoDB) DescribeExportRequest(input *DescribeExportInput) (req *reque // - LimitExceededException // There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 500 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 250 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. 
+// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. // // There is a soft account quota of 2,500 tables. // +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same +// time. Exceeding this limit may result in request throttling. +// // - InternalServerError // An error occurred on the server side. // @@ -2042,10 +2275,16 @@ func (c *DynamoDB) DescribeGlobalTableRequest(input *DescribeGlobalTableInput) ( // // Returns information about the specified global table. // -// This operation only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. If you are using global tables Version 2019.11.21 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// you can use DescribeTable (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_DescribeTable.html) -// instead. +// This documentation is for version 2017.11.29 (Legacy) of global tables, which +// should be avoided for new global tables. Customers should use Global Tables +// version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html) +// when possible, because it provides greater flexibility, higher efficiency, +// and consumes less write capacity than 2017.11.29 (Legacy). +// +// To determine which version you're using, see Determining the global table +// version you are using (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html). +// To update existing global tables from version 2017.11.29 (Legacy) to version +// 2019.11.21 (Current), see Upgrading global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2154,8 +2393,16 @@ func (c *DynamoDB) DescribeGlobalTableSettingsRequest(input *DescribeGlobalTable // // Describes Region-specific settings for a global table. // -// This operation only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. +// This documentation is for version 2017.11.29 (Legacy) of global tables, which +// should be avoided for new global tables. Customers should use Global Tables +// version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html) +// when possible, because it provides greater flexibility, higher efficiency, +// and consumes less write capacity than 2017.11.29 (Legacy). +// +// To determine which version you're using, see Determining the global table +// version you are using (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html). +// To update existing global tables from version 2017.11.29 (Legacy) to version +// 2019.11.21 (Current), see Upgrading global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2614,6 +2861,9 @@ func (c *DynamoDB) DescribeTableRequest(input *DescribeTableInput) (req *request // table, when it was created, the primary key schema, and any indexes on the // table. // +// For global tables, this operation only applies to global tables using Version +// 2019.11.21 (Current version). +// // If you issue a DescribeTable request immediately after a CreateTable request, // DynamoDB might return a ResourceNotFoundException. This is because DescribeTable // uses an eventually consistent query, and the metadata for your table might @@ -2703,8 +2953,8 @@ func (c *DynamoDB) DescribeTableReplicaAutoScalingRequest(input *DescribeTableRe // // Describes auto scaling settings across replicas of the global table at once. // -// This operation only applies to Version 2019.11.21 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// of global tables. +// For global tables, this operation only applies to global tables using Version +// 2019.11.21 (Current version). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2938,17 +3188,26 @@ func (c *DynamoDB) DisableKinesisStreamingDestinationRequest(input *DisableKines // - LimitExceededException // There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 500 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 250 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. // // There is a soft account quota of 2,500 tables. // +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same +// time. Exceeding this limit may result in request throttling. +// // - ResourceInUseException // The operation conflicts with the resource's availability. For example, you // attempted to recreate an existing table, or tried to delete a table currently @@ -3068,17 +3327,26 @@ func (c *DynamoDB) EnableKinesisStreamingDestinationRequest(input *EnableKinesis // - LimitExceededException // There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 500 simultaneous table operations are allowed per account. 
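The DescribeTable note above (metadata is eventually consistent right after CreateTable) is why callers poll rather than describe immediately; the SDK ships a waiter for exactly this. A sketch with a placeholder table name:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Poll until the table exists instead of trusting an immediate
	// DescribeTable after CreateTable, per the consistency note above.
	name := aws.String("ExampleTable")
	if err := svc.WaitUntilTableExists(&dynamodb.DescribeTableInput{TableName: name}); err != nil {
		panic(err)
	}
	out, err := svc.DescribeTable(&dynamodb.DescribeTableInput{TableName: name})
	if err != nil {
		panic(err)
	}
	fmt.Println(aws.StringValue(out.Table.TableStatus))
}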
These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 250 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. // // There is a soft account quota of 2,500 tables. // +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same +// time. Exceeding this limit may result in request throttling. +// // - ResourceInUseException // The operation conflicts with the resource's availability. For example, you // attempted to recreate an existing table, or tried to delete a table currently @@ -3165,7 +3433,8 @@ func (c *DynamoDB) ExecuteStatementRequest(input *ExecuteStatementInput) (req *r // A single SELECT statement response can return up to the maximum number of // items (if using the Limit parameter) or a maximum of 1 MB of data (and then // apply any filtering to the results using WHERE clause). If LastEvaluatedKey -// is present in the response, you need to paginate the result set. +// is present in the response, you need to paginate the result set. If NextToken +// is present, you need to paginate the result set and include NextToken. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3319,6 +3588,10 @@ func (c *DynamoDB) ExecuteTransactionRequest(input *ExecuteTransactionInput) (re // // - There is a user error, such as an invalid data format. // +// - There is an ongoing TransactWriteItems operation that conflicts with +// a concurrent TransactWriteItems request. In this case the TransactWriteItems +// operation fails with a TransactionCanceledException. +// // DynamoDB cancels a TransactGetItems request under the following circumstances: // // - There is an ongoing TransactGetItems operation that conflicts with a @@ -3369,7 +3642,7 @@ func (c *DynamoDB) ExecuteTransactionRequest(input *ExecuteTransactionInput) (re // as DynamoDB is automatically scaling the table. Throughput exceeds the // current capacity for one or more global secondary indexes. DynamoDB is // automatically scaling your index so please try again shortly. This message -// is returned when when writes get throttled on an On-Demand GSI as DynamoDB +// is returned when writes get throttled on an On-Demand GSI as DynamoDB // is automatically scaling the GSI. 
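The amended ExecuteStatement doc above calls out NextToken alongside LastEvaluatedKey; for a plain PartiQL SELECT it is enough to feed NextToken back until it is absent. A sketch, with a placeholder statement and table:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Page through a PartiQL SELECT by re-sending the returned NextToken.
	input := &dynamodb.ExecuteStatementInput{
		Statement: aws.String(`SELECT * FROM "ExampleTable"`),
	}
	for {
		out, err := svc.ExecuteStatement(input)
		if err != nil {
			panic(err)
		}
		fmt.Println("page items:", len(out.Items))
		if out.NextToken == nil {
			break
		}
		input.NextToken = out.NextToken
	}
}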
// // - Validation Error: Code: ValidationError Messages: One or more parameter @@ -3387,6 +3660,47 @@ func (c *DynamoDB) ExecuteTransactionRequest(input *ExecuteTransactionInput) (re // - TransactionInProgressException // The transaction with the given request token is already in progress. // +// Recommended Settings +// +// This is a general recommendation for handling the TransactionInProgressException. +// These settings help ensure that the client retries will trigger completion +// of the ongoing TransactWriteItems request. +// +// - Set clientExecutionTimeout to a value that allows at least one retry +// to be processed after 5 seconds have elapsed since the first attempt for +// the TransactWriteItems operation. +// +// - Set socketTimeout to a value a little lower than the requestTimeout +// setting. +// +// - requestTimeout should be set based on the time taken for the individual +// retries of a single HTTP request for your use case, but setting it to +// 1 second or higher should work well to reduce chances of retries and TransactionInProgressException +// errors. +// +// - Use exponential backoff when retrying and tune backoff if needed. +// +// Assuming default retry policy (https://github.com/aws/aws-sdk-java/blob/fd409dee8ae23fb8953e0bb4dbde65536a7e0514/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L97), +// example timeout settings based on the guidelines above are as follows: +// +// Example timeline: +// +// - 0-1000 first attempt +// +// - 1000-1500 first sleep/delay (default retry policy uses 500 ms as base +// delay for 4xx errors) +// +// - 1500-2500 second attempt +// +// - 2500-3500 second sleep/delay (500 * 2, exponential backoff) +// +// - 3500-4500 third attempt +// +// - 4500-6500 third sleep/delay (500 * 2^2) +// +// - 6500-7500 fourth attempt (this can trigger inline recovery since 5 seconds +// have elapsed since the first attempt reached TC) +// // - IdempotentParameterMismatchException // DynamoDB rejected the request because you retried a request with a different // payload but with an idempotent token that was already used. @@ -3496,17 +3810,26 @@ func (c *DynamoDB) ExportTableToPointInTimeRequest(input *ExportTableToPointInTi // - LimitExceededException // There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 500 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 250 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. // // There is a soft account quota of 2,500 tables. 
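The retry guidance above is written against the Java SDK's default policy; translated to this SDK it amounts to giving the client enough attempts, at a 500 ms doubling base delay, that a retry is still in flight once the 5-second TransactWriteItems window has elapsed. A rough Go analogue under those assumptions (the numbers mirror the example timeline, not an official recommendation):

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	// Four retries at a 500 ms doubling base delay keeps attempts going past
	// the point (~5 s) where an in-progress transaction can complete.
	svc := dynamodb.New(session.Must(session.NewSession()), &aws.Config{
		Retryer: client.DefaultRetryer{
			NumMaxRetries: 4,
			MinRetryDelay: 500 * time.Millisecond,
			MaxRetryDelay: 4 * time.Second,
		},
	})
	_ = svc // use svc.TransactWriteItems(...) as usual
}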
// +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same +// time. Exceeding this limit may result in request throttling. +// // - InvalidExportTimeException // The specified ExportTime is outside of the point in time recovery window. // @@ -3666,6 +3989,147 @@ func (c *DynamoDB) GetItemWithContext(ctx aws.Context, input *GetItemInput, opts return out, req.Send() } +const opGetResourcePolicy = "GetResourcePolicy" + +// GetResourcePolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetResourcePolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetResourcePolicy for more information on using the GetResourcePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetResourcePolicyRequest method. +// req, resp := client.GetResourcePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetResourcePolicy +func (c *DynamoDB) GetResourcePolicyRequest(input *GetResourcePolicyInput) (req *request.Request, output *GetResourcePolicyOutput) { + op := &request.Operation{ + Name: opGetResourcePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetResourcePolicyInput{} + } + + output = &GetResourcePolicyOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// GetResourcePolicy API operation for Amazon DynamoDB. +// +// Returns the resource-based policy document attached to the resource, which +// can be a table or stream, in JSON format. +// +// GetResourcePolicy follows an eventually consistent (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html) +// model. The following list describes the outcomes when you issue the GetResourcePolicy +// request immediately after issuing another request: +// +// - If you issue a GetResourcePolicy request immediately after a PutResourcePolicy +// request, DynamoDB might return a PolicyNotFoundException. +// +// - If you issue a GetResourcePolicy request immediately after a DeleteResourcePolicy +// request, DynamoDB might return the policy that was present before the +// deletion request.
+// +// - If you issue a GetResourcePolicy request immediately after a CreateTable +// request, which includes a resource-based policy, DynamoDB might return +// a ResourceNotFoundException or a PolicyNotFoundException. +// +// Because GetResourcePolicy uses an eventually consistent query, the metadata +// for your policy or table might not be available at that moment. Wait for +// a few seconds, and then retry the GetResourcePolicy request. +// +// After a GetResourcePolicy request returns a policy created using the PutResourcePolicy +// request, the policy will be applied in the authorization of requests to the +// resource. Because this process is eventually consistent, it will take some +// time to apply the policy to all requests to a resource. Policies that you +// attach while creating a table using the CreateTable request will always be +// applied to all requests for that table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation GetResourcePolicy for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. +// +// - PolicyNotFoundException +// The operation tried to access a nonexistent resource-based policy. +// +// If you specified an ExpectedRevisionId, it's possible that a policy is present +// for the resource but its revision ID didn't match the expected value. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetResourcePolicy +func (c *DynamoDB) GetResourcePolicy(input *GetResourcePolicyInput) (*GetResourcePolicyOutput, error) { + req, out := c.GetResourcePolicyRequest(input) + return out, req.Send() +} + +// GetResourcePolicyWithContext is the same as GetResourcePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetResourcePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) GetResourcePolicyWithContext(ctx aws.Context, input *GetResourcePolicyInput, opts ...request.Option) (*GetResourcePolicyOutput, error) { + req, out := c.GetResourcePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opImportTable = "ImportTable" // ImportTableRequest generates a "aws/request.Request" representing the @@ -3728,17 +4192,26 @@ func (c *DynamoDB) ImportTableRequest(input *ImportTableInput) (req *request.Req // - LimitExceededException // There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 500 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. 
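Given the eventual-consistency outcomes listed above, a reader of a freshly written policy should tolerate a transient PolicyNotFoundException. A sketch of that retry; the ARN is a placeholder and ErrCodePolicyNotFoundException is assumed to be the generated constant for the documented error:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// getPolicyEventually retries GetResourcePolicy a few times because the call
// may briefly miss a policy written moments earlier, per the doc above.
func getPolicyEventually(svc *dynamodb.DynamoDB, arn string) (string, error) {
	for attempt := 0; ; attempt++ {
		out, err := svc.GetResourcePolicy(&dynamodb.GetResourcePolicyInput{
			ResourceArn: aws.String(arn),
		})
		if err == nil {
			return aws.StringValue(out.Policy), nil
		}
		aerr, ok := err.(awserr.Error)
		if !ok || aerr.Code() != dynamodb.ErrCodePolicyNotFoundException || attempt == 4 {
			return "", err
		}
		time.Sleep(2 * time.Second) // "wait a few seconds, and then retry"
	}
}

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))
	policy, err := getPolicyEventually(svc, "arn:aws:dynamodb:us-east-1:123456789012:table/ExampleTable")
	fmt.Println(policy, err)
}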
These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 250 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. // // There is a soft account quota of 2,500 tables. // +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same +// time. Exceeding this limit may result in request throttling. +// // - ImportConflictException // There was a conflict when importing from the specified S3 source. This can // occur when the current import conflicts with a previous import request that @@ -3834,9 +4307,10 @@ func (c *DynamoDB) ListBackupsRequest(input *ListBackupsInput) (req *request.Req // ListBackups API operation for Amazon DynamoDB. // -// List backups associated with an Amazon Web Services account. To list backups -// for a given table, specify TableName. ListBackups returns a paginated list -// of results with at most 1 MB worth of items in a page. You can also specify +// List DynamoDB backups that are associated with an Amazon Web Services account +// and weren't made with Amazon Web Services Backup. To list these backups for +// a given table, specify TableName. ListBackups returns a paginated list of +// results with at most 1 MB worth of items in a page. You can also specify // a maximum number of entries to be returned in a page. // // In the request, start time is inclusive, but end time is exclusive. Note @@ -3844,6 +4318,9 @@ func (c *DynamoDB) ListBackupsRequest(input *ListBackupsInput) (req *request.Req // // You can call ListBackups a maximum of five times per second. // +// If you want to retrieve the complete list of backups made with Amazon Web +// Services Backup, use the Amazon Web Services Backup list API. (https://docs.aws.amazon.com/aws-backup/latest/devguide/API_ListBackupJobs.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -4081,17 +4558,26 @@ func (c *DynamoDB) ListExportsRequest(input *ListExportsInput) (req *request.Req // - LimitExceededException // There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 500 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. // -// The only exception is when you are creating a table with one or more secondary -// indexes. 
You can have up to 250 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. // // There is a soft account quota of 2,500 tables. // +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same +// time. Exceeding this limit may result in request throttling. +// // - InternalServerError // An error occurred on the server side. // @@ -4238,8 +4724,16 @@ func (c *DynamoDB) ListGlobalTablesRequest(input *ListGlobalTablesInput) (req *r // // Lists all global tables that have a replica in the specified Region. // -// This operation only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. +// This documentation is for version 2017.11.29 (Legacy) of global tables, which +// should be avoided for new global tables. Customers should use Global Tables +// version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html) +// when possible, because it provides greater flexibility, higher efficiency, +// and consumes less write capacity than 2017.11.29 (Legacy). +// +// To determine which version you're using, see Determining the global table +// version you are using (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html). +// To update existing global tables from version 2017.11.29 (Legacy) to version +// 2019.11.21 (Current), see Upgrading global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4337,17 +4831,26 @@ func (c *DynamoDB) ListImportsRequest(input *ListImportsInput) (req *request.Req // - LimitExceededException // There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 500 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 250 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. 
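The ListBackups doc above (inclusive lower bound, exclusive upper bound, pages of at most 1 MB) maps onto the usual LastEvaluatedBackupArn pagination loop. A sketch; the table name and time window are placeholders:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// List the last week of backups for one table. TimeRangeLowerBound is
	// inclusive and TimeRangeUpperBound is exclusive, per the doc above.
	input := &dynamodb.ListBackupsInput{
		TableName:           aws.String("ExampleTable"),
		TimeRangeLowerBound: aws.Time(time.Now().AddDate(0, 0, -7)),
		TimeRangeUpperBound: aws.Time(time.Now()),
	}
	for {
		out, err := svc.ListBackups(input)
		if err != nil {
			panic(err)
		}
		for _, b := range out.BackupSummaries {
			fmt.Println(aws.StringValue(b.BackupName))
		}
		if out.LastEvaluatedBackupArn == nil {
			break
		}
		input.ExclusiveStartBackupArn = out.LastEvaluatedBackupArn
	}
}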
However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. // // There is a soft account quota of 2,500 tables. // +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same +// time. Exceeding this limit may result in request throttling. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListImports func (c *DynamoDB) ListImports(input *ListImportsInput) (*ListImportsOutput, error) { req, out := c.ListImportsRequest(input) @@ -4772,7 +5275,6 @@ func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, ou // using the ReturnValues parameter. // // When you add an item, the primary key attributes are the only required attributes. -// Attribute values cannot be null. // // Empty String and Binary attribute values are allowed. Attribute values of // type String and Binary must have a length greater than zero if the attribute @@ -4852,6 +5354,162 @@ func (c *DynamoDB) PutItemWithContext(ctx aws.Context, input *PutItemInput, opts return out, req.Send() } +const opPutResourcePolicy = "PutResourcePolicy" + +// PutResourcePolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutResourcePolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutResourcePolicy for more information on using the PutResourcePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutResourcePolicyRequest method. +// req, resp := client.PutResourcePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutResourcePolicy +func (c *DynamoDB) PutResourcePolicyRequest(input *PutResourcePolicyInput) (req *request.Request, output *PutResourcePolicyOutput) { + op := &request.Operation{ + Name: opPutResourcePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutResourcePolicyInput{} + } + + output = &PutResourcePolicyOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// PutResourcePolicy API operation for Amazon DynamoDB. 
+// +// Attaches a resource-based policy document to the resource, which can be a +// table or stream. When you attach a resource-based policy using this API, +// the policy application is eventually consistent (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html). +// +// PutResourcePolicy is an idempotent operation; running it multiple times on +// the same resource using the same policy document will return the same revision +// ID. If you specify an ExpectedRevisionId that doesn't match the current policy's +// RevisionId, the PolicyNotFoundException will be returned. +// +// PutResourcePolicy is an asynchronous operation. If you issue a GetResourcePolicy +// request immediately after a PutResourcePolicy request, DynamoDB might return +// your previous policy, if there was one, or return the PolicyNotFoundException. +// This is because GetResourcePolicy uses an eventually consistent query, and +// the metadata for your policy or table might not be available at that moment. +// Wait for a few seconds, and then try the GetResourcePolicy request again. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation PutResourcePolicy for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same +// time. Exceeding this limit may result in request throttling. +// +// - PolicyNotFoundException +// The operation tried to access a nonexistent resource-based policy. +// +// If you specified an ExpectedRevisionId, it's possible that a policy is present +// for the resource but its revision ID didn't match the expected value. +// +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. 
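// A minimal usage sketch (assumes an initialized *dynamodb.DynamoDB client;
// the table ARN and policy document are hypothetical):
//
//	out, err := client.PutResourcePolicy(&dynamodb.PutResourcePolicyInput{
//	    ResourceArn: aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/Example"),
//	    Policy:      aws.String(policyJSON),
//	})
//	if err == nil {
//	    // Repeating the call with the same document yields the same revision ID.
//	    fmt.Println(aws.StringValue(out.RevisionId))
//	}
//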
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutResourcePolicy +func (c *DynamoDB) PutResourcePolicy(input *PutResourcePolicyInput) (*PutResourcePolicyOutput, error) { + req, out := c.PutResourcePolicyRequest(input) + return out, req.Send() +} + +// PutResourcePolicyWithContext is the same as PutResourcePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutResourcePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) PutResourcePolicyWithContext(ctx aws.Context, input *PutResourcePolicyInput, opts ...request.Option) (*PutResourcePolicyOutput, error) { + req, out := c.PutResourcePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opQuery = "Query" // QueryRequest generates a "aws/request.Request" representing the @@ -5147,7 +5805,7 @@ func (c *DynamoDB) RestoreTableFromBackupRequest(input *RestoreTableFromBackupIn // RestoreTableFromBackup API operation for Amazon DynamoDB. // // Creates a new table from an existing backup. Any number of users can execute -// up to 4 concurrent restores (any type of restore) in a given account. +// up to 50 concurrent restores (any type of restore) in a given account. // // You can call RestoreTableFromBackup at a maximum rate of 10 times per second. // @@ -5190,17 +5848,26 @@ func (c *DynamoDB) RestoreTableFromBackupRequest(input *RestoreTableFromBackupIn // - LimitExceededException // There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 500 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 250 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. // // There is a soft account quota of 2,500 tables. // +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same +// time. Exceeding this limit may result in request throttling. +// // - InternalServerError // An error occurred on the server side. 
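// A restore needs only the source backup ARN and a new table name; the
// 50-concurrent-restore quota above is shared across all restore types. A
// sketch with hypothetical identifiers:
//
//	_, err := client.RestoreTableFromBackup(&dynamodb.RestoreTableFromBackupInput{
//	    BackupArn:       aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/Example/backup/01234567890123-abcdef12"),
//	    TargetTableName: aws.String("Example-restored"),
//	})
//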
// @@ -5296,7 +5963,7 @@ func (c *DynamoDB) RestoreTableToPointInTimeRequest(input *RestoreTableToPointIn // // Restores the specified table to the specified point in time within EarliestRestorableDateTime // and LatestRestorableDateTime. You can restore your table to any point in -// time during the last 35 days. Any number of users can execute up to 4 concurrent +// time during the last 35 days. Any number of users can execute up to 50 concurrent // restores (any type of restore) in a given account. // // When you restore using point in time recovery, DynamoDB restores your table @@ -5354,17 +6021,26 @@ func (c *DynamoDB) RestoreTableToPointInTimeRequest(input *RestoreTableToPointIn // - LimitExceededException // There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 500 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 250 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. // // There is a soft account quota of 2,500 tables. // +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same +// time. Exceeding this limit may result in request throttling. +// // - InvalidRestoreTimeException // An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime // and LatestRestorableDateTime. @@ -5475,17 +6151,24 @@ func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output * // every item in a table or a secondary index. To have DynamoDB return fewer // items, you can provide a FilterExpression operation. // -// If the total number of scanned items exceeds the maximum dataset size limit -// of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey -// value to continue the scan in a subsequent operation. The results also include -// the number of items exceeding the limit. A scan can result in no table data -// meeting the filter criteria. +// If the total size of scanned items exceeds the maximum dataset size limit +// of 1 MB, the scan completes and results are returned to the user. The LastEvaluatedKey +// value is also returned and the requestor can use the LastEvaluatedKey to +// continue the scan in a subsequent operation. Each scan response also includes +// number of items that were scanned (ScannedCount) as part of the request. +// If using a FilterExpression, a scan result can result in no items meeting +// the criteria and the Count will result in zero. 
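// The pagination loop described above, as a sketch (table name hypothetical;
// error handling elided):
//
//	input := &dynamodb.ScanInput{
//	    TableName:      aws.String("Example"),
//	    ConsistentRead: aws.Bool(true), // per-item strong consistency, not snapshot isolation
//	}
//	for {
//	    page, err := client.Scan(input)
//	    if err != nil {
//	        break
//	    }
//	    // page.Count is the post-filter count; page.ScannedCount is pre-filter.
//	    if len(page.LastEvaluatedKey) == 0 {
//	        break // the full scan is complete
//	    }
//	    input.ExclusiveStartKey = page.LastEvaluatedKey
//	}
//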
If you did not use a FilterExpression +// in the scan request, then Count is the same as ScannedCount. // -// A single Scan operation reads up to the maximum number of items set (if using -// the Limit parameter) or a maximum of 1 MB of data and then apply any filtering -// to the results using FilterExpression. If LastEvaluatedKey is present in -// the response, you need to paginate the result set. For more information, -// see Paginating the Results (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination) +// Count and ScannedCount only return the count of items specific to a single +// scan request and, unless the table is less than 1MB, do not represent the +// total number of items in the table. +// +// A single Scan operation first reads up to the maximum number of items set +// (if using the Limit parameter) or a maximum of 1 MB of data and then applies +// any filtering to the results if a FilterExpression is provided. If LastEvaluatedKey +// is present in the response, pagination is required to complete the full table +// scan. For more information, see Paginating the Results (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination) // in the Amazon DynamoDB Developer Guide. // // Scan operations proceed sequentially; however, for faster performance on @@ -5494,11 +6177,18 @@ func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output * // information, see Parallel Scan (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.ParallelScan) // in the Amazon DynamoDB Developer Guide. // -// Scan uses eventually consistent reads when accessing the data in a table; -// therefore, the result set might not include the changes to data in the table -// immediately before the operation began. If you need a consistent copy of -// the data, as of the time that the Scan begins, you can set the ConsistentRead -// parameter to true. +// By default, a Scan uses eventually consistent reads when accessing the items +// in a table. Therefore, the results from an eventually consistent Scan may +// not include the latest item changes at the time the scan iterates through +// each item in the table. If you require a strongly consistent read of each +// item as the scan iterates through the items in the table, you can set the +// ConsistentRead parameter to true. Strong consistency only relates to the +// consistency of the read at the item level. +// +// DynamoDB does not provide snapshot isolation for a scan operation when the +// ConsistentRead parameter is set to true. Thus, a DynamoDB scan operation +// does not guarantee that all reads in a scan see a consistent snapshot of +// the table when the scan operation was requested. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5691,17 +6381,26 @@ func (c *DynamoDB) TagResourceRequest(input *TagResourceInput) (req *request.Req // - LimitExceededException // There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 500 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. 
These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 250 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. // // There is a soft account quota of 2,500 tables. // +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same +// time. Exceeding this limit may result in request throttling. +// // - ResourceNotFoundException // The operation tried to access a nonexistent table or index. The resource // might not be specified correctly, or its status might not be ACTIVE. @@ -5824,7 +6523,7 @@ func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *r // // - There is a user error, such as an invalid data format. // -// - The aggregate size of the items in the transaction cannot exceed 4 MB. +// - The aggregate size of the items in the transaction exceeded 4 MB. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5861,6 +6560,10 @@ func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *r // // - There is a user error, such as an invalid data format. // +// - There is an ongoing TransactWriteItems operation that conflicts with +// a concurrent TransactWriteItems request. In this case the TransactWriteItems +// operation fails with a TransactionCanceledException. +// // DynamoDB cancels a TransactGetItems request under the following circumstances: // // - There is an ongoing TransactGetItems operation that conflicts with a @@ -5911,7 +6614,7 @@ func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *r // as DynamoDB is automatically scaling the table. Throughput exceeds the // current capacity for one or more global secondary indexes. DynamoDB is // automatically scaling your index so please try again shortly. This message -// is returned when when writes get throttled on an On-Demand GSI as DynamoDB +// is returned when writes get throttled on an On-Demand GSI as DynamoDB // is automatically scaling the GSI. // // - Validation Error: Code: ValidationError Messages: One or more parameter @@ -6122,6 +6825,10 @@ func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (re // // - There is a user error, such as an invalid data format. // +// - There is an ongoing TransactWriteItems operation that conflicts with +// a concurrent TransactWriteItems request. In this case the TransactWriteItems +// operation fails with a TransactionCanceledException. 
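// Because a retried TransactWriteItems call can conflict with its own
// still-running predecessor, supplying a ClientRequestToken makes retries of
// the same transaction idempotent (a sketch; the token and items are
// hypothetical):
//
//	_, err := client.TransactWriteItems(&dynamodb.TransactWriteItemsInput{
//	    ClientRequestToken: aws.String("order-42-create"),
//	    TransactItems:      items,
//	})
//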
+// // DynamoDB cancels a TransactGetItems request under the following circumstances: // // - There is an ongoing TransactGetItems operation that conflicts with a @@ -6172,7 +6879,7 @@ func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (re // as DynamoDB is automatically scaling the table. Throughput exceeds the // current capacity for one or more global secondary indexes. DynamoDB is // automatically scaling your index so please try again shortly. This message -// is returned when when writes get throttled on an On-Demand GSI as DynamoDB +// is returned when writes get throttled on an On-Demand GSI as DynamoDB // is automatically scaling the GSI. // // - Validation Error: Code: ValidationError Messages: One or more parameter @@ -6190,6 +6897,47 @@ func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (re // - TransactionInProgressException // The transaction with the given request token is already in progress. // +// Recommended Settings +// +// This is a general recommendation for handling the TransactionInProgressException. +// These settings help ensure that the client retries will trigger completion +// of the ongoing TransactWriteItems request. +// +// - Set clientExecutionTimeout to a value that allows at least one retry +// to be processed after 5 seconds have elapsed since the first attempt for +// the TransactWriteItems operation. +// +// - Set socketTimeout to a value a little lower than the requestTimeout +// setting. +// +// - requestTimeout should be set based on the time taken for the individual +// retries of a single HTTP request for your use case, but setting it to +// 1 second or higher should work well to reduce chances of retries and TransactionInProgressException +// errors. +// +// - Use exponential backoff when retrying and tune backoff if needed. +// +// Assuming default retry policy (https://github.com/aws/aws-sdk-java/blob/fd409dee8ae23fb8953e0bb4dbde65536a7e0514/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L97), +// example timeout settings based on the guidelines above are as follows: +// +// Example timeline: +// +// - 0-1000 first attempt +// +// - 1000-1500 first sleep/delay (default retry policy uses 500 ms as base +// delay for 4xx errors) +// +// - 1500-2500 second attempt +// +// - 2500-3500 second sleep/delay (500 * 2, exponential backoff) +// +// - 3500-4500 third attempt +// +// - 4500-6500 third sleep/delay (500 * 2^2) +// +// - 6500-7500 fourth attempt (this can trigger inline recovery since 5 seconds +// have elapsed since the first attempt reached TC) +// // - IdempotentParameterMismatchException // DynamoDB rejected the request because you retried a request with a different // payload but with an idempotent token that was already used. @@ -6319,17 +7067,26 @@ func (c *DynamoDB) UntagResourceRequest(input *UntagResourceInput) (req *request // - LimitExceededException // There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 500 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. 
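// The setting names in the retry recommendations above come from the Java SDK;
// a rough Go analogue, with illustrative values and the request input elided,
// might look like this:
//
//	httpClient := &http.Client{Timeout: 2 * time.Second} // per-attempt cap (requestTimeout analogue)
//	sess := session.Must(session.NewSession(aws.NewConfig().
//	    WithHTTPClient(httpClient).
//	    WithMaxRetries(4))) // the default retryer backs off exponentially
//	svc := dynamodb.New(sess)
//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) // overall cap (clientExecutionTimeout analogue)
//	defer cancel()
//	_, err := svc.TransactWriteItemsWithContext(ctx, input)
//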
// -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 250 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. // // There is a soft account quota of 2,500 tables. // +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same +// time. Exceeding this limit may result in request throttling. +// // - ResourceNotFoundException // The operation tried to access a nonexistent table or index. The resource // might not be specified correctly, or its status might not be ACTIVE. @@ -6650,6 +7407,23 @@ func (c *DynamoDB) UpdateGlobalTableRequest(input *UpdateGlobalTableInput) (req // schema, have DynamoDB Streams enabled, and have the same provisioned and // maximum write capacity units. // +// This documentation is for version 2017.11.29 (Legacy) of global tables, which +// should be avoided for new global tables. Customers should use Global Tables +// version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html) +// when possible, because it provides greater flexibility, higher efficiency, +// and consumes less write capacity than 2017.11.29 (Legacy). +// +// To determine which version you're using, see Determining the global table +// version you are using (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html). +// To update existing global tables from version 2017.11.29 (Legacy) to version +// 2019.11.21 (Current), see Upgrading global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html). +// +// For global tables, this operation only applies to global tables using Version +// 2019.11.21 (Current version). If you are using global tables Version 2019.11.21 +// (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html) +// you can use UpdateTable (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateTable.html) +// instead. +// // Although you can use UpdateGlobalTable to add replicas and remove replicas // in a single request, for simplicity we recommend that you issue separate // requests for adding or removing replicas. @@ -6783,6 +7557,17 @@ func (c *DynamoDB) UpdateGlobalTableSettingsRequest(input *UpdateGlobalTableSett // // Updates settings for a global table. // +// This documentation is for version 2017.11.29 (Legacy) of global tables, which +// should be avoided for new global tables. Customers should use Global Tables +// version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html) +// when possible, because it provides greater flexibility, higher efficiency, +// and consumes less write capacity than 2017.11.29 (Legacy). 
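// One way to check which version a table uses is the GlobalTableVersion field
// returned by DescribeTable (a sketch; the table name is hypothetical):
//
//	out, err := client.DescribeTable(&dynamodb.DescribeTableInput{
//	    TableName: aws.String("Example"),
//	})
//	if err == nil {
//	    fmt.Println(aws.StringValue(out.Table.GlobalTableVersion)) // e.g. "2019.11.21"
//	}
//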
+// +// To determine which version you're using, see Determining the global table +// version you are using (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html). +// To update existing global tables from version 2017.11.29 (Legacy) to version +// 2019.11.21 (Current), see Upgrading global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6804,17 +7589,26 @@ func (c *DynamoDB) UpdateGlobalTableSettingsRequest(input *UpdateGlobalTableSett // - LimitExceededException // There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 500 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 250 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. // // There is a soft account quota of 2,500 tables. // +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same +// time. Exceeding this limit may result in request throttling. +// // - ResourceInUseException // The operation conflicts with the resource's availability. For example, you // attempted to recreate an existing table, or tried to delete a table currently @@ -6983,6 +7777,142 @@ func (c *DynamoDB) UpdateItemWithContext(ctx aws.Context, input *UpdateItemInput return out, req.Send() } +const opUpdateKinesisStreamingDestination = "UpdateKinesisStreamingDestination" + +// UpdateKinesisStreamingDestinationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateKinesisStreamingDestination operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateKinesisStreamingDestination for more information on using the UpdateKinesisStreamingDestination +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateKinesisStreamingDestinationRequest method. 
+// req, resp := client.UpdateKinesisStreamingDestinationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateKinesisStreamingDestination +func (c *DynamoDB) UpdateKinesisStreamingDestinationRequest(input *UpdateKinesisStreamingDestinationInput) (req *request.Request, output *UpdateKinesisStreamingDestinationOutput) { + op := &request.Operation{ + Name: opUpdateKinesisStreamingDestination, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateKinesisStreamingDestinationInput{} + } + + output = &UpdateKinesisStreamingDestinationOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// UpdateKinesisStreamingDestination API operation for Amazon DynamoDB. +// +// The command to update the Kinesis stream destination. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UpdateKinesisStreamingDestination for usage and error information. +// +// Returned Error Types: +// +// - InternalServerError +// An error occurred on the server side. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same +// time. Exceeding this limit may result in request throttling. +// +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. 
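// A sketch of updating the destination's approximate-creation-time precision
// (the table name, stream ARN, and precision value are hypothetical):
//
//	_, err := client.UpdateKinesisStreamingDestination(&dynamodb.UpdateKinesisStreamingDestinationInput{
//	    TableName: aws.String("Example"),
//	    StreamArn: aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/example"),
//	    UpdateKinesisStreamingConfiguration: &dynamodb.UpdateKinesisStreamingConfiguration{
//	        ApproximateCreationDateTimePrecision: aws.String("MICROSECOND"),
//	    },
//	})
//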
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateKinesisStreamingDestination +func (c *DynamoDB) UpdateKinesisStreamingDestination(input *UpdateKinesisStreamingDestinationInput) (*UpdateKinesisStreamingDestinationOutput, error) { + req, out := c.UpdateKinesisStreamingDestinationRequest(input) + return out, req.Send() +} + +// UpdateKinesisStreamingDestinationWithContext is the same as UpdateKinesisStreamingDestination with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateKinesisStreamingDestination for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UpdateKinesisStreamingDestinationWithContext(ctx aws.Context, input *UpdateKinesisStreamingDestinationInput, opts ...request.Option) (*UpdateKinesisStreamingDestinationOutput, error) { + req, out := c.UpdateKinesisStreamingDestinationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateTable = "UpdateTable" // UpdateTableRequest generates a "aws/request.Request" representing the @@ -7054,6 +7984,9 @@ func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Req // Modifies the provisioned throughput settings, global secondary indexes, or // DynamoDB Streams settings for a given table. // +// For global tables, this operation only applies to global tables using Version +// 2019.11.21 (Current version). +// // You can only perform one of the following operations at once: // // - Modify the provisioned throughput settings of the table. @@ -7063,9 +7996,9 @@ func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Req // - Create a new global secondary index on the table. After the index begins // backfilling, you can use UpdateTable to perform other operations. // -// UpdateTable is an asynchronous operation; while it is executing, the table -// status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot -// issue another UpdateTable request. When the table returns to the ACTIVE state, +// UpdateTable is an asynchronous operation; while it's executing, the table +// status changes from ACTIVE to UPDATING. While it's UPDATING, you can't issue +// another UpdateTable request. When the table returns to the ACTIVE state, // the UpdateTable operation is complete. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -7089,17 +8022,26 @@ func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Req // - LimitExceededException // There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 500 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. // -// The only exception is when you are creating a table with one or more secondary -// indexes. 
You can have up to 250 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. // // There is a soft account quota of 2,500 tables. // +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same +// time. Exceeding this limit may result in request throttling. +// // - InternalServerError // An error occurred on the server side. // @@ -7170,8 +8112,8 @@ func (c *DynamoDB) UpdateTableReplicaAutoScalingRequest(input *UpdateTableReplic // // Updates auto scaling settings on your global tables at once. // -// This operation only applies to Version 2019.11.21 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// of global tables. +// For global tables, this operation only applies to global tables using Version +// 2019.11.21 (Current version). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7194,17 +8136,26 @@ func (c *DynamoDB) UpdateTableReplicaAutoScalingRequest(input *UpdateTableReplic // - LimitExceededException // There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 500 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 250 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. // // There is a soft account quota of 2,500 tables. // +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same +// time. Exceeding this limit may result in request throttling. +// // - InternalServerError // An error occurred on the server side. // @@ -7348,17 +8299,26 @@ func (c *DynamoDB) UpdateTimeToLiveRequest(input *UpdateTimeToLiveInput) (req *r // - LimitExceededException // There is no limit to the number of daily on-demand backups that can be taken. 
// -// Up to 500 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 250 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. // // There is a soft account quota of 2,500 tables. // +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same +// time. Exceeding this limit may result in request throttling. +// // - InternalServerError // An error occurred on the server side. // @@ -7442,7 +8402,7 @@ func (s *ArchivalSummary) SetArchivalReason(v string) *ArchivalSummary { return s } -// Represents an attribute for describing the key schema for the table and indexes. +// Represents an attribute for describing the schema for the table and indexes. type AttributeDefinition struct { _ struct{} `type:"structure"` @@ -8528,7 +9488,7 @@ type BackupSummary struct { BackupType *string `type:"string" enum:"BackupType"` // ARN associated with the table. - TableArn *string `type:"string"` + TableArn *string `min:"1" type:"string"` // Unique identifier for the table. TableId *string `type:"string"` @@ -8702,7 +9662,8 @@ type BatchExecuteStatementOutput struct { // are ordered according to the ordering of the statements. ConsumedCapacity []*ConsumedCapacity `type:"list"` - // The response to each PartiQL statement in the batch. + // The response to each PartiQL statement in the batch. The values of the list + // are ordered according to the ordering of the request statements. Responses []*BatchStatementResponse `type:"list"` } @@ -8740,9 +9701,9 @@ func (s *BatchExecuteStatementOutput) SetResponses(v []*BatchStatementResponse) type BatchGetItemInput struct { _ struct{} `type:"structure"` - // A map of one or more table names and, for each table, a map that describes - // one or more items to retrieve from that table. Each table name can be used - // only once per BatchGetItem request. + // A map of one or more table names or table ARNs and, for each table, a map + // that describes one or more items to retrieve from that table. Each table + // name or ARN can be used only once per BatchGetItem request. // // Each element in the map of items to retrieve consists of the following: // @@ -8876,9 +9837,9 @@ type BatchGetItemOutput struct { // * CapacityUnits - The total number of capacity units consumed. ConsumedCapacity []*ConsumedCapacity `type:"list"` - // A map of table name to a list of items. 
Each object in Responses consists - // of a table name, along with a map of attribute data consisting of the data - // type and attribute value. + // A map of table name or table ARN to a list of items. Each object in Responses + // consists of a table name or ARN, along with a map of attribute data consisting + // of the data type and attribute value. Responses map[string][]map[string]*AttributeValue `type:"map"` // A map of tables and their respective keys that were not processed with the @@ -8947,6 +9908,10 @@ type BatchStatementError struct { // The error code associated with the failed PartiQL batch statement. Code *string `type:"string" enum:"BatchStatementErrorCodeEnum"` + // The item which caused the condition check to fail. This will be set if ReturnValuesOnConditionCheckFailure + // is specified as ALL_OLD. + Item map[string]*AttributeValue `type:"map"` + // The error message associated with the PartiQL batch response. Message *string `type:"string"` } @@ -8975,6 +9940,12 @@ func (s *BatchStatementError) SetCode(v string) *BatchStatementError { return s } +// SetItem sets the Item field's value. +func (s *BatchStatementError) SetItem(v map[string]*AttributeValue) *BatchStatementError { + s.Item = v + return s +} + // SetMessage sets the Message field's value. func (s *BatchStatementError) SetMessage(v string) *BatchStatementError { s.Message = &v @@ -8991,6 +9962,14 @@ type BatchStatementRequest struct { // The parameters associated with a PartiQL statement in the batch request. Parameters []*AttributeValue `min:"1" type:"list"` + // An optional parameter that returns the item attributes for a PartiQL batch + // request operation that failed a condition check. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. + ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` + // A valid PartiQL statement. // // Statement is a required field @@ -9046,6 +10025,12 @@ func (s *BatchStatementRequest) SetParameters(v []*AttributeValue) *BatchStateme return s } +// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. +func (s *BatchStatementRequest) SetReturnValuesOnConditionCheckFailure(v string) *BatchStatementRequest { + s.ReturnValuesOnConditionCheckFailure = &v + return s +} + // SetStatement sets the Statement field's value. func (s *BatchStatementRequest) SetStatement(v string) *BatchStatementRequest { s.Statement = &v @@ -9106,9 +10091,9 @@ func (s *BatchStatementResponse) SetTableName(v string) *BatchStatementResponse type BatchWriteItemInput struct { _ struct{} `type:"structure"` - // A map of one or more table names and, for each table, a list of operations - // to be performed (DeleteRequest or PutRequest). Each element in the map consists - // of the following: + // A map of one or more table names or table ARNs and, for each table, a list + // of operations to be performed (DeleteRequest or PutRequest). Each element + // in the map consists of the following: // // * DeleteRequest - Perform a DeleteItem operation on the specified item. // The item to be deleted is identified by a Key subelement: Key - A map @@ -9240,11 +10225,11 @@ type BatchWriteItemOutput struct { // A map of tables and requests against those tables that were not processed. 
// The UnprocessedItems value is in the same form as RequestItems, so you can - // provide this value directly to a subsequent BatchGetItem operation. For more - // information, see RequestItems in the Request Parameters section. + // provide this value directly to a subsequent BatchWriteItem operation. For + // more information, see RequestItems in the Request Parameters section. // - // Each UnprocessedItems entry consists of a table name and, for that table, - // a list of operations to perform (DeleteRequest or PutRequest). + // Each UnprocessedItems entry consists of a table name or table ARN and, for + // that table, a list of operations to perform (DeleteRequest or PutRequest). // // * DeleteRequest - Perform a DeleteItem operation on the specified item. // The item to be deleted is identified by a Key subelement: Key - A map @@ -9302,7 +10287,12 @@ func (s *BatchWriteItemOutput) SetUnprocessedItems(v map[string][]*WriteRequest) return s } -// Contains the details for the read/write capacity mode. +// Contains the details for the read/write capacity mode. This page talks about +// PROVISIONED and PAY_PER_REQUEST billing modes. For more information about +// these modes, see Read/write capacity mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html). +// +// You may need to switch to on-demand mode at least once in order to return +// a BillingModeSummary response. type BillingModeSummary struct { _ struct{} `type:"structure"` @@ -9650,14 +10640,20 @@ type ConditionCheck struct { _ struct{} `type:"structure"` // A condition that must be satisfied in order for a conditional update to succeed. + // For more information, see Condition expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ConditionExpressions.html) + // in the Amazon DynamoDB Developer Guide. // // ConditionExpression is a required field ConditionExpression *string `type:"string" required:"true"` - // One or more substitution tokens for attribute names in an expression. + // One or more substitution tokens for attribute names in an expression. For + // more information, see Expression attribute names (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ExpressionAttributeNames.html) + // in the Amazon DynamoDB Developer Guide. ExpressionAttributeNames map[string]*string `type:"map"` - // One or more values that can be substituted in an expression. + // One or more values that can be substituted in an expression. For more information, + // see Condition expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ConditionExpressions.html) + // in the Amazon DynamoDB Developer Guide. ExpressionAttributeValues map[string]*AttributeValue `type:"map"` // The primary key of the item to be checked. Each element consists of an attribute @@ -9671,10 +10667,11 @@ type ConditionCheck struct { // the valid values are: NONE and ALL_OLD. ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` - // Name of the table for the check item request. + // Name of the table for the check item request. You can also provide the Amazon + // Resource Name (ARN) of the table in this parameter. // // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation. 
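// The TableName minimum-length relaxation from 3 to 1 in these structs
// accompanies ARN support: wherever a table name is accepted, the table's ARN
// may be passed instead. A sketch with a hypothetical ARN:
//
//	_, err := client.GetItem(&dynamodb.GetItemInput{
//	    TableName: aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/Example"),
//	    Key: map[string]*dynamodb.AttributeValue{
//	        "pk": {S: aws.String("item-1")},
//	    },
//	})
//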
@@ -9707,8 +10704,8 @@ func (s *ConditionCheck) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if invalidParams.Len() > 0 { @@ -9758,6 +10755,9 @@ type ConditionalCheckFailedException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // Item which caused the ConditionalCheckFailedException. + Item map[string]*AttributeValue `type:"map"` + // The conditional request failed. Message_ *string `locationName:"message" type:"string"` } @@ -9805,7 +10805,7 @@ func (s *ConditionalCheckFailedException) OrigErr() error { } func (s *ConditionalCheckFailedException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. @@ -9821,8 +10821,8 @@ func (s *ConditionalCheckFailedException) RequestID() string { // The capacity units consumed by an operation. The data returned includes the // total provisioned throughput consumed, along with statistics for the table // and any indexes involved in the operation. ConsumedCapacity is only returned -// if the request asked for it. For more information, see Provisioned Throughput -// (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) +// if the request asked for it. For more information, see Provisioned capacity +// mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html) // in the Amazon DynamoDB Developer Guide. type ConsumedCapacity struct { _ struct{} `type:"structure"` @@ -9842,8 +10842,10 @@ type ConsumedCapacity struct { // The amount of throughput consumed on the table affected by the operation. Table *Capacity `type:"structure"` - // The name of the table that was affected by the operation. - TableName *string `min:"3" type:"string"` + // The name of the table that was affected by the operation. If you had specified + // the Amazon Resource Name (ARN) of a table in the input, you'll see the table + // ARN in the response. + TableName *string `min:"1" type:"string"` // The total number of write capacity units consumed by the operation. WriteCapacityUnits *float64 `type:"double"` @@ -10076,10 +11078,11 @@ type CreateBackupInput struct { // BackupName is a required field BackupName *string `min:"3" type:"string" required:"true"` - // The name of the table. + // The name of the table. You can also provide the Amazon Resource Name (ARN) + // of the table in this parameter. // // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation. 
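// With the Item field added above, a failed condition check can hand back the
// existing item when ReturnValuesOnConditionCheckFailure is ALL_OLD (a sketch;
// the table, key, and item are hypothetical):
//
//	_, err := client.PutItem(&dynamodb.PutItemInput{
//	    TableName:                           aws.String("Example"),
//	    Item:                                item,
//	    ConditionExpression:                 aws.String("attribute_not_exists(pk)"),
//	    ReturnValuesOnConditionCheckFailure: aws.String("ALL_OLD"),
//	})
//	var ccf *dynamodb.ConditionalCheckFailedException
//	if errors.As(err, &ccf) {
//	    fmt.Println(ccf.Item) // the stored item that failed the condition
//	}
//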
@@ -10112,8 +11115,8 @@ func (s *CreateBackupInput) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if invalidParams.Len() > 0 { @@ -10179,6 +11182,11 @@ type CreateGlobalSecondaryIndexAction struct { // KeySchema is a required field KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + // The maximum number of read and write units for the global secondary index + // being created. If you use this parameter, you must specify MaxReadRequestUnits, + // MaxWriteRequestUnits, or both. + OnDemandThroughput *OnDemandThroughput `type:"structure"` + // Represents attributes that are copied (projected) from the table into an // index. These are in addition to the primary key attributes and index key // attributes, which are automatically projected. @@ -10270,6 +11278,12 @@ func (s *CreateGlobalSecondaryIndexAction) SetKeySchema(v []*KeySchemaElement) * return s } +// SetOnDemandThroughput sets the OnDemandThroughput field's value. +func (s *CreateGlobalSecondaryIndexAction) SetOnDemandThroughput(v *OnDemandThroughput) *CreateGlobalSecondaryIndexAction { + s.OnDemandThroughput = v + return s +} + // SetProjection sets the Projection field's value. func (s *CreateGlobalSecondaryIndexAction) SetProjection(v *Projection) *CreateGlobalSecondaryIndexAction { s.Projection = v @@ -10436,6 +11450,11 @@ type CreateReplicationGroupMemberAction struct { // different from the default DynamoDB KMS key alias/aws/dynamodb. KMSMasterKeyId *string `type:"string"` + // The maximum on-demand throughput settings for the specified replica table + // being created. You can only modify MaxReadRequestUnits, because you can't + // modify MaxWriteRequestUnits for individual replica tables. + OnDemandThroughputOverride *OnDemandThroughputOverride `type:"structure"` + // Replica-specific provisioned throughput. If not specified, uses the source // table's provisioned throughput settings. ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"` @@ -10511,6 +11530,12 @@ func (s *CreateReplicationGroupMemberAction) SetKMSMasterKeyId(v string) *Create return s } +// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value. +func (s *CreateReplicationGroupMemberAction) SetOnDemandThroughputOverride(v *OnDemandThroughputOverride) *CreateReplicationGroupMemberAction { + s.OnDemandThroughputOverride = v + return s +} + // SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. func (s *CreateReplicationGroupMemberAction) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *CreateReplicationGroupMemberAction { s.ProvisionedThroughputOverride = v @@ -10542,12 +11567,17 @@ type CreateTableInput struct { // capacity. This setting can be changed later. // // * PROVISIONED - We recommend using PROVISIONED for predictable workloads. - // PROVISIONED sets the billing mode to Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual). + // PROVISIONED sets the billing mode to Provisioned capacity mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html). 
// // * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable - // workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand). + // workloads. PAY_PER_REQUEST sets the billing mode to On-demand capacity + // mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/on-demand-capacity-mode.html). BillingMode *string `type:"string" enum:"BillingMode"` + // Indicates whether deletion protection is to be enabled (true) or disabled + // (false) on the table. + DeletionProtectionEnabled *bool `type:"boolean"` + // One or more global secondary indexes (the maximum is 20) to be created on // the table. Each global secondary index in the array includes the following: // @@ -10636,6 +11666,11 @@ type CreateTableInput struct { // attributes when determining the total. LocalSecondaryIndexes []*LocalSecondaryIndex `type:"list"` + // Sets the maximum number of read and write units for the specified table in + // on-demand capacity mode. If you use this parameter, you must specify MaxReadRequestUnits, + // MaxWriteRequestUnits, or both. + OnDemandThroughput *OnDemandThroughput `type:"structure"` + // Represents the provisioned throughput settings for a specified table or index. // The settings can be modified using the UpdateTable operation. // @@ -10647,6 +11682,21 @@ type CreateTableInput struct { // in the Amazon DynamoDB Developer Guide. ProvisionedThroughput *ProvisionedThroughput `type:"structure"` + // An Amazon Web Services resource-based policy document in JSON format that + // will be attached to the table. + // + // When you attach a resource-based policy while creating a table, the policy + // application is strongly consistent. + // + // The maximum size supported for a resource-based policy document is 20 KB. + // DynamoDB counts whitespaces when calculating the size of a policy against + // this limit. For a full list of all considerations that apply for resource-based + // policies, see Resource-based policy considerations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html). + // + // You need to specify the CreateTable and PutResourcePolicy IAM actions for + // authorizing a user to create a table with a resource-based policy. + ResourcePolicy *string `type:"string"` + // Represents the settings used to enable server-side encryption. SSESpecification *SSESpecification `type:"structure"` @@ -10668,10 +11718,11 @@ type CreateTableInput struct { // The table class of the new table. Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS. TableClass *string `type:"string" enum:"TableClass"` - // The name of the table to create. + // The name of the table to create. You can also provide the Amazon Resource + // Name (ARN) of the table in this parameter. // // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` // A list of key-value pairs to label the table. For more information, see Tagging // for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html). 
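Taken together, the CreateTableInput additions above (DeletionProtectionEnabled, OnDemandThroughput, ResourcePolicy) can be exercised in a single call. A minimal sketch, assuming an illustrative on-demand table; constant names follow the SDK's generated enum naming:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// createGuardedTable creates an on-demand table capped at 100 read / 50 write
// request units and protected against accidental DeleteTable calls.
func createGuardedTable(svc *dynamodb.DynamoDB) (*dynamodb.CreateTableOutput, error) {
	return svc.CreateTable(&dynamodb.CreateTableInput{
		TableName:   aws.String("example-table"), // illustrative name
		BillingMode: aws.String(dynamodb.BillingModePayPerRequest),
		AttributeDefinitions: []*dynamodb.AttributeDefinition{
			{AttributeName: aws.String("pk"), AttributeType: aws.String(dynamodb.ScalarAttributeTypeS)},
		},
		KeySchema: []*dynamodb.KeySchemaElement{
			{AttributeName: aws.String("pk"), KeyType: aws.String(dynamodb.KeyTypeHash)},
		},
		// Both fields below are new in this revision of the file.
		OnDemandThroughput: &dynamodb.OnDemandThroughput{
			MaxReadRequestUnits:  aws.Int64(100),
			MaxWriteRequestUnits: aws.Int64(50),
		},
		DeletionProtectionEnabled: aws.Bool(true),
	})
}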
@@ -10711,8 +11762,8 @@ func (s *CreateTableInput) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if s.AttributeDefinitions != nil { for i, v := range s.AttributeDefinitions { @@ -10793,6 +11844,12 @@ func (s *CreateTableInput) SetBillingMode(v string) *CreateTableInput { return s } +// SetDeletionProtectionEnabled sets the DeletionProtectionEnabled field's value. +func (s *CreateTableInput) SetDeletionProtectionEnabled(v bool) *CreateTableInput { + s.DeletionProtectionEnabled = &v + return s +} + // SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. func (s *CreateTableInput) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndex) *CreateTableInput { s.GlobalSecondaryIndexes = v @@ -10811,12 +11868,24 @@ func (s *CreateTableInput) SetLocalSecondaryIndexes(v []*LocalSecondaryIndex) *C return s } +// SetOnDemandThroughput sets the OnDemandThroughput field's value. +func (s *CreateTableInput) SetOnDemandThroughput(v *OnDemandThroughput) *CreateTableInput { + s.OnDemandThroughput = v + return s +} + // SetProvisionedThroughput sets the ProvisionedThroughput field's value. func (s *CreateTableInput) SetProvisionedThroughput(v *ProvisionedThroughput) *CreateTableInput { s.ProvisionedThroughput = v return s } +// SetResourcePolicy sets the ResourcePolicy field's value. +func (s *CreateTableInput) SetResourcePolicy(v string) *CreateTableInput { + s.ResourcePolicy = &v + return s +} + // SetSSESpecification sets the SSESpecification field's value. func (s *CreateTableInput) SetSSESpecification(v *SSESpecification) *CreateTableInput { s.SSESpecification = v @@ -10963,10 +12032,11 @@ type Delete struct { // values are: NONE and ALL_OLD. ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` - // Name of the table in which the item to be deleted resides. + // Name of the table in which the item to be deleted resides. You can also provide + // the Amazon Resource Name (ARN) of the table in this parameter. // // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -10996,8 +12066,8 @@ func (s *Delete) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if invalidParams.Len() > 0 { @@ -11265,7 +12335,7 @@ type DeleteItemInput struct { // A map of attribute names to AttributeValue objects, representing the primary // key of the item to delete. // - // For the primary key, you must provide all of the attributes. For example, + // For the primary key, you must provide all of the key attributes. For example, // with a simple primary key, you only need to provide a value for the partition // key. For a composite primary key, you must provide values for both the partition // key and the sort key. @@ -11310,10 +12380,19 @@ type DeleteItemInput struct { // DeleteItem does not recognize any values other than NONE or ALL_OLD. 
ReturnValues *string `type:"string" enum:"ReturnValue"` - // The name of the table from which to delete the item. + // An optional parameter that returns the item attributes for a DeleteItem operation + // that failed a condition check. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. + ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` + + // The name of the table from which to delete the item. You can also provide + // the Amazon Resource Name (ARN) of the table in this parameter. // // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -11343,8 +12422,8 @@ func (s *DeleteItemInput) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if invalidParams.Len() > 0 { @@ -11407,6 +12486,12 @@ func (s *DeleteItemInput) SetReturnValues(v string) *DeleteItemInput { return s } +// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. +func (s *DeleteItemInput) SetReturnValuesOnConditionCheckFailure(v string) *DeleteItemInput { + s.ReturnValuesOnConditionCheckFailure = &v + return s +} + // SetTableName sets the TableName field's value. func (s *DeleteItemInput) SetTableName(v string) *DeleteItemInput { s.TableName = &v @@ -11426,7 +12511,7 @@ type DeleteItemOutput struct { // includes the total provisioned throughput consumed, along with statistics // for the table and any indexes involved in the operation. ConsumedCapacity // is only returned if the ReturnConsumedCapacity parameter was specified. For - // more information, see Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // more information, see Provisioned capacity mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html) // in the Amazon DynamoDB Developer Guide. ConsumedCapacity *ConsumedCapacity `type:"structure"` @@ -11617,14 +12702,118 @@ func (s *DeleteRequest) SetKey(v map[string]*AttributeValue) *DeleteRequest { return s } +type DeleteResourcePolicyInput struct { + _ struct{} `type:"structure"` + + // A string value that you can use to conditionally delete your policy. When + // you provide an expected revision ID, if the revision ID of the existing policy + // on the resource doesn't match or if there's no policy attached to the resource, + // the request will fail and return a PolicyNotFoundException. + ExpectedRevisionId *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the DynamoDB resource from which the policy + // will be removed. The resources you can specify include tables and streams. + // If you remove the policy of a table, it will also remove the permissions + // for the table's indexes defined in that policy document. This is because + // index permissions are defined in the table's policy. 
+ // + // ResourceArn is a required field + ResourceArn *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteResourcePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteResourcePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteResourcePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteResourcePolicyInput"} + if s.ExpectedRevisionId != nil && len(*s.ExpectedRevisionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ExpectedRevisionId", 1)) + } + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExpectedRevisionId sets the ExpectedRevisionId field's value. +func (s *DeleteResourcePolicyInput) SetExpectedRevisionId(v string) *DeleteResourcePolicyInput { + s.ExpectedRevisionId = &v + return s +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *DeleteResourcePolicyInput) SetResourceArn(v string) *DeleteResourcePolicyInput { + s.ResourceArn = &v + return s +} + +type DeleteResourcePolicyOutput struct { + _ struct{} `type:"structure"` + + // A unique string that represents the revision ID of the policy. If you're + // comparing revision IDs, make sure to always use string comparison logic. + // + // This value will be empty if you make a request against a resource without + // a policy. + RevisionId *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteResourcePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteResourcePolicyOutput) GoString() string { + return s.String() +} + +// SetRevisionId sets the RevisionId field's value. +func (s *DeleteResourcePolicyOutput) SetRevisionId(v string) *DeleteResourcePolicyOutput { + s.RevisionId = &v + return s +} + // Represents the input of a DeleteTable operation. type DeleteTableInput struct { _ struct{} `type:"structure"` - // The name of the table to delete. + // The name of the table to delete. You can also provide the Amazon Resource + // Name (ARN) of the table in this parameter. 
// // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -11651,8 +12840,8 @@ func (s *DeleteTableInput) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if invalidParams.Len() > 0 { @@ -11785,8 +12974,11 @@ type DescribeContinuousBackupsInput struct { // Name of the table for which the customer wants to check the continuous backups // and point in time recovery settings. // + // You can also provide the Amazon Resource Name (ARN) of the table in this + // parameter. + // // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -11813,8 +13005,8 @@ func (s *DescribeContinuousBackupsInput) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if invalidParams.Len() > 0 { @@ -11867,10 +13059,11 @@ type DescribeContributorInsightsInput struct { // The name of the global secondary index to describe, if applicable. IndexName *string `min:"3" type:"string"` - // The name of the table to describe. + // The name of the table to describe. You can also provide the Amazon Resource + // Name (ARN) of the table in this parameter. // // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -11900,8 +13093,8 @@ func (s *DescribeContributorInsightsInput) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if invalidParams.Len() > 0 { @@ -12405,10 +13598,11 @@ func (s *DescribeImportOutput) SetImportTableDescription(v *ImportTableDescripti type DescribeKinesisStreamingDestinationInput struct { _ struct{} `type:"structure"` - // The name of the table being described. + // The name of the table being described. You can also provide the Amazon Resource + // Name (ARN) of the table in this parameter. // // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation. 
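The DeleteResourcePolicyInput/Output pair added above supports optimistic concurrency through ExpectedRevisionId. A minimal sketch, assuming a hypothetical table ARN and a revision string the caller obtained earlier; a stale or missing policy surfaces as the PolicyNotFoundException type that this same revision adds further down:

package example

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// deletePolicyAt removes a resource-based policy, but only if the policy on
// the service side still has the revision the caller last saw.
func deletePolicyAt(svc *dynamodb.DynamoDB, resourceArn, lastSeenRevision string) error {
	out, err := svc.DeleteResourcePolicy(&dynamodb.DeleteResourcePolicyInput{
		ResourceArn:        aws.String(resourceArn),
		ExpectedRevisionId: aws.String(lastSeenRevision), // optimistic-lock guard
	})
	var pnf *dynamodb.PolicyNotFoundException
	if errors.As(err, &pnf) {
		// Either no policy is attached or another writer changed it first.
		return fmt.Errorf("policy missing or revision stale: %w", err)
	}
	if err != nil {
		return err
	}
	// Per the docs above, compare revision IDs with string equality only.
	fmt.Println("deleted; revision was", aws.StringValue(out.RevisionId))
	return nil
}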
@@ -12435,8 +13629,8 @@ func (s *DescribeKinesisStreamingDestinationInput) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if invalidParams.Len() > 0 { @@ -12583,10 +13777,11 @@ func (s *DescribeLimitsOutput) SetTableMaxWriteCapacityUnits(v int64) *DescribeL type DescribeTableInput struct { _ struct{} `type:"structure"` - // The name of the table to describe. + // The name of the table to describe. You can also provide the Amazon Resource + // Name (ARN) of the table in this parameter. // // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -12613,8 +13808,8 @@ func (s *DescribeTableInput) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if invalidParams.Len() > 0 { @@ -12664,10 +13859,11 @@ func (s *DescribeTableOutput) SetTable(v *TableDescription) *DescribeTableOutput type DescribeTableReplicaAutoScalingInput struct { _ struct{} `type:"structure"` - // The name of the table. + // The name of the table. You can also provide the Amazon Resource Name (ARN) + // of the table in this parameter. // // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -12694,8 +13890,8 @@ func (s *DescribeTableReplicaAutoScalingInput) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if invalidParams.Len() > 0 { @@ -12744,10 +13940,11 @@ func (s *DescribeTableReplicaAutoScalingOutput) SetTableAutoScalingDescription(v type DescribeTimeToLiveInput struct { _ struct{} `type:"structure"` - // The name of the table to be described. + // The name of the table to be described. You can also provide the Amazon Resource + // Name (ARN) of the table in this parameter. // // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation. 
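The recurring TableName change in these hunks (minimum length 3 dropped to 1, with ARNs now accepted) means table-scoped calls can take either a bare name or a full ARN. A minimal sketch with an illustrative ARN:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// describeByARN passes a table ARN where a bare table name used to be
// required; client-side validation now only enforces a minimum length of 1.
func describeByARN(svc *dynamodb.DynamoDB) error {
	out, err := svc.DescribeTable(&dynamodb.DescribeTableInput{
		TableName: aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/example-table"), // illustrative ARN
	})
	if err != nil {
		return err
	}
	fmt.Println("status:", aws.StringValue(out.Table.TableStatus))
	return nil
}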
@@ -12774,8 +13971,8 @@ func (s *DescribeTimeToLiveInput) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if invalidParams.Len() > 0 { @@ -12824,15 +14021,19 @@ func (s *DescribeTimeToLiveOutput) SetTimeToLiveDescription(v *TimeToLiveDescrip type DisableKinesisStreamingDestinationInput struct { _ struct{} `type:"structure"` + // The source for the Kinesis streaming information that is being disabled. + EnableKinesisStreamingConfiguration *EnableKinesisStreamingConfiguration `type:"structure"` + // The ARN for a Kinesis data stream. // // StreamArn is a required field StreamArn *string `min:"37" type:"string" required:"true"` - // The name of the DynamoDB table. + // The name of the DynamoDB table. You can also provide the Amazon Resource + // Name (ARN) of the table in this parameter. // // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -12865,8 +14066,8 @@ func (s *DisableKinesisStreamingDestinationInput) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if invalidParams.Len() > 0 { @@ -12875,6 +14076,12 @@ func (s *DisableKinesisStreamingDestinationInput) Validate() error { return nil } +// SetEnableKinesisStreamingConfiguration sets the EnableKinesisStreamingConfiguration field's value. +func (s *DisableKinesisStreamingDestinationInput) SetEnableKinesisStreamingConfiguration(v *EnableKinesisStreamingConfiguration) *DisableKinesisStreamingDestinationInput { + s.EnableKinesisStreamingConfiguration = v + return s +} + // SetStreamArn sets the StreamArn field's value. func (s *DisableKinesisStreamingDestinationInput) SetStreamArn(v string) *DisableKinesisStreamingDestinationInput { s.StreamArn = &v @@ -12893,6 +14100,9 @@ type DisableKinesisStreamingDestinationOutput struct { // The current status of the replication. DestinationStatus *string `type:"string" enum:"DestinationStatus"` + // The destination for the Kinesis streaming information that is being disabled. + EnableKinesisStreamingConfiguration *EnableKinesisStreamingConfiguration `type:"structure"` + // The ARN for the specific Kinesis data stream. StreamArn *string `min:"37" type:"string"` @@ -12924,6 +14134,12 @@ func (s *DisableKinesisStreamingDestinationOutput) SetDestinationStatus(v string return s } +// SetEnableKinesisStreamingConfiguration sets the EnableKinesisStreamingConfiguration field's value. +func (s *DisableKinesisStreamingDestinationOutput) SetEnableKinesisStreamingConfiguration(v *EnableKinesisStreamingConfiguration) *DisableKinesisStreamingDestinationOutput { + s.EnableKinesisStreamingConfiguration = v + return s +} + // SetStreamArn sets the StreamArn field's value.
func (s *DisableKinesisStreamingDestinationOutput) SetStreamArn(v string) *DisableKinesisStreamingDestinationOutput { s.StreamArn = &v @@ -13001,18 +14217,55 @@ func (s *DuplicateItemException) RequestID() string { return s.RespMetadata.RequestID } +// Enables setting the configuration for Kinesis Streaming. +type EnableKinesisStreamingConfiguration struct { + _ struct{} `type:"structure"` + + // Toggle for the precision of Kinesis data stream timestamp. The values are + // either MILLISECOND or MICROSECOND. + ApproximateCreationDateTimePrecision *string `type:"string" enum:"ApproximateCreationDateTimePrecision"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableKinesisStreamingConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableKinesisStreamingConfiguration) GoString() string { + return s.String() +} + +// SetApproximateCreationDateTimePrecision sets the ApproximateCreationDateTimePrecision field's value. +func (s *EnableKinesisStreamingConfiguration) SetApproximateCreationDateTimePrecision(v string) *EnableKinesisStreamingConfiguration { + s.ApproximateCreationDateTimePrecision = &v + return s +} + type EnableKinesisStreamingDestinationInput struct { _ struct{} `type:"structure"` + // The source for the Kinesis streaming information that is being enabled. + EnableKinesisStreamingConfiguration *EnableKinesisStreamingConfiguration `type:"structure"` + // The ARN for a Kinesis data stream. // // StreamArn is a required field StreamArn *string `min:"37" type:"string" required:"true"` - // The name of the DynamoDB table. + // The name of the DynamoDB table. You can also provide the Amazon Resource + // Name (ARN) of the table in this parameter. // // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -13045,8 +14298,8 @@ func (s *EnableKinesisStreamingDestinationInput) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if invalidParams.Len() > 0 { @@ -13055,6 +14308,12 @@ func (s *EnableKinesisStreamingDestinationInput) Validate() error { return nil } +// SetEnableKinesisStreamingConfiguration sets the EnableKinesisStreamingConfiguration field's value. +func (s *EnableKinesisStreamingDestinationInput) SetEnableKinesisStreamingConfiguration(v *EnableKinesisStreamingConfiguration) *EnableKinesisStreamingDestinationInput { + s.EnableKinesisStreamingConfiguration = v + return s +} + // SetStreamArn sets the StreamArn field's value. 
func (s *EnableKinesisStreamingDestinationInput) SetStreamArn(v string) *EnableKinesisStreamingDestinationInput { s.StreamArn = &v @@ -13073,6 +14332,9 @@ type EnableKinesisStreamingDestinationOutput struct { // The current status of the replication. DestinationStatus *string `type:"string" enum:"DestinationStatus"` + // The destination for the Kinesis streaming information that is being enabled. + EnableKinesisStreamingConfiguration *EnableKinesisStreamingConfiguration `type:"structure"` + // The ARN for the specific Kinesis data stream. StreamArn *string `min:"37" type:"string"` @@ -13104,6 +14366,12 @@ func (s *EnableKinesisStreamingDestinationOutput) SetDestinationStatus(v string) return s } +// SetEnableKinesisStreamingConfiguration sets the EnableKinesisStreamingConfiguration field's value. +func (s *EnableKinesisStreamingDestinationOutput) SetEnableKinesisStreamingConfiguration(v *EnableKinesisStreamingConfiguration) *EnableKinesisStreamingDestinationOutput { + s.EnableKinesisStreamingConfiguration = v + return s +} + // SetStreamArn sets the StreamArn field's value. func (s *EnableKinesisStreamingDestinationOutput) SetStreamArn(v string) *EnableKinesisStreamingDestinationOutput { s.StreamArn = &v @@ -13200,6 +14468,14 @@ type ExecuteStatementInput struct { // * NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + // An optional parameter that returns the item attributes for an ExecuteStatement + // operation that failed a condition check. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. + ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` + // The PartiQL statement representing the operation to run. // // Statement is a required field @@ -13279,6 +14555,12 @@ func (s *ExecuteStatementInput) SetReturnConsumedCapacity(v string) *ExecuteStat return s } +// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. +func (s *ExecuteStatementInput) SetReturnValuesOnConditionCheckFailure(v string) *ExecuteStatementInput { + s.ReturnValuesOnConditionCheckFailure = &v + return s +} + // SetStatement sets the Statement field's value. func (s *ExecuteStatementInput) SetStatement(v string) *ExecuteStatementInput { s.Statement = &v @@ -13291,8 +14573,8 @@ type ExecuteStatementOutput struct { // The capacity units consumed by an operation. The data returned includes the // total provisioned throughput consumed, along with statistics for the table // and any indexes involved in the operation. ConsumedCapacity is only returned - // if the request asked for it. For more information, see Provisioned Throughput - // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // if the request asked for it. For more information, see Provisioned capacity + // mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html) // in the Amazon DynamoDB Developer Guide. ConsumedCapacity *ConsumedCapacity `type:"structure"` @@ -13807,12 +15089,18 @@ type ExportDescription struct { // Point in time from which table data was exported. ExportTime *time.Time `type:"timestamp"` + // The type of export that was performed. Valid values are FULL_EXPORT or INCREMENTAL_EXPORT. 
+ ExportType *string `type:"string" enum:"ExportType"` + // Status code for the result of the failed export. FailureCode *string `type:"string"` // Export failure reason description. FailureMessage *string `type:"string"` + // Optional object containing the parameters specific to an incremental export. + IncrementalExportSpecification *IncrementalExportSpecification `type:"structure"` + // The number of items exported. ItemCount *int64 `type:"long"` @@ -13843,7 +15131,7 @@ type ExportDescription struct { StartTime *time.Time `type:"timestamp"` // The Amazon Resource Name (ARN) of the table that was exported. - TableArn *string `type:"string"` + TableArn *string `min:"1" type:"string"` // Unique ID of the table that was exported. TableId *string `type:"string"` @@ -13915,6 +15203,12 @@ func (s *ExportDescription) SetExportTime(v time.Time) *ExportDescription { return s } +// SetExportType sets the ExportType field's value. +func (s *ExportDescription) SetExportType(v string) *ExportDescription { + s.ExportType = &v + return s +} + // SetFailureCode sets the FailureCode field's value. func (s *ExportDescription) SetFailureCode(v string) *ExportDescription { s.FailureCode = &v @@ -13927,6 +15221,12 @@ func (s *ExportDescription) SetFailureMessage(v string) *ExportDescription { return s } +// SetIncrementalExportSpecification sets the IncrementalExportSpecification field's value. +func (s *ExportDescription) SetIncrementalExportSpecification(v *IncrementalExportSpecification) *ExportDescription { + s.IncrementalExportSpecification = v + return s +} + // SetItemCount sets the ItemCount field's value. func (s *ExportDescription) SetItemCount(v int64) *ExportDescription { s.ItemCount = &v @@ -14055,6 +15355,9 @@ type ExportSummary struct { // Export can be in one of the following states: IN_PROGRESS, COMPLETED, or // FAILED. ExportStatus *string `type:"string" enum:"ExportStatus"` + + // The type of export that was performed. Valid values are FULL_EXPORT or INCREMENTAL_EXPORT. + ExportType *string `type:"string" enum:"ExportType"` } // String returns the string representation. @@ -14087,6 +15390,12 @@ func (s *ExportSummary) SetExportStatus(v string) *ExportSummary { return s } +// SetExportType sets the ExportType field's value. +func (s *ExportSummary) SetExportType(v string) *ExportSummary { + s.ExportType = &v + return s +} + type ExportTableToPointInTimeInput struct { _ struct{} `type:"structure"` @@ -14112,6 +15421,15 @@ type ExportTableToPointInTimeInput struct { // state at this point in time. ExportTime *time.Time `type:"timestamp"` + // Choice of whether to execute as a full export or incremental export. Valid + // values are FULL_EXPORT or INCREMENTAL_EXPORT. The default value is FULL_EXPORT. + // If INCREMENTAL_EXPORT is provided, the IncrementalExportSpecification must + // also be used. + ExportType *string `type:"string" enum:"ExportType"` + + // Optional object containing the parameters specific to an incremental export. + IncrementalExportSpecification *IncrementalExportSpecification `type:"structure"` + // The name of the Amazon S3 bucket to export the snapshot to. // // S3Bucket is a required field @@ -14119,6 +15437,9 @@ type ExportTableToPointInTimeInput struct { // The ID of the Amazon Web Services account that owns the bucket the export // will be stored in. + // + // S3BucketOwner is a required parameter when exporting to an S3 bucket in another + // account.
S3BucketOwner *string `type:"string"` // The Amazon S3 bucket prefix to use as the file name and path of the exported @@ -14140,7 +15461,7 @@ type ExportTableToPointInTimeInput struct { // The Amazon Resource Name (ARN) associated with the table to export. // // TableArn is a required field - TableArn *string `type:"string" required:"true"` + TableArn *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -14173,6 +15494,9 @@ func (s *ExportTableToPointInTimeInput) Validate() error { if s.TableArn == nil { invalidParams.Add(request.NewErrParamRequired("TableArn")) } + if s.TableArn != nil && len(*s.TableArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableArn", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -14198,6 +15522,18 @@ func (s *ExportTableToPointInTimeInput) SetExportTime(v time.Time) *ExportTableT return s } +// SetExportType sets the ExportType field's value. +func (s *ExportTableToPointInTimeInput) SetExportType(v string) *ExportTableToPointInTimeInput { + s.ExportType = &v + return s +} + +// SetIncrementalExportSpecification sets the IncrementalExportSpecification field's value. +func (s *ExportTableToPointInTimeInput) SetIncrementalExportSpecification(v *IncrementalExportSpecification) *ExportTableToPointInTimeInput { + s.IncrementalExportSpecification = v + return s +} + // SetS3Bucket sets the S3Bucket field's value. func (s *ExportTableToPointInTimeInput) SetS3Bucket(v string) *ExportTableToPointInTimeInput { s.S3Bucket = &v @@ -14328,10 +15664,11 @@ type Get struct { // they do not appear in the result. ProjectionExpression *string `type:"string"` - // The name of the table from which to retrieve the specified item. + // The name of the table from which to retrieve the specified item. You can + // also provide the Amazon Resource Name (ARN) of the table in this parameter. // // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -14361,8 +15698,8 @@ func (s *Get) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if invalidParams.Len() > 0 { @@ -14484,10 +15821,11 @@ type GetItemInput struct { // * NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` - // The name of the table containing the requested item. + // The name of the table containing the requested item. You can also provide + // the Amazon Resource Name (ARN) of the table in this parameter. // // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation. 
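Per the ExportTableToPointInTimeInput docs above, an INCREMENTAL_EXPORT must carry an IncrementalExportSpecification (the type itself is defined further down in this file). A minimal sketch requesting roughly the last 24 hours of changes; the ARN, bucket, and time window are illustrative, and the enum constants follow the SDK's generated naming:

package example

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// exportLastDay starts an incremental export. ExportToTime is omitted, so per
// the field docs the latest time with data available is used.
func exportLastDay(svc *dynamodb.DynamoDB) error {
	_, err := svc.ExportTableToPointInTime(&dynamodb.ExportTableToPointInTimeInput{
		TableArn:   aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/example-table"), // illustrative
		S3Bucket:   aws.String("example-export-bucket"),                                       // illustrative
		ExportType: aws.String(dynamodb.ExportTypeIncrementalExport),
		IncrementalExportSpecification: &dynamodb.IncrementalExportSpecification{
			ExportFromTime: aws.Time(time.Now().Add(-24 * time.Hour)), // inclusive start
			ExportViewType: aws.String(dynamodb.ExportViewTypeNewAndOldImages),
		},
	})
	return err
}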
@@ -14520,8 +15858,8 @@ func (s *GetItemInput) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if invalidParams.Len() > 0 { @@ -14580,7 +15918,7 @@ type GetItemOutput struct { // the total provisioned throughput consumed, along with statistics for the // table and any indexes involved in the operation. ConsumedCapacity is only // returned if the ReturnConsumedCapacity parameter was specified. For more - // information, see Read/Write Capacity Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // information, see Capacity unit consumption for read operations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#read-operation-consumption) // in the Amazon DynamoDB Developer Guide. ConsumedCapacity *ConsumedCapacity `type:"structure"` @@ -14618,6 +15956,98 @@ func (s *GetItemOutput) SetItem(v map[string]*AttributeValue) *GetItemOutput { return s } +type GetResourcePolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the DynamoDB resource to which the policy + // is attached. The resources you can specify include tables and streams. + // + // ResourceArn is a required field + ResourceArn *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetResourcePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetResourcePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetResourcePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetResourcePolicyInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *GetResourcePolicyInput) SetResourceArn(v string) *GetResourcePolicyInput { + s.ResourceArn = &v + return s +} + +type GetResourcePolicyOutput struct { + _ struct{} `type:"structure"` + + // The resource-based policy document attached to the resource, which can be + // a table or stream, in JSON format. + Policy *string `type:"string"` + + // A unique string that represents the revision ID of the policy. If you're + // comparing revision IDs, make sure to always use string comparison logic. + RevisionId *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetResourcePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetResourcePolicyOutput) GoString() string { + return s.String() +} + +// SetPolicy sets the Policy field's value. +func (s *GetResourcePolicyOutput) SetPolicy(v string) *GetResourcePolicyOutput { + s.Policy = &v + return s +} + +// SetRevisionId sets the RevisionId field's value. +func (s *GetResourcePolicyOutput) SetRevisionId(v string) *GetResourcePolicyOutput { + s.RevisionId = &v + return s +} + // Represents the properties of a global secondary index. type GlobalSecondaryIndex struct { _ struct{} `type:"structure"` @@ -14647,6 +16077,11 @@ type GlobalSecondaryIndex struct { // KeySchema is a required field KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + // The maximum number of read and write units for the specified global secondary + // index. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, + // or both. + OnDemandThroughput *OnDemandThroughput `type:"structure"` + // Represents attributes that are copied (projected) from the table into the // global secondary index. These are in addition to the primary key attributes // and index key attributes, which are automatically projected. @@ -14738,6 +16173,12 @@ func (s *GlobalSecondaryIndex) SetKeySchema(v []*KeySchemaElement) *GlobalSecond return s } +// SetOnDemandThroughput sets the OnDemandThroughput field's value. +func (s *GlobalSecondaryIndex) SetOnDemandThroughput(v *OnDemandThroughput) *GlobalSecondaryIndex { + s.OnDemandThroughput = v + return s +} + // SetProjection sets the Projection field's value. func (s *GlobalSecondaryIndex) SetProjection(v *Projection) *GlobalSecondaryIndex { s.Projection = v @@ -14874,6 +16315,11 @@ type GlobalSecondaryIndexDescription struct { // key physically close together, in sorted order by the sort key value. KeySchema []*KeySchemaElement `min:"1" type:"list"` + // The maximum number of read and write units for the specified global secondary + // index. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, + // or both. + OnDemandThroughput *OnDemandThroughput `type:"structure"` + // Represents attributes that are copied (projected) from the table into the // global secondary index. These are in addition to the primary key attributes // and index key attributes, which are automatically projected. @@ -14948,6 +16394,12 @@ func (s *GlobalSecondaryIndexDescription) SetKeySchema(v []*KeySchemaElement) *G return s } +// SetOnDemandThroughput sets the OnDemandThroughput field's value. +func (s *GlobalSecondaryIndexDescription) SetOnDemandThroughput(v *OnDemandThroughput) *GlobalSecondaryIndexDescription { + s.OnDemandThroughput = v + return s +} + // SetProjection sets the Projection field's value. func (s *GlobalSecondaryIndexDescription) SetProjection(v *Projection) *GlobalSecondaryIndexDescription { s.Projection = v @@ -14985,6 +16437,11 @@ type GlobalSecondaryIndexInfo struct { // key physically close together, in sorted order by the sort key value. 
KeySchema []*KeySchemaElement `min:"1" type:"list"` + // Sets the maximum number of read and write units for the specified on-demand + // table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, + // or both. + OnDemandThroughput *OnDemandThroughput `type:"structure"` + // Represents attributes that are copied (projected) from the table into the // global secondary index. These are in addition to the primary key attributes // and index key attributes, which are automatically projected. @@ -15025,6 +16482,12 @@ func (s *GlobalSecondaryIndexInfo) SetKeySchema(v []*KeySchemaElement) *GlobalSe return s } +// SetOnDemandThroughput sets the OnDemandThroughput field's value. +func (s *GlobalSecondaryIndexInfo) SetOnDemandThroughput(v *OnDemandThroughput) *GlobalSecondaryIndexInfo { + s.OnDemandThroughput = v + return s +} + // SetProjection sets the Projection field's value. func (s *GlobalSecondaryIndexInfo) SetProjection(v *Projection) *GlobalSecondaryIndexInfo { s.Projection = v @@ -15681,7 +17144,7 @@ type ImportSummary struct { StartTime *time.Time `type:"timestamp"` // The Amazon Resource Number (ARN) of the table being imported into. - TableArn *string `type:"string"` + TableArn *string `min:"1" type:"string"` } // String returns the string representation. @@ -15812,7 +17275,7 @@ type ImportTableDescription struct { StartTime *time.Time `type:"timestamp"` // The Amazon Resource Number (ARN) of the table being imported into. - TableArn *string `type:"string"` + TableArn *string `min:"1" type:"string"` // The parameters for the new table that is being imported into. TableCreationParameters *TableCreationParameters `type:"structure"` @@ -16115,6 +17578,62 @@ func (s *ImportTableOutput) SetImportTableDescription(v *ImportTableDescription) return s } +// Optional object containing the parameters specific to an incremental export. +type IncrementalExportSpecification struct { + _ struct{} `type:"structure"` + + // Time in the past which provides the inclusive start range for the export + // table's data, counted in seconds from the start of the Unix epoch. The incremental + // export will reflect the table's state including and after this point in time. + ExportFromTime *time.Time `type:"timestamp"` + + // Time in the past which provides the exclusive end range for the export table's + // data, counted in seconds from the start of the Unix epoch. The incremental + // export will reflect the table's state just prior to this point in time. If + // this is not provided, the latest time with data available will be used. + ExportToTime *time.Time `type:"timestamp"` + + // The view type that was chosen for the export. Valid values are NEW_AND_OLD_IMAGES + // and NEW_IMAGES. The default value is NEW_AND_OLD_IMAGES. + ExportViewType *string `type:"string" enum:"ExportViewType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IncrementalExportSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s IncrementalExportSpecification) GoString() string { + return s.String() +} + +// SetExportFromTime sets the ExportFromTime field's value. +func (s *IncrementalExportSpecification) SetExportFromTime(v time.Time) *IncrementalExportSpecification { + s.ExportFromTime = &v + return s +} + +// SetExportToTime sets the ExportToTime field's value. +func (s *IncrementalExportSpecification) SetExportToTime(v time.Time) *IncrementalExportSpecification { + s.ExportToTime = &v + return s +} + +// SetExportViewType sets the ExportViewType field's value. +func (s *IncrementalExportSpecification) SetExportViewType(v string) *IncrementalExportSpecification { + s.ExportViewType = &v + return s +} + // The operation tried to access a nonexistent index. type IndexNotFoundException struct { _ struct{} `type:"structure"` @@ -16807,6 +18326,10 @@ func (s *KeysAndAttributes) SetProjectionExpression(v string) *KeysAndAttributes type KinesisDataStreamDestination struct { _ struct{} `type:"structure"` + // The precision of the Kinesis data stream timestamp. The values are either + // MILLISECOND or MICROSECOND. + ApproximateCreationDateTimePrecision *string `type:"string" enum:"ApproximateCreationDateTimePrecision"` + // The current status of replication. DestinationStatus *string `type:"string" enum:"DestinationStatus"` @@ -16835,6 +18358,12 @@ func (s KinesisDataStreamDestination) GoString() string { return s.String() } +// SetApproximateCreationDateTimePrecision sets the ApproximateCreationDateTimePrecision field's value. +func (s *KinesisDataStreamDestination) SetApproximateCreationDateTimePrecision(v string) *KinesisDataStreamDestination { + s.ApproximateCreationDateTimePrecision = &v + return s +} + // SetDestinationStatus sets the DestinationStatus field's value. func (s *KinesisDataStreamDestination) SetDestinationStatus(v string) *KinesisDataStreamDestination { s.DestinationStatus = &v @@ -16855,16 +18384,25 @@ func (s *KinesisDataStreamDestination) SetStreamArn(v string) *KinesisDataStream // There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 500 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 250 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. // // There is a soft account quota of 2,500 tables. +// +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same +// time. Exceeding this limit may result in request throttling. 
type LimitExceededException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -16953,8 +18491,9 @@ type ListBackupsInput struct { // Maximum number of backups to return at once. Limit *int64 `min:"1" type:"integer"` - // The backups from the table specified by TableName are listed. - TableName *string `min:"3" type:"string"` + // Lists the backups from the table specified in TableName. You can also provide + // the Amazon Resource Name (ARN) of the table in this parameter. + TableName *string `min:"1" type:"string"` // Only backups created after this time are listed. TimeRangeLowerBound is inclusive. TimeRangeLowerBound *time.Time `type:"timestamp"` @@ -16991,8 +18530,8 @@ func (s *ListBackupsInput) Validate() error { if s.Limit != nil && *s.Limit < 1 { invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if invalidParams.Len() > 0 { @@ -17096,8 +18635,9 @@ type ListContributorInsightsInput struct { // A token to for the desired page, if there is one. NextToken *string `type:"string"` - // The name of the table. - TableName *string `min:"3" type:"string"` + // The name of the table. You can also provide the Amazon Resource Name (ARN) + // of the table in this parameter. + TableName *string `min:"1" type:"string"` } // String returns the string representation. @@ -17121,8 +18661,8 @@ func (s ListContributorInsightsInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *ListContributorInsightsInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ListContributorInsightsInput"} - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if invalidParams.Len() > 0 { @@ -17201,7 +18741,7 @@ type ListExportsInput struct { NextToken *string `type:"string"` // The Amazon Resource Name (ARN) associated with the exported table. - TableArn *string `type:"string"` + TableArn *string `min:"1" type:"string"` } // String returns the string representation. @@ -17228,6 +18768,9 @@ func (s *ListExportsInput) Validate() error { if s.MaxResults != nil && *s.MaxResults < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } + if s.TableArn != nil && len(*s.TableArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableArn", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -17418,7 +18961,7 @@ type ListImportsInput struct { // The Amazon Resource Name (ARN) associated with the table that was imported // to. - TableArn *string `type:"string"` + TableArn *string `min:"1" type:"string"` } // String returns the string representation. @@ -17448,6 +18991,9 @@ func (s *ListImportsInput) Validate() error { if s.PageSize != nil && *s.PageSize < 1 { invalidParams.Add(request.NewErrParamMinValue("PageSize", 1)) } + if s.TableArn != nil && len(*s.TableArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableArn", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -18004,14 +19550,108 @@ func (s *LocalSecondaryIndexInfo) SetProjection(v *Projection) *LocalSecondaryIn return s } -// Represents a PartiQL statment that uses parameters. 
+// Sets the maximum number of read and write units for the specified on-demand +// table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, +// or both. +type OnDemandThroughput struct { + _ struct{} `type:"structure"` + + // Maximum number of read request units for the specified table. + // + // To specify a maximum OnDemandThroughput on your table, set the value of MaxReadRequestUnits + // as greater than or equal to 1. To remove the maximum OnDemandThroughput that + // is currently set on your table, set the value of MaxReadRequestUnits to -1. + MaxReadRequestUnits *int64 `type:"long"` + + // Maximum number of write request units for the specified table. + // + // To specify a maximum OnDemandThroughput on your table, set the value of MaxWriteRequestUnits + // as greater than or equal to 1. To remove the maximum OnDemandThroughput that + // is currently set on your table, set the value of MaxWriteRequestUnits to + // -1. + MaxWriteRequestUnits *int64 `type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OnDemandThroughput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OnDemandThroughput) GoString() string { + return s.String() +} + +// SetMaxReadRequestUnits sets the MaxReadRequestUnits field's value. +func (s *OnDemandThroughput) SetMaxReadRequestUnits(v int64) *OnDemandThroughput { + s.MaxReadRequestUnits = &v + return s +} + +// SetMaxWriteRequestUnits sets the MaxWriteRequestUnits field's value. +func (s *OnDemandThroughput) SetMaxWriteRequestUnits(v int64) *OnDemandThroughput { + s.MaxWriteRequestUnits = &v + return s +} + +// Overrides the on-demand throughput settings for this replica table. If you +// don't specify a value for this parameter, it uses the source table's on-demand +// throughput settings. +type OnDemandThroughputOverride struct { + _ struct{} `type:"structure"` + + // Maximum number of read request units for the specified replica table. + MaxReadRequestUnits *int64 `type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OnDemandThroughputOverride) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OnDemandThroughputOverride) GoString() string { + return s.String() +} + +// SetMaxReadRequestUnits sets the MaxReadRequestUnits field's value. +func (s *OnDemandThroughputOverride) SetMaxReadRequestUnits(v int64) *OnDemandThroughputOverride { + s.MaxReadRequestUnits = &v + return s +} + +// Represents a PartiQL statement that uses parameters. type ParameterizedStatement struct { _ struct{} `type:"structure"` // The parameter values. 
Parameters []*AttributeValue `min:"1" type:"list"` - // A PartiQL statment that uses parameters. + // An optional parameter that returns the item attributes for a PartiQL ParameterizedStatement + // operation that failed a condition check. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. + ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` + + // A PartiQL statement that uses parameters. // // Statement is a required field Statement *string `min:"1" type:"string" required:"true"` @@ -18060,6 +19700,12 @@ func (s *ParameterizedStatement) SetParameters(v []*AttributeValue) *Parameteriz return s } +// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. +func (s *ParameterizedStatement) SetReturnValuesOnConditionCheckFailure(v string) *ParameterizedStatement { + s.ReturnValuesOnConditionCheckFailure = &v + return s +} + // SetStatement sets the Statement field's value. func (s *ParameterizedStatement) SetStatement(v string) *ParameterizedStatement { s.Statement = &v @@ -18233,6 +19879,73 @@ func (s *PointInTimeRecoveryUnavailableException) RequestID() string { return s.RespMetadata.RequestID } +// The operation tried to access a nonexistent resource-based policy. +// +// If you specified an ExpectedRevisionId, it's possible that a policy is present +// for the resource but its revision ID didn't match the expected value. +type PolicyNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PolicyNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PolicyNotFoundException) GoString() string { + return s.String() +} + +func newErrorPolicyNotFoundException(v protocol.ResponseMetadata) error { + return &PolicyNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *PolicyNotFoundException) Code() string { + return "PolicyNotFoundException" +} + +// Message returns the exception's message. +func (s *PolicyNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *PolicyNotFoundException) OrigErr() error { + return nil +} + +func (s *PolicyNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *PolicyNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *PolicyNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + // Represents attributes that are copied (projected) from the table into an // index. These are in addition to the primary key attributes and index key // attributes, which are automatically projected. @@ -18255,6 +19968,8 @@ type Projection struct { // secondary index will include other non-key attributes that you specify. // // * ALL - All of the table attributes are projected into the index. + // + // When using the DynamoDB console, ALL is selected by default. ProjectionType *string `type:"string" enum:"ProjectionType"` } @@ -18312,7 +20027,7 @@ type ProvisionedThroughput struct { // The maximum number of strongly consistent reads consumed per second before // DynamoDB returns a ThrottlingException. For more information, see Specifying - // Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) + // Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html) // in the Amazon DynamoDB Developer Guide. // // If read/write capacity mode is PAY_PER_REQUEST the value is set to 0. @@ -18322,7 +20037,7 @@ type ProvisionedThroughput struct { // The maximum number of writes consumed per second before DynamoDB returns // a ThrottlingException. For more information, see Specifying Read and Write - // Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) + // Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html) // in the Amazon DynamoDB Developer Guide. // // If read/write capacity mode is PAY_PER_REQUEST the value is set to 0. @@ -18603,10 +20318,11 @@ type Put struct { // are: NONE and ALL_OLD. ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` - // Name of the table in which to write the item. + // Name of the table in which to write the item. You can also provide the Amazon + // Resource Name (ARN) of the table in this parameter. // // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -18636,8 +20352,8 @@ func (s *Put) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if invalidParams.Len() > 0 { @@ -18838,10 +20554,19 @@ type PutItemInput struct { // PutItem does not recognize any values other than NONE or ALL_OLD. ReturnValues *string `type:"string" enum:"ReturnValue"` - // The name of the table to contain the item. + // An optional parameter that returns the item attributes for a PutItem operation + // that failed a condition check. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. + ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` + + // The name of the table to contain the item. 
You can also provide the Amazon + // Resource Name (ARN) of the table in this parameter. // // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -18871,8 +20596,8 @@ func (s *PutItemInput) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if invalidParams.Len() > 0 { @@ -18935,6 +20660,12 @@ func (s *PutItemInput) SetReturnValues(v string) *PutItemInput { return s } +// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. +func (s *PutItemInput) SetReturnValuesOnConditionCheckFailure(v string) *PutItemInput { + s.ReturnValuesOnConditionCheckFailure = &v + return s +} + // SetTableName sets the TableName field's value. func (s *PutItemInput) SetTableName(v string) *PutItemInput { s.TableName = &v @@ -18954,7 +20685,7 @@ type PutItemOutput struct { // the total provisioned throughput consumed, along with statistics for the // table and any indexes involved in the operation. ConsumedCapacity is only // returned if the ReturnConsumedCapacity parameter was specified. For more - // information, see Read/Write Capacity Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // information, see Capacity unit consumption for write operations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#write-operation-consumption) // in the Amazon DynamoDB Developer Guide. ConsumedCapacity *ConsumedCapacity `type:"structure"` @@ -19053,6 +20784,151 @@ func (s *PutRequest) SetItem(v map[string]*AttributeValue) *PutRequest { return s } +type PutResourcePolicyInput struct { + _ struct{} `type:"structure"` + + // Set this parameter to true to confirm that you want to remove your permissions + // to change the policy of this resource in the future. + ConfirmRemoveSelfResourceAccess *bool `type:"boolean"` + + // A string value that you can use to conditionally update your policy. You + // can provide the revision ID of your existing policy to make mutating requests + // against that policy. + // + // When you provide an expected revision ID, if the revision ID of the existing + // policy on the resource doesn't match or if there's no policy attached to + // the resource, your request will be rejected with a PolicyNotFoundException. + // + // To conditionally attach a policy when no policy exists for the resource, + // specify NO_POLICY for the revision ID. + ExpectedRevisionId *string `min:"1" type:"string"` + + // An Amazon Web Services resource-based policy document in JSON format. + // + // * The maximum size supported for a resource-based policy document is 20 + // KB. DynamoDB counts whitespaces when calculating the size of a policy + // against this limit. + // + // * Within a resource-based policy, if the action for a DynamoDB service-linked + // role (SLR) to replicate data for a global table is denied, adding or deleting + // a replica will fail with an error.
+ // + // For a full list of all considerations that apply while attaching a resource-based + // policy, see Resource-based policy considerations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html). + // + // Policy is a required field + Policy *string `type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the DynamoDB resource to which the policy + // will be attached. The resources you can specify include tables and streams. + // + // You can control index permissions using the base table's policy. To specify + // the same permission level for your table and its indexes, you can provide + // both the table and index Amazon Resource Name (ARN)s in the Resource field + // of a given Statement in your policy document. Alternatively, to specify different + // permissions for your table, indexes, or both, you can define multiple Statement + // fields in your policy document. + // + // ResourceArn is a required field + ResourceArn *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutResourcePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutResourcePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutResourcePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutResourcePolicyInput"} + if s.ExpectedRevisionId != nil && len(*s.ExpectedRevisionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ExpectedRevisionId", 1)) + } + if s.Policy == nil { + invalidParams.Add(request.NewErrParamRequired("Policy")) + } + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfirmRemoveSelfResourceAccess sets the ConfirmRemoveSelfResourceAccess field's value. +func (s *PutResourcePolicyInput) SetConfirmRemoveSelfResourceAccess(v bool) *PutResourcePolicyInput { + s.ConfirmRemoveSelfResourceAccess = &v + return s +} + +// SetExpectedRevisionId sets the ExpectedRevisionId field's value. +func (s *PutResourcePolicyInput) SetExpectedRevisionId(v string) *PutResourcePolicyInput { + s.ExpectedRevisionId = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *PutResourcePolicyInput) SetPolicy(v string) *PutResourcePolicyInput { + s.Policy = &v + return s +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *PutResourcePolicyInput) SetResourceArn(v string) *PutResourcePolicyInput { + s.ResourceArn = &v + return s +} + +type PutResourcePolicyOutput struct { + _ struct{} `type:"structure"` + + // A unique string that represents the revision ID of the policy. If you're + // comparing revision IDs, make sure to always use string comparison logic. 
+ RevisionId *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutResourcePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutResourcePolicyOutput) GoString() string { + return s.String() +} + +// SetRevisionId sets the RevisionId field's value. +func (s *PutResourcePolicyOutput) SetRevisionId(v string) *PutResourcePolicyOutput { + s.RevisionId = &v + return s +} + // Represents the input of a Query operation. type QueryInput struct { _ struct{} `type:"structure"` @@ -19152,7 +21028,7 @@ type QueryInput struct { // A FilterExpression is applied after the items have already been read; the // process of filtering does not consume any additional read capacity units. // - // For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Query.FilterExpression) + // For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.FilterExpression.html) // in the Amazon DynamoDB Developer Guide. FilterExpression *string `type:"string"` @@ -19312,7 +21188,8 @@ type QueryInput struct { // to specifying ALL_ATTRIBUTES. // // * COUNT - Returns the number of matching items, rather than the matching - // items themselves. + // items themselves. Note that this uses the same quantity of read capacity + // units as getting the items, and is subject to the same item size calculations. // // * SPECIFIC_ATTRIBUTES - Returns only the attributes listed in ProjectionExpression. // This return value is equivalent to specifying ProjectionExpression without @@ -19338,10 +21215,11 @@ type QueryInput struct { // error. Select *string `type:"string" enum:"Select"` - // The name of the table containing the requested items. + // The name of the table containing the requested items. You can also provide + // the Amazon Resource Name (ARN) of the table in this parameter. // // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -19377,8 +21255,8 @@ func (s *QueryInput) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if s.KeyConditions != nil { for i, v := range s.KeyConditions { @@ -19517,7 +21395,7 @@ type QueryOutput struct { // the total provisioned throughput consumed, along with statistics for the // table and any indexes involved in the operation. ConsumedCapacity is only // returned if the ReturnConsumedCapacity parameter was specified. 
For more - // information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // information, see Capacity unit consumption for read operations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#read-operation-consumption) // in the Amazon DynamoDB Developer Guide. ConsumedCapacity *ConsumedCapacity `type:"structure"` @@ -19871,6 +21749,10 @@ type ReplicaDescription struct { // The KMS key of the replica that will be used for KMS encryption. KMSMasterKeyId *string `type:"string"` + // Overrides the maximum on-demand throughput settings for the specified replica + // table. + OnDemandThroughputOverride *OnDemandThroughputOverride `type:"structure"` + // Replica-specific provisioned throughput. If not described, uses the source // table's provisioned throughput settings. ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"` @@ -19946,6 +21828,12 @@ func (s *ReplicaDescription) SetKMSMasterKeyId(v string) *ReplicaDescription { return s } +// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value. +func (s *ReplicaDescription) SetOnDemandThroughputOverride(v *OnDemandThroughputOverride) *ReplicaDescription { + s.OnDemandThroughputOverride = v + return s +} + // SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. func (s *ReplicaDescription) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *ReplicaDescription { s.ProvisionedThroughputOverride = v @@ -19997,6 +21885,10 @@ type ReplicaGlobalSecondaryIndex struct { // IndexName is a required field IndexName *string `min:"3" type:"string" required:"true"` + // Overrides the maximum on-demand throughput settings for the specified global + // secondary index in the specified replica table. + OnDemandThroughputOverride *OnDemandThroughputOverride `type:"structure"` + // Replica table GSI-specific provisioned throughput. If not specified, uses // the source table GSI's read capacity settings. ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"` @@ -20047,6 +21939,12 @@ func (s *ReplicaGlobalSecondaryIndex) SetIndexName(v string) *ReplicaGlobalSecon return s } +// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value. +func (s *ReplicaGlobalSecondaryIndex) SetOnDemandThroughputOverride(v *OnDemandThroughputOverride) *ReplicaGlobalSecondaryIndex { + s.OnDemandThroughputOverride = v + return s +} + // SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. func (s *ReplicaGlobalSecondaryIndex) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *ReplicaGlobalSecondaryIndex { s.ProvisionedThroughputOverride = v @@ -20065,7 +21963,8 @@ type ReplicaGlobalSecondaryIndexAutoScalingDescription struct { // // * CREATING - The index is being created. // - // * UPDATING - The index is being updated. + // * UPDATING - The table/index configuration is being updated. The table/index + // remains available for data operations when UPDATING. // // * DELETING - The index is being deleted. // @@ -20191,6 +22090,10 @@ type ReplicaGlobalSecondaryIndexDescription struct { // The name of the global secondary index. IndexName *string `min:"3" type:"string"` + // Overrides the maximum on-demand throughput for the specified global secondary + // index in the specified replica table.
+ OnDemandThroughputOverride *OnDemandThroughputOverride `type:"structure"` + // If not described, uses the source table GSI's read capacity settings. ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"` } @@ -20219,6 +22122,12 @@ func (s *ReplicaGlobalSecondaryIndexDescription) SetIndexName(v string) *Replica return s } +// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value. +func (s *ReplicaGlobalSecondaryIndexDescription) SetOnDemandThroughputOverride(v *OnDemandThroughputOverride) *ReplicaGlobalSecondaryIndexDescription { + s.OnDemandThroughputOverride = v + return s +} + // SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. func (s *ReplicaGlobalSecondaryIndexDescription) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *ReplicaGlobalSecondaryIndexDescription { s.ProvisionedThroughputOverride = v @@ -21064,7 +22973,7 @@ type RestoreSummary struct { SourceBackupArn *string `min:"37" type:"string"` // The ARN of the source table of the backup that is being restored. - SourceTableArn *string `type:"string"` + SourceTableArn *string `min:"1" type:"string"` } // String returns the string representation. @@ -21130,6 +23039,11 @@ type RestoreTableFromBackupInput struct { // all of the indexes at the time of restore. LocalSecondaryIndexOverride []*LocalSecondaryIndex `type:"list"` + // Sets the maximum number of read and write units for the specified on-demand + // table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, + // or both. + OnDemandThroughputOverride *OnDemandThroughput `type:"structure"` + // Provisioned throughput settings for the restored table. ProvisionedThroughputOverride *ProvisionedThroughput `type:"structure"` @@ -21231,6 +23145,12 @@ func (s *RestoreTableFromBackupInput) SetLocalSecondaryIndexOverride(v []*LocalS return s } +// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value. +func (s *RestoreTableFromBackupInput) SetOnDemandThroughputOverride(v *OnDemandThroughput) *RestoreTableFromBackupInput { + s.OnDemandThroughputOverride = v + return s +} + // SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. func (s *RestoreTableFromBackupInput) SetProvisionedThroughputOverride(v *ProvisionedThroughput) *RestoreTableFromBackupInput { s.ProvisionedThroughputOverride = v @@ -21296,6 +23216,11 @@ type RestoreTableToPointInTimeInput struct { // all of the indexes at the time of restore. LocalSecondaryIndexOverride []*LocalSecondaryIndex `type:"list"` + // Sets the maximum number of read and write units for the specified on-demand + // table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, + // or both. + OnDemandThroughputOverride *OnDemandThroughput `type:"structure"` + // Provisioned throughput settings for the restored table. ProvisionedThroughputOverride *ProvisionedThroughput `type:"structure"` @@ -21307,7 +23232,7 @@ type RestoreTableToPointInTimeInput struct { // The DynamoDB table that will be restored. This value is an Amazon Resource // Name (ARN). - SourceTableArn *string `type:"string"` + SourceTableArn *string `min:"1" type:"string"` // Name of the source table that is being restored. SourceTableName *string `min:"3" type:"string"` @@ -21343,6 +23268,9 @@ func (s RestoreTableToPointInTimeInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *RestoreTableToPointInTimeInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "RestoreTableToPointInTimeInput"} + if s.SourceTableArn != nil && len(*s.SourceTableArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SourceTableArn", 1)) + } if s.SourceTableName != nil && len(*s.SourceTableName) < 3 { invalidParams.Add(request.NewErrParamMinLen("SourceTableName", 3)) } @@ -21402,6 +23330,12 @@ func (s *RestoreTableToPointInTimeInput) SetLocalSecondaryIndexOverride(v []*Loc return s } +// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value. +func (s *RestoreTableToPointInTimeInput) SetOnDemandThroughputOverride(v *OnDemandThroughput) *RestoreTableToPointInTimeInput { + s.OnDemandThroughputOverride = v + return s +} + // SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. func (s *RestoreTableToPointInTimeInput) SetProvisionedThroughputOverride(v *ProvisionedThroughput) *RestoreTableToPointInTimeInput { s.ProvisionedThroughputOverride = v @@ -21779,7 +23713,7 @@ type ScanInput struct { // A FilterExpression is applied after the items have already been read; the // process of filtering does not consume any additional read capacity units. // - // For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Query.FilterExpression) + // For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.FilterExpression) // in the Amazon DynamoDB Developer Guide. FilterExpression *string `type:"string"` @@ -21867,7 +23801,8 @@ type ScanInput struct { // to specifying ALL_ATTRIBUTES. // // * COUNT - Returns the number of matching items, rather than the matching - // items themselves. + // items themselves. Note that this uses the same quantity of read capacity + // units as getting the items, and is subject to the same item size calculations. // // * SPECIFIC_ATTRIBUTES - Returns only the attributes listed in ProjectionExpression. // This return value is equivalent to specifying ProjectionExpression without @@ -21893,11 +23828,14 @@ type ScanInput struct { // error. Select *string `type:"string" enum:"Select"` - // The name of the table containing the requested items; or, if you provide - // IndexName, the name of the table to which that index belongs. + // The name of the table containing the requested items or, if you provide IndexName, + // the name of the table to which that index belongs. + // + // You can also provide the Amazon Resource Name (ARN) of the table in this + // parameter. // // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` // For a parallel Scan request, TotalSegments represents the total number of // segments into which the Scan operation will be divided.
The value of TotalSegments @@ -21946,8 +23884,8 @@ func (s *ScanInput) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if s.TotalSegments != nil && *s.TotalSegments < 1 { invalidParams.Add(request.NewErrParamMinValue("TotalSegments", 1)) @@ -22073,7 +24011,7 @@ type ScanOutput struct { // the total provisioned throughput consumed, along with statistics for the // table and any indexes involved in the operation. ConsumedCapacity is only // returned if the ReturnConsumedCapacity parameter was specified. For more - // information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // information, see Capacity unit consumption for read operations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#read-operation-consumption) // in the Amazon DynamoDB Developer Guide. ConsumedCapacity *ConsumedCapacity `type:"structure"` @@ -22182,13 +24120,18 @@ type SourceTableDetails struct { // KeySchema is a required field KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + // Sets the maximum number of read and write units for the specified on-demand + // table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, + // or both. + OnDemandThroughput *OnDemandThroughput `type:"structure"` + // Read IOPS and Write IOPS on the table when the backup was created. // // ProvisionedThroughput is a required field ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"` // ARN of the table for which backup was created. - TableArn *string `type:"string"` + TableArn *string `min:"1" type:"string"` // Time when the source table was created. // @@ -22245,6 +24188,12 @@ func (s *SourceTableDetails) SetKeySchema(v []*KeySchemaElement) *SourceTableDet return s } +// SetOnDemandThroughput sets the OnDemandThroughput field's value. +func (s *SourceTableDetails) SetOnDemandThroughput(v *OnDemandThroughput) *SourceTableDetails { + s.OnDemandThroughput = v + return s +} + // SetProvisionedThroughput sets the ProvisionedThroughput field's value. func (s *SourceTableDetails) SetProvisionedThroughput(v *ProvisionedThroughput) *SourceTableDetails { s.ProvisionedThroughput = v @@ -22612,6 +24561,11 @@ type TableCreationParameters struct { // KeySchema is a required field KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + // Sets the maximum number of read and write units for the specified on-demand + // table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, + // or both. + OnDemandThroughput *OnDemandThroughput `type:"structure"` + // Represents the provisioned throughput settings for a specified table or index. // The settings can be modified using the UpdateTable operation. // @@ -22731,6 +24685,12 @@ func (s *TableCreationParameters) SetKeySchema(v []*KeySchemaElement) *TableCrea return s } +// SetOnDemandThroughput sets the OnDemandThroughput field's value. +func (s *TableCreationParameters) SetOnDemandThroughput(v *OnDemandThroughput) *TableCreationParameters { + s.OnDemandThroughput = v + return s +} + // SetProvisionedThroughput sets the ProvisionedThroughput field's value.
func (s *TableCreationParameters) SetProvisionedThroughput(v *ProvisionedThroughput) *TableCreationParameters { s.ProvisionedThroughput = v @@ -22773,6 +24733,10 @@ type TableDescription struct { // format. CreationDateTime *time.Time `type:"timestamp"` + // Indicates whether deletion protection is enabled (true) or disabled (false) + // on the table. + DeletionProtectionEnabled *bool `type:"boolean"` + // The global secondary indexes, if any, on the table. Each index is scoped // to a given partition key value. Each element is composed of: // @@ -22908,6 +24872,11 @@ type TableDescription struct { // be returned. LocalSecondaryIndexes []*LocalSecondaryIndexDescription `type:"list"` + // The maximum number of read and write units for the specified on-demand table. + // If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, + // or both. + OnDemandThroughput *OnDemandThroughput `type:"structure"` + // The provisioned throughput settings for the table, consisting of read and // write capacity units, along with data about increases and decreases. ProvisionedThroughput *ProvisionedThroughputDescription `type:"structure"` @@ -22945,7 +24914,8 @@ type TableDescription struct { // // * CREATING - The table is being created. // - // * UPDATING - The table is being updated. + // * UPDATING - The table/index configuration is being updated. The table/index + // remains available for data operations when UPDATING. // // * DELETING - The table is being deleted. // @@ -23006,6 +24976,12 @@ func (s *TableDescription) SetCreationDateTime(v time.Time) *TableDescription { return s } +// SetDeletionProtectionEnabled sets the DeletionProtectionEnabled field's value. +func (s *TableDescription) SetDeletionProtectionEnabled(v bool) *TableDescription { + s.DeletionProtectionEnabled = &v + return s +} + // SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. func (s *TableDescription) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndexDescription) *TableDescription { s.GlobalSecondaryIndexes = v @@ -23048,6 +25024,12 @@ func (s *TableDescription) SetLocalSecondaryIndexes(v []*LocalSecondaryIndexDesc return s } +// SetOnDemandThroughput sets the OnDemandThroughput field's value. +func (s *TableDescription) SetOnDemandThroughput(v *OnDemandThroughput) *TableDescription { + s.OnDemandThroughput = v + return s +} + // SetProvisionedThroughput sets the ProvisionedThroughput field's value. func (s *TableDescription) SetProvisionedThroughput(v *ProvisionedThroughputDescription) *TableDescription { s.ProvisionedThroughput = v @@ -23798,7 +25780,7 @@ type TransactWriteItemsInput struct { // // Although multiple identical calls using the same client request token produce // the same result on the server (no side effects), the responses to the calls - // might not be the same. If the ReturnConsumedCapacity> parameter is set, then + // might not be the same. If the ReturnConsumedCapacity parameter is set, then // the initial TransactWriteItems call returns the amount of write capacity // units consumed in making the changes. Subsequent TransactWriteItems calls // with the same client token return the number of read capacity units consumed @@ -23981,6 +25963,10 @@ func (s *TransactWriteItemsOutput) SetItemCollectionMetrics(v map[string][]*Item // // - There is a user error, such as an invalid data format. // +// - There is an ongoing TransactWriteItems operation that conflicts with +// a concurrent TransactWriteItems request. 
In this case the TransactWriteItems +// operation fails with a TransactionCanceledException. +// // DynamoDB cancels a TransactGetItems request under the following circumstances: // // - There is an ongoing TransactGetItems operation that conflicts with a @@ -24031,7 +26017,7 @@ func (s *TransactWriteItemsOutput) SetItemCollectionMetrics(v map[string][]*Item // as DynamoDB is automatically scaling the table. Throughput exceeds the // current capacity for one or more global secondary indexes. DynamoDB is // automatically scaling your index so please try again shortly. This message -// is returned when when writes get throttled on an On-Demand GSI as DynamoDB +// is returned when writes get throttled on an On-Demand GSI as DynamoDB // is automatically scaling the GSI. // // - Validation Error: Code: ValidationError Messages: One or more parameter @@ -24176,6 +26162,47 @@ func (s *TransactionConflictException) RequestID() string { } // The transaction with the given request token is already in progress. +// +// # Recommended Settings +// +// This is a general recommendation for handling the TransactionInProgressException. +// These settings help ensure that the client retries will trigger completion +// of the ongoing TransactWriteItems request. +// +// - Set clientExecutionTimeout to a value that allows at least one retry +// to be processed after 5 seconds have elapsed since the first attempt for +// the TransactWriteItems operation. +// +// - Set socketTimeout to a value a little lower than the requestTimeout +// setting. +// +// - requestTimeout should be set based on the time taken for the individual +// retries of a single HTTP request for your use case, but setting it to +// 1 second or higher should work well to reduce chances of retries and TransactionInProgressException +// errors. +// +// - Use exponential backoff when retrying and tune backoff if needed. +// +// Assuming default retry policy (https://github.com/aws/aws-sdk-java/blob/fd409dee8ae23fb8953e0bb4dbde65536a7e0514/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L97), +// example timeout settings based on the guidelines above are as follows: +// +// Example timeline: +// +// - 0-1000 first attempt +// +// - 1000-1500 first sleep/delay (default retry policy uses 500 ms as base +// delay for 4xx errors) +// +// - 1500-2500 second attempt +// +// - 2500-3500 second sleep/delay (500 * 2, exponential backoff) +// +// - 3500-4500 third attempt +// +// - 4500-6500 third sleep/delay (500 * 2^2) +// +// - 6500-7500 fourth attempt (this can trigger inline recovery since 5 seconds +// have elapsed since the first attempt reached TC) type TransactionInProgressException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -24347,13 +26374,14 @@ type Update struct { // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the // Update condition fails. For ReturnValuesOnConditionCheckFailure, the valid - // values are: NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW. + // values are: NONE and ALL_OLD. ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` - // Name of the table for the UpdateItem request. + // Name of the table for the UpdateItem request. You can also provide the Amazon + // Resource Name (ARN) of the table in this parameter. 
// // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` // An expression that defines one or more attributes to be updated, the action // to be performed on them, and new value(s) for them. @@ -24389,8 +26417,8 @@ func (s *Update) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if s.UpdateExpression == nil { invalidParams.Add(request.NewErrParamRequired("UpdateExpression")) @@ -24452,10 +26480,11 @@ type UpdateContinuousBackupsInput struct { // PointInTimeRecoverySpecification is a required field PointInTimeRecoverySpecification *PointInTimeRecoverySpecification `type:"structure" required:"true"` - // The name of the table. + // The name of the table. You can also provide the Amazon Resource Name (ARN) + // of the table in this parameter. // // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -24485,8 +26514,8 @@ func (s *UpdateContinuousBackupsInput) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if s.PointInTimeRecoverySpecification != nil { if err := s.PointInTimeRecoverySpecification.Validate(); err != nil { @@ -24555,10 +26584,11 @@ type UpdateContributorInsightsInput struct { // The global secondary index name, if applicable. IndexName *string `min:"3" type:"string"` - // The name of the table. + // The name of the table. You can also provide the Amazon Resource Name (ARN) + // of the table in this parameter. // // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -24591,8 +26621,8 @@ func (s *UpdateContributorInsightsInput) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if invalidParams.Len() > 0 { @@ -24678,15 +26708,18 @@ type UpdateGlobalSecondaryIndexAction struct { // IndexName is a required field IndexName *string `min:"3" type:"string" required:"true"` + // Updates the maximum number of read and write units for the specified global + // secondary index. If you use this parameter, you must specify MaxReadRequestUnits, + // MaxWriteRequestUnits, or both. + OnDemandThroughput *OnDemandThroughput `type:"structure"` + // Represents the provisioned throughput settings for the specified global secondary // index. // // For current minimum and maximum provisioned throughput values, see Service, // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) // in the Amazon DynamoDB Developer Guide. 
- // - // ProvisionedThroughput is a required field - ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"` + ProvisionedThroughput *ProvisionedThroughput `type:"structure"` } // String returns the string representation. @@ -24716,9 +26749,6 @@ func (s *UpdateGlobalSecondaryIndexAction) Validate() error { if s.IndexName != nil && len(*s.IndexName) < 3 { invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) } - if s.ProvisionedThroughput == nil { - invalidParams.Add(request.NewErrParamRequired("ProvisionedThroughput")) - } if s.ProvisionedThroughput != nil { if err := s.ProvisionedThroughput.Validate(); err != nil { invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) @@ -24737,6 +26767,12 @@ func (s *UpdateGlobalSecondaryIndexAction) SetIndexName(v string) *UpdateGlobalS return s } +// SetOnDemandThroughput sets the OnDemandThroughput field's value. +func (s *UpdateGlobalSecondaryIndexAction) SetOnDemandThroughput(v *OnDemandThroughput) *UpdateGlobalSecondaryIndexAction { + s.OnDemandThroughput = v + return s +} + // SetProvisionedThroughput sets the ProvisionedThroughput field's value. func (s *UpdateGlobalSecondaryIndexAction) SetProvisionedThroughput(v *ProvisionedThroughput) *UpdateGlobalSecondaryIndexAction { s.ProvisionedThroughput = v @@ -24854,10 +26890,11 @@ type UpdateGlobalTableSettingsInput struct { // the global table defaults to PROVISIONED capacity billing mode. // // * PROVISIONED - We recommend using PROVISIONED for predictable workloads. - // PROVISIONED sets the billing mode to Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual). + // PROVISIONED sets the billing mode to Provisioned capacity mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html). // // * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable - // workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand). + // workloads. PAY_PER_REQUEST sets the billing mode to On-demand capacity + // mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/on-demand-capacity-mode.html). GlobalTableBillingMode *string `type:"string" enum:"BillingMode"` // Represents the settings of a global secondary index for a global table that @@ -25152,7 +27189,8 @@ type UpdateItemInput struct { ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"` // Use ReturnValues if you want to get the item attributes as they appear before - // or after they are updated. For UpdateItem, the valid values are: + // or after they are successfully updated. For UpdateItem, the valid values + // are: // // * NONE - If ReturnValues is not specified, or if its value is NONE, then // nothing is returned. (This setting is the default for ReturnValues.) @@ -25176,10 +27214,19 @@ type UpdateItemInput struct { // The values returned are strongly consistent. ReturnValues *string `type:"string" enum:"ReturnValue"` - // The name of the table containing the item to update. + // An optional parameter that returns the item attributes for an UpdateItem + // operation that failed a condition check. 
+ // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. + ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` + + // The name of the table containing the item to update. You can also provide + // the Amazon Resource Name (ARN) of the table in this parameter. // // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` // An expression that defines one or more attributes to be updated, the action // to be performed on them, and new values for them. @@ -25268,8 +27315,8 @@ func (s *UpdateItemInput) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if invalidParams.Len() > 0 { @@ -25338,6 +27385,12 @@ func (s *UpdateItemInput) SetReturnValues(v string) *UpdateItemInput { return s } +// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. +func (s *UpdateItemInput) SetReturnValuesOnConditionCheckFailure(v string) *UpdateItemInput { + s.ReturnValuesOnConditionCheckFailure = &v + return s +} + // SetTableName sets the TableName field's value. func (s *UpdateItemInput) SetTableName(v string) *UpdateItemInput { s.TableName = &v @@ -25357,15 +27410,16 @@ type UpdateItemOutput struct { // A map of attribute values as they appear before or after the UpdateItem operation, // as determined by the ReturnValues parameter. // - // The Attributes map is only present if ReturnValues was specified as something - // other than NONE in the request. Each element represents one attribute. + // The Attributes map is only present if the update was successful and ReturnValues + // was specified as something other than NONE in the request. Each element represents + // one attribute. Attributes map[string]*AttributeValue `type:"map"` // The capacity units consumed by the UpdateItem operation. The data returned // includes the total provisioned throughput consumed, along with statistics // for the table and any indexes involved in the operation. ConsumedCapacity // is only returned if the ReturnConsumedCapacity parameter was specified. For - // more information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // more information, see Capacity unit consumption for write operations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#write-operation-consumption) // in the Amazon DynamoDB Developer Guide. ConsumedCapacity *ConsumedCapacity `type:"structure"` @@ -25426,6 +27480,172 @@ func (s *UpdateItemOutput) SetItemCollectionMetrics(v *ItemCollectionMetrics) *U return s } +// Enables updating the configuration for Kinesis Streaming. +type UpdateKinesisStreamingConfiguration struct { + _ struct{} `type:"structure"` + + // Enables updating the precision of Kinesis data stream timestamp. + ApproximateCreationDateTimePrecision *string `type:"string" enum:"ApproximateCreationDateTimePrecision"` +} + +// String returns the string representation.
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateKinesisStreamingConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateKinesisStreamingConfiguration) GoString() string { + return s.String() +} + +// SetApproximateCreationDateTimePrecision sets the ApproximateCreationDateTimePrecision field's value. +func (s *UpdateKinesisStreamingConfiguration) SetApproximateCreationDateTimePrecision(v string) *UpdateKinesisStreamingConfiguration { + s.ApproximateCreationDateTimePrecision = &v + return s +} + +type UpdateKinesisStreamingDestinationInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the Kinesis stream input. + // + // StreamArn is a required field + StreamArn *string `min:"37" type:"string" required:"true"` + + // The table name for the Kinesis streaming destination input. You can also + // provide the ARN of the table in this parameter. + // + // TableName is a required field + TableName *string `min:"1" type:"string" required:"true"` + + // The command to update the Kinesis stream configuration. + UpdateKinesisStreamingConfiguration *UpdateKinesisStreamingConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateKinesisStreamingDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateKinesisStreamingDestinationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateKinesisStreamingDestinationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateKinesisStreamingDestinationInput"} + if s.StreamArn == nil { + invalidParams.Add(request.NewErrParamRequired("StreamArn")) + } + if s.StreamArn != nil && len(*s.StreamArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("StreamArn", 37)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStreamArn sets the StreamArn field's value. +func (s *UpdateKinesisStreamingDestinationInput) SetStreamArn(v string) *UpdateKinesisStreamingDestinationInput { + s.StreamArn = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *UpdateKinesisStreamingDestinationInput) SetTableName(v string) *UpdateKinesisStreamingDestinationInput { + s.TableName = &v + return s +} + +// SetUpdateKinesisStreamingConfiguration sets the UpdateKinesisStreamingConfiguration field's value. 
+func (s *UpdateKinesisStreamingDestinationInput) SetUpdateKinesisStreamingConfiguration(v *UpdateKinesisStreamingConfiguration) *UpdateKinesisStreamingDestinationInput { + s.UpdateKinesisStreamingConfiguration = v + return s +} + +type UpdateKinesisStreamingDestinationOutput struct { + _ struct{} `type:"structure"` + + // The status of the attempt to update the Kinesis streaming destination output. + DestinationStatus *string `type:"string" enum:"DestinationStatus"` + + // The ARN for the Kinesis stream input. + StreamArn *string `min:"37" type:"string"` + + // The table name for the Kinesis streaming destination output. + TableName *string `min:"3" type:"string"` + + // The command to update the Kinesis streaming destination configuration. + UpdateKinesisStreamingConfiguration *UpdateKinesisStreamingConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateKinesisStreamingDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateKinesisStreamingDestinationOutput) GoString() string { + return s.String() +} + +// SetDestinationStatus sets the DestinationStatus field's value. +func (s *UpdateKinesisStreamingDestinationOutput) SetDestinationStatus(v string) *UpdateKinesisStreamingDestinationOutput { + s.DestinationStatus = &v + return s +} + +// SetStreamArn sets the StreamArn field's value. +func (s *UpdateKinesisStreamingDestinationOutput) SetStreamArn(v string) *UpdateKinesisStreamingDestinationOutput { + s.StreamArn = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *UpdateKinesisStreamingDestinationOutput) SetTableName(v string) *UpdateKinesisStreamingDestinationOutput { + s.TableName = &v + return s +} + +// SetUpdateKinesisStreamingConfiguration sets the UpdateKinesisStreamingConfiguration field's value. +func (s *UpdateKinesisStreamingDestinationOutput) SetUpdateKinesisStreamingConfiguration(v *UpdateKinesisStreamingConfiguration) *UpdateKinesisStreamingDestinationOutput { + s.UpdateKinesisStreamingConfiguration = v + return s +} + // Represents a replica to be modified. type UpdateReplicationGroupMemberAction struct { _ struct{} `type:"structure"` @@ -25439,6 +27659,9 @@ type UpdateReplicationGroupMemberAction struct { // from the default DynamoDB KMS key alias/aws/dynamodb. KMSMasterKeyId *string `type:"string"` + // Overrides the maximum on-demand throughput for the replica table. + OnDemandThroughputOverride *OnDemandThroughputOverride `type:"structure"` + // Replica-specific provisioned throughput. If not specified, uses the source // table's provisioned throughput settings. ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"` @@ -25514,6 +27737,12 @@ func (s *UpdateReplicationGroupMemberAction) SetKMSMasterKeyId(v string) *Update return s } +// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value. 
+func (s *UpdateReplicationGroupMemberAction) SetOnDemandThroughputOverride(v *OnDemandThroughputOverride) *UpdateReplicationGroupMemberAction { + s.OnDemandThroughputOverride = v + return s +} + // SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. func (s *UpdateReplicationGroupMemberAction) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *UpdateReplicationGroupMemberAction { s.ProvisionedThroughputOverride = v @@ -25548,12 +27777,17 @@ type UpdateTableInput struct { // table and global secondary indexes over the past 30 minutes. // // * PROVISIONED - We recommend using PROVISIONED for predictable workloads. - // PROVISIONED sets the billing mode to Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual). + // PROVISIONED sets the billing mode to Provisioned capacity mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html). // // * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable - // workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand). + // workloads. PAY_PER_REQUEST sets the billing mode to On-demand capacity + // mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/on-demand-capacity-mode.html). BillingMode *string `type:"string" enum:"BillingMode"` + // Indicates whether deletion protection is to be enabled (true) or disabled + // (false) on the table. + DeletionProtectionEnabled *bool `type:"boolean"` + // An array of one or more global secondary indexes for the table. For each // index in the array, you can request one action: // @@ -25571,13 +27805,18 @@ type UpdateTableInput struct { // in the Amazon DynamoDB Developer Guide. GlobalSecondaryIndexUpdates []*GlobalSecondaryIndexUpdate `type:"list"` + // Updates the maximum number of read and write units for the specified table + // in on-demand capacity mode. If you use this parameter, you must specify MaxReadRequestUnits, + // MaxWriteRequestUnits, or both. + OnDemandThroughput *OnDemandThroughput `type:"structure"` + // The new provisioned throughput settings for the specified table or index. ProvisionedThroughput *ProvisionedThroughput `type:"structure"` // A list of replica update actions (create, delete, or update) for the table. // - // This property only applies to Version 2019.11.21 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) - // of global tables. + // For global tables, this property only applies to global tables using Version + // 2019.11.21 (Current version). ReplicaUpdates []*ReplicationGroupUpdate `min:"1" type:"list"` // The new server-side encryption settings for the specified table. @@ -25585,7 +27824,7 @@ type UpdateTableInput struct { // Represents the DynamoDB Streams configuration for the table. // - // You receive a ResourceInUseException if you try to enable a stream on a table + // You receive a ValidationException if you try to enable a stream on a table // that already has a stream, or if you try to disable a stream on a table that // doesn't have a stream. StreamSpecification *StreamSpecification `type:"structure"` @@ -25594,10 +27833,11 @@ type UpdateTableInput struct { // STANDARD_INFREQUENT_ACCESS. 
TableClass *string `type:"string" enum:"TableClass"` - // The name of the table to be updated. + // The name of the table to be updated. You can also provide the Amazon Resource + // Name (ARN) of the table in this parameter. // // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -25627,8 +27867,8 @@ func (s *UpdateTableInput) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if s.AttributeDefinitions != nil { for i, v := range s.AttributeDefinitions { @@ -25689,12 +27929,24 @@ func (s *UpdateTableInput) SetBillingMode(v string) *UpdateTableInput { return s } +// SetDeletionProtectionEnabled sets the DeletionProtectionEnabled field's value. +func (s *UpdateTableInput) SetDeletionProtectionEnabled(v bool) *UpdateTableInput { + s.DeletionProtectionEnabled = &v + return s +} + // SetGlobalSecondaryIndexUpdates sets the GlobalSecondaryIndexUpdates field's value. func (s *UpdateTableInput) SetGlobalSecondaryIndexUpdates(v []*GlobalSecondaryIndexUpdate) *UpdateTableInput { s.GlobalSecondaryIndexUpdates = v return s } +// SetOnDemandThroughput sets the OnDemandThroughput field's value. +func (s *UpdateTableInput) SetOnDemandThroughput(v *OnDemandThroughput) *UpdateTableInput { + s.OnDemandThroughput = v + return s +} + // SetProvisionedThroughput sets the ProvisionedThroughput field's value. func (s *UpdateTableInput) SetProvisionedThroughput(v *ProvisionedThroughput) *UpdateTableInput { s.ProvisionedThroughput = v @@ -25778,10 +28030,11 @@ type UpdateTableReplicaAutoScalingInput struct { // modified. ReplicaUpdates []*ReplicaAutoScalingUpdate `min:"1" type:"list"` - // The name of the global table to be updated. + // The name of the global table to be updated. You can also provide the Amazon + // Resource Name (ARN) of the table in this parameter. // // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -25814,8 +28067,8 @@ func (s *UpdateTableReplicaAutoScalingInput) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if s.GlobalSecondaryIndexUpdates != nil { for i, v := range s.GlobalSecondaryIndexUpdates { @@ -25908,10 +28161,11 @@ func (s *UpdateTableReplicaAutoScalingOutput) SetTableAutoScalingDescription(v * type UpdateTimeToLiveInput struct { _ struct{} `type:"structure"` - // The name of the table to be configured. + // The name of the table to be configured. You can also provide the Amazon Resource + // Name (ARN) of the table in this parameter. // // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + TableName *string `min:"1" type:"string" required:"true"` // Represents the settings used to enable or disable Time to Live for the specified // table. 
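The hunks above widen TableName validation from a 3-character minimum to 1 so that a table ARN can be passed, and add DeletionProtectionEnabled and OnDemandThroughput to UpdateTableInput. A minimal sketch of exercising those new fields against the vendored SDK; the ARN, region, and throughput numbers are placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// TableName accepts a table ARN now that the minimum length is 1.
	out, err := svc.UpdateTable(&dynamodb.UpdateTableInput{
		TableName:                 aws.String("arn:aws:dynamodb:us-east-1:111122223333:table/Example"),
		DeletionProtectionEnabled: aws.Bool(true),
		OnDemandThroughput: &dynamodb.OnDemandThroughput{
			MaxReadRequestUnits:  aws.Int64(1000),
			MaxWriteRequestUnits: aws.Int64(500),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("table status: %s", aws.StringValue(out.TableDescription.TableStatus))
}

UpdateTimeToLiveInput and UpdateTableReplicaAutoScalingInput accept a table ARN in TableName the same way after this bump.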
@@ -25944,8 +28198,8 @@ func (s *UpdateTimeToLiveInput) Validate() error { if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if s.TimeToLiveSpecification == nil { invalidParams.Add(request.NewErrParamRequired("TimeToLiveSpecification")) @@ -26049,6 +28303,22 @@ func (s *WriteRequest) SetPutRequest(v *PutRequest) *WriteRequest { return s } +const ( + // ApproximateCreationDateTimePrecisionMillisecond is a ApproximateCreationDateTimePrecision enum value + ApproximateCreationDateTimePrecisionMillisecond = "MILLISECOND" + + // ApproximateCreationDateTimePrecisionMicrosecond is a ApproximateCreationDateTimePrecision enum value + ApproximateCreationDateTimePrecisionMicrosecond = "MICROSECOND" +) + +// ApproximateCreationDateTimePrecision_Values returns all elements of the ApproximateCreationDateTimePrecision enum +func ApproximateCreationDateTimePrecision_Values() []string { + return []string{ + ApproximateCreationDateTimePrecisionMillisecond, + ApproximateCreationDateTimePrecisionMicrosecond, + } +} + const ( // AttributeActionAdd is a AttributeAction enum value AttributeActionAdd = "ADD" @@ -26352,6 +28622,9 @@ const ( // DestinationStatusEnableFailed is a DestinationStatus enum value DestinationStatusEnableFailed = "ENABLE_FAILED" + + // DestinationStatusUpdating is a DestinationStatus enum value + DestinationStatusUpdating = "UPDATING" ) // DestinationStatus_Values returns all elements of the DestinationStatus enum @@ -26362,6 +28635,7 @@ func DestinationStatus_Values() []string { DestinationStatusDisabling, DestinationStatusDisabled, DestinationStatusEnableFailed, + DestinationStatusUpdating, } } @@ -26401,6 +28675,38 @@ func ExportStatus_Values() []string { } } +const ( + // ExportTypeFullExport is a ExportType enum value + ExportTypeFullExport = "FULL_EXPORT" + + // ExportTypeIncrementalExport is a ExportType enum value + ExportTypeIncrementalExport = "INCREMENTAL_EXPORT" +) + +// ExportType_Values returns all elements of the ExportType enum +func ExportType_Values() []string { + return []string{ + ExportTypeFullExport, + ExportTypeIncrementalExport, + } +} + +const ( + // ExportViewTypeNewImage is a ExportViewType enum value + ExportViewTypeNewImage = "NEW_IMAGE" + + // ExportViewTypeNewAndOldImages is a ExportViewType enum value + ExportViewTypeNewAndOldImages = "NEW_AND_OLD_IMAGES" +) + +// ExportViewType_Values returns all elements of the ExportViewType enum +func ExportViewType_Values() []string { + return []string{ + ExportViewTypeNewImage, + ExportViewTypeNewAndOldImages, + } +} + const ( // GlobalTableStatusCreating is a GlobalTableStatus enum value GlobalTableStatusCreating = "CREATING" diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go index 0c97f94e..e6341124 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go @@ -95,6 +95,10 @@ type DynamoDBAPI interface { DeleteItemWithContext(aws.Context, *dynamodb.DeleteItemInput, ...request.Option) (*dynamodb.DeleteItemOutput, error) DeleteItemRequest(*dynamodb.DeleteItemInput) (*request.Request, *dynamodb.DeleteItemOutput) + 
DeleteResourcePolicy(*dynamodb.DeleteResourcePolicyInput) (*dynamodb.DeleteResourcePolicyOutput, error) + DeleteResourcePolicyWithContext(aws.Context, *dynamodb.DeleteResourcePolicyInput, ...request.Option) (*dynamodb.DeleteResourcePolicyOutput, error) + DeleteResourcePolicyRequest(*dynamodb.DeleteResourcePolicyInput) (*request.Request, *dynamodb.DeleteResourcePolicyOutput) + DeleteTable(*dynamodb.DeleteTableInput) (*dynamodb.DeleteTableOutput, error) DeleteTableWithContext(aws.Context, *dynamodb.DeleteTableInput, ...request.Option) (*dynamodb.DeleteTableOutput, error) DeleteTableRequest(*dynamodb.DeleteTableInput) (*request.Request, *dynamodb.DeleteTableOutput) @@ -175,6 +179,10 @@ type DynamoDBAPI interface { GetItemWithContext(aws.Context, *dynamodb.GetItemInput, ...request.Option) (*dynamodb.GetItemOutput, error) GetItemRequest(*dynamodb.GetItemInput) (*request.Request, *dynamodb.GetItemOutput) + GetResourcePolicy(*dynamodb.GetResourcePolicyInput) (*dynamodb.GetResourcePolicyOutput, error) + GetResourcePolicyWithContext(aws.Context, *dynamodb.GetResourcePolicyInput, ...request.Option) (*dynamodb.GetResourcePolicyOutput, error) + GetResourcePolicyRequest(*dynamodb.GetResourcePolicyInput) (*request.Request, *dynamodb.GetResourcePolicyOutput) + ImportTable(*dynamodb.ImportTableInput) (*dynamodb.ImportTableOutput, error) ImportTableWithContext(aws.Context, *dynamodb.ImportTableInput, ...request.Option) (*dynamodb.ImportTableOutput, error) ImportTableRequest(*dynamodb.ImportTableInput) (*request.Request, *dynamodb.ImportTableOutput) @@ -223,6 +231,10 @@ type DynamoDBAPI interface { PutItemWithContext(aws.Context, *dynamodb.PutItemInput, ...request.Option) (*dynamodb.PutItemOutput, error) PutItemRequest(*dynamodb.PutItemInput) (*request.Request, *dynamodb.PutItemOutput) + PutResourcePolicy(*dynamodb.PutResourcePolicyInput) (*dynamodb.PutResourcePolicyOutput, error) + PutResourcePolicyWithContext(aws.Context, *dynamodb.PutResourcePolicyInput, ...request.Option) (*dynamodb.PutResourcePolicyOutput, error) + PutResourcePolicyRequest(*dynamodb.PutResourcePolicyInput) (*request.Request, *dynamodb.PutResourcePolicyOutput) + Query(*dynamodb.QueryInput) (*dynamodb.QueryOutput, error) QueryWithContext(aws.Context, *dynamodb.QueryInput, ...request.Option) (*dynamodb.QueryOutput, error) QueryRequest(*dynamodb.QueryInput) (*request.Request, *dynamodb.QueryOutput) @@ -281,6 +293,10 @@ type DynamoDBAPI interface { UpdateItemWithContext(aws.Context, *dynamodb.UpdateItemInput, ...request.Option) (*dynamodb.UpdateItemOutput, error) UpdateItemRequest(*dynamodb.UpdateItemInput) (*request.Request, *dynamodb.UpdateItemOutput) + UpdateKinesisStreamingDestination(*dynamodb.UpdateKinesisStreamingDestinationInput) (*dynamodb.UpdateKinesisStreamingDestinationOutput, error) + UpdateKinesisStreamingDestinationWithContext(aws.Context, *dynamodb.UpdateKinesisStreamingDestinationInput, ...request.Option) (*dynamodb.UpdateKinesisStreamingDestinationOutput, error) + UpdateKinesisStreamingDestinationRequest(*dynamodb.UpdateKinesisStreamingDestinationInput) (*request.Request, *dynamodb.UpdateKinesisStreamingDestinationOutput) + UpdateTable(*dynamodb.UpdateTableInput) (*dynamodb.UpdateTableOutput, error) UpdateTableWithContext(aws.Context, *dynamodb.UpdateTableInput, ...request.Option) (*dynamodb.UpdateTableOutput, error) UpdateTableRequest(*dynamodb.UpdateTableInput) (*request.Request, *dynamodb.UpdateTableOutput) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go 
b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go index 9bd2107c..2ef2cab5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go @@ -122,16 +122,25 @@ const ( // // There is no limit to the number of daily on-demand backups that can be taken. // - // Up to 500 simultaneous table operations are allowed per account. These operations - // include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, - // and RestoreTableToPointInTime. + // For most purposes, up to 500 simultaneous table operations are allowed per + // account. These operations include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, + // RestoreTableFromBackup, and RestoreTableToPointInTime. // - // The only exception is when you are creating a table with one or more secondary - // indexes. You can have up to 250 such requests running at a time; however, - // if the table or index specifications are complex, DynamoDB might temporarily - // reduce the number of concurrent operations. + // When you are creating a table with one or more secondary indexes, you can + // have up to 250 such requests running at a time. However, if the table or + // index specifications are complex, then DynamoDB might temporarily reduce + // the number of concurrent operations. + // + // When importing into DynamoDB, up to 50 simultaneous import table operations + // are allowed per account. // // There is a soft account quota of 2,500 tables. + // + // GetRecords was called with a value of more than 1000 for the limit request + // parameter. + // + // More than 2 processes are reading from the same streams shard at the same + // time. Exceeding this limit may result in request throttling. ErrCodeLimitExceededException = "LimitExceededException" // ErrCodePointInTimeRecoveryUnavailableException for service response error code @@ -140,6 +149,15 @@ const ( // Point in time recovery has not yet been enabled for this source table. ErrCodePointInTimeRecoveryUnavailableException = "PointInTimeRecoveryUnavailableException" + // ErrCodePolicyNotFoundException for service response error code + // "PolicyNotFoundException". + // + // The operation tried to access a nonexistent resource-based policy. + // + // If you specified an ExpectedRevisionId, it's possible that a policy is present + // for the resource but its revision ID didn't match the expected value. + ErrCodePolicyNotFoundException = "PolicyNotFoundException" + // ErrCodeProvisionedThroughputExceededException for service response error code // "ProvisionedThroughputExceededException". // @@ -230,6 +248,10 @@ const ( // // * There is a user error, such as an invalid data format. // + // * There is an ongoing TransactWriteItems operation that conflicts with + // a concurrent TransactWriteItems request. In this case the TransactWriteItems + // operation fails with a TransactionCanceledException. + // // DynamoDB cancels a TransactGetItems request under the following circumstances: // // * There is an ongoing TransactGetItems operation that conflicts with a @@ -280,7 +302,7 @@ const ( // as DynamoDB is automatically scaling the table. Throughput exceeds the // current capacity for one or more global secondary indexes. DynamoDB is // automatically scaling your index so please try again shortly.
This message - // is returned when when writes get throttled on an On-Demand GSI as DynamoDB + // is returned when writes get throttled on an On-Demand GSI as DynamoDB // is automatically scaling the GSI. // // * Validation Error: Code: ValidationError Messages: One or more parameter @@ -306,6 +328,47 @@ const ( // "TransactionInProgressException". // // The transaction with the given request token is already in progress. + // + // Recommended Settings + // + // This is a general recommendation for handling the TransactionInProgressException. + // These settings help ensure that the client retries will trigger completion + // of the ongoing TransactWriteItems request. + // + // * Set clientExecutionTimeout to a value that allows at least one retry + // to be processed after 5 seconds have elapsed since the first attempt for + // the TransactWriteItems operation. + // + // * Set socketTimeout to a value a little lower than the requestTimeout + // setting. + // + // * requestTimeout should be set based on the time taken for the individual + // retries of a single HTTP request for your use case, but setting it to + // 1 second or higher should work well to reduce chances of retries and TransactionInProgressException + // errors. + // + // * Use exponential backoff when retrying and tune backoff if needed. + // + // Assuming default retry policy (https://github.com/aws/aws-sdk-java/blob/fd409dee8ae23fb8953e0bb4dbde65536a7e0514/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L97), + // example timeout settings based on the guidelines above are as follows: + // + // Example timeline: + // + // * 0-1000 first attempt + // + // * 1000-1500 first sleep/delay (default retry policy uses 500 ms as base + // delay for 4xx errors) + // + // * 1500-2500 second attempt + // + // * 2500-3500 second sleep/delay (500 * 2, exponential backoff) + // + // * 3500-4500 third attempt + // + // * 4500-6500 third sleep/delay (500 * 2^2) + // + // * 6500-7500 fourth attempt (this can trigger inline recovery since 5 seconds + // have elapsed since the first attempt reached TC) ErrCodeTransactionInProgressException = "TransactionInProgressException" ) @@ -329,6 +392,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "ItemCollectionSizeLimitExceededException": newErrorItemCollectionSizeLimitExceededException, "LimitExceededException": newErrorLimitExceededException, "PointInTimeRecoveryUnavailableException": newErrorPointInTimeRecoveryUnavailableException, + "PolicyNotFoundException": newErrorPolicyNotFoundException, "ProvisionedThroughputExceededException": newErrorProvisionedThroughputExceededException, "ReplicaAlreadyExistsException": newErrorReplicaAlreadyExistsException, "ReplicaNotFoundException": newErrorReplicaNotFoundException, diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index a9c653a0..f1fa8dcf 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -67,19 +67,47 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req // AbortMultipartUpload API operation for Amazon Simple Storage Service. // -// This action aborts a multipart upload. After a multipart upload is aborted, +// This operation aborts a multipart upload. After a multipart upload is aborted, // no additional parts can be uploaded using that upload ID. The storage consumed // by any previously uploaded parts will be freed. 
However, if any part uploads // are currently in progress, those part uploads might or might not succeed. // As a result, it might be necessary to abort a given multipart upload multiple // times in order to completely free all storage consumed by all parts. // -// To verify that all parts have been removed, so you don't get charged for +// To verify that all parts have been removed and prevent getting charged for // the part storage, you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// action and ensure that the parts list is empty. +// API operation and ensure that the parts list is empty. // -// For information about permissions required to use the multipart upload, see -// Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload, see Multipart Upload and Permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. // // The following operations are related to AbortMultipartUpload: // @@ -173,54 +201,93 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) // // You first initiate the multipart upload and then upload all parts using the // UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// operation or the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) // operation. After successfully uploading all relevant parts of an upload, -// you call this action to complete the upload. Upon receiving this request, -// Amazon S3 concatenates all the parts in ascending order by part number to -// create a new object. 
In the Complete Multipart Upload request, you must provide -// the parts list. You must ensure that the parts list is complete. This action -// concatenates the parts that you provide in the list. For each part in the -// list, you must provide the part number and the ETag value, returned after -// that part was uploaded. +// you call this CompleteMultipartUpload operation to complete the upload. Upon +// receiving this request, Amazon S3 concatenates all the parts in ascending +// order by part number to create a new object. In the CompleteMultipartUpload +// request, you must provide the parts list and ensure that the parts list is +// complete. The CompleteMultipartUpload API operation concatenates the parts +// that you provide in the list. For each part in the list, you must provide +// the PartNumber value and the ETag value that are returned after that part +// was uploaded. // -// Processing of a Complete Multipart Upload request could take several minutes -// to complete. After Amazon S3 begins processing the request, it sends an HTTP +// The processing of a CompleteMultipartUpload request could take several minutes +// to finalize. After Amazon S3 begins processing the request, it sends an HTTP // response header that specifies a 200 OK response. While processing is in // progress, Amazon S3 periodically sends white space characters to keep the -// connection from timing out. Because a request could fail after the initial -// 200 OK response has been sent, it is important that you check the response -// body to determine whether the request succeeded. +// connection from timing out. A request could fail after the initial 200 OK +// response has been sent. This means that a 200 OK response can contain either +// a success or an error. The error response might be embedded in the 200 OK +// response. If you call this API operation directly, make sure to design your +// application to parse the contents of the response and handle it appropriately. +// If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs +// detect the embedded error and apply error handling per your configuration +// settings (including automatically retrying the request as appropriate). If +// the condition persists, the SDKs throw an exception (or, for the SDKs that +// don't use exceptions, they return an error). // // Note that if CompleteMultipartUpload fails, applications should be prepared -// to retry the failed requests. For more information, see Amazon S3 Error Best -// Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). +// to retry any failed requests (including 500 error responses). For more information, +// see Amazon S3 Error Best Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). // -// You cannot use Content-Type: application/x-www-form-urlencoded with Complete -// Multipart Upload requests. Also, if you do not provide a Content-Type header, -// CompleteMultipartUpload returns a 200 OK response. +// You can't use Content-Type: application/x-www-form-urlencoded for the CompleteMultipartUpload +// requests. Also, if you don't provide a Content-Type header, CompleteMultipartUpload +// can still return a 200 OK response. // // For more information about multipart uploads, see Uploading Objects Using -// Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide. 
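The rewritten comment above stresses that a CompleteMultipartUpload failure can be embedded in a 200 OK response and that callers should be prepared to retry, including 500-level responses. A minimal sketch of the final call with this vendored v1 SDK, where bucket, key, upload ID, and ETags are placeholders and the SDK surfaces any embedded error as a regular err:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// completeUpload finishes a multipart upload; bucket, key, and uploadID come
// from an earlier CreateMultipartUpload/UploadPart sequence.
func completeUpload(svc *s3.S3, bucket, key, uploadID string, parts []*s3.CompletedPart) error {
	out, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:          aws.String(bucket),
		Key:             aws.String(key),
		UploadId:        aws.String(uploadID),
		MultipartUpload: &s3.CompletedMultipartUpload{Parts: parts},
	})
	if err != nil {
		// Per the doc text above, failures here (including errors the service
		// embedded in a 200 OK) should be treated as retryable.
		return err
	}
	log.Printf("completed: %s", aws.StringValue(out.Location))
	return nil
}

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_ = completeUpload(svc, "example-bucket", "example-key", "example-upload-id",
		[]*s3.CompletedPart{{ETag: aws.String(`"etag-1"`), PartNumber: aws.Int64(1)}})
}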
// -// For information about permissions required to use the multipart upload API, -// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. // -// CompleteMultipartUpload has the following special errors: +// Permissions // -// - Error code: EntityTooSmall Description: Your proposed upload is smaller +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload API, see Multipart Upload and Permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// Special errors +// +// - Error Code: EntityTooSmall Description: Your proposed upload is smaller // than the minimum allowed object size. Each part must be at least 5 MB -// in size, except the last part. 400 Bad Request +// in size, except the last part. HTTP Status Code: 400 Bad Request // -// - Error code: InvalidPart Description: One or more of the specified parts +// - Error Code: InvalidPart Description: One or more of the specified parts // could not be found. The part might not have been uploaded, or the specified -// entity tag might not have matched the part's entity tag. 400 Bad Request +// ETag might not have matched the uploaded part's ETag. HTTP Status Code: +// 400 Bad Request // -// - Error code: InvalidPartOrder Description: The list of parts was not +// - Error Code: InvalidPartOrder Description: The list of parts was not // in ascending order. The parts list must be specified in order by part -// number. 400 Bad Request +// number. HTTP Status Code: 400 Bad Request // -// - Error code: NoSuchUpload Description: The specified multipart upload +// - Error Code: NoSuchUpload Description: The specified multipart upload // does not exist. The upload ID might be invalid, or the multipart upload -// might have been aborted or completed. 404 Not Found +// might have been aborted or completed. 
HTTP Status Code: 404 Not Found +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. // // The following operations are related to CompleteMultipartUpload: // @@ -313,157 +380,113 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // upload Upload Part - Copy (UploadPartCopy) API. For more information, see // Copy Object Using the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). // -// All copy requests must be authenticated. Additionally, you must have read -// access to the source object and write access to the destination bucket. For -// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). +// You can copy individual objects between general purpose buckets, between +// directory buckets, and between general purpose buckets and directory buckets. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// // Both the Region that you want to copy the object from and the Region that -// you want to copy the object to must be enabled for your account. -// -// A copy request might return an error when Amazon S3 receives the copy request -// or while Amazon S3 is copying the files. If the error occurs before the copy -// action starts, you receive a standard Amazon S3 error. If the error occurs -// during the copy operation, the error response is embedded in the 200 OK response. -// This means that a 200 OK response can contain either a success or an error. -// Design your application to parse the contents of the response and handle -// it appropriately. -// -// If the copy is successful, you receive a response with information about -// the copied object. -// -// If the request is an HTTP 1.1 request, the response is chunk encoded. If -// it were not, it would not contain the content-length, and you would need -// to read the entire body. -// -// The copy request charge is based on the storage class and Region that you -// specify for the destination object. For pricing information, see Amazon S3 -// pricing (http://aws.amazon.com/s3/pricing/). +// you want to copy the object to must be enabled for your account. For more +// information about how to enable a Region for your account, see Enable or +// disable a Region for standalone accounts (https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html#manage-acct-regions-enable-standalone) +// in the Amazon Web Services Account Management Guide. // // Amazon S3 transfer acceleration does not support cross-Region copies. If // you request a cross-Region copy using a transfer acceleration endpoint, you // get a 400 Bad Request error. For more information, see Transfer Acceleration // (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). // -// # Metadata +// # Authentication and authorization // -// When copying an object, you can preserve all metadata (default) or specify -// new metadata. 
However, the ACL is not preserved and is set to private for -// the user making the request. To override the default ACL setting, specify -// a new ACL when generating a copy request. For more information, see Using -// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// All CopyObject requests must be authenticated and signed by using IAM credentials +// (access key ID and secret access key for the IAM identities). All headers +// with the x-amz- prefix, including x-amz-copy-source, must be signed. For +// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). // -// To specify whether you want the object metadata copied from the source object -// or replaced with metadata provided in the request, you can optionally add -// the x-amz-metadata-directive header. When you grant permissions, you can -// use the s3:x-amz-metadata-directive condition key to enforce certain metadata -// behavior when objects are uploaded. For more information, see Specifying -// Conditions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) -// in the Amazon S3 User Guide. For a complete list of Amazon S3-specific condition -// keys, see Actions, Resources, and Condition Keys for Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html). +// Directory buckets - You must use the IAM credentials to authenticate and +// authorize your access to the CopyObject API operation, instead of using the +// temporary security credentials through the CreateSession API operation. // -// x-amz-copy-source-if Headers +// Amazon Web Services CLI or SDKs handles authentication and authorization +// on your behalf. // -// To only copy an object under certain conditions, such as whether the Etag -// matches or whether the object was modified before or after a specified date, -// use the following request parameters: +// # Permissions // -// - x-amz-copy-source-if-match +// You must have read access to the source object and write access to the destination +// bucket. // -// - x-amz-copy-source-if-none-match +// - General purpose bucket permissions - You must have permissions in an +// IAM policy based on the source and destination bucket types in a CopyObject +// operation. If the source object is in a general purpose bucket, you must +// have s3:GetObject permission to read the source object that is being copied. +// If the destination bucket is a general purpose bucket, you must have s3:PutObject +// permission to write the object copy to the destination bucket. // -// - x-amz-copy-source-if-unmodified-since +// - Directory bucket permissions - You must have permissions in a bucket +// policy or an IAM identity-based policy based on the source and destination +// bucket types in a CopyObject operation. If the source object that you +// want to copy is in a directory bucket, you must have the s3express:CreateSession +// permission in the Action element of a policy to read the object. By default, +// the session is in the ReadWrite mode. If you want to restrict the access, +// you can explicitly set the s3express:SessionMode condition key to ReadOnly +// on the copy source bucket. If the copy destination is a directory bucket, +// you must have the s3express:CreateSession permission in the Action element +// of a policy to write the object to the destination. The s3express:SessionMode +// condition key can't be set to ReadOnly on the copy destination bucket. 
+// For example policies, see Example bucket policies for S3 Express One Zone +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// and Amazon Web Services Identity and Access Management (IAM) identity-based +// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) +// in the Amazon S3 User Guide. // -// - x-amz-copy-source-if-modified-since +// # Response and special errors // -// If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since -// headers are present in the request and evaluate as follows, Amazon S3 returns -// 200 OK and copies the data: +// When the request is an HTTP 1.1 request, the response is chunk encoded. When +// the request is not an HTTP 1.1 request, the response would not contain the +// Content-Length. You always need to read the entire response body to check +// if the copy succeeds. // -// - x-amz-copy-source-if-match condition evaluates to true +// - If the copy is successful, you receive a response with information about +// the copied object. // -// - x-amz-copy-source-if-unmodified-since condition evaluates to false +// - A copy request might return an error when Amazon S3 receives the copy +// request or while Amazon S3 is copying the files. A 200 OK response can +// contain either a success or an error. If the error occurs before the copy +// action starts, you receive a standard Amazon S3 error. If the error occurs +// during the copy operation, the error response is embedded in the 200 OK +// response. For example, in a cross-region copy, you may encounter throttling +// and receive a 200 OK response. For more information, see Resolve the Error +// 200 response when copying objects to Amazon S3 (https://repost.aws/knowledge-center/s3-resolve-200-internalerror). +// The 200 OK status code means the copy was accepted, but it doesn't mean +// the copy is complete. Another example is when you disconnect from Amazon +// S3 before the copy is complete, Amazon S3 might cancel the copy and you +// may receive a 200 OK response. You must stay connected to Amazon S3 until +// the entire response is successfully received and processed. If you call +// this API operation directly, make sure to design your application to parse +// the content of the response and handle it appropriately. If you use Amazon +// Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded +// error and apply error handling per your configuration settings (including +// automatically retrying the request as appropriate). If the condition persists, +// the SDKs throw an exception (or, for the SDKs that don't use exceptions, +// they return an error). // -// If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since -// headers are present in the request and evaluate as follows, Amazon S3 returns -// the 412 Precondition Failed response code: +// # Charge // -// - x-amz-copy-source-if-none-match condition evaluates to false +// The copy request charge is based on the storage class and Region that you +// specify for the destination object. The request can also result in a data +// retrieval charge for the source if the source storage class bills for data +// retrieval. If the copy source is in a different region, the data transfer +// is billed to the copy source account. For pricing information, see Amazon +// S3 pricing (http://aws.amazon.com/s3/pricing/). 
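Since the updated CopyObject comment warns that an error can arrive embedded in a 200 OK and that the whole response body must be read, here is a minimal sketch of the call with this vendored v1 SDK; bucket and key names are placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket: aws.String("dst-bucket"),
		Key:    aws.String("copies/object.txt"),
		// CopySource is "source-bucket/source-key"; URL-encode keys that
		// contain special characters.
		CopySource: aws.String("src-bucket/object.txt"),
	})
	if err != nil {
		// An error embedded in a 200 OK still surfaces here, because the v1
		// SDK reads and parses the entire response body before returning.
		log.Fatal(err)
	}
	log.Printf("copied, ETag %s", aws.StringValue(out.CopyObjectResult.ETag))
}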
// -// - x-amz-copy-source-if-modified-since condition evaluates to true +// # HTTP Host header syntax // -// All headers with the x-amz- prefix, including x-amz-copy-source, must be -// signed. -// -// # Server-side encryption -// -// When you perform a CopyObject operation, you can optionally use the appropriate -// encryption-related headers to encrypt the object using server-side encryption -// with Amazon Web Services managed encryption keys (SSE-S3 or SSE-KMS) or a -// customer-provided encryption key. With server-side encryption, Amazon S3 -// encrypts your data as it writes it to disks in its data centers and decrypts -// the data when you access it. For more information about server-side encryption, -// see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). -// -// If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the -// object. For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) -// in the Amazon S3 User Guide. -// -// # Access Control List (ACL)-Specific Request Headers -// -// When copying an object, you can optionally use headers to grant ACL-based -// permissions. By default, all objects are private. Only the owner has full -// access control. When adding a new object, you can grant permissions to individual -// Amazon Web Services accounts or to predefined groups defined by Amazon S3. -// These permissions are then added to the ACL on the object. For more information, -// see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) -// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). -// -// If the bucket that you're copying objects to uses the bucket owner enforced -// setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. -// Buckets that use this setting only accept PUT requests that don't specify -// an ACL or PUT requests that specify bucket owner full control ACLs, such -// as the bucket-owner-full-control canned ACL or an equivalent form of this -// ACL expressed in the XML format. -// -// For more information, see Controlling ownership of objects and disabling -// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. -// -// If your bucket uses the bucket owner enforced setting for Object Ownership, -// all objects written to the bucket by any account will be owned by the bucket -// owner. -// -// # Checksums -// -// When copying an object, if it has a checksum, that checksum will be copied -// to the new object by default. When you copy the object over, you may optionally -// specify a different checksum algorithm to use with the x-amz-checksum-algorithm -// header. -// -// # Storage Class Options -// -// You can use the CopyObject action to change the storage class of an object -// that is already stored in Amazon S3 using the StorageClass parameter. For -// more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) -// in the Amazon S3 User Guide. -// -// # Versioning -// -// By default, x-amz-copy-source identifies the current version of an object -// to copy. If the current version is a delete marker, Amazon S3 behaves as -// if the object was deleted. To copy a different version, use the versionId -// subresource. 
-// -// If you enable versioning on the target bucket, Amazon S3 generates a unique -// version ID for the object being copied. This version ID is different from -// the version ID of the source object. Amazon S3 returns the version ID of -// the copied object in the x-amz-version-id response header in the response. -// -// If you do not enable versioning or suspend it on the target bucket, the version -// ID that Amazon S3 generates is always null. -// -// If the source object's storage class is GLACIER, you must restore a copy -// of this object before you can use it as a source object for the copy operation. -// For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. // // The following operations are related to CopyObject: // @@ -471,8 +494,6 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // // - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // -// For more information, see Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html). -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -550,94 +571,97 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request // CreateBucket API operation for Amazon Simple Storage Service. // -// Creates a new S3 bucket. To create a bucket, you must register with Amazon -// S3 and have a valid Amazon Web Services Access Key ID to authenticate requests. +// This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts +// bucket, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html). +// +// Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and +// have a valid Amazon Web Services Access Key ID to authenticate requests. // Anonymous requests are never allowed to create buckets. By creating the bucket, // you become the bucket owner. // -// Not every string is an acceptable bucket name. For information about bucket -// naming restrictions, see Bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html). -// -// If you want to create an Amazon S3 on Outposts bucket, see Create Bucket -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html). -// -// By default, the bucket is created in the US East (N. Virginia) Region. You -// can optionally specify a Region in the request body. You might choose a Region -// to optimize latency, minimize costs, or address regulatory requirements. -// For example, if you reside in Europe, you will probably find it advantageous -// to create buckets in the Europe (Ireland) Region. For more information, see -// Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). -// -// If you send your create bucket request to the s3.amazonaws.com endpoint, -// the request goes to the us-east-1 Region. Accordingly, the signature calculations -// in Signature Version 4 must use us-east-1 as the Region, even if the location -// constraint in the request specifies another Region where the bucket is to -// be created. If you create a bucket in a Region other than US East (N. 
Virginia), -// your application must be able to handle 307 redirect. For more information, -// see Virtual hosting of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). -// -// Access control lists (ACLs) -// -// When creating a bucket using this operation, you can optionally configure -// the bucket ACL to specify the accounts or groups that should be granted specific -// permissions on the bucket. -// -// If your CreateBucket request sets bucket owner enforced for S3 Object Ownership -// and specifies a bucket ACL that provides access to an external Amazon Web -// Services account, your request fails with a 400 error and returns the InvalidBucketAclWithObjectOwnership -// error code. For more information, see Controlling object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// There are two types of buckets: general purpose buckets and directory buckets. +// For more information about these bucket types, see Creating, configuring, +// and working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html) // in the Amazon S3 User Guide. // -// There are two ways to grant the appropriate permissions using the request -// headers. +// - General purpose buckets - If you send your CreateBucket request to the +// s3.amazonaws.com global endpoint, the request goes to the us-east-1 Region. +// So the signature calculations in Signature Version 4 must use us-east-1 +// as the Region, even if the location constraint in the request specifies +// another Region where the bucket is to be created. If you create a bucket +// in a Region other than US East (N. Virginia), your application must be +// able to handle 307 redirect. For more information, see Virtual hosting +// of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html) +// in the Amazon S3 User Guide. // -// - Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports -// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a -// predefined set of grantees and permissions. For more information, see -// Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// - Directory buckets - For directory buckets, you must make requests for +// this API operation to the Regional endpoint. These endpoints support path-style +// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information, +// see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. // -// - Specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write, -// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control -// headers. These headers map to the set of permissions Amazon S3 supports -// in an ACL. For more information, see Access control list (ACL) overview -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html). 
-// You specify each grantee as a type=value pair, where the type is one of -// the following: id – if the value specified is the canonical user ID -// of an Amazon Web Services account uri – if you are granting permissions -// to a predefined group emailAddress – if the value specified is the email -// address of an Amazon Web Services account Using email addresses to specify -// a grantee is only supported in the following Amazon Web Services Regions: -// US East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific -// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) -// South America (São Paulo) For a list of all the Amazon S3 supported Regions -// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) -// in the Amazon Web Services General Reference. For example, the following -// x-amz-grant-read header grants the Amazon Web Services accounts identified -// by account IDs permissions to read object data and its metadata: x-amz-grant-read: -// id="11112222333", id="444455556666" +// Permissions // -// You can use either a canned ACL or specify access permissions explicitly. -// You cannot do both. +// - General purpose bucket permissions - In addition to the s3:CreateBucket +// permission, the following permissions are required in a policy when your +// CreateBucket request includes specific headers: Access control lists (ACLs) // -// # Permissions +// - In your CreateBucket request, if you specify an access control list +// (ACL) and set it to public-read, public-read-write, authenticated-read, +// or if you explicitly specify any other custom ACLs, both s3:CreateBucket +// and s3:PutBucketAcl permissions are required. In your CreateBucket request, +// if you set the ACL to private, or if you don't specify any ACLs, only +// the s3:CreateBucket permission is required. Object Lock - In your CreateBucket +// request, if you set x-amz-bucket-object-lock-enabled to true, the s3:PutBucketObjectLockConfiguration +// and s3:PutBucketVersioning permissions are required. S3 Object Ownership // -// In addition to s3:CreateBucket, the following permissions are required when -// your CreateBucket includes specific headers: +// - If your CreateBucket request includes the x-amz-object-ownership header, +// then the s3:PutBucketOwnershipControls permission is required. To set +// an ACL on a bucket as part of a CreateBucket request, you must explicitly +// set S3 Object Ownership for the bucket to a different value than the default, +// BucketOwnerEnforced. Additionally, if your desired bucket ACL grants public +// access, you must first create the bucket (without the bucket ACL) and +// then explicitly disable Block Public Access on the bucket before using +// PutBucketAcl to set the ACL. If you try to create a bucket with a public +// ACL, the request will fail. For the majority of modern use cases in S3, +// we recommend that you keep all Block Public Access settings enabled and +// keep ACLs disabled. If you would like to share data with users outside +// of your account, you can use bucket policies as needed. For more information, +// see Controlling ownership of objects and disabling ACLs for your bucket +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// and Blocking public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html) +// in the Amazon S3 User Guide. 
S3 Block Public Access - If your specific +// use case requires granting public access to your S3 resources, you can +// disable Block Public Access. Specifically, you can create a new bucket +// with Block Public Access enabled, then separately call the DeletePublicAccessBlock +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// API. To use this operation, you must have the s3:PutBucketPublicAccessBlock +// permission. For more information about S3 Block Public Access, see Blocking +// public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html) +// in the Amazon S3 User Guide. // -// - ACLs - If your CreateBucket request specifies ACL permissions and the -// ACL is public-read, public-read-write, authenticated-read, or if you specify -// access permissions explicitly through any other ACL, both s3:CreateBucket -// and s3:PutBucketAcl permissions are needed. If the ACL the CreateBucket -// request is private or doesn't specify any ACLs, only s3:CreateBucket permission -// is needed. +// - Directory bucket permissions - You must have the s3express:CreateBucket +// permission in an IAM identity-based policy instead of a bucket policy. +// Cross-account access to this API operation isn't supported. This operation +// can only be performed by the Amazon Web Services account that owns the +// resource. For more information about directory bucket policies and permissions, +// see Amazon Web Services Identity and Access Management (IAM) for S3 Express +// One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. The permissions for ACLs, Object Lock, S3 +// Object Ownership, and S3 Block Public Access are not supported for directory +// buckets. For directory buckets, all Block Public Access settings are enabled +// at the bucket level and S3 Object Ownership is set to Bucket owner enforced +// (ACLs disabled). These settings can't be modified. For more information +// about permissions for creating and working with directory buckets, see +// Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) +// in the Amazon S3 User Guide. For more information about supported S3 features +// for directory buckets, see Features of S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-one-zone.html#s3-express-features) +// in the Amazon S3 User Guide. // -// - Object Lock - If ObjectLockEnabledForBucket is set to true in your CreateBucket -// request, s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning -// permissions are required. +// # HTTP Host header syntax // -// - S3 Object Ownership - If your CreateBucket request includes the the -// x-amz-object-ownership header, s3:PutBucketOwnershipControls permission -// is required. +// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. // // The following operations are related to CreateBucket: // @@ -735,143 +759,139 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // You specify this upload ID in each of your subsequent upload part requests // (see UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). // You also include this upload ID in the final request to either complete or -// abort the multipart upload request. +// abort the multipart upload request. 
For more information about multipart +// uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) +// in the Amazon S3 User Guide. // -// For more information about multipart uploads, see Multipart Upload Overview -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html). +// After you initiate a multipart upload and upload one or more parts, to stop +// being charged for storing the uploaded parts, you must either complete or +// abort the multipart upload. Amazon S3 frees up the space used to store the +// parts and stops charging you for storing them only after you either complete +// or abort a multipart upload. // // If you have configured a lifecycle rule to abort incomplete multipart uploads, -// the upload must complete within the number of days specified in the bucket -// lifecycle configuration. Otherwise, the incomplete multipart upload becomes -// eligible for an abort action and Amazon S3 aborts the multipart upload. For -// more information, see Aborting Incomplete Multipart Uploads Using a Bucket -// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). +// the created multipart upload must be completed within the number of days +// specified in the bucket lifecycle configuration. Otherwise, the incomplete +// multipart upload becomes eligible for an abort action and Amazon S3 aborts +// the multipart upload. For more information, see Aborting Incomplete Multipart +// Uploads Using a Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). // -// For information about the permissions required to use the multipart upload -// API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// - Directory buckets - S3 Lifecycle is not supported by directory buckets. +// +// - Directory buckets - For directory buckets, you must make requests for +// this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// # Request signing // // For request signing, multipart upload is just a series of regular requests. // You initiate a multipart upload, send one or more requests to upload parts, // and then complete the multipart upload process. You sign each request individually. // There is nothing special about signing multipart upload requests. For more // information about signing, see Authenticating Requests (Amazon Web Services -// Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). -// -// After you initiate a multipart upload and upload one or more parts, to stop -// being charged for storing the uploaded parts, you must either complete or -// abort the multipart upload. Amazon S3 frees up the space used to store the -// parts and stop charging you for storing them only after you either complete -// or abort a multipart upload. -// -// You can optionally request server-side encryption. 
For server-side encryption, -// Amazon S3 encrypts your data as it writes it to disks in its data centers -// and decrypts it when you access it. You can provide your own encryption key, -// or use Amazon Web Services KMS keys or Amazon S3-managed encryption keys. -// If you choose to provide your own encryption key, the request headers you -// provide in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) -// requests must match the headers you used in the request to initiate the upload -// by using CreateMultipartUpload. -// -// To perform a multipart upload with encryption using an Amazon Web Services -// KMS key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey* -// actions on the key. These permissions are required because Amazon S3 must -// decrypt and read data from the encrypted file parts before it completes the -// multipart upload. For more information, see Multipart upload API and permissions -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) +// Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) // in the Amazon S3 User Guide. // -// If your Identity and Access Management (IAM) user or role is in the same -// Amazon Web Services account as the KMS key, then you must have these permissions -// on the key policy. If your IAM user or role belongs to a different account -// than the key, then you must have the permissions on both the key policy and -// your IAM user or role. +// Permissions // -// For more information, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). +// - General purpose bucket permissions - For information about the permissions +// required to use the multipart upload API, see Multipart upload and permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. To perform a multipart upload with encryption +// by using an Amazon Web Services KMS key, the requester must have permission +// to the kms:Decrypt and kms:GenerateDataKey* actions on the key. These +// permissions are required because Amazon S3 must decrypt and read data +// from the encrypted file parts before it completes the multipart upload. +// For more information, see Multipart upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) +// and Protecting data using server-side encryption with Amazon Web Services +// KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) +// in the Amazon S3 User Guide. // -// # Access Permissions +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. 
After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. Amazon Web Services CLI or SDKs
+// create a session and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
//
-// When copying an object, you can optionally specify the accounts or groups
-// that should be granted specific permissions on the new object. There are
-// two ways to grant the permissions using the request headers:
+// Encryption
//
-// - Specify a canned ACL with the x-amz-acl request header. For more information,
-// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
-//
-// - Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp,
-// x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters
-// map to the set of permissions that Amazon S3 supports in an ACL. For more
-// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
-//
-// You can use either a canned ACL or specify access permissions explicitly.
-// You cannot do both.
-//
-// # Server-Side- Encryption-Specific Request Headers
-//
-// You can optionally tell Amazon S3 to encrypt data at rest using server-side
-// encryption. Server-side encryption is for data encryption at rest. Amazon
-// S3 encrypts your data as it writes it to disks in its data centers and decrypts
-// it when you access it. The option you use depends on whether you want to
-// use Amazon Web Services managed encryption keys or provide your own encryption
-// key.
-//
-// - Use encryption keys managed by Amazon S3 or customer managed key stored
-// in Amazon Web Services Key Management Service (Amazon Web Services KMS)
-// – If you want Amazon Web Services to manage the keys used to encrypt
+// - General purpose buckets - Server-side encryption is for data encryption
+// at rest. Amazon S3 encrypts your data as it writes it to disks in its
+// data centers and decrypts it when you access it. Amazon S3 automatically
+// encrypts all new objects that are uploaded to an S3 bucket. When doing
+// a multipart upload, if you don't specify encryption information in your
+// request, the encryption setting of the uploaded parts is set to the default
+// encryption configuration of the destination bucket. By default, all buckets
+// have a base level of encryption configuration that uses server-side encryption
+// with Amazon S3 managed keys (SSE-S3). If the destination bucket has a
+// default encryption configuration that uses server-side encryption with
+// a Key Management Service (KMS) key (SSE-KMS), or a customer-provided
+// encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a
+// customer-provided key to encrypt the uploaded parts. When you perform
+// a CreateMultipartUpload operation, if you want to use a different type
+// of encryption setting for the uploaded parts, you can request that Amazon
+// S3 encrypts the object with a different encryption key (such as an Amazon
+// S3 managed key, a KMS key, or a customer-provided key). When the encryption
+// setting in your request is different from the default encryption configuration
+// of the destination bucket, the encryption setting in your request takes
+// precedence.
If you choose to provide your own encryption key, the request +// headers you provide in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) +// requests must match the headers you used in the CreateMultipartUpload +// request. Use KMS keys (SSE-KMS) that include the Amazon Web Services managed +// key (aws/s3) and KMS customer managed keys stored in Key Management Service +// (KMS) – If you want Amazon Web Services to manage the keys used to encrypt // data, specify the following headers in the request. x-amz-server-side-encryption // x-amz-server-side-encryption-aws-kms-key-id x-amz-server-side-encryption-context // If you specify x-amz-server-side-encryption:aws:kms, but don't provide // x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon -// Web Services managed key in Amazon Web Services KMS to protect the data. -// All GET and PUT requests for an object protected by Amazon Web Services -// KMS fail if you don't make them with SSL or by using SigV4. For more information -// about server-side encryption with KMS key (SSE-KMS), see Protecting Data -// Using Server-Side Encryption with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// Web Services managed key (aws/s3 key) in KMS to protect the data. To perform +// a multipart upload with encryption by using an Amazon Web Services KMS +// key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey* +// actions on the key. These permissions are required because Amazon S3 must +// decrypt and read data from the encrypted file parts before it completes +// the multipart upload. For more information, see Multipart upload API and +// permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) +// and Protecting data using server-side encryption with Amazon Web Services +// KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) +// in the Amazon S3 User Guide. If your Identity and Access Management (IAM) +// user or role is in the same Amazon Web Services account as the KMS key, +// then you must have these permissions on the key policy. If your IAM user +// or role is in a different account from the key, then you must have the +// permissions on both the key policy and your IAM user or role. All GET +// and PUT requests for an object protected by KMS fail if you don't make +// them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), +// or Signature Version 4. For information about configuring any of the officially +// supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying +// the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) +// in the Amazon S3 User Guide. For more information about server-side encryption +// with KMS keys (SSE-KMS), see Protecting Data Using Server-Side Encryption +// with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) +// in the Amazon S3 User Guide. Use customer-provided encryption keys (SSE-C) +// – If you want to manage your own encryption keys, provide all the following +// headers in the request. 
x-amz-server-side-encryption-customer-algorithm
+// x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5
+// For more information about server-side encryption with customer-provided
+// encryption keys (SSE-C), see Protecting data using server-side encryption
+// with customer-provided encryption keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html)
+// in the Amazon S3 User Guide.
//
-// - Use customer-provided encryption keys – If you want to manage your
-// own encryption keys, provide all the following headers in the request.
-// x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key
-// x-amz-server-side-encryption-customer-key-MD5 For more information about
-// server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using
-// Server-Side Encryption with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html).
+// - Directory buckets - For directory buckets, only server-side encryption
+// with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
//
-// # Access-Control-List (ACL)-Specific Request Headers
+// # HTTP Host header syntax
//
-// You also can use the following access control–related headers with this
-// operation. By default, all objects are private. Only the owner has full access
-// control. When adding a new object, you can grant permissions to individual
-// Amazon Web Services accounts or to predefined groups defined by Amazon S3.
-// These permissions are then added to the access control list (ACL) on the
-// object. For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html).
-// With this operation, you can grant access permissions using one of the following
-// two methods:
-//
-// - Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined
-// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees
-// and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
-//
-// - Specify access permissions explicitly — To explicitly grant access
-// permissions to specific Amazon Web Services accounts or groups, use the
-// following headers. Each header maps to specific permissions that Amazon
-// S3 supports in an ACL. For more information, see Access Control List (ACL)
-// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
-// In the header, you specify a list of grantees who get the specific permission.
-// To grant permissions explicitly, use: x-amz-grant-read x-amz-grant-write
-// x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You
-// specify each grantee as a type=value pair, where the type is one of the
-// following: id – if the value specified is the canonical user ID of an
-// Amazon Web Services account uri – if you are granting permissions to
-// a predefined group emailAddress – if the value specified is the email
-// address of an Amazon Web Services account Using email addresses to specify
-// a grantee is only supported in the following Amazon Web Services Regions:
-// US East (N. Virginia) US West (N.
California) US West (Oregon) Asia Pacific -// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) -// South America (São Paulo) For a list of all the Amazon S3 supported Regions -// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) -// in the Amazon Web Services General Reference. For example, the following -// x-amz-grant-read header grants the Amazon Web Services accounts identified -// by account IDs permissions to read object data and its metadata: x-amz-grant-read: -// id="11112222333", id="444455556666" +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. // // The following operations are related to CreateMultipartUpload: // @@ -913,6 +933,152 @@ func (c *S3) CreateMultipartUploadWithContext(ctx aws.Context, input *CreateMult return out, req.Send() } +const opCreateSession = "CreateSession" + +// CreateSessionRequest generates a "aws/request.Request" representing the +// client's request for the CreateSession operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateSession for more information on using the CreateSession +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateSessionRequest method. +// req, resp := client.CreateSessionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateSession +func (c *S3) CreateSessionRequest(input *CreateSessionInput) (req *request.Request, output *CreateSessionOutput) { + op := &request.Operation{ + Name: opCreateSession, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?session", + } + + if input == nil { + input = &CreateSessionInput{} + } + + output = &CreateSessionOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateSession API operation for Amazon Simple Storage Service. +// +// Creates a session that establishes temporary security credentials to support +// fast authentication and authorization for the Zonal endpoint APIs on directory +// buckets. For more information about Zonal endpoint APIs that include the +// Availability Zone in the request endpoint, see S3 Express One Zone APIs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-APIs.html) +// in the Amazon S3 User Guide. +// +// To make Zonal endpoint API requests on a directory bucket, use the CreateSession +// API operation. Specifically, you grant s3express:CreateSession permission +// to a bucket in a bucket policy or an IAM identity-based policy. Then, you +// use IAM credentials to make the CreateSession API request on the bucket, +// which returns temporary security credentials that include the access key +// ID, secret access key, session token, and expiration. These credentials have +// associated permissions to access the Zonal endpoint APIs. After the session +// is created, you don’t need to use other policies to grant permissions to +// each Zonal endpoint API individually. 
Instead, in your Zonal endpoint API
+// requests, you sign your requests by applying the temporary security credentials
+// of the session to the request headers and following the SigV4 protocol for
+// authentication. You also apply the session token to the x-amz-s3session-token
+// request header for authorization. Temporary security credentials are scoped
+// to the bucket and expire after 5 minutes. After the expiration time, any
+// calls that you make with those credentials will fail. You must use IAM credentials
+// again to make a CreateSession API request that generates a new set of temporary
+// credentials for use. Temporary credentials cannot be extended or refreshed
+// beyond the original specified interval.
+//
+// If you use the Amazon Web Services SDKs, they handle session token refreshes
+// automatically to avoid service interruptions when a session expires. We recommend
+// that you use the Amazon Web Services SDKs to initiate and manage requests
+// to the CreateSession API. For more information, see Performance guidelines
+// and design patterns (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-optimizing-performance-guidelines-design-patterns.html#s3-express-optimizing-performance-session-authentication)
+// in the Amazon S3 User Guide.
+//
+// - You must make requests for this API operation to the Zonal endpoint.
+// These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com.
+// Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
+//
+// - CopyObject API operation - Unlike other Zonal endpoint APIs, the CopyObject
+// API operation doesn't use the temporary security credentials returned
+// from the CreateSession API operation for authentication and authorization.
+// For information about authentication and authorization of the CopyObject
+// API operation on directory buckets, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html).
+//
+// - HeadBucket API operation - Unlike other Zonal endpoint APIs, the HeadBucket
+// API operation doesn't use the temporary security credentials returned
+// from the CreateSession API operation for authentication and authorization.
+// For information about authentication and authorization of the HeadBucket
+// API operation on directory buckets, see HeadBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html).
+//
+// # Permissions
+//
+// To obtain temporary security credentials, you must create a bucket policy
+// or an IAM identity-based policy that grants s3express:CreateSession permission
+// to the bucket. In a policy, you can use the s3express:SessionMode condition
+// key to control who can create a ReadWrite or ReadOnly session. For more information
+// about ReadWrite or ReadOnly sessions, see x-amz-create-session-mode (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html#API_CreateSession_RequestParameters).
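Concretely, the session flow above can be sketched with the aws-sdk-go v1 client that this diff vendors. The bucket name is hypothetical, and SessionModeReadOnly corresponds to the x-amz-create-session-mode parameter just mentioned; the later sketches in this section reuse this client setup and these imports:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Standard client setup; credentials come from the default provider chain.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Request a read-only session on a hypothetical directory bucket.
	out, err := svc.CreateSession(&s3.CreateSessionInput{
		Bucket:      aws.String("my-bucket--use1-az4--x-s3"),
		SessionMode: aws.String(s3.SessionModeReadOnly),
	})
	if err != nil {
		fmt.Println("CreateSession failed:", err)
		return
	}
	// The returned credentials are scoped to the bucket and short-lived.
	fmt.Println("session expires at:", aws.TimeValue(out.Credentials.Expiration))
}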
+// For example policies, see Example bucket policies for S3 Express One Zone +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// and Amazon Web Services Identity and Access Management (IAM) identity-based +// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) +// in the Amazon S3 User Guide. +// +// To grant cross-account access to Zonal endpoint APIs, the bucket policy should +// also grant both accounts the s3express:CreateSession permission. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CreateSession for usage and error information. +// +// Returned Error Codes: +// - ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateSession +func (c *S3) CreateSession(input *CreateSessionInput) (*CreateSessionOutput, error) { + req, out := c.CreateSessionRequest(input) + return out, req.Send() +} + +// CreateSessionWithContext is the same as CreateSession with the addition of +// the ability to pass a context and additional request options. +// +// See CreateSession for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateSessionWithContext(ctx aws.Context, input *CreateSessionInput, opts ...request.Option) (*CreateSessionOutput, error) { + req, out := c.CreateSessionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteBucket = "DeleteBucket" // DeleteBucketRequest generates a "aws/request.Request" representing the @@ -960,7 +1126,36 @@ func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request // Deletes the S3 bucket. All objects (including all object versions and delete // markers) in the bucket must be deleted before the bucket itself can be deleted. // -// Related Resources +// - Directory buckets - If multipart uploads in a directory bucket are in +// progress, you can't delete the bucket until all the in-progress multipart +// uploads are aborted or completed. +// +// - Directory buckets - For directory buckets, you must make requests for +// this API operation to the Regional endpoint. These endpoints support path-style +// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information, +// see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - You must have the s3:DeleteBucket +// permission on the specified bucket in a policy. 
+// +// - Directory bucket permissions - You must have the s3express:DeleteBucket +// permission in an IAM identity-based policy instead of a bucket policy. +// Cross-account access to this API operation isn't supported. This operation +// can only be performed by the Amazon Web Services account that owns the +// resource. For more information about directory bucket policies and permissions, +// see Amazon Web Services Identity and Access Management (IAM) for S3 Express +// One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. +// +// The following operations are related to DeleteBucket: // // - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // @@ -1038,6 +1233,8 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt // DeleteBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Deletes an analytics configuration for the bucket (specified by the analytics // configuration ID). // @@ -1130,6 +1327,8 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request // DeleteBucketCors API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Deletes the cors configuration information set for the bucket. // // To use this operation, you must have permission to perform the s3:PutBucketCORS @@ -1139,7 +1338,7 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request // For information about cors, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) // in the Amazon S3 User Guide. // -// Related Resources: +// Related Resources // // - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) // @@ -1217,9 +1416,12 @@ func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) ( // DeleteBucketEncryption API operation for Amazon Simple Storage Service. // -// This implementation of the DELETE action removes default encryption from -// the bucket. For information about the Amazon S3 default encryption feature, -// see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// This operation is not supported by directory buckets. +// +// This implementation of the DELETE action resets the default encryption for +// the bucket as server-side encryption with Amazon S3 managed keys (SSE-S3). +// For information about the bucket default encryption feature, see Amazon S3 +// Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) // in the Amazon S3 User Guide. // // To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration @@ -1229,7 +1431,7 @@ func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) ( // and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) // in the Amazon S3 User Guide. 
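A sketch of the corresponding call, assuming the client setup and imports from the CreateSession example earlier (the bucket name is hypothetical); per the doc text above, this resets the bucket to SSE-S3 default encryption:

func resetDefaultEncryption(svc *s3.S3) {
	// Requires s3:PutEncryptionConfiguration on the bucket, as noted above.
	if _, err := svc.DeleteBucketEncryption(&s3.DeleteBucketEncryptionInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
	}); err != nil {
		fmt.Println("DeleteBucketEncryption failed:", err)
	}
}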
// -// Related Resources +// The following operations are related to DeleteBucketEncryption: // // - PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) // @@ -1307,6 +1509,8 @@ func (c *S3) DeleteBucketIntelligentTieringConfigurationRequest(input *DeleteBuc // DeleteBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Deletes the S3 Intelligent-Tiering configuration from the specified bucket. // // The S3 Intelligent-Tiering storage class is designed to optimize storage @@ -1406,6 +1610,8 @@ func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInvent // DeleteBucketInventoryConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Deletes an inventory configuration (identified by the inventory ID) from // the bucket. // @@ -1498,6 +1704,8 @@ func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (re // DeleteBucketLifecycle API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Deletes the lifecycle configuration from the specified bucket. Amazon S3 // removes all the lifecycle configuration rules in the lifecycle subresource // associated with the bucket. Your objects never expire, and Amazon S3 no longer @@ -1592,6 +1800,8 @@ func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsC // DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Deletes a metrics configuration for the Amazon CloudWatch request metrics // (specified by the metrics configuration ID) from the bucket. Note that this // doesn't include the daily storage metrics. @@ -1687,6 +1897,8 @@ func (c *S3) DeleteBucketOwnershipControlsRequest(input *DeleteBucketOwnershipCo // DeleteBucketOwnershipControls API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Removes OwnershipControls for an Amazon S3 bucket. To use this operation, // you must have the s3:PutBucketOwnershipControls permission. For more information // about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). @@ -1772,23 +1984,52 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req // DeleteBucketPolicy API operation for Amazon Simple Storage Service. // -// This implementation of the DELETE action uses the policy subresource to delete -// the policy of a specified bucket. If you are using an identity other than -// the root user of the Amazon Web Services account that owns the bucket, the -// calling identity must have the DeleteBucketPolicy permissions on the specified -// bucket and belong to the bucket owner's account to use this operation. +// Deletes the policy of a specified bucket. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Regional endpoint. These endpoints support path-style +// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. 
For more information, see
+// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
+//
+// # Permissions
+//
+// If you are using an identity other than the root user of the Amazon Web Services
+// account that owns the bucket, the calling identity must both have the DeleteBucketPolicy
+// permissions on the specified bucket and belong to the bucket owner's account
+// in order to use this operation.
//
// If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403
// Access Denied error. If you have the correct permissions, but you're not
// using an identity that belongs to the bucket owner's account, Amazon S3 returns
// a 405 Method Not Allowed error.
//
-// As a security precaution, the root user of the Amazon Web Services account
-// that owns a bucket can always use this operation, even if the policy explicitly
-// denies the root user the ability to perform this action.
+// To ensure that bucket owners don't inadvertently lock themselves out of their
+// own buckets, the root principal in a bucket owner's Amazon Web Services account
+// can perform the GetBucketPolicy, PutBucketPolicy, and DeleteBucketPolicy
+// API actions, even if their bucket policy explicitly denies the root principal's
+// access. Bucket owner root principals can only be blocked from performing
+// these API actions by VPC endpoint policies and Amazon Web Services Organizations
+// policies.
//
-// For more information about bucket policies, see Using Bucket Policies and
-// UserPolicies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
+// - General purpose bucket permissions - The s3:DeleteBucketPolicy permission
+// is required in a policy. For more information about general purpose
+// bucket policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html)
+// in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - To grant access to this API operation,
+// you must have the s3express:DeleteBucketPolicy permission in an IAM identity-based
+// policy instead of a bucket policy. Cross-account access to this API operation
+// isn't supported. This operation can only be performed by the Amazon Web
+// Services account that owns the resource. For more information about directory
+// bucket policies and permissions, see Amazon Web Services Identity and
+// Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html)
+// in the Amazon S3 User Guide.
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.
//
// The following operations are related to DeleteBucketPolicy:
//
@@ -1868,6 +2109,8 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput)
// DeleteBucketReplication API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Deletes the replication configuration from the bucket.
//
// To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration
@@ -1960,6 +2203,8 @@ func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *r
// DeleteBucketTagging API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Deletes the tags from the bucket.
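A matching sketch for this call, under the same assumptions as the earlier examples (shared client setup, hypothetical bucket name):

func clearBucketTags(svc *s3.S3) {
	// Removes the bucket's entire tag set; needs s3:PutBucketTagging, per below.
	if _, err := svc.DeleteBucketTagging(&s3.DeleteBucketTaggingInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
	}); err != nil {
		fmt.Println("DeleteBucketTagging failed:", err)
	}
}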
// // To use this operation, you must have permission to perform the s3:PutBucketTagging @@ -2044,6 +2289,8 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r // DeleteBucketWebsite API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // This action removes the website configuration for a bucket. Amazon S3 returns // a 200 OK response upon successfully deleting a website configuration on the // specified bucket. You will get a 200 OK response if the website configuration @@ -2136,31 +2383,88 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request // DeleteObject API operation for Amazon Simple Storage Service. // -// Removes the null version (if there is one) of an object and inserts a delete -// marker, which becomes the latest version of the object. If there isn't a -// null version, Amazon S3 does not remove any objects but will still respond -// that the command was successful. +// Removes an object from a bucket. The behavior depends on the bucket's versioning +// state: // -// To remove a specific version, you must be the bucket owner and you must use -// the version Id subresource. Using this subresource permanently deletes the -// version. If the object deleted is a delete marker, Amazon S3 sets the response -// header, x-amz-delete-marker, to true. +// - If bucket versioning is not enabled, the operation permanently deletes +// the object. +// +// - If bucket versioning is enabled, the operation inserts a delete marker, +// which becomes the current version of the object. To permanently delete +// an object in a versioned bucket, you must include the object’s versionId +// in the request. For more information about versioning-enabled buckets, +// see Deleting object versions from a versioning-enabled bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectVersions.html). +// +// - If bucket versioning is suspended, the operation removes the object +// that has a null versionId, if there is one, and inserts a delete marker +// that becomes the current version of the object. If there isn't an object +// with a null versionId, and all versions of the object have a versionId, +// Amazon S3 does not remove the object and only inserts a delete marker. +// To permanently delete an object that has a versionId, you must include +// the object’s versionId in the request. For more information about versioning-suspended +// buckets, see Deleting objects from versioning-suspended buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectsfromVersioningSuspendedBuckets.html). +// +// - Directory buckets - S3 Versioning isn't enabled and supported for directory +// buckets. For this API operation, only the null value of the version ID +// is supported by directory buckets. You can only specify null to the versionId +// query parameter in the request. +// +// - Directory buckets - For directory buckets, you must make requests for +// this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// To remove a specific version, you must use the versionId query parameter. 
+// Using this query parameter permanently deletes the version. If the object
+// deleted is a delete marker, Amazon S3 sets the response header x-amz-delete-marker
+// to true.
//
// If the object you want to delete is in a bucket where the bucket versioning
// configuration is MFA Delete enabled, you must include the x-amz-mfa request
// header in the DELETE versionId request. Requests that include x-amz-mfa must
-// use HTTPS.
+// use HTTPS. For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html)
+// in the Amazon S3 User Guide. To see sample requests that use versioning,
+// see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete).
//
-// For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html).
-// To see sample requests that use versioning, see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete).
+// Directory buckets - MFA delete is not supported by directory buckets.
//
-// You can delete objects by explicitly calling DELETE Object or configure its
-// lifecycle (PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html))
+// You can delete objects by explicitly calling DELETE Object or by calling PutBucketLifecycle
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)
// to enable Amazon S3 to remove them for you. If you want to block users or
// accounts from removing or deleting objects from your bucket, you must deny
// them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration
// actions.
//
+// Directory buckets - S3 Lifecycle is not supported by directory buckets.
+//
+// Permissions
+//
+// - General purpose bucket permissions - The following permissions are required
+// in your policies when your DeleteObject request includes specific headers.
+// s3:DeleteObject - To delete an object from a bucket, you must always have
+// the s3:DeleteObject permission. s3:DeleteObjectVersion - To delete a specific
+// version of an object from a versioning-enabled bucket, you must have the
+// s3:DeleteObjectVersion permission.
+//
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. Amazon Web Services CLI or SDKs
+// create a session and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
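Deleting a specific version, as described above, comes down to passing the versionId explicitly. A sketch with hypothetical bucket, key, and version ID, reusing the earlier client setup:

func deleteOneVersion(svc *s3.S3) {
	out, err := svc.DeleteObject(&s3.DeleteObjectInput{
		Bucket:    aws.String("amzn-s3-demo-bucket"),
		Key:       aws.String("reports/q1.csv"),
		VersionId: aws.String("3HL4kqCxf3vjVBH40Nrjfkd"), // hypothetical version ID
	})
	if err != nil {
		fmt.Println("DeleteObject failed:", err)
		return
	}
	if aws.BoolValue(out.DeleteMarker) {
		// The version just removed was itself a delete marker.
		fmt.Println("removed a delete marker")
	}
}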
+// // The following action is related to DeleteObject: // // - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) @@ -2236,6 +2540,8 @@ func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *r // DeleteObjectTagging API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Removes the entire tag set from the specified object. For more information // about managing object tags, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). // @@ -2246,7 +2552,7 @@ func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *r // in the request. You will need permission for the s3:DeleteObjectVersionTagging // action. // -// The following operations are related to DeleteBucketMetricsConfiguration: +// The following operations are related to DeleteObjectTagging: // // - PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) // @@ -2327,36 +2633,82 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque // DeleteObjects API operation for Amazon Simple Storage Service. // -// This action enables you to delete multiple objects from a bucket using a -// single HTTP request. If you know the object keys that you want to delete, -// then this action provides a suitable alternative to sending individual delete -// requests, reducing per-request overhead. +// This operation enables you to delete multiple objects from a bucket using +// a single HTTP request. If you know the object keys that you want to delete, +// then this operation provides a suitable alternative to sending individual +// delete requests, reducing per-request overhead. // -// The request contains a list of up to 1000 keys that you want to delete. In -// the XML, you provide the object key names, and optionally, version IDs if -// you want to delete a specific version of the object from a versioning-enabled -// bucket. For each key, Amazon S3 performs a delete action and returns the -// result of that delete, success, or failure, in the response. Note that if +// The request can contain a list of up to 1000 keys that you want to delete. +// In the XML, you provide the object key names, and optionally, version IDs +// if you want to delete a specific version of the object from a versioning-enabled +// bucket. For each key, Amazon S3 performs a delete operation and returns the +// result of that delete, success or failure, in the response. Note that if // the object specified in the request is not found, Amazon S3 returns the result // as deleted. // -// The action supports two modes for the response: verbose and quiet. By default, -// the action uses verbose mode in which the response includes the result of -// deletion of each key in your request. In quiet mode the response includes -// only keys where the delete action encountered an error. For a successful -// deletion, the action does not return any information about the delete in -// the response body. +// - Directory buckets - S3 Versioning isn't enabled and supported for directory +// buckets. +// +// - Directory buckets - For directory buckets, you must make requests for +// this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. 
For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
+//
+// The operation supports two modes for the response: verbose and quiet. By
+// default, the operation uses verbose mode in which the response includes the
+// result of deletion of each key in your request. In quiet mode the response
+// includes only keys where the delete operation encountered an error. For a
+// successful deletion in quiet mode, the operation does not return any information
+// about the delete in the response body.
//
// When performing this action on an MFA Delete enabled bucket, if the request attempts
// to delete any versioned objects, you must include an MFA token. If you do
// not provide one, the entire request will fail, even if there are non-versioned
// objects you are trying to delete. If you provide an invalid token, whether
// there are versioned keys in the request or not, the entire Multi-Object Delete
-// request will fail. For information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete).
+// request will fail. For information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete)
+// in the Amazon S3 User Guide.
//
-// Finally, the Content-MD5 header is required for all Multi-Object Delete requests.
-// Amazon S3 uses the header value to ensure that your request body has not
-// been altered in transit.
+// Directory buckets - MFA delete is not supported by directory buckets.
+//
+// Permissions
+//
+// - General purpose bucket permissions - The following permissions are required
+// in your policies when your DeleteObjects request includes specific headers.
+// s3:DeleteObject - To delete an object from a bucket, you must always specify
+// the s3:DeleteObject permission. s3:DeleteObjectVersion - To delete a specific
+// version of an object from a versioning-enabled bucket, you must specify
+// the s3:DeleteObjectVersion permission.
+//
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. Amazon Web Services CLI or SDKs
+// create a session and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// Content-MD5 request header
+//
+// - General purpose bucket - The Content-MD5 request header is required
+// for all Multi-Object Delete requests. Amazon S3 uses the header value
+// to ensure that your request body has not been altered in transit.
+//
+// - Directory bucket - The Content-MD5 request header or an additional checksum
+// request header (including x-amz-checksum-crc32, x-amz-checksum-crc32c,
+// x-amz-checksum-sha1, or x-amz-checksum-sha256) is required for all Multi-Object
+// Delete requests.
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
//
// The following operations are related to DeleteObjects:
//
@@ -2442,6 +2794,8 @@ func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput)
// DeletePublicAccessBlock API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use
// this operation, you must have the s3:PutBucketPublicAccessBlock permission.
// For more information about permissions, see Permissions Related to Bucket
@@ -2529,6 +2883,8 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC
// GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// This implementation of the GET action uses the accelerate subresource to
// return the Transfer Acceleration state of a bucket, which is either Enabled
// or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that
@@ -2553,7 +2909,7 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html)
// in the Amazon S3 User Guide.
//
-// Related Resources
+// The following operations are related to GetBucketAccelerateConfiguration:
//
// - PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html)
//
@@ -2628,11 +2984,22 @@ func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request
// GetBucketAcl API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// This implementation of the GET action uses the acl subresource to return
// the access control list (ACL) of a bucket. To use GET to return the ACL of
-// the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission
-// is granted to the anonymous user, you can return the ACL of the bucket without
-// using an authorization header.
+// the bucket, you must have the READ_ACP access to the bucket. If READ_ACP
+// permission is granted to the anonymous user, you can return the ACL of the
+// bucket without using an authorization header.
+//
+// When you use this API operation with an access point, provide the alias of
+// the access point in place of the bucket name.
+//
+// When you use this API operation with an Object Lambda access point, provide
+// the alias of the Object Lambda access point in place of the bucket name.
+// If the Object Lambda access point alias in a request is not valid, the error
+// code InvalidAccessPointAliasError is returned. For more information about
+// InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList).
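Reading an ACL then reduces to one call; a sketch under the same assumptions as the earlier examples (hypothetical bucket name):

func printBucketAcl(svc *s3.S3) {
	acl, err := svc.GetBucketAcl(&s3.GetBucketAclInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
	})
	if err != nil {
		fmt.Println("GetBucketAcl failed:", err)
		return
	}
	for _, g := range acl.Grants {
		// Each grant pairs a grantee with a permission such as FULL_CONTROL.
		fmt.Println(aws.StringValue(g.Permission), g.Grantee)
	}
}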
// // If your bucket uses the bucket owner enforced setting for S3 Object Ownership, // requests to read ACLs are still supported and return the bucket-owner-full-control @@ -2640,7 +3007,7 @@ func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request // see Controlling object ownership and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) // in the Amazon S3 User Guide. // -// Related Resources +// The following operations are related to GetBucketAcl: // // - ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) // @@ -2715,6 +3082,8 @@ func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsCon // GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // This implementation of the GET action returns an analytics configuration // (identified by the analytics configuration ID) from the bucket. // @@ -2729,7 +3098,7 @@ func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsCon // – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) // in the Amazon S3 User Guide. // -// Related Resources +// The following operations are related to GetBucketAnalyticsConfiguration: // // - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) // @@ -2808,6 +3177,8 @@ func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Reque // GetBucketCors API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns the Cross-Origin Resource Sharing (CORS) configuration information // set for the bucket. // @@ -2815,6 +3186,15 @@ func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Reque // action. By default, the bucket owner has this permission and can grant it // to others. // +// When you use this API operation with an access point, provide the alias of +// the access point in place of the bucket name. +// +// When you use this API operation with an Object Lambda access point, provide +// the alias of the Object Lambda access point in place of the bucket name. +// If the Object Lambda access point alias in a request is not valid, the error +// code InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). +// // For more information about CORS, see Enabling Cross-Origin Resource Sharing // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). // @@ -2895,12 +3275,14 @@ func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *r // GetBucketEncryption API operation for Amazon Simple Storage Service. // -// Returns the default encryption configuration for an Amazon S3 bucket. If -// the bucket does not have a default encryption configuration, GetBucketEncryption -// returns ServerSideEncryptionConfigurationNotFoundError. +// This operation is not supported by directory buckets. // -// For information about the Amazon S3 default encryption feature, see Amazon -// S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). +// Returns the default encryption configuration for an Amazon S3 bucket. 
By
+// default, all buckets have a default encryption configuration that uses server-side
+// encryption with Amazon S3 managed keys (SSE-S3). For information about the
+// bucket default encryption feature, see Amazon S3 Bucket Default Encryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
+// in the Amazon S3 User Guide.
//
// To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration
// action. The bucket owner has this permission by default. The bucket owner
@@ -2985,6 +3367,8 @@ func (c *S3) GetBucketIntelligentTieringConfigurationRequest(input *GetBucketInt
// GetBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Gets the S3 Intelligent-Tiering configuration from the specified bucket.
//
// The S3 Intelligent-Tiering storage class is designed to optimize storage
@@ -3083,6 +3467,8 @@ func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryCon
// GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Returns an inventory configuration (identified by the inventory configuration
// ID) from the bucket.
//
@@ -3184,6 +3570,8 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req
// see the updated version of this topic. This topic is provided for backward
// compatibility.
//
+// This operation is not supported by directory buckets.
+//
// Returns the lifecycle configuration information set on the bucket. For information
// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html).
//
@@ -3282,13 +3670,18 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon
// GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Bucket lifecycle configuration now supports specifying a lifecycle rule using
-// an object key name prefix, one or more object tags, or a combination of both.
+// an object key name prefix, one or more object tags, object size, or any combination
+// of these. Accordingly, this section describes the latest API. The previous
+// version of the API supported filtering based only on an object key name prefix,
+// which is supported for backward compatibility. For the related API description,
+// see GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html).
// Accordingly, this section describes the latest API. The response describes
// the new filter element that you can use to specify a filter to select a subset
// of objects to which the rule applies. If you are using a previous version
-// of the lifecycle configuration, it still works. For the earlier action, see
-// GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html).
+// of the lifecycle configuration, it still works.
//
// Returns the lifecycle configuration information set on the bucket. For information
// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html).
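A sketch of reading the rules and their filters back, with the same assumed setup and a hypothetical bucket name:

func printLifecycleRules(svc *s3.S3) {
	lc, err := svc.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
	})
	if err != nil {
		fmt.Println("GetBucketLifecycleConfiguration failed:", err)
		return
	}
	for _, r := range lc.Rules {
		// Status is "Enabled" or "Disabled"; Filter holds the prefix, tag, or
		// object-size criteria described above.
		fmt.Println(aws.StringValue(r.ID), aws.StringValue(r.Status))
	}
}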
@@ -3384,14 +3777,24 @@ func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *reque // GetBucketLocation API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns the Region the bucket resides in. You set the bucket's Region using // the LocationConstraint request parameter in a CreateBucket request. For more // information, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html). // -// To use this implementation of the operation, you must be the bucket owner. +// When you use this API operation with an access point, provide the alias of +// the access point in place of the bucket name. // -// To use this API against an access point, provide the alias of the access -// point in place of the bucket name. +// When you use this API operation with an Object Lambda access point, provide +// the alias of the Object Lambda access point in place of the bucket name. +// If the Object Lambda access point alias in a request is not valid, the error +// code InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). +// +// We recommend that you use HeadBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html) +// to return the Region that a bucket resides in. For backward compatibility, +// Amazon S3 continues to support GetBucketLocation. // // The following operations are related to GetBucketLocation: // @@ -3470,8 +3873,10 @@ func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request // GetBucketLogging API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns the logging status of a bucket and the permissions users have to -// view and modify that status. To use GET, you must be the bucket owner. +// view and modify that status. // // The following operations are related to GetBucketLogging: // @@ -3550,6 +3955,8 @@ func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigu // GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Gets a metrics configuration (specified by the metrics configuration ID) // from the bucket. Note that this doesn't include the daily storage metrics. // @@ -3648,6 +4055,8 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat // GetBucketNotification API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // No longer used, see GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3725,6 +4134,8 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat // GetBucketNotificationConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns the notification configuration of a bucket. 
// // If notifications are not enabled on the bucket, the action returns an empty @@ -3735,6 +4146,15 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat // to other users to read this configuration with the s3:GetBucketNotification // permission. // +// When you use this API operation with an access point, provide the alias of +// the access point in place of the bucket name. +// +// When you use this API operation with an Object Lambda access point, provide +// the alias of the Object Lambda access point in place of the bucket name. +// If the Object Lambda access point alias in a request is not valid, the error +// code InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). +// // For more information about setting and reading the notification configuration // on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). // For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). @@ -3814,6 +4234,8 @@ func (c *S3) GetBucketOwnershipControlsRequest(input *GetBucketOwnershipControls // GetBucketOwnershipControls API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, // you must have the s3:GetBucketOwnershipControls permission. For more information // about Amazon S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html). @@ -3898,22 +4320,62 @@ func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.R // GetBucketPolicy API operation for Amazon Simple Storage Service. // -// Returns the policy of a specified bucket. If you are using an identity other -// than the root user of the Amazon Web Services account that owns the bucket, -// the calling identity must have the GetBucketPolicy permissions on the specified -// bucket and belong to the bucket owner's account in order to use this operation. +// Returns the policy of a specified bucket. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Regional endpoint. These endpoints support path-style +// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// # Permissions +// +// If you are using an identity other than the root user of the Amazon Web Services +// account that owns the bucket, the calling identity must both have the GetBucketPolicy +// permissions on the specified bucket and belong to the bucket owner's account +// in order to use this operation. // // If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access // Denied error. If you have the correct permissions, but you're not using an // identity that belongs to the bucket owner's account, Amazon S3 returns a // 405 Method Not Allowed error. 
// -// As a security precaution, the root user of the Amazon Web Services account -// that owns a bucket can always use this operation, even if the policy explicitly -// denies the root user the ability to perform this action. +// To ensure that bucket owners don't inadvertently lock themselves out of their +// own buckets, the root principal in a bucket owner's Amazon Web Services account +// can perform the GetBucketPolicy, PutBucketPolicy, and DeleteBucketPolicy +// API actions, even if their bucket policy explicitly denies the root principal's +// access. Bucket owner root principals can only be blocked from performing +// these API actions by VPC endpoint policies and Amazon Web Services Organizations +// policies. // -// For more information about bucket policies, see Using Bucket Policies and -// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// - General purpose bucket permissions - The s3:GetBucketPolicy permission +// is required in a policy. For more information about general purpose buckets +// bucket policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation, +// you must have the s3express:GetBucketPolicy permission in an IAM identity-based +// policy instead of a bucket policy. Cross-account access to this API operation +// isn't supported. This operation can only be performed by the Amazon Web +// Services account that owns the resource. For more information about directory +// bucket policies and permissions, see Amazon Web Services Identity and +// Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. +// +// # Example bucket policies +// +// General purpose buckets example bucket policies - See Bucket policy examples +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html) +// in the Amazon S3 User Guide. +// +// Directory bucket example bucket policies - See Example bucket policies for +// S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// in the Amazon S3 User Guide. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. // // The following action is related to GetBucketPolicy: // @@ -3990,6 +4452,8 @@ func (c *S3) GetBucketPolicyStatusRequest(input *GetBucketPolicyStatusInput) (re // GetBucketPolicyStatus API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Retrieves the policy status for an Amazon S3 bucket, indicating whether the // bucket is public. In order to use this operation, you must have the s3:GetBucketPolicyStatus // permission. For more information about Amazon S3 permissions, see Specifying @@ -4079,6 +4543,8 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req // GetBucketReplication API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns the replication configuration of a bucket. 
// // It can take a while to propagate the put or delete a replication configuration @@ -4176,6 +4642,8 @@ func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) // GetBucketRequestPayment API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns the request payment configuration of a bucket. To use this version // of the operation, you must be the bucket owner. For more information, see // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). @@ -4255,6 +4723,8 @@ func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request // GetBucketTagging API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns the tag set associated with the bucket. // // To use this operation, you must have permission to perform the s3:GetBucketTagging @@ -4343,6 +4813,8 @@ func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *r // GetBucketVersioning API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns the versioning state of a bucket. // // To retrieve the versioning state of a bucket, you must be the bucket owner. @@ -4430,6 +4902,8 @@ func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request // GetBucketWebsite API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns the website configuration for a bucket. To host website on Amazon // S3, you can configure a bucket as website by adding a website configuration. // For more information about hosting websites, see Hosting Websites on Amazon @@ -4440,7 +4914,7 @@ func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request // bucket owners can allow other users to read the website configuration by // writing a bucket policy granting them the S3:GetBucketWebsite permission. // -// The following operations are related to DeleteBucketWebsite: +// The following operations are related to GetBucketWebsite: // // - DeleteBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html) // @@ -4517,110 +4991,106 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // GetObject API operation for Amazon Simple Storage Service. // -// Retrieves objects from Amazon S3. To use GET, you must have READ access to -// the object. If you grant READ access to the anonymous user, you can return -// the object without using an authorization header. +// Retrieves an object from Amazon S3. // -// An Amazon S3 bucket has no directory hierarchy such as you would find in -// a typical computer file system. You can, however, create a logical hierarchy -// by using object key names that imply a folder structure. For example, instead -// of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg. +// In the GetObject request, specify the full key name for the object. // -// To get an object from such a logical hierarchy, specify the full key name -// for the object in the GET operation. For a virtual hosted-style request example, -// if you have the object photos/2006/February/sample.jpg, specify the resource -// as /photos/2006/February/sample.jpg. 
For a path-style request example, if -// you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, -// specify the resource as /examplebucket/photos/2006/February/sample.jpg. For -// more information about request types, see HTTP Host Header Bucket Specification -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket). +// General purpose buckets - Both the virtual-hosted-style requests and the +// path-style requests are supported. For a virtual hosted-style request example, +// if you have the object photos/2006/February/sample.jpg, specify the object +// key name as /photos/2006/February/sample.jpg. For a path-style request example, +// if you have the object photos/2006/February/sample.jpg in the bucket named +// examplebucket, specify the object key name as /examplebucket/photos/2006/February/sample.jpg. +// For more information about request types, see HTTP Host Header Bucket Specification +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket) +// in the Amazon S3 User Guide. // -// For more information about returning the ACL of an object, see GetObjectAcl -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html). +// Directory buckets - Only virtual-hosted-style requests are supported. For +// a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg +// in the bucket named examplebucket--use1-az5--x-s3, specify the object key +// name as /photos/2006/February/sample.jpg. Also, when you make requests to +// this API operation, your requests are sent to the Zonal endpoint. These endpoints +// support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. // -// If the object you are retrieving is stored in the S3 Glacier or S3 Glacier -// Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering -// Deep Archive tiers, before you can retrieve the object you must first restore -// a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). -// Otherwise, this action returns an InvalidObjectStateError error. For information -// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). +// Permissions +// +// - General purpose bucket permissions - You must have the required permissions +// in a policy. To use GetObject, you must have the READ access to the object +// (or version). If you grant READ access to the anonymous user, the GetObject +// operation returns the object without using an authorization header. For +// more information, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon S3 User Guide. If you include a versionId in your request +// header, you must have the s3:GetObjectVersion permission to access a specific +// version of an object. The s3:GetObject permission is not required in this +// scenario. If you request the current version of an object without a specific +// versionId in the request header, only the s3:GetObject permission is required. 
+// The s3:GetObjectVersion permission is not required in this scenario. If +// the object that you request doesn’t exist, the error that Amazon S3 +// returns depends on whether you also have the s3:ListBucket permission. +// If you have the s3:ListBucket permission on the bucket, Amazon S3 returns +// an HTTP status code 404 Not Found error. If you don’t have the s3:ListBucket +// permission, Amazon S3 returns an HTTP status code 403 Access Denied error. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// # Storage classes +// +// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval +// storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering +// Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, +// before you can retrieve the object you must first restore a copy using RestoreObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). +// Otherwise, this operation returns an InvalidObjectState error. For information +// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) +// in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, only the S3 Express One Zone storage +// class is supported to store newly created objects. Unsupported storage class +// values won't write a destination object and will respond with the HTTP status +// code 400 Bad Request. +// +// # Encryption // // Encryption request headers, like x-amz-server-side-encryption, should not -// be sent for GET requests if your object uses server-side encryption with -// KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed encryption -// keys (SSE-S3). If your object does use these types of keys, you’ll get -// an HTTP 400 BadRequest error. +// be sent for the GetObject requests, if your object uses server-side encryption +// with Amazon S3 managed encryption keys (SSE-S3), server-side encryption with +// Key Management Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption +// with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in +// your GetObject requests for the object that uses these types of keys, you’ll +// get an HTTP 400 Bad Request error. 
// -// If you encrypt an object by using server-side encryption with customer-provided -// encryption keys (SSE-C) when you store the object in Amazon S3, then when -// you GET the object, you must use the following headers: -// -// - x-amz-server-side-encryption-customer-algorithm -// -// - x-amz-server-side-encryption-customer-key -// -// - x-amz-server-side-encryption-customer-key-MD5 -// -// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided -// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). -// -// Assuming you have the relevant permission to read object tags, the response -// also returns the x-amz-tagging-count header that provides the count of number -// of tags associated with the object. You can use GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) -// to retrieve the tag set associated with an object. -// -// # Permissions -// -// You need the relevant read object (or version) permission for this operation. -// For more information, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). -// If the object you request does not exist, the error Amazon S3 returns depends -// on whether you also have the s3:ListBucket permission. -// -// - If you have the s3:ListBucket permission on the bucket, Amazon S3 will -// return an HTTP status code 404 ("no such key") error. -// -// - If you don’t have the s3:ListBucket permission, Amazon S3 will return -// an HTTP status code 403 ("access denied") error. -// -// # Versioning -// -// By default, the GET action returns the current version of an object. To return -// a different version, use the versionId subresource. -// -// - If you supply a versionId, you need the s3:GetObjectVersion permission -// to access a specific version of an object. If you request a specific version, -// you do not need to have the s3:GetObject permission. -// -// - If the current version of the object is a delete marker, Amazon S3 behaves -// as if the object was deleted and includes x-amz-delete-marker: true in -// the response. -// -// For more information about versioning, see PutBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html). -// -// # Overriding Response Header Values +// # Overriding response header values through the request // // There are times when you want to override certain response header values -// in a GET response. For example, you might override the Content-Disposition -// response header value in your GET request. +// of a GetObject response. For example, you might override the Content-Disposition +// response header value through your GetObject request. // -// You can override values for a set of response headers using the following -// query parameters. These response header values are sent only on a successful -// request, that is, when status code 200 OK is returned. The set of headers -// you can override using these parameters is a subset of the headers that Amazon -// S3 accepts when you create an object. The response headers that you can override -// for the GET response are Content-Type, Content-Language, Expires, Cache-Control, -// Content-Disposition, and Content-Encoding. To override these header values -// in the GET response, you use the following request parameters. +// You can override values for a set of response headers. 
These modified response +// header values are included only in a successful response, that is, when the +// HTTP status code 200 OK is returned. The headers you can override using the +// following query parameters in the request are a subset of the headers that +// Amazon S3 accepts when you create an object. // -// You must sign the request, either using an Authorization header or a presigned -// URL, when using these parameters. They cannot be used with an unsigned (anonymous) -// request. +// The response headers that you can override for the GetObject response are +// Cache-Control, Content-Disposition, Content-Encoding, Content-Language, Content-Type, +// and Expires. // -// - response-content-type -// -// - response-content-language -// -// - response-expires +// To override values for a set of response headers in the GetObject response, +// you can use the following query parameters in the request. // // - response-cache-control // @@ -4628,17 +5098,19 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // // - response-content-encoding // -// # Additional Considerations about Request Headers +// - response-content-language // -// If both of the If-Match and If-Unmodified-Since headers are present in the -// request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since -// condition evaluates to false; then, S3 returns 200 OK and the data requested. +// - response-content-type // -// If both of the If-None-Match and If-Modified-Since headers are present in -// the request as follows:If-None-Match condition evaluates to false, and; If-Modified-Since -// condition evaluates to true; then, S3 returns 304 Not Modified response code. +// - response-expires // -// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). +// When you use these parameters, you must sign the request by using either +// an Authorization header or a presigned URL. These parameters cannot be used +// with an unsigned (anonymous) request. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. // // The following operations are related to GetObject: // @@ -4661,6 +5133,15 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // - ErrCodeInvalidObjectState "InvalidObjectState" // Object is archived and inaccessible until restored. // +// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval +// storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering +// Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, +// before you can retrieve the object you must first restore a copy using RestoreObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). +// Otherwise, this operation returns an InvalidObjectState error. For information +// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) +// in the Amazon S3 User Guide. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { req, out := c.GetObjectRequest(input) @@ -4726,15 +5207,15 @@ func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request // GetObjectAcl API operation for Amazon Simple Storage Service. 
// +// This operation is not supported by directory buckets. +// // Returns the access control list (ACL) of an object. To use this operation, // you must have s3:GetObjectAcl permissions or READ_ACP access to the object. // For more information, see Mapping of ACL permissions and access policy permissions // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping) // in the Amazon S3 User Guide // -// This action is not supported by Amazon S3 on Outposts. -// -// # Versioning +// This functionality is not supported for Amazon S3 on Outposts. // // By default, GET returns ACL information about the current version of an object. // To return ACL information about a different version, use the versionId subresource. @@ -4832,17 +5313,65 @@ func (c *S3) GetObjectAttributesRequest(input *GetObjectAttributesInput) (req *r // GetObjectAttributes API operation for Amazon Simple Storage Service. // // Retrieves all the metadata from an object without returning the object itself. -// This action is useful if you're interested only in an object's metadata. -// To use GetObjectAttributes, you must have READ access to the object. +// This operation is useful if you're interested only in an object's metadata. // -// GetObjectAttributes combines the functionality of GetObjectAcl, GetObjectLegalHold, -// GetObjectLockConfiguration, GetObjectRetention, GetObjectTagging, HeadObject, -// and ListParts. All of the data returned with each of those individual calls -// can be returned with a single call to GetObjectAttributes. +// GetObjectAttributes combines the functionality of HeadObject and ListParts. +// All of the data returned with each of those individual calls can be returned +// with a single call to GetObjectAttributes. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - To use GetObjectAttributes, you +// must have READ access to the object. The permissions that you need to +// use this operation with depend on whether the bucket is versioned. If +// the bucket is versioned, you need both the s3:GetObjectVersion and s3:GetObjectVersionAttributes +// permissions for this operation. If the bucket is not versioned, you need +// the s3:GetObject and s3:GetObjectAttributes permissions. For more information, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon S3 User Guide. If the object that you request does not exist, +// the error Amazon S3 returns depends on whether you also have the s3:ListBucket +// permission. If you have the s3:ListBucket permission on the bucket, Amazon +// S3 returns an HTTP status code 404 Not Found ("no such key") error. If +// you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP +// status code 403 Forbidden ("access denied") error. 
+// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// # Encryption +// +// Encryption request headers, like x-amz-server-side-encryption, should not +// be sent for HEAD requests if your object uses server-side encryption with +// Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption +// with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with +// Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption +// header is used when you PUT an object to S3 and want to specify the encryption +// method. If you include this header in a GET request for an object that uses +// these types of keys, you’ll get an HTTP 400 Bad Request error. It's because +// the encryption method can't be changed when you retrieve the object. // // If you encrypt an object by using server-side encryption with customer-provided // encryption keys (SSE-C) when you store the object in Amazon S3, then when -// you retrieve the metadata from the object, you must use the following headers: +// you retrieve the metadata from the object, you must use the following headers +// to provide the encryption key for the server to be able to retrieve the object's +// metadata. The headers are: // // - x-amz-server-side-encryption-customer-algorithm // @@ -4854,47 +5383,35 @@ func (c *S3) GetObjectAttributesRequest(input *GetObjectAttributesInput) (req *r // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) // in the Amazon S3 User Guide. // -// - Encryption request headers, such as x-amz-server-side-encryption, should -// not be sent for GET requests if your object uses server-side encryption -// with Amazon Web Services KMS keys stored in Amazon Web Services Key Management -// Service (SSE-KMS) or server-side encryption with Amazon S3 managed encryption -// keys (SSE-S3). If your object does use these types of keys, you'll get -// an HTTP 400 Bad Request error. +// Directory bucket permissions - For directory buckets, only server-side encryption +// with Amazon S3 managed keys (SSE-S3) (AES256) is supported. // -// - The last modified property in this case is the creation date of the -// object. +// # Versioning +// +// Directory buckets - S3 Versioning isn't enabled and supported for directory +// buckets. For this API operation, only the null value of the version ID is +// supported by directory buckets. You can only specify null to the versionId +// query parameter in the request. 
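+//
+// As a brief sketch, assuming an existing *s3.S3 client named svc and a
+// placeholder bucket and key (neither is part of the AWS reference text),
+// requesting a couple of attributes looks like:
+//
+//	out, err := svc.GetObjectAttributes(&s3.GetObjectAttributesInput{
+//		Bucket: aws.String("examplebucket"), // placeholder
+//		Key:    aws.String("photos/2006/February/sample.jpg"),
+//		ObjectAttributes: aws.StringSlice([]string{
+//			s3.ObjectAttributesEtag,
+//			s3.ObjectAttributesObjectSize,
+//		}),
+//	})
+//	if err == nil {
+//		// ETag and ObjectSize come back without fetching the object body.
+//		fmt.Println(aws.StringValue(out.ETag), aws.Int64Value(out.ObjectSize))
+//	}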
+// +// # Conditional request headers // // Consider the following when using request headers: // // - If both of the If-Match and If-Unmodified-Since headers are present // in the request as follows, then Amazon S3 returns the HTTP status code // 200 OK and the data requested: If-Match condition evaluates to true. If-Unmodified-Since -// condition evaluates to false. +// condition evaluates to false. For more information about conditional requests, +// see RFC 7232 (https://tools.ietf.org/html/rfc7232). // // - If both of the If-None-Match and If-Modified-Since headers are present // in the request as follows, then Amazon S3 returns the HTTP status code // 304 Not Modified: If-None-Match condition evaluates to false. If-Modified-Since -// condition evaluates to true. +// condition evaluates to true. For more information about conditional requests, +// see RFC 7232 (https://tools.ietf.org/html/rfc7232). // -// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). +// # HTTP Host header syntax // -// # Permissions -// -// The permissions that you need to use this operation depend on whether the -// bucket is versioned. If the bucket is versioned, you need both the s3:GetObjectVersion -// and s3:GetObjectVersionAttributes permissions for this operation. If the -// bucket is not versioned, you need the s3:GetObject and s3:GetObjectAttributes -// permissions. For more information, see Specifying Permissions in a Policy -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// in the Amazon S3 User Guide. If the object that you request does not exist, -// the error Amazon S3 returns depends on whether you also have the s3:ListBucket -// permission. -// -// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns -// an HTTP status code 404 Not Found ("no such key") error. -// -// - If you don't have the s3:ListBucket permission, Amazon S3 returns an -// HTTP status code 403 Forbidden ("access denied") error. +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. // // The following actions are related to GetObjectAttributes: // @@ -4990,10 +5507,12 @@ func (c *S3) GetObjectLegalHoldRequest(input *GetObjectLegalHoldInput) (req *req // GetObjectLegalHold API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Gets an object's current legal hold status. For more information, see Locking // Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // -// This action is not supported by Amazon S3 on Outposts. +// This functionality is not supported for Amazon S3 on Outposts. // // The following action is related to GetObjectLegalHold: // @@ -5070,6 +5589,8 @@ func (c *S3) GetObjectLockConfigurationRequest(input *GetObjectLockConfiguration // GetObjectLockConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Gets the Object Lock configuration for a bucket. The rule specified in the // Object Lock configuration will be applied by default to every new object // placed in the specified bucket. For more information, see Locking Objects @@ -5150,10 +5671,12 @@ func (c *S3) GetObjectRetentionRequest(input *GetObjectRetentionInput) (req *req // GetObjectRetention API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. 
+// // Retrieves an object's retention settings. For more information, see Locking // Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // -// This action is not supported by Amazon S3 on Outposts. +// This functionality is not supported for Amazon S3 on Outposts. // // The following action is related to GetObjectRetention: // @@ -5230,6 +5753,8 @@ func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request // GetObjectTagging API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns the tag-set of an object. You send the GET request against the tagging // subresource associated with the object. // @@ -5325,9 +5850,10 @@ func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request // GetObjectTorrent API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns torrent files from a bucket. BitTorrent can save you bandwidth when -// you're distributing large files. For more information about BitTorrent, see -// Using BitTorrent with Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). +// you're distributing large files. // // You can get torrent only for objects that are less than 5 GB in size, and // that are not encrypted using server-side encryption with a customer-provided @@ -5335,7 +5861,7 @@ func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request // // To use GET, you must have READ access to the object. // -// This action is not supported by Amazon S3 on Outposts. +// This functionality is not supported for Amazon S3 on Outposts. // // The following action is related to GetObjectTorrent: // @@ -5412,6 +5938,8 @@ func (c *S3) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req // GetPublicAccessBlock API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To // use this operation, you must have the s3:GetBucketPublicAccessBlock permission. // For more information about Amazon S3 permissions, see Specifying Permissions @@ -5503,33 +6031,63 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou output = &HeadBucketOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // HeadBucket API operation for Amazon Simple Storage Service. // -// This action is useful to determine if a bucket exists and you have permission -// to access it. The action returns a 200 OK if the bucket exists and you have -// permission to access it. +// You can use this operation to determine if a bucket exists and if you have +// permission to access it. The action returns a 200 OK if the bucket exists +// and you have permission to access it. // // If the bucket does not exist or you do not have permission to access it, -// the HEAD request returns a generic 404 Not Found or 403 Forbidden code. A -// message body is not included, so you cannot determine the exception beyond -// these error codes. +// the HEAD request returns a generic 400 Bad Request, 403 Forbidden or 404 +// Not Found code. A message body is not included, so you cannot determine the +// exception beyond these HTTP response codes. // -// To use this operation, you must have permissions to perform the s3:ListBucket -// action. 
The bucket owner has this permission by default and can grant this
-// permission to others. For more information about permissions, see Permissions
-// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
+// Directory buckets - You must make requests for this API operation to the
+// Zonal endpoint. These endpoints support virtual-hosted-style requests in
+// the format https://bucket_name.s3express-az_id.region.amazonaws.com. Path-style
+// requests are not supported. For more information, see Regional and Zonal
+// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
//
-// To use this API against an access point, you must provide the alias of the
-// access point in place of the bucket name or specify the access point ARN.
-// When using the access point ARN, you must direct requests to the access point
-// hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
-// When using the Amazon Web Services SDKs, you provide the ARN in place of
-// the bucket name. For more information see, Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html).
+// # Authentication and authorization
+//
+// All HeadBucket requests must be authenticated and signed by using IAM credentials
+// (access key ID and secret access key for the IAM identities). All headers
+// with the x-amz- prefix, including x-amz-copy-source, must be signed. For
+// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html).
+//
+// Directory bucket - You must use IAM credentials to authenticate and authorize
+// your access to the HeadBucket API operation, instead of using the temporary
+// security credentials through the CreateSession API operation.
+//
+// The Amazon Web Services CLI or SDKs handle authentication and authorization
+// on your behalf.
+//
+// Permissions
+//
+// - General purpose bucket permissions - To use this operation, you must
+// have permissions to perform the s3:ListBucket action. The bucket owner
+// has this permission by default and can grant this permission to others.
+// For more information about permissions, see Managing access permissions
+// to your Amazon S3 resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
+// in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - You must have the s3express:CreateSession
+// permission in the Action element of a policy. By default, the session
+// is in the ReadWrite mode. If you want to restrict access, you can
+// explicitly set the s3express:SessionMode condition key to ReadOnly on
+// the bucket. For more information about example bucket policies, see Example
+// bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html)
+// and Amazon Web Services Identity and Access Management (IAM) identity-based
+// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html)
+// in the Amazon S3 User Guide.
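+//
+// A minimal existence check with this SDK version might look like the following
+// sketch (the svc client and bucket name are assumptions, and the awserr package
+// comes from this SDK):
+//
+//	_, err := svc.HeadBucket(&s3.HeadBucketInput{
+//		Bucket: aws.String("examplebucket"), // placeholder
+//	})
+//	if reqErr, ok := err.(awserr.RequestFailure); ok {
+//		// Only the generic status code is available; no error body is returned.
+//		fmt.Println(reqErr.StatusCode()) // e.g. 403 or 404
+//	}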
+// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5607,19 +6165,70 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // HeadObject API operation for Amazon Simple Storage Service. // -// The HEAD action retrieves metadata from an object without returning the object -// itself. This action is useful if you're only interested in an object's metadata. -// To use HEAD, you must have READ access to the object. +// The HEAD operation retrieves metadata from an object without returning the +// object itself. This operation is useful if you're interested only in an object's +// metadata. // -// A HEAD request has the same options as a GET action on an object. The response -// is identical to the GET response except that there is no response body. Because -// of this, if the HEAD request generates an error, it returns a generic 404 -// Not Found or 403 Forbidden code. It is not possible to retrieve the exact -// exception beyond these error codes. +// A HEAD request has the same options as a GET operation on an object. The +// response is identical to the GET response except that there is no response +// body. Because of this, if the HEAD request generates an error, it returns +// a generic code, such as 400 Bad Request, 403 Forbidden, 404 Not Found, 405 +// Method Not Allowed, 412 Precondition Failed, or 304 Not Modified. It's not +// possible to retrieve the exact exception of these error codes. +// +// Request headers are limited to 8 KB in size. For more information, see Common +// Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - To use HEAD, you must have the +// s3:GetObject permission. You need the relevant read object (or version) +// permission for this operation. For more information, see Actions, resources, +// and condition keys for Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html) +// in the Amazon S3 User Guide. If the object you request doesn't exist, +// the error that Amazon S3 returns depends on whether you also have the +// s3:ListBucket permission. If you have the s3:ListBucket permission on +// the bucket, Amazon S3 returns an HTTP status code 404 Not Found error. +// If you don’t have the s3:ListBucket permission, Amazon S3 returns an +// HTTP status code 403 Forbidden error. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. 
Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// # Encryption +// +// Encryption request headers, like x-amz-server-side-encryption, should not +// be sent for HEAD requests if your object uses server-side encryption with +// Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption +// with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with +// Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption +// header is used when you PUT an object to S3 and want to specify the encryption +// method. If you include this header in a HEAD request for an object that uses +// these types of keys, you’ll get an HTTP 400 Bad Request error. It's because +// the encryption method can't be changed when you retrieve the object. // // If you encrypt an object by using server-side encryption with customer-provided // encryption keys (SSE-C) when you store the object in Amazon S3, then when -// you retrieve the metadata from the object, you must use the following headers: +// you retrieve the metadata from the object, you must use the following headers +// to provide the encryption key for the server to be able to retrieve the object's +// metadata. The headers are: // // - x-amz-server-side-encryption-customer-algorithm // @@ -5628,46 +6237,32 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // - x-amz-server-side-encryption-customer-key-MD5 // // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided -// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) +// in the Amazon S3 User Guide. // -// - Encryption request headers, like x-amz-server-side-encryption, should -// not be sent for GET requests if your object uses server-side encryption -// with KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed -// encryption keys (SSE-S3). If your object does use these types of keys, -// you’ll get an HTTP 400 BadRequest error. +// Directory bucket permissions - For directory buckets, only server-side encryption +// with Amazon S3 managed keys (SSE-S3) (AES256) is supported. // -// - The last modified property in this case is the creation date of the -// object. +// Versioning // -// Request headers are limited to 8 KB in size. For more information, see Common -// Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). +// - If the current version of the object is a delete marker, Amazon S3 behaves +// as if the object was deleted and includes x-amz-delete-marker: true in +// the response. 
// -// Consider the following when using request headers: +// - If the specified version is a delete marker, the response returns a +// 405 Method Not Allowed error and the Last-Modified: timestamp response +// header. // -// - Consideration 1 – If both of the If-Match and If-Unmodified-Since -// headers are present in the request as follows: If-Match condition evaluates -// to true, and; If-Unmodified-Since condition evaluates to false; Then Amazon -// S3 returns 200 OK and the data requested. +// - Directory buckets - Delete marker is not supported by directory buckets. // -// - Consideration 2 – If both of the If-None-Match and If-Modified-Since -// headers are present in the request as follows: If-None-Match condition -// evaluates to false, and; If-Modified-Since condition evaluates to true; -// Then Amazon S3 returns the 304 Not Modified response code. +// - Directory buckets - S3 Versioning isn't enabled and supported for directory +// buckets. For this API operation, only the null value of the version ID +// is supported by directory buckets. You can only specify null to the versionId +// query parameter in the request. // -// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). +// # HTTP Host header syntax // -// # Permissions -// -// You need the relevant read object (or version) permission for this operation. -// For more information, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). -// If the object you request does not exist, the error Amazon S3 returns depends -// on whether you also have the s3:ListBucket permission. -// -// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns -// an HTTP status code 404 ("no such key") error. -// -// - If you don’t have the s3:ListBucket permission, Amazon S3 returns -// an HTTP status code 403 ("access denied") error. +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. // // The following actions are related to HeadObject: // @@ -5749,6 +6344,8 @@ func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalytics // ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Lists the analytics configurations for the bucket. You can have up to 1,000 // analytics configurations per bucket. // @@ -5848,6 +6445,8 @@ func (c *S3) ListBucketIntelligentTieringConfigurationsRequest(input *ListBucket // ListBucketIntelligentTieringConfigurations API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Lists the S3 Intelligent-Tiering configuration from the specified bucket. // // The S3 Intelligent-Tiering storage class is designed to optimize storage @@ -5946,6 +6545,8 @@ func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventory // ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns a list of inventory configurations for the bucket. You can have up // to 1,000 analytics configurations per bucket. // @@ -6045,6 +6646,8 @@ func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConf // ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. 
+// // Lists the metrics configurations for the bucket. The metrics configurations // are only for the request metrics of the bucket and do not provide information // on daily storage metrics. You can have up to 1,000 configurations per bucket. @@ -6145,9 +6748,14 @@ func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, // ListBuckets API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns a list of all buckets owned by the authenticated sender of the request. // To use this operation, you must have the s3:ListAllMyBuckets permission. // +// For information about Amazon S3 buckets, see Creating, configuring, and working +// with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6176,6 +6784,160 @@ func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, op return out, req.Send() } +const opListDirectoryBuckets = "ListDirectoryBuckets" + +// ListDirectoryBucketsRequest generates a "aws/request.Request" representing the +// client's request for the ListDirectoryBuckets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDirectoryBuckets for more information on using the ListDirectoryBuckets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListDirectoryBucketsRequest method. +// req, resp := client.ListDirectoryBucketsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListDirectoryBuckets +func (c *S3) ListDirectoryBucketsRequest(input *ListDirectoryBucketsInput) (req *request.Request, output *ListDirectoryBucketsOutput) { + op := &request.Operation{ + Name: opListDirectoryBuckets, + HTTPMethod: "GET", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"ContinuationToken"}, + OutputTokens: []string{"ContinuationToken"}, + LimitToken: "MaxDirectoryBuckets", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDirectoryBucketsInput{} + } + + output = &ListDirectoryBucketsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDirectoryBuckets API operation for Amazon Simple Storage Service. +// +// Returns a list of all Amazon S3 directory buckets owned by the authenticated +// sender of the request. For more information about directory buckets, see +// Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) +// in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Regional endpoint. These endpoints support path-style +// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. 
For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// # Permissions +// +// You must have the s3express:ListAllMyDirectoryBuckets permission in an IAM +// identity-based policy instead of a bucket policy. Cross-account access to +// this API operation isn't supported. This operation can only be performed +// by the Amazon Web Services account that owns the resource. For more information +// about directory bucket policies and permissions, see Amazon Web Services +// Identity and Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListDirectoryBuckets for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListDirectoryBuckets +func (c *S3) ListDirectoryBuckets(input *ListDirectoryBucketsInput) (*ListDirectoryBucketsOutput, error) { + req, out := c.ListDirectoryBucketsRequest(input) + return out, req.Send() +} + +// ListDirectoryBucketsWithContext is the same as ListDirectoryBuckets with the addition of +// the ability to pass a context and additional request options. +// +// See ListDirectoryBuckets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListDirectoryBucketsWithContext(ctx aws.Context, input *ListDirectoryBucketsInput, opts ...request.Option) (*ListDirectoryBucketsOutput, error) { + req, out := c.ListDirectoryBucketsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListDirectoryBucketsPages iterates over the pages of a ListDirectoryBuckets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDirectoryBuckets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDirectoryBuckets operation. +// pageNum := 0 +// err := client.ListDirectoryBucketsPages(params, +// func(page *s3.ListDirectoryBucketsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *S3) ListDirectoryBucketsPages(input *ListDirectoryBucketsInput, fn func(*ListDirectoryBucketsOutput, bool) bool) error { + return c.ListDirectoryBucketsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDirectoryBucketsPagesWithContext same as ListDirectoryBucketsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListDirectoryBucketsPagesWithContext(ctx aws.Context, input *ListDirectoryBucketsInput, fn func(*ListDirectoryBucketsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDirectoryBucketsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDirectoryBucketsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDirectoryBucketsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListMultipartUploads = "ListMultipartUploads" // ListMultipartUploadsRequest generates a "aws/request.Request" representing the @@ -6225,28 +6987,79 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req // ListMultipartUploads API operation for Amazon Simple Storage Service. // -// This action lists in-progress multipart uploads. An in-progress multipart -// upload is a multipart upload that has been initiated using the Initiate Multipart -// Upload request, but has not yet been completed or aborted. +// This operation lists in-progress multipart uploads in a bucket. An in-progress +// multipart upload is a multipart upload that has been initiated by the CreateMultipartUpload +// request, but has not yet been completed or aborted. // -// This action returns at most 1,000 multipart uploads in the response. 1,000 -// multipart uploads is the maximum number of uploads a response can include, -// which is also the default value. You can further limit the number of uploads -// in a response by specifying the max-uploads parameter in the response. If -// additional multipart uploads satisfy the list criteria, the response will -// contain an IsTruncated element with the value true. To list the additional -// multipart uploads, use the key-marker and upload-id-marker request parameters. +// Directory buckets - If multipart uploads in a directory bucket are in progress, +// you can't delete the bucket until all the in-progress multipart uploads are +// aborted or completed. // -// In the response, the uploads are sorted by key. If your application has initiated -// more than one multipart upload using the same object key, then uploads in -// the response are first sorted by key. Additionally, uploads are sorted in -// ascending order within each key by the upload initiation time. +// The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads +// in the response. The limit of 1,000 multipart uploads is also the default +// value. You can further limit the number of uploads in a response by specifying +// the max-uploads request parameter. If there are more than 1,000 multipart +// uploads that satisfy your ListMultipartUploads request, the response returns +// an IsTruncated element with the value of true, a NextKeyMarker element, and +// a NextUploadIdMarker element. To list the remaining multipart uploads, you +// need to make subsequent ListMultipartUploads requests. In these requests, +// include two query parameters: key-marker and upload-id-marker. Set the value +// of key-marker to the NextKeyMarker value from the previous response. Similarly, +// set the value of upload-id-marker to the NextUploadIdMarker value from the +// previous response. 
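A sketch of the marker-based pagination just described, for a general purpose bucket (for directory buckets, only key-marker would be carried forward, as noted below); the bucket name is a placeholder and error handling is reduced to log.Fatal.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	in := &s3.ListMultipartUploadsInput{
		Bucket:     aws.String("my-bucket"), // hypothetical bucket name
		MaxUploads: aws.Int64(1000),
	}
	for {
		out, err := svc.ListMultipartUploads(in)
		if err != nil {
			log.Fatal(err)
		}
		for _, u := range out.Uploads {
			fmt.Println(aws.StringValue(u.Key), aws.StringValue(u.UploadId))
		}
		if !aws.BoolValue(out.IsTruncated) {
			break
		}
		// Carry both markers into the next request, as described above.
		in.KeyMarker = out.NextKeyMarker
		in.UploadIdMarker = out.NextUploadIdMarker
	}
}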
// -// For more information on multipart uploads, see Uploading Objects Using Multipart -// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// Directory buckets - The upload-id-marker element and the NextUploadIdMarker +// element aren't supported by directory buckets. To list the additional multipart +// uploads, you only need to set the value of key-marker to the NextKeyMarker +// value from the previous response. // -// For information on permissions required to use the multipart upload API, -// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// For more information about multipart uploads, see Uploading Objects Using +// Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload API, see Multipart Upload and Permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// Sorting of multipart uploads in response +// +// - General purpose bucket - In the ListMultipartUploads response, the multipart +// uploads are sorted based on two criteria: Key-based sorting - Multipart +// uploads are initially sorted in ascending order based on their object +// keys. Time-based sorting - For uploads that share the same object key, +// they are further sorted in ascending order based on the upload initiation +// time. Among uploads with the same key, the one that was initiated first +// will appear before the ones that were initiated later. +// +// - Directory bucket - In the ListMultipartUploads response, the multipart +// uploads aren't sorted lexicographically based on the object keys. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. 
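The CLI and SDKs normally create and refresh these directory-bucket sessions automatically, but the CreateSession flow described above can also be driven by hand. A hedged sketch; the directory bucket name (with its required --azid--x-s3 suffix) is hypothetical.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	out, err := svc.CreateSession(&s3.CreateSessionInput{
		Bucket: aws.String("my-bucket--usw2-az1--x-s3"), // hypothetical directory bucket
	})
	if err != nil {
		log.Fatal(err)
	}
	// The temporary credentials carry the session token used on later requests.
	fmt.Println("session expires:", aws.TimeValue(out.Credentials.Expiration))
}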
// // The following operations are related to ListMultipartUploads: // @@ -6388,11 +7201,13 @@ func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *req // ListObjectVersions API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns metadata about all versions of the objects in a bucket. You can also // use request parameters as selection criteria to return metadata about a subset // of all the object versions. // -// To use this operation, you must have permissions to perform the s3:ListBucketVersions +// To use this operation, you must have permission to perform the s3:ListBucketVersions // action. Be aware of the name difference. // // A 200 OK response can contain valid or invalid XML. Make sure to design your @@ -6400,8 +7215,6 @@ func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *req // // To use this operation, you must have READ access to the bucket. // -// This action is not supported by Amazon S3 on Outposts. -// // The following operations are related to ListObjectVersions: // // - ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) @@ -6540,6 +7353,8 @@ func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, // ListObjects API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Returns some or all (up to 1,000) of the objects in a bucket. You can use // the request parameters as selection criteria to return a subset of the objects // in a bucket. A 200 OK response can contain valid or invalid XML. Be sure @@ -6700,25 +7515,57 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque // You can use the request parameters as selection criteria to return a subset // of the objects in a bucket. A 200 OK response can contain valid or invalid // XML. Make sure to design your application to parse the contents of the response -// and handle it appropriately. Objects are returned sorted in an ascending -// order of the respective key names in the list. For more information about -// listing objects, see Listing object keys programmatically (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) +// and handle it appropriately. For more information about listing objects, +// see Listing object keys programmatically (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) +// in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html). // -// To use this operation, you must have READ access to the bucket. +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. // -// To use this action in an Identity and Access Management (IAM) policy, you -// must have permissions to perform the s3:ListBucket action. The bucket owner -// has this permission by default and can grant this permission to others. 
For -// more information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// Permissions +// +// - General purpose bucket permissions - To use this operation, you must +// have READ access to the bucket. You must have permission to perform the +// s3:ListBucket action. The bucket owner has this permission by default +// and can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// Sorting order of returned objects +// +// - General purpose bucket - For general purpose buckets, ListObjectsV2 +// returns objects in lexicographical order based on their key names. +// +// - Directory bucket - For directory buckets, ListObjectsV2 does not return +// objects in lexicographical order. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. // // This section describes the latest revision of this action. We recommend that -// you use this revised API for application development. For backward compatibility, -// Amazon S3 continues to support the prior version of this API, ListObjects -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). -// -// To get a list of your buckets, see ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html). +// you use this revised API operation for application development. For backward +// compatibility, Amazon S3 continues to support the prior version of this API +// operation, ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). // // The following operations are related to ListObjectsV2: // @@ -6862,24 +7709,58 @@ func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, outp // ListParts API operation for Amazon Simple Storage Service. 
// // Lists the parts that have been uploaded for a specific multipart upload. -// This operation must include the upload ID, which you obtain by sending the -// initiate multipart upload request (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)). -// This request returns a maximum of 1,000 uploaded parts. The default number -// of parts returned is 1,000 parts. You can restrict the number of parts returned -// by specifying the max-parts request parameter. If your multipart upload consists -// of more than 1,000 parts, the response returns an IsTruncated field with -// the value of true, and a NextPartNumberMarker element. In subsequent ListParts -// requests you can include the part-number-marker query string parameter and -// set its value to the NextPartNumberMarker field value from the previous response. // -// If the upload was created using a checksum algorithm, you will need to have -// permission to the kms:Decrypt action for the request to succeed. +// To use this operation, you must provide the upload ID in the request. You +// obtain this uploadID by sending the initiate multipart upload request through +// CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). +// +// The ListParts request returns a maximum of 1,000 uploaded parts. The limit +// of 1,000 parts is also the default value. You can restrict the number of +// parts in a response by specifying the max-parts request parameter. If your +// multipart upload consists of more than 1,000 parts, the response returns +// an IsTruncated field with the value of true, and a NextPartNumberMarker element. +// To list remaining uploaded parts, in subsequent ListParts requests, include +// the part-number-marker query string parameter and set its value to the NextPartNumberMarker +// field value from the previous response. // // For more information on multipart uploads, see Uploading Objects Using Multipart -// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide. // -// For information on permissions required to use the multipart upload API, -// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload API, see Multipart Upload and Permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. If the upload was created using server-side +// encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer +// server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you +// must have permission to the kms:Decrypt action for the ListParts request +// to succeed. 
+// +// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. Amazon Web Services CLI or SDKs
+// create session and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
//
// The following operations are related to ListParts:
// @@ -7018,6 +7899,8 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC
// PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service.
// +// This operation is not supported by directory buckets.
+//
// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer
// Acceleration is a bucket-level feature that enables you to perform faster
// data transfers to Amazon S3. @@ -7130,9 +8013,11 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request
// PutBucketAcl API operation for Amazon Simple Storage Service.
// +// This operation is not supported by directory buckets.
+//
// Sets the permissions on an existing bucket using access control lists (ACL).
// For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html).
-// To set the ACL of a bucket, you must have WRITE_ACP permission. +// To set the ACL of a bucket, you must have the WRITE_ACP permission.
//
// You can use one of the following two ways to set a bucket's permissions:
// @@ -7156,9 +8041,9 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request
// object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
// in the Amazon S3 User Guide.
//
// -// # Access Permissions +// # Permissions
//
// -// You can set access permissions using one of the following methods: +// You can set access permissions by using one of the following methods:
//
// - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports
// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a @@ -7208,7 +8093,7 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request
// xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
//
// - By Email address: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
// xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
// The grantee is resolved to the CanonicalUser and, in a response to a GET
// Object acl request, appears as the CanonicalUser.
Using email addresses // to specify a grantee is only supported in the following Amazon Web Services @@ -7218,7 +8103,7 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request // Regions and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) // in the Amazon Web Services General Reference. // -// Related Resources +// The following operations are related to PutBucketAcl: // // - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // @@ -7298,6 +8183,8 @@ func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsCon // PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Sets an analytics configuration for the bucket (specified by the analytics // configuration ID). You can have up to 1,000 analytics configurations per // bucket. @@ -7323,7 +8210,7 @@ func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsCon // see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // -// Special Errors +// PutBucketAnalyticsConfiguration has the following special errors: // // - HTTP Error: HTTP 400 Bad Request Code: InvalidArgument Cause: Invalid // argument. @@ -7336,7 +8223,7 @@ func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsCon // the owner of the specified bucket, or you do not have the s3:PutAnalyticsConfiguration // bucket permission to set the configuration on the bucket. // -// Related Resources +// The following operations are related to PutBucketAnalyticsConfiguration: // // - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) // @@ -7420,6 +8307,8 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque // PutBucketCors API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Sets the cors configuration for your bucket. If the configuration exists, // Amazon S3 replaces it. // @@ -7456,7 +8345,7 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon // S3 User Guide. // -// Related Resources +// The following operations are related to PutBucketCors: // // - GetBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html) // @@ -7540,31 +8429,33 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r // PutBucketEncryption API operation for Amazon Simple Storage Service. // -// This action uses the encryption subresource to configure default encryption -// and Amazon S3 Bucket Key for an existing bucket. +// This operation is not supported by directory buckets. // -// Default encryption for a bucket can use server-side encryption with Amazon -// S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). If you specify -// default encryption using SSE-KMS, you can also configure Amazon S3 Bucket -// Key. 
When the default encryption is SSE-KMS, if you upload an object to the
-// bucket and do not specify the KMS key to use for encryption, Amazon S3 uses
-// the default Amazon Web Services managed KMS key for your account. For information
-// about default encryption, see Amazon S3 default bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
-// in the Amazon S3 User Guide. For more information about S3 Bucket Keys, see
-// Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html)
-// in the Amazon S3 User Guide. +// This action uses the encryption subresource to configure default encryption
+// and Amazon S3 Bucket Keys for an existing bucket.
+//
+// By default, all buckets have a default encryption configuration that uses
+// server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally
+// configure default encryption for a bucket by using server-side encryption
+// with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side
+// encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default
+// encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html). If you
+// use PutBucketEncryption to set your default bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
+// to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3
+// does not validate the KMS key ID provided in PutBucketEncryption requests.
//
// This action requires Amazon Web Services Signature Version 4. For more information,
// see Authenticating Requests (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html).
//
// -// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration +// To use this operation, you must have permission to perform the s3:PutEncryptionConfiguration
// action. The bucket owner has this permission by default. The bucket owner
// can grant this permission to others. For more information about permissions,
// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
// in the Amazon S3 User Guide.
//
// -// Related Resources +// The following operations are related to PutBucketEncryption:
//
// - GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html)
// @@ -7642,6 +8533,8 @@ func (c *S3) PutBucketIntelligentTieringConfigurationRequest(input *PutBucketInt
// PutBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service.
// +// This operation is not supported by directory buckets.
+//
// Puts an S3 Intelligent-Tiering configuration to the specified bucket. You
// can have up to 1,000 S3 Intelligent-Tiering configurations per bucket.
// @@ -7674,17 +8567,26 @@ func (c *S3) PutBucketIntelligentTieringConfigurationRequest(input *PutBucketInt
// move objects stored in the S3 Intelligent-Tiering storage class to the Archive
// Access or Deep Archive Access tier.
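A sketch of one such configuration, moving objects into the Archive Access tier after 90 days without access; the bucket name and configuration ID are illustrative.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutBucketIntelligentTieringConfiguration(&s3.PutBucketIntelligentTieringConfigurationInput{
		Bucket: aws.String("my-bucket"),         // hypothetical bucket name
		Id:     aws.String("archive-after-90d"), // hypothetical configuration ID
		IntelligentTieringConfiguration: &s3.IntelligentTieringConfiguration{
			Id:     aws.String("archive-after-90d"),
			Status: aws.String(s3.IntelligentTieringStatusEnabled),
			Tierings: []*s3.Tiering{{
				AccessTier: aws.String(s3.IntelligentTieringAccessTierArchiveAccess),
				Days:       aws.Int64(90),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}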
// -// Special Errors +// PutBucketIntelligentTieringConfiguration has the following special errors:
// -// - HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument +// # HTTP 400 Bad Request Error
// -// - HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are
-// attempting to create a new configuration but have already reached the
-// 1,000-configuration limit. +// Code: InvalidArgument
// -// - HTTP 403 Forbidden Error Code: AccessDenied Cause: You are not the owner
-// of the specified bucket, or you do not have the s3:PutIntelligentTieringConfiguration
-// bucket permission to set the configuration on the bucket. +// Cause: Invalid Argument
+//
+// # HTTP 400 Bad Request Error
+//
+// Code: TooManyConfigurations
+//
+// Cause: You are attempting to create a new configuration but have already
+// reached the 1,000-configuration limit.
+//
+// # HTTP 403 Forbidden Error
+//
+// Code: AccessDenied
+//
+// Cause: You are not the owner of the specified bucket, or you do not have
+// the s3:PutIntelligentTieringConfiguration bucket permission to set the configuration
+// on the bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error. @@ -7758,6 +8660,8 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon
// PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service.
// +// This operation is not supported by directory buckets.
+//
// This implementation of the PUT action adds an inventory configuration (identified
// by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations
// per bucket. @@ -7780,26 +8684,50 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon
// an example policy, see Granting Permissions for Amazon S3 Inventory and Storage
// Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9).
//
// -// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration +// # Permissions
+//
+// To use this operation, you must have permission to perform the s3:PutInventoryConfiguration
// action. The bucket owner has this permission by default and can grant this
-// permission to others. For more information about permissions, see Permissions
-// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// permission to others.
+//
+// The s3:PutInventoryConfiguration permission allows a user to create an S3
+// Inventory (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html)
+// report that includes all object metadata fields available and to specify
+// the destination bucket to store the inventory. A user with read access to
+// objects in the destination bucket can also access all object metadata fields
+// that are available in the inventory report.
+//
+// To restrict access to an inventory report, see Restricting access to an Amazon
+// S3 Inventory report (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html#example-bucket-policies-use-case-10)
+// in the Amazon S3 User Guide.
For more information about the metadata fields
+// available in S3 Inventory, see Amazon S3 Inventory lists (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html#storage-inventory-contents)
+// in the Amazon S3 User Guide. For more information about permissions, see
+// Permissions related to bucket subresource operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Identity and access management in Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
// in the Amazon S3 User Guide.
//
// -// Special Errors +// PutBucketInventoryConfiguration has the following special errors:
// -// - HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument +// # HTTP 400 Bad Request Error
// -// - HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are
-// attempting to create a new configuration but have already reached the
-// 1,000-configuration limit. +// Code: InvalidArgument
// -// - HTTP 403 Forbidden Error Code: AccessDenied Cause: You are not the owner
-// of the specified bucket, or you do not have the s3:PutInventoryConfiguration
-// bucket permission to set the configuration on the bucket. +// Cause: Invalid Argument
// -// Related Resources +// # HTTP 400 Bad Request Error
+//
+// Code: TooManyConfigurations
+//
+// Cause: You are attempting to create a new configuration but have already
+// reached the 1,000-configuration limit.
+//
+// # HTTP 403 Forbidden Error
+//
+// Code: AccessDenied
+//
+// Cause: You are not the owner of the specified bucket, or you do not have
+// the s3:PutInventoryConfiguration bucket permission to set the configuration
+// on the bucket.
+//
+// The following operations are related to PutBucketInventoryConfiguration:
//
// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html)
// @@ -7888,6 +8816,8 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req
// PutBucketLifecycle API operation for Amazon Simple Storage Service.
// +// This operation is not supported by directory buckets.
+//
// For an updated version of this API, see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html).
// This version has been deprecated. Existing lifecycle configurations will
// work. For new lifecycle configurations, use the updated API. @@ -7922,7 +8852,7 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req
// For more examples of transitioning objects to storage classes such as STANDARD_IA
// or ONEZONE_IA, see Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#lifecycle-configuration-examples).
//
// -// Related Resources +// The following operations are related to PutBucketLifecycle:
//
// - GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html)(Deprecated)
// @@ -8017,6 +8947,8 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon
// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service.
// +// This operation is not supported by directory buckets.
+//
// Creates a new lifecycle configuration for the bucket or replaces an existing
// lifecycle configuration.
Keep in mind that this will overwrite an existing // lifecycle configuration, so if you want to retain any configuration details, @@ -8024,10 +8956,10 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon // about lifecycle configuration, see Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html). // // Bucket lifecycle configuration now supports specifying a lifecycle rule using -// an object key name prefix, one or more object tags, or a combination of both. -// Accordingly, this section describes the latest API. The previous version -// of the API supported filtering based only on an object key name prefix, which -// is supported for backward compatibility. For the related API description, +// an object key name prefix, one or more object tags, object size, or any combination +// of these. Accordingly, this section describes the latest API. The previous +// version of the API supported filtering based only on an object key name prefix, +// which is supported for backward compatibility. For the related API description, // see PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html). // // # Rules @@ -8037,11 +8969,11 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon // S3 Lifecycle configuration can have up to 1,000 rules. This limit is not // adjustable. Each rule consists of the following: // -// - Filter identifying a subset of objects to which the rule applies. The -// filter can be based on a key name prefix, object tags, or a combination -// of both. +// - A filter identifying a subset of objects to which the rule applies. +// The filter can be based on a key name prefix, object tags, object size, +// or any combination of these. // -// - Status whether the rule is in effect. +// - A status indicating whether the rule is in effect. // // - One or more lifecycle transition and expiration actions that you want // Amazon S3 to perform on the objects identified by the filter. If the state @@ -8062,10 +8994,10 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon // optionally grant access permissions to others by writing an access policy. // For this operation, a user must get the s3:PutLifecycleConfiguration permission. // -// You can also explicitly deny permissions. Explicit deny also supersedes any -// other permissions. If you want to block users or accounts from removing or -// deleting objects from your bucket, you must deny them permissions for the -// following actions: +// You can also explicitly deny permissions. An explicit deny also supersedes +// any other permissions. If you want to block users or accounts from removing +// or deleting objects from your bucket, you must deny them permissions for +// the following actions: // // - s3:DeleteObject // @@ -8076,7 +9008,7 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon // For more information about permissions, see Managing Access Permissions to // Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). 
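Pulling the rule elements above together, a minimal sketch that installs a single prefix-filtered expiration rule. As the description notes, this replaces whatever lifecycle configuration the bucket already has; the bucket name and rule ID are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("my-bucket"), // hypothetical bucket name
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:         aws.String("expire-logs"), // hypothetical rule ID
				Status:     aws.String(s3.ExpirationStatusEnabled),
				Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}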
// -// The following are related to PutBucketLifecycleConfiguration: +// The following operations are related to PutBucketLifecycleConfiguration:
//
// - Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html)
// @@ -8160,6 +9092,8 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request
// PutBucketLogging API operation for Amazon Simple Storage Service.
// +// This operation is not supported by directory buckets.
+//
// Set the logging parameters for a bucket and specify permissions for who
// can view and modify the logging parameters. All logs are saved to buckets
// in the same Amazon Web Services Region as the source bucket. To set the logging @@ -8178,7 +9112,7 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request
// # Grantee Values
//
// You can specify the person (grantee) to whom you're assigning access rights
-// (using request elements) in the following ways: +// (by using request elements) in the following ways:
//
// - By the person's ID: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
// xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee> @@ -8186,8 +9120,8 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request
//
// - By Email address: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
// xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
-// The grantee is resolved to the CanonicalUser and, in a response to a GET
-// Object acl request, appears as the CanonicalUser. +// The grantee is resolved to the CanonicalUser and, in a response to a GETObjectAcl
+// request, appears as the CanonicalUser.
//
// - By URI: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
// xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee> @@ -8287,6 +9221,8 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu
// PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
// +// This operation is not supported by directory buckets.
+//
// Sets a metrics configuration (specified by the metrics configuration ID)
// for the bucket. You can have up to 1,000 metrics configurations per bucket.
// If you're updating an existing metrics configuration, note that this is a @@ -8310,7 +9246,7 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu
//
// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html)
//
// -// GetBucketLifecycle has the following special error: +// PutBucketMetricsConfiguration has the following special error:
//
// - Error code: TooManyConfigurations Description: You are attempting to
// create a new configuration but have already reached the 1,000-configuration @@ -8397,6 +9333,8 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re
// PutBucketNotification API operation for Amazon Simple Storage Service.
// +// This operation is not supported by directory buckets.
+//
// No longer used, see the PutBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotificationConfiguration.html)
// operation.
// @@ -8476,6 +9414,8 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat
// PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service.
// +// This operation is not supported by directory buckets.
+//
// Enables notifications of specified events for a bucket. For more information
// about event notifications, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
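A minimal notification sketch wiring object-creation events to an SQS queue via this operation. The bucket name and queue ARN are placeholders, and the queue's own policy must already allow s3.amazonaws.com to send messages.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("my-bucket"), // hypothetical bucket name
		NotificationConfiguration: &s3.NotificationConfiguration{
			QueueConfigurations: []*s3.QueueConfiguration{{
				Events:   []*string{aws.String("s3:ObjectCreated:*")},
				QueueArn: aws.String("arn:aws:sqs:us-east-1:111122223333:my-queue"), // hypothetical ARN
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}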
// @@ -8511,7 +9451,8 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat // // By default, only the bucket owner can configure notifications on a bucket. // However, bucket owners can use a bucket policy to grant permission to other -// users to set this configuration with s3:PutBucketNotification permission. +// users to set this configuration with the required s3:PutBucketNotification +// permission. // // The PUT notification is an atomic operation. For example, suppose your notification // configuration includes SNS topic, SQS queue, and Lambda function configurations. @@ -8519,8 +9460,6 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat // messages to your SNS topic. If the message fails, the entire PUT action will // fail, and Amazon S3 will not add the configuration to your bucket. // -// # Responses -// // If the configuration in the request body includes only one TopicConfiguration // specifying only the s3:ReducedRedundancyLostObject event type, the response // will also include the x-amz-sns-test-message-id header containing the message @@ -8606,6 +9545,8 @@ func (c *S3) PutBucketOwnershipControlsRequest(input *PutBucketOwnershipControls // PutBucketOwnershipControls API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this // operation, you must have the s3:PutBucketOwnershipControls permission. For // more information about Amazon S3 permissions, see Specifying permissions @@ -8696,22 +9637,62 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R // PutBucketPolicy API operation for Amazon Simple Storage Service. // -// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using -// an identity other than the root user of the Amazon Web Services account that -// owns the bucket, the calling identity must have the PutBucketPolicy permissions -// on the specified bucket and belong to the bucket owner's account in order -// to use this operation. +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Regional endpoint. These endpoints support path-style +// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// # Permissions +// +// If you are using an identity other than the root user of the Amazon Web Services +// account that owns the bucket, the calling identity must both have the PutBucketPolicy +// permissions on the specified bucket and belong to the bucket owner's account +// in order to use this operation. // // If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access // Denied error. If you have the correct permissions, but you're not using an // identity that belongs to the bucket owner's account, Amazon S3 returns a // 405 Method Not Allowed error. // -// As a security precaution, the root user of the Amazon Web Services account -// that owns a bucket can always use this operation, even if the policy explicitly -// denies the root user the ability to perform this action. 
+// To ensure that bucket owners don't inadvertently lock themselves out of their +// own buckets, the root principal in a bucket owner's Amazon Web Services account +// can perform the GetBucketPolicy, PutBucketPolicy, and DeleteBucketPolicy +// API actions, even if their bucket policy explicitly denies the root principal's +// access. Bucket owner root principals can only be blocked from performing +// these API actions by VPC endpoint policies and Amazon Web Services Organizations +// policies. // -// For more information, see Bucket policy examples (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html). +// - General purpose bucket permissions - The s3:PutBucketPolicy permission +// is required in a policy. For more information about general purpose buckets +// bucket policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation, +// you must have the s3express:PutBucketPolicy permission in an IAM identity-based +// policy instead of a bucket policy. Cross-account access to this API operation +// isn't supported. This operation can only be performed by the Amazon Web +// Services account that owns the resource. For more information about directory +// bucket policies and permissions, see Amazon Web Services Identity and +// Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. +// +// # Example bucket policies +// +// General purpose buckets example bucket policies - See Bucket policy examples +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html) +// in the Amazon S3 User Guide. +// +// Directory bucket example bucket policies - See Example bucket policies for +// S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// in the Amazon S3 User Guide. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. // // The following operations are related to PutBucketPolicy: // @@ -8795,6 +9776,8 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req // PutBucketReplication API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Creates a replication configuration or replaces an existing one. For more // information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) // in the Amazon S3 User Guide. @@ -8803,6 +9786,9 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req // configuration, you provide the name of the destination bucket or buckets // where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 // can assume to replicate objects on your behalf, and other relevant information. +// You can invoke this request for a specific Amazon Web Services Region by +// using the aws:RequestedRegion (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requestedregion) +// condition key. // // A replication configuration must include at least one rule, and can contain // a maximum of 1,000. 
Each rule identifies a subset of objects to replicate @@ -8931,6 +9917,8 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput)
// PutBucketRequestPayment API operation for Amazon Simple Storage Service.
// +// This operation is not supported by directory buckets.
+//
// Sets the request payment configuration for a bucket. By default, the bucket
// owner pays for downloads from the bucket. This configuration parameter enables
// the bucket owner (only) to specify that the person requesting the download @@ -9019,6 +10007,8 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request
// PutBucketTagging API operation for Amazon Simple Storage Service.
// +// This operation is not supported by directory buckets.
+//
// Sets the tags for a bucket.
//
// Use tags to organize your Amazon Web Services bill to reflect your own cost @@ -9029,7 +10019,7 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request
// name, and then organize your billing information to see the total cost of
// that application across several services. For more information, see Cost
// Allocation and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html)
-// and Using Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html). +// and Using Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html).
//
// When this operation sets the tags for a bucket, it will overwrite any current
// tags the bucket already has. You cannot use this operation to add tags to @@ -9041,22 +10031,20 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request
// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
//
// -// PutBucketTagging has the following special errors: +// PutBucketTagging has the following special errors. For more Amazon S3 errors,
+// see Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html).
//
// -// - Error code: InvalidTagError Description: The tag provided was not a
-// valid tag. This error can occur if the tag did not pass input validation.
-// For information about tag restrictions, see User-Defined Tag Restrictions
-// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html)
-// and Amazon Web Services-Generated Cost Allocation Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html). +// - InvalidTag - The tag provided was not a valid tag. This error can occur
+// if the tag did not pass input validation. For more information, see Using
+// Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html).
//
// -// - Error code: MalformedXMLError Description: The XML provided does not
-// match the schema. +// - MalformedXML - The XML provided does not match the schema.
//
// -// - Error code: OperationAbortedError Description: A conflicting conditional
-// action is currently in progress against this resource. Please try again. +// - OperationAborted - A conflicting conditional action is currently in
+// progress against this resource.
Please try again. // -// - Error code: InternalError Description: The service was unable to apply -// the provided tag to the bucket. +// - InternalError - The service was unable to apply the provided tag to +// the bucket. // // The following operations are related to PutBucketTagging: // @@ -9140,6 +10128,8 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r // PutBucketVersioning API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Sets the versioning state of an existing bucket. // // You can set the versioning state with one of the following values: @@ -9159,15 +10149,15 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r // you must include the x-amz-mfa request header and the Status and the MfaDelete // request elements in a request to set the versioning state of the bucket. // -// If you have an object expiration lifecycle policy in your non-versioned bucket -// and you want to maintain the same permanent delete behavior when you enable -// versioning, you must add a noncurrent expiration policy. The noncurrent expiration -// lifecycle policy will manage the deletes of the noncurrent object versions -// in the version-enabled bucket. (A version-enabled bucket maintains one current -// and zero or more noncurrent object versions.) For more information, see Lifecycle -// and Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config). +// If you have an object expiration lifecycle configuration in your non-versioned +// bucket and you want to maintain the same permanent delete behavior when you +// enable versioning, you must add a noncurrent expiration policy. The noncurrent +// expiration lifecycle configuration will manage the deletes of the noncurrent +// object versions in the version-enabled bucket. (A version-enabled bucket +// maintains one current and zero or more noncurrent object versions.) For more +// information, see Lifecycle and Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config). // -// Related Resources +// The following operations are related to PutBucketVersioning: // // - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // @@ -9251,6 +10241,8 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request // PutBucketWebsite API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Sets the configuration of the website that is specified in the website subresource. // To configure a bucket as a website, you can add this subresource on the bucket // with website configuration information such as the file name of the index @@ -9318,6 +10310,8 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request // more information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) // in the Amazon S3 User Guide. // +// The maximum request length is limited to 128 KB. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -9389,99 +10383,85 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp // PutObject API operation for Amazon Simple Storage Service. 
// -// Adds an object to a bucket. You must have WRITE permissions on a bucket to -// add an object to it. +// Adds an object to a bucket. // -// Amazon S3 never adds partial objects; if you receive a success response, -// Amazon S3 added the entire object to the bucket. +// - Amazon S3 never adds partial objects; if you receive a success response, +// Amazon S3 added the entire object to the bucket. You cannot use PutObject +// to only update a single piece of metadata for an existing object. You +// must put the entire object with updated metadata if you want to update +// some values. +// +// - If your bucket uses the bucket owner enforced setting for Object Ownership, +// ACLs are disabled and no longer affect permissions. All objects written +// to the bucket by any account will be owned by the bucket owner. +// +// - Directory buckets - For directory buckets, you must make requests for +// this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. // // Amazon S3 is a distributed system. If it receives multiple write requests // for the same object simultaneously, it overwrites all but the last object -// written. Amazon S3 does not provide object locking; if you need this, make -// sure to build it into your application layer or use versioning instead. +// written. However, Amazon S3 provides features that can modify this behavior: // -// To ensure that data is not corrupted traversing the network, use the Content-MD5 -// header. When you use this header, Amazon S3 checks the object against the -// provided MD5 value and, if they do not match, returns an error. Additionally, -// you can calculate the MD5 while putting an object to Amazon S3 and compare -// the returned ETag to the calculated MD5 value. +// - S3 Object Lock - To prevent objects from being deleted or overwritten, +// you can use Amazon S3 Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html) +// in the Amazon S3 User Guide. This functionality is not supported for directory +// buckets. // -// - To successfully complete the PutObject request, you must have the s3:PutObject -// in your IAM permissions. +// - S3 Versioning - When you enable versioning for a bucket, if Amazon S3 +// receives multiple write requests for the same object simultaneously, it +// stores all versions of the objects. For each write request that is made +// to the same object, Amazon S3 automatically generates a unique version +// ID of that object being stored in Amazon S3. You can retrieve, replace, +// or delete any version of the object. For more information about versioning, +// see Adding Objects to Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html) +// in the Amazon S3 User Guide. For information about returning the versioning +// state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). +// This functionality is not supported for directory buckets. // -// - To successfully change the objects acl of your PutObject request, you -// must have the s3:PutObjectAcl in your IAM permissions. 
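A minimal sketch of the upload-with-integrity-check flow that the PutObject documentation in this hunk describes, using the aws-sdk-go v1 client this change vendors; the bucket, key, and payload below are placeholder values, not part of the vendored diff:

package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	body := []byte("hello, world")
	sum := md5.Sum(body) // S3 verifies the uploaded bytes against this digest

	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:     aws.String("example-bucket"), // placeholder
		Key:        aws.String("example-key"),    // placeholder
		Body:       bytes.NewReader(body),
		ContentMD5: aws.String(base64.StdEncoding.EncodeToString(sum[:])),
	})
	if err != nil {
		log.Fatal(err)
	}
}

On a digest mismatch S3 rejects the request; on success the returned ETag can be compared against the same locally computed value when the object's ETag is its MD5.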
+// Permissions // -// - The Content-MD5 header is required for any request to upload an object -// with a retention period configured using Amazon S3 Object Lock. For more -// information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) -// in the Amazon S3 User Guide. +// - General purpose bucket permissions - The following permissions are required +// in your policies when your PutObject request includes specific headers. +// s3:PutObject - To successfully complete the PutObject request, you must +// always have the s3:PutObject permission on a bucket to add an object to +// it. s3:PutObjectAcl - To successfully change the object's ACL of your PutObject +// request, you must have the s3:PutObjectAcl permission. s3:PutObjectTagging - To successfully +// set the tag-set with your PutObject request, you must have the s3:PutObjectTagging +// permission. // -// # Server-side Encryption +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. The Amazon Web Services CLI or SDKs +// create a session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). // -// You can optionally request server-side encryption. With server-side encryption, -// Amazon S3 encrypts your data as it writes it to disks in its data centers -// and decrypts the data when you access it. You have the option to provide -// your own encryption key or use Amazon Web Services managed encryption keys -// (SSE-S3 or SSE-KMS). For more information, see Using Server-Side Encryption -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html). +// Data integrity with Content-MD5 // -// If you request server-side encryption using Amazon Web Services Key Management -// Service (SSE-KMS), you can enable an S3 Bucket Key at the object-level. For -// more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) -// in the Amazon S3 User Guide. +// - General purpose bucket - To ensure that data is not corrupted traversing +// the network, use the Content-MD5 header. When you use this header, Amazon +// S3 checks the object against the provided MD5 value and, if they do not +// match, Amazon S3 returns an error. Alternatively, when the object's ETag +// is its MD5 digest, you can calculate the MD5 while putting the object +// to Amazon S3 and compare the returned ETag to the calculated MD5 value. // -// # Access Control List (ACL)-Specific Request Headers +// - Directory bucket - This functionality is not supported for directory +// buckets. // -// You can use headers to grant ACL- based permissions. By default, all objects -// are private.
Only the owner has full access control. When adding a new object, -// you can grant permissions to individual Amazon Web Services accounts or to -// predefined groups defined by Amazon S3. These permissions are then added -// to the ACL on the object. For more information, see Access Control List (ACL) -// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) -// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). +// # HTTP Host header syntax // -// If the bucket that you're uploading objects to uses the bucket owner enforced -// setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. -// Buckets that use this setting only accept PUT requests that don't specify -// an ACL or PUT requests that specify bucket owner full control ACLs, such -// as the bucket-owner-full-control canned ACL or an equivalent form of this -// ACL expressed in the XML format. PUT requests that contain other ACLs (for -// example, custom grants to certain Amazon Web Services accounts) fail and -// return a 400 error with the error code AccessControlListNotSupported. +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. // -// For more information, see Controlling ownership of objects and disabling -// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. -// -// If your bucket uses the bucket owner enforced setting for Object Ownership, -// all objects written to the bucket by any account will be owned by the bucket -// owner. -// -// # Storage Class Options -// -// By default, Amazon S3 uses the STANDARD Storage Class to store newly created -// objects. The STANDARD storage class provides high durability and high availability. -// Depending on performance needs, you can specify a different Storage Class. -// Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, -// see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) -// in the Amazon S3 User Guide. -// -// # Versioning -// -// If you enable versioning for a bucket, Amazon S3 automatically generates -// a unique version ID for the object being stored. Amazon S3 returns this ID -// in the response. When you enable versioning for a bucket, if Amazon S3 receives -// multiple write requests for the same object simultaneously, it stores all -// of the objects. -// -// For more information about versioning, see Adding Objects to Versioning Enabled -// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html). -// For information about returning the versioning state of a bucket, see GetBucketVersioning -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). -// -// Related Resources +// For more information about related Amazon S3 APIs, see the following: // // - CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) // @@ -9562,13 +10542,15 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request // PutObjectAcl API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Uses the acl subresource to set the access control list (ACL) permissions -// for a new or existing object in an S3 bucket. You must have WRITE_ACP permission -// to set the ACL of an object. 
For more information, see What permissions can -// I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions) +// for a new or existing object in an S3 bucket. You must have the WRITE_ACP +// permission to set the ACL of an object. For more information, see What permissions +// can I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions) // in the Amazon S3 User Guide. // -// This action is not supported by Amazon S3 on Outposts. +// This functionality is not supported for Amazon S3 on Outposts. // // Depending on your application needs, you can choose to set the ACL on an // object using either the request body or the headers. For example, if you @@ -9585,7 +10567,7 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request // object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) // in the Amazon S3 User Guide. // -// # Access Permissions +// # Permissions // // You can set access permissions using one of the following methods: // @@ -9651,7 +10633,7 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request // sets the ACL of the current version of an object. To set the ACL of a different // version, use the versionId subresource. // -// Related Resources +// The following operations are related to PutObjectAcl: // // - CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) // @@ -9737,10 +10719,12 @@ func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *req // PutObjectLegalHold API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Applies a legal hold configuration to the specified object. For more information, // see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // -// This action is not supported by Amazon S3 on Outposts. +// This functionality is not supported for Amazon S3 on Outposts. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9817,6 +10801,8 @@ func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfiguration // PutObjectLockConfiguration API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Places an Object Lock configuration on the specified bucket. The rule specified // in the Object Lock configuration will be applied by default to every new // object placed in the specified bucket. For more information, see Locking @@ -9827,8 +10813,8 @@ func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfiguration // - The DefaultRetention period can be either Days or Years but you must // select one. You cannot specify Days and Years at the same time. // -// - You can only enable Object Lock for new buckets. If you want to turn -// on Object Lock for an existing bucket, contact Amazon Web Services Support. +// - You can enable Object Lock for new or existing buckets. For more information, +// see Configuring Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-configure.html). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9905,13 +10891,15 @@ func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *req // PutObjectRetention API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Places an Object Retention configuration on an object. For more information, // see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // Users or accounts require the s3:PutObjectRetention permission in order to // place an Object Retention configuration on objects. Bypassing a Governance // Retention configuration requires the s3:BypassGovernanceRetention permission. // -// This action is not supported by Amazon S3 on Outposts. +// This functionality is not supported for Amazon S3 on Outposts. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9988,12 +10976,15 @@ func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request // PutObjectTagging API operation for Amazon Simple Storage Service. // -// Sets the supplied tag-set to an object that already exists in a bucket. +// This operation is not supported by directory buckets. // -// A tag is a key-value pair. You can associate tags with an object by sending -// a PUT request against the tagging subresource that is associated with the -// object. You can retrieve tags by sending a GET request. For more information, -// see GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html). +// Sets the supplied tag-set to an object that already exists in a bucket. A +// tag is a key-value pair. For more information, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html). +// +// You can associate tags with an object by sending a PUT request against the +// tagging subresource that is associated with the object. You can retrieve +// tags by sending a GET request. For more information, see GetObjectTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html). // // For tagging-related restrictions related to characters and encodings, see // Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html). @@ -10006,24 +10997,22 @@ func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request // To put tags of any other version, use the versionId query parameter. You // also need permission for the s3:PutObjectVersionTagging action. // -// For information about the Amazon S3 object tagging feature, see Object Tagging -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// PutObjectTagging has the following special errors. For more Amazon S3 errors, +// see Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html). // -// Special Errors +// - InvalidTag - The tag provided was not a valid tag. This error can occur +// if the tag did not pass input validation. For more information, see Object +// Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html). // -// - Code: InvalidTagError Cause: The tag provided was not a valid tag. This -// error can occur if the tag did not pass input validation.
For more information, -// see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// - MalformedXML - The XML provided does not match the schema. // -// - Code: MalformedXMLError Cause: The XML provided does not match the schema. +// - OperationAborted - A conflicting conditional action is currently in +// progress against this resource. Please try again. // -// - Code: OperationAbortedError Cause: A conflicting conditional action -// is currently in progress against this resource. Please try again. +// - InternalError - The service was unable to apply the provided tag to +// the object. // -// - Code: InternalError Cause: The service was unable to apply the provided -// tag to the object. -// -// Related Resources +// The following operations are related to PutObjectTagging: // // - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) // @@ -10105,6 +11094,8 @@ func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req // PutPublicAccessBlock API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Creates or modifies the PublicAccessBlock configuration for an Amazon S3 // bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock // permission. For more information about Amazon S3 permissions, see Specifying @@ -10120,7 +11111,7 @@ func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req // For more information about when Amazon S3 considers a bucket or an object // public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). // -// Related Resources +// The following operations are related to PutPublicAccessBlock: // // - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) // @@ -10201,16 +11192,29 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // RestoreObject API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // # Restores an archived copy of an object back into Amazon S3 // -// This action is not supported by Amazon S3 on Outposts. +// This functionality is not supported for Amazon S3 on Outposts. // // This action performs the following types of requests: // -// - select - Perform a select query on an archived object -// // - restore an archive - Restore an archived object // +// For more information about the S3 structure in the request body, see the +// following: +// +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// - Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) +// in the Amazon S3 User Guide +// +// - Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// in the Amazon S3 User Guide +// +// # Permissions +// // To use this operation, you must have permissions to perform the s3:RestoreObject // action. The bucket owner has this permission by default and can grant this // permission to others. 
For more information about permissions, see Permissions @@ -10218,114 +11222,57 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) // in the Amazon S3 User Guide. // -// # Querying Archives with Select Requests -// -// You use a select type of request to perform SQL queries on archived objects. -// The archived objects that are being queried by the select request must be -// formatted as uncompressed comma-separated values (CSV) files. You can run -// queries and custom analytics on your archived data without having to restore -// your data to a hotter Amazon S3 tier. For an overview about select requests, -// see Querying Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) -// in the Amazon S3 User Guide. -// -// When making a select request, do the following: -// -// - Define an output location for the select query's output. This must be -// an Amazon S3 bucket in the same Amazon Web Services Region as the bucket -// that contains the archive object that is being queried. The Amazon Web -// Services account that initiates the job must have permissions to write -// to the S3 bucket. You can specify the storage class and encryption for -// the output objects stored in the bucket. For more information about output, -// see Querying Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) -// in the Amazon S3 User Guide. For more information about the S3 structure -// in the request body, see the following: PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) -// in the Amazon S3 User Guide Protecting Data Using Server-Side Encryption -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) -// in the Amazon S3 User Guide -// -// - Define the SQL expression for the SELECT type of restoration for your -// query in the request body's SelectParameters structure. You can use expressions -// like the following examples. The following expression returns all records -// from the specified object. SELECT * FROM Object Assuming that you are -// not using any headers for data stored in the object, you can specify columns -// with positional headers. SELECT s._1, s._2 FROM Object s WHERE s._3 > -// 100 If you have headers and you set the fileHeaderInfo in the CSV structure -// in the request body to USE, you can specify headers in the query. (If -// you set the fileHeaderInfo field to IGNORE, the first row is skipped for -// the query.) You cannot mix ordinal positions with header column names. -// SELECT s.Id, s.FirstName, s.SSN FROM S3Object s -// -// For more information about using SQL with S3 Glacier Select restore, see -// SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) -// in the Amazon S3 User Guide. -// -// When making a select request, you can also do the following: -// -// - To expedite your queries, specify the Expedited tier. For more information -// about tiers, see "Restoring Archives," later in this topic. -// -// - Specify details about the data serialization format of both the input -// object that is being queried and the serialization of the CSV-encoded -// query results. 
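For the RestoreObject operation re-documented in this hunk, a minimal aws-sdk-go v1 sketch of initiating a restore with an explicit retrieval tier; the bucket, key, and two-day duration are placeholder choices, not part of the vendored diff:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.RestoreObject(&s3.RestoreObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("archived-key"),   // placeholder
		RestoreRequest: &s3.RestoreRequest{
			Days: aws.Int64(2), // how long the temporary copy stays available
			GlacierJobParameters: &s3.GlacierJobParameters{
				Tier: aws.String(s3.TierStandard), // or TierExpedited / TierBulk
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}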
-// -// The following are additional important facts about the select feature: -// -// - The output results are new Amazon S3 objects. Unlike archive retrievals, -// they are stored until explicitly deleted-manually or through a lifecycle -// policy. -// -// - You can issue more than one select request on the same Amazon S3 object. -// Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests. -// -// - Amazon S3 accepts a select request even if the object has already been -// restored. A select request doesn’t return error response 409. -// // # Restoring objects // -// Objects that you archive to the S3 Glacier or S3 Glacier Deep Archive storage -// class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep -// Archive tiers are not accessible in real time. For objects in Archive Access -// or Deep Archive Access tiers you must first initiate a restore request, and -// then wait until the object is moved into the Frequent Access tier. For objects -// in S3 Glacier or S3 Glacier Deep Archive storage classes you must first initiate -// a restore request, and then wait until a temporary copy of the object is -// available. To access an archived object, you must restore the object for -// the duration (number of days) that you specify. +// Objects that you archive to the S3 Glacier Flexible Retrieval +// or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive +// or S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real +// time. For objects in the S3 Glacier Flexible Retrieval +// or S3 Glacier Deep Archive storage classes, you must first initiate a restore +// request, and then wait until a temporary copy of the object is available. +// If you want a permanent copy of the object, create a copy of it in the Amazon +// S3 Standard storage class in your S3 bucket. To access an archived object, +// you must restore the object for the duration (number of days) that you specify. +// For objects in the Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering, +// you must first initiate a restore request, and then wait until the object +// is moved into the Frequent Access tier. // // To restore a specific object version, you can provide a version ID. If you // don't provide a version ID, Amazon S3 restores the current version. // -// When restoring an archived object (or using a select request), you can specify -// one of the following data access tier options in the Tier element of the -// request body: +// When restoring an archived object, you can specify one of the following data +// access tier options in the Tier element of the request body: // // - Expedited - Expedited retrievals allow you to quickly access your data -// stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive -// tier when occasional urgent requests for a subset of archives are required. -// For all but the largest archived objects (250 MB+), data accessed using -// Expedited retrievals is typically made available within 1–5 minutes. -// Provisioned capacity ensures that retrieval capacity for Expedited retrievals -// is available when you need it. Expedited retrievals and provisioned capacity -// are not available for objects stored in the S3 Glacier Deep Archive storage -// class or S3 Intelligent-Tiering Deep Archive tier.
+// stored in the S3 Glacier Flexible Retrieval storage +// class or S3 Intelligent-Tiering Archive tier when occasional urgent requests +// for restoring archives are required. For all but the largest archived +// objects (250 MB+), data accessed using Expedited retrievals is typically +// made available within 1–5 minutes. Provisioned capacity ensures that +// retrieval capacity for Expedited retrievals is available when you need +// it. Expedited retrievals and provisioned capacity are not available for +// objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering +// Deep Archive tier. // // - Standard - Standard retrievals allow you to access any of your archived // objects within several hours. This is the default option for retrieval // requests that do not specify the retrieval option. Standard retrievals // typically finish within 3–5 hours for objects stored in the S3 Glacier +// Flexible Retrieval storage class or S3 Intelligent-Tiering +// Archive tier. They typically finish within 12 hours for objects stored +// in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering +// Deep Archive tier. Standard retrievals are free for objects stored in +// S3 Intelligent-Tiering. // -// - Bulk - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier, -// enabling you to retrieve large amounts, even petabytes, of data inexpensively. -// Bulk retrievals typically finish within 5–12 hours for objects stored -// in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier. -// They typically finish within 48 hours for objects stored in the S3 Glacier -// Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. -// Bulk retrievals are free for objects stored in S3 Intelligent-Tiering. +// - Bulk - Bulk retrievals are free for objects stored in the S3 Glacier Flexible +// Retrieval and S3 Intelligent-Tiering storage classes, enabling you to +// retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals +// typically finish within 5–12 hours for objects stored in the S3 Glacier +// Flexible Retrieval storage class or S3 Intelligent-Tiering +// Archive tier. Bulk retrievals are also the lowest-cost retrieval option +// when restoring objects from S3 Glacier Deep Archive. They typically finish +// within 48 hours for objects stored in the S3 Glacier Deep Archive storage +// class or S3 Intelligent-Tiering Deep Archive tier. // // For more information about archive retrieval options and provisioned capacity // for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) @@ -10368,11 +11315,9 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // - If the object is previously restored, Amazon S3 returns 200 OK in the // response. // -// Special Errors -// -// - Code: RestoreAlreadyInProgress Cause: Object restore is already in progress. -// (This error does not apply to SELECT type requests.) HTTP Status Code: -// 409 Conflict SOAP Fault Code Prefix: Client +// - Special errors: Code: RestoreAlreadyInProgress Cause: Object restore +// is already in progress.
HTTP Status Code: 409 Conflict SOAP Fault Code +// Prefix: Client // // - Code: GlacierExpeditedRetrievalNotAvailable Cause: expedited retrievals // are currently not available. Try again later. (Returned if there is insufficient @@ -10380,15 +11325,12 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // Expedited retrievals and not to S3 Standard or Bulk retrievals.) HTTP // Status Code: 503 SOAP Fault Code Prefix: N/A // -// Related Resources +// The following operations are related to RestoreObject: // // - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) // // - GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) // -// - SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) -// in the Amazon S3 User Guide -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -10474,6 +11416,8 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // SelectObjectContent API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // This action filters the contents of an Amazon S3 object based on a simple // structured query language (SQL) statement. In the request, along with the // SQL expression, you must also specify a data serialization format (JSON, @@ -10482,20 +11426,16 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // SQL expression. You must also specify the data serialization format for the // response. // -// This action is not supported by Amazon S3 on Outposts. +// This functionality is not supported for Amazon S3 on Outposts. // // For more information about Amazon S3 Select, see Selecting Content from Objects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) // and SELECT Command (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html) // in the Amazon S3 User Guide. // -// For more information about using SQL with Amazon S3 Select, see SQL Reference -// for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) -// in the Amazon S3 User Guide. -// // # Permissions // -// You must have s3:GetObject permission for this operation. Amazon S3 Select +// You must have the s3:GetObject permission for this operation. Amazon S3 Select // does not support anonymous access. For more information about permissions, // see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) // in the Amazon S3 User Guide. @@ -10522,10 +11462,10 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) // in the Amazon S3 User Guide. For objects that are encrypted with Amazon -// S3 managed encryption keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), -// server-side encryption is handled transparently, so you don't need to -// specify anything. 
For more information about server-side encryption, including -// SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// S3 managed keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side +// encryption is handled transparently, so you don't need to specify anything. +// For more information about server-side encryption, including SSE-S3 and +// SSE-KMS, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) // in the Amazon S3 User Guide. // // # Working with the Response Body @@ -10545,9 +11485,13 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // in the request parameters), you cannot specify the range of bytes of an // object to return. // -// - GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot -// specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. -// For more information, about storage classes see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro) +// - The GLACIER, DEEP_ARCHIVE, and REDUCED_REDUNDANCY storage classes, or +// the ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING +// storage class: You cannot query objects in the GLACIER, DEEP_ARCHIVE, +// or REDUCED_REDUNDANCY storage classes, nor objects in the ARCHIVE_ACCESS +// or DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage +// class. For more information about storage classes, see Using Amazon S3 +// storage classes (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html) // in the Amazon S3 User Guide. // // # Special Errors @@ -10555,7 +11499,7 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // For a list of special errors for this operation, see List of SELECT Object // Content Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList) // -// Related Resources +// The following operations are related to SelectObjectContent: // // - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // @@ -10592,6 +11536,7 @@ func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObject } var _ awserr.Error +var _ time.Time // SelectObjectContentEventStream provides the event stream handling for the SelectObjectContent. // @@ -10803,15 +11748,15 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou // // Uploads a part in a multipart upload. // -// In this operation, you provide part data in your request. However, you have -// an option to specify your existing Amazon S3 object as a data source for -// the part you are uploading. To upload a part from an existing object, you -// use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) +// In this operation, you provide new data as a part of an object in your request. +// However, you have an option to specify your existing Amazon S3 object as +// a data source for the part you are uploading. To upload a part from an existing +// object, you use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) // operation. 
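The UploadPart doc comment above assumes the standard three-step multipart flow; here is a minimal aws-sdk-go v1 sketch of it (the names are placeholders, and a production caller would AbortMultipartUpload on failure so the stored parts stop accruing charges):

package main

import (
	"bytes"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	bucket, key := aws.String("example-bucket"), aws.String("example-key") // placeholders

	// Step 1: initiate and receive the upload ID that every later call must carry.
	mpu, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{Bucket: bucket, Key: key})
	if err != nil {
		log.Fatal(err)
	}

	// Step 2: upload a part; part numbers run from 1 to 10,000.
	part, err := svc.UploadPart(&s3.UploadPartInput{
		Bucket:     bucket,
		Key:        key,
		UploadId:   mpu.UploadId,
		PartNumber: aws.Int64(1),
		Body:       bytes.NewReader([]byte("part one payload")),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Step 3: complete, listing each uploaded part's number and ETag.
	_, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:   bucket,
		Key:      key,
		UploadId: mpu.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}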
// // You must initiate a multipart upload (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)) // before you can upload any part. In response to your initiate request, Amazon -// S3 returns an upload ID, a unique identifier, that you must include in your +// S3 returns an upload ID, a unique identifier that you must include in your // upload part request. // // Part numbers can be any number from 1 to 10,000, inclusive. A part number @@ -10823,18 +11768,8 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou // upload specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) // in the Amazon S3 User Guide. // -// To ensure that data is not corrupted when traversing the network, specify -// the Content-MD5 header in the upload part request. Amazon S3 checks the part -// data against the provided MD5 value. If they do not match, Amazon S3 returns -// an error. -// -// If the upload request is signed with Signature Version 4, then Amazon Web -// Services S3 uses the x-amz-content-sha256 header as a checksum instead of -// Content-MD5. For more information see Authenticating Requests: Using the -// Authorization Header (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html). -// -// Note: After you initiate multipart upload and upload one or more parts, you -// must either complete or abort multipart upload in order to stop getting charged +// After you initiate multipart upload and upload one or more parts, you must +// either complete or abort multipart upload in order to stop getting charged // for storage of the uploaded parts. Only after you either complete or abort // multipart upload, Amazon S3 frees up the parts storage and stops charging // you for the parts storage. @@ -10843,44 +11778,90 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the // Amazon S3 User Guide . // -// For information on the permissions required to use the multipart upload API, -// go to Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) // in the Amazon S3 User Guide. // -// You can optionally request server-side encryption where Amazon S3 encrypts -// your data as it writes it to disks in its data centers and decrypts it for -// you when you access it. You have the option of providing your own encryption -// key, or you can use the Amazon Web Services managed encryption keys. If you -// choose to provide your own encryption key, the request headers you provide -// in the request must match the headers you used in the request to initiate -// the upload by using CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). 
-// For more information, go to Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) +// Permissions +// +// - General purpose bucket permissions - For information on the permissions +// required to use the multipart upload API, see Multipart Upload and Permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// # Data integrity +// +// General purpose bucket - To ensure that data is not corrupted traversing +// the network, specify the Content-MD5 header in the upload part request. Amazon +// S3 checks the part data against the provided MD5 value. If they do not match, +// Amazon S3 returns an error. If the upload request is signed with Signature +// Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 header +// as a checksum instead of Content-MD5. For more information see Authenticating +// Requests: Using the Authorization Header (Amazon Web Services Signature Version +// 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html). +// +// Directory buckets - MD5 is not supported by directory buckets. You can use +// checksum algorithms to check object integrity. +// +// Encryption +// +// - General purpose bucket - Server-side encryption is for data encryption +// at rest. Amazon S3 encrypts your data as it writes it to disks in its +// data centers and decrypts it when you access it. You have mutually exclusive +// options to protect data using server-side encryption in Amazon S3, depending +// on how you choose to manage the encryption keys. Specifically, the encryption +// key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS +// keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts +// data with server-side encryption using Amazon S3 managed keys (SSE-S3) +// by default. You can optionally tell Amazon S3 to encrypt data at rest +// using server-side encryption with other key options. The option you use +// depends on whether you want to use KMS keys (SSE-KMS) or provide your +// own encryption key (SSE-C). Server-side encryption is supported by the +// S3 Multipart Upload operations. Unless you are using a customer-provided +// encryption key (SSE-C), you don't need to specify the encryption parameters +// in each UploadPart request. Instead, you only need to specify the server-side +// encryption parameters in the initial Initiate Multipart request. 
For more +// information, see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). +// If you request server-side encryption using a customer-provided encryption +// key (SSE-C) in your initiate multipart upload request, you must provide +// identical encryption information in each part upload using the following +// request headers. x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key +// x-amz-server-side-encryption-customer-key-MD5 +// +// - Directory bucket - For directory buckets, only server-side encryption +// with Amazon S3 managed keys (SSE-S3) (AES256) is supported. +// +// For more information, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) // in the Amazon S3 User Guide. // -// Server-side encryption is supported by the S3 Multipart Upload actions. Unless -// you are using a customer-provided encryption key, you don't need to specify -// the encryption parameters in each UploadPart request. Instead, you only need -// to specify the server-side encryption parameters in the initial Initiate -// Multipart request. For more information, see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). +// Special errors // -// If you requested server-side encryption using a customer-provided encryption -// key in your initiate multipart upload request, you must provide identical -// encryption information in each part upload using the following headers. +// - Error Code: NoSuchUpload Description: The specified multipart upload +// does not exist. The upload ID might be invalid, or the multipart upload +// might have been aborted or completed. HTTP Status Code: 404 Not Found +// SOAP Fault Code Prefix: Client // -// - x-amz-server-side-encryption-customer-algorithm +// # HTTP Host header syntax // -// - x-amz-server-side-encryption-customer-key +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. // -// - x-amz-server-side-encryption-customer-key-MD5 -// -// Special Errors -// -// - Code: NoSuchUpload Cause: The specified multipart upload does not exist. -// The upload ID might be invalid, or the multipart upload might have been -// aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault Code -// Prefix: Client -// -// Related Resources +// The following operations are related to UploadPart: // // - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // @@ -10963,83 +11944,107 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req // UploadPartCopy API operation for Amazon Simple Storage Service. // -// Uploads a part by copying data from an existing object as data source. You -// specify the data source by adding the request header x-amz-copy-source in -// your request and a byte range by adding the request header x-amz-copy-source-range +// Uploads a part by copying data from an existing object as data source. To +// specify the data source, you add the request header x-amz-copy-source in +// your request. To specify a byte range, you add the request header x-amz-copy-source-range // in your request. // // For information about maximum and minimum part sizes and other multipart // upload specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) // in the Amazon S3 User Guide. 
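The special-errors text above pairs with the awserr convention these doc comments keep referencing; a minimal sketch of the runtime type assertion used to detect NoSuchUpload (the identifiers are placeholders, not part of the vendored diff):

package main

import (
	"bytes"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.UploadPart(&s3.UploadPartInput{
		Bucket:     aws.String("example-bucket"),      // placeholder
		Key:        aws.String("example-key"),         // placeholder
		UploadId:   aws.String("stale-or-invalid-id"), // placeholder
		PartNumber: aws.Int64(1),
		Body:       bytes.NewReader([]byte("payload")),
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case s3.ErrCodeNoSuchUpload:
			// Upload ID is invalid, or the upload was already aborted or completed.
			log.Println("multipart upload no longer exists:", aerr.Message())
		default:
			log.Fatal(aerr)
		}
	}
}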
// -// Instead of using an existing object as part data, you might use the UploadPart -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) action -// and provide data in your request. +// Instead of copying data from an existing object as part data, you might use +// the UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// action to upload new data as a part of an object in your request. // // You must initiate a multipart upload before you can upload any part. In response -// to your initiate request. Amazon S3 returns a unique identifier, the upload -// ID, that you must include in your upload part request. +// to your initiate request, Amazon S3 returns the upload ID, a unique identifier +// that you must include in your upload part request. // -// For more information about using the UploadPartCopy operation, see the following: +// For conceptual information about multipart uploads, see Uploading Objects +// Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide. For information about copying objects using +// a single atomic action vs. a multipart upload, see Operations on Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) in +// the Amazon S3 User Guide. // -// - For conceptual information about multipart uploads, see Uploading Objects -// Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// # Authentication and authorization +// +// All UploadPartCopy requests must be authenticated and signed by using IAM +// credentials (access key ID and secret access key for the IAM identities). +// All headers with the x-amz- prefix, including x-amz-copy-source, must be +// signed. For more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). +// +// Directory buckets - You must use IAM credentials to authenticate and authorize +// your access to the UploadPartCopy API operation, instead of using the temporary +// security credentials through the CreateSession API operation. +// +// The Amazon Web Services CLI or SDKs handle authentication and authorization +// on your behalf. +// +// # Permissions +// +// You must have READ access to the source object and WRITE access to the destination +// bucket. +// +// - General purpose bucket permissions - You must have the permissions in +// a policy based on the bucket types of your source bucket and destination +// bucket in an UploadPartCopy operation. If the source object is in a general +// purpose bucket, you must have the s3:GetObject permission to read the +// source object that is being copied. If the destination bucket is a general +// purpose bucket, you must have the s3:PutObject permission to write the +// object copy to the destination bucket.
For information about permissions +// required to use the multipart upload API, see Multipart upload API and +// permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) // in the Amazon S3 User Guide. // -// - For information about permissions required to use the multipart upload -// API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// - Directory bucket permissions - You must have permissions in a bucket +// policy or an IAM identity-based policy based on the source and destination +// bucket types in an UploadPartCopy operation. If the source object that +// you want to copy is in a directory bucket, you must have the s3express:CreateSession +// permission in the Action element of a policy to read the object. By default, +// the session is in the ReadWrite mode. If you want to restrict the access, +// you can explicitly set the s3express:SessionMode condition key to ReadOnly +// on the copy source bucket. If the copy destination is a directory bucket, +// you must have the s3express:CreateSession permission in the Action element +// of a policy to write the object to the destination. The s3express:SessionMode +// condition key cannot be set to ReadOnly on the copy destination. For example +// policies, see Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// and Amazon Web Services Identity and Access Management (IAM) identity-based +// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) // in the Amazon S3 User Guide. // -// - For information about copying objects using a single atomic action vs. -// a multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) -// in the Amazon S3 User Guide. +// Encryption // -// - For information about using server-side encryption with customer-provided -// encryption keys with the UploadPartCopy operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// - General purpose buckets - For information about using server-side encryption +// with customer-provided encryption keys with the UploadPartCopy operation, +// see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) // and UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html). // -// Note the following additional considerations about the request headers x-amz-copy-source-if-match, -// x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and -// x-amz-copy-source-if-modified-since: +// - Directory buckets - For directory buckets, only server-side encryption +// with Amazon S3 managed keys (SSE-S3) (AES256) is supported. // -// - Consideration 1 - If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since -// headers are present in the request as follows: x-amz-copy-source-if-match -// condition evaluates to true, and; x-amz-copy-source-if-unmodified-since -// condition evaluates to false; Amazon S3 returns 200 OK and copies the -// data. 
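A minimal aws-sdk-go v1 sketch of the UploadPartCopy request shape discussed in this hunk, pinning the copy source to a specific version and byte range; every identifier below is a placeholder, not part of the vendored diff:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
		Bucket:     aws.String("dest-bucket"), // placeholder
		Key:        aws.String("dest-key"),    // placeholder
		UploadId:   aws.String("upload-id"),   // placeholder
		PartNumber: aws.Int64(1),
		// Sent as x-amz-copy-source; the versionId pin is optional.
		CopySource: aws.String("source-bucket/source-key?versionId=example-version-id"),
		// Sent as x-amz-copy-source-range; here the first 5 MiB of the source.
		CopySourceRange: aws.String("bytes=0-5242879"),
	})
	if err != nil {
		log.Fatal(err)
	}
}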
+// Special errors // -// - Consideration 2 - If both of the x-amz-copy-source-if-none-match and -// x-amz-copy-source-if-modified-since headers are present in the request -// as follows: x-amz-copy-source-if-none-match condition evaluates to false, -// and; x-amz-copy-source-if-modified-since condition evaluates to true; -// Amazon S3 returns 412 Precondition Failed response code. +// - Error Code: NoSuchUpload Description: The specified multipart upload +// does not exist. The upload ID might be invalid, or the multipart upload +// might have been aborted or completed. HTTP Status Code: 404 Not Found // -// # Versioning +// - Error Code: InvalidRequest Description: The specified copy source is +// not supported as a byte-range copy source. HTTP Status Code: 400 Bad Request // -// If your bucket has versioning enabled, you could have multiple versions of -// the same object. By default, x-amz-copy-source identifies the current version -// of the object to copy. If the current version is a delete marker and you -// don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404 -// error, because the object does not exist. If you specify versionId in the -// x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns -// an HTTP 400 error, because you are not allowed to specify a delete marker -// as a version for the x-amz-copy-source. +// # HTTP Host header syntax // -// You can optionally specify a specific version of the source object to copy -// by adding the versionId subresource as shown in the following example: +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. // -// x-amz-copy-source: /bucket/object?versionId=version id -// -// Special Errors -// -// - Code: NoSuchUpload Cause: The specified multipart upload does not exist. -// The upload ID might be invalid, or the multipart upload might have been -// aborted or completed. HTTP Status Code: 404 Not Found -// -// - Code: InvalidRequest Cause: The specified copy source is not supported -// as a byte-range copy source. HTTP Status Code: 400 Bad Request -// -// Related Resources +// The following operations are related to UploadPartCopy: // // - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // @@ -11130,6 +12135,8 @@ func (c *S3) WriteGetObjectResponseRequest(input *WriteGetObjectResponseInput) ( // WriteGetObjectResponse API operation for Amazon Simple Storage Service. // +// This operation is not supported by directory buckets. +// // Passes transformed objects to a GetObject operation when using Object Lambda // access points. For information about Object Lambda access points, see Transforming // objects with Object Lambda access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html) @@ -11205,7 +12212,7 @@ func (c *S3) WriteGetObjectResponseWithContext(ctx aws.Context, input *WriteGetO // Specifies the days since the initiation of an incomplete multipart upload // that Amazon S3 will wait before permanently removing all parts of the upload. // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket -// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) +// Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) // in the Amazon S3 User Guide. 
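The AbortIncompleteMultipartUpload element documented below is normally attached to a bucket lifecycle rule; a minimal aws-sdk-go v1 sketch of doing so (the bucket name, rule ID, and seven-day window are placeholder choices, not part of the vendored diff):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:     aws.String("abort-stalled-mpu"),
				Status: aws.String("Enabled"),
				Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("")}, // whole bucket
				AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
					DaysAfterInitiation: aws.Int64(7),
				},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}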
type AbortIncompleteMultipartUpload struct { _ struct{} `type:"structure"` @@ -11244,27 +12251,41 @@ type AbortMultipartUploadInput struct { // The bucket name to which the upload was taking place. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. 
If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
 ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`

 // Key of the object for which the multipart upload was initiated.
@@ -11273,10 +12294,14 @@ type AbortMultipartUploadInput struct {
 Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`

 // Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
 // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
 // in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
 RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`

 // Upload ID that identifies the multipart upload.
@@ -11397,6 +12422,8 @@ type AbortMultipartUploadOutput struct {

 // If present, indicates that the requester was successfully charged for the
 // request.
+ //
+ // This functionality is not supported for directory buckets.
 RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
}

@@ -11931,9 +12958,7 @@ func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDes
 return s
}

-// In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name
-// is globally unique, and the namespace is shared by all Amazon Web Services
-// accounts.
+// In terms of implementation, a Bucket is a resource.
type Bucket struct {
 _ struct{} `type:"structure"`

@@ -11975,6 +13000,51 @@ func (s *Bucket) SetName(v string) *Bucket {
 return s
}

+// Specifies the information about the bucket that will be created. For more
+// information about directory buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html)
+// in the Amazon S3 User Guide.
+//
+// This functionality is only supported by directory buckets.
+type BucketInfo struct {
+ _ struct{} `type:"structure"`
+
+ // The number of Availability Zones used for redundancy for the bucket.
+ DataRedundancy *string `type:"string" enum:"DataRedundancy"`
+
+ // The type of bucket.
+ Type *string `type:"string" enum:"BucketType"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BucketInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BucketInfo) GoString() string { + return s.String() +} + +// SetDataRedundancy sets the DataRedundancy field's value. +func (s *BucketInfo) SetDataRedundancy(v string) *BucketInfo { + s.DataRedundancy = &v + return s +} + +// SetType sets the Type field's value. +func (s *BucketInfo) SetType(v string) *BucketInfo { + s.Type = &v + return s +} + // Specifies the lifecycle configuration for objects in an Amazon S3 bucket. // For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) // in the Amazon S3 User Guide. @@ -12261,7 +13331,9 @@ type CSVInput struct { // A single character used to indicate that a row should be ignored when the // character is present at the start of that row. You can specify any character - // to indicate a comment line. + // to indicate a comment line. The default character is #. + // + // Default: # Comments *string `type:"string"` // A single character used to separate individual fields in a record. You can @@ -12444,34 +13516,42 @@ type Checksum struct { _ struct{} `type:"structure"` // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string `type:"string"` // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string `type:"string"` // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. 
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string `type:"string"` // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string `type:"string"` } @@ -12631,19 +13711,33 @@ type CompleteMultipartUploadInput struct { // Name of the bucket to which the multipart upload was initiated. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. 
For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -12677,9 +13771,9 @@ type CompleteMultipartUploadInput struct { // in the Amazon S3 User Guide. ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Object key for which the multipart upload was initiated. @@ -12691,16 +13785,23 @@ type CompleteMultipartUploadInput struct { MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The server-side encryption (SSE) algorithm used to encrypt the object. This - // parameter is needed only when the object was created using a checksum algorithm. 
- // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // parameter is required only when the object was created using a checksum algorithm + // or if your bucket policy requires the use of SSE-C. For more information, + // see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // The server-side encryption (SSE) customer managed key. This parameter is @@ -12708,6 +13809,8 @@ type CompleteMultipartUploadInput struct { // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) // in the Amazon S3 User Guide. // + // This functionality is not supported for directory buckets. + // // SSECustomerKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CompleteMultipartUploadInput's // String and GoString methods. @@ -12717,6 +13820,8 @@ type CompleteMultipartUploadInput struct { // is needed only when the object was created using a checksum algorithm. For // more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // ID for the initiated multipart upload. @@ -12893,55 +13998,52 @@ type CompleteMultipartUploadOutput struct { // The name of the bucket that contains the newly created object. Does not return // the access point ARN or access point alias if used. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // Access points are not supported by directory buckets. Bucket *string `type:"string"` // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). + // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. 
BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string `type:"string"` // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string `type:"string"` // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string `type:"string"` // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. 
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string `type:"string"` @@ -12957,6 +14059,8 @@ type CompleteMultipartUploadOutput struct { // If the object expiration is configured, this will contain the expiration // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL-encoded. + // + // This functionality is not supported for directory buckets. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` // The object key of the newly created object. @@ -12967,25 +14071,31 @@ type CompleteMultipartUploadOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for - // the object. + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CompleteMultipartUploadOutput's // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // If you specified server-side encryption either with an Amazon S3-managed - // encryption key or an Amazon Web Services KMS key in your initiate multipart - // upload request, the response includes this header. It confirms the encryption - // algorithm that Amazon S3 used to encrypt the object. + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Version ID of the newly created object, in case the bucket has versioning // turned on. + // + // This functionality is not supported for directory buckets. VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } @@ -13138,34 +14248,42 @@ type CompletedPart struct { _ struct{} `type:"structure"` // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. 
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string `type:"string"` // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string `type:"string"` // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string `type:"string"` // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. 
Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
 // in the Amazon S3 User Guide.
 ChecksumSHA256 *string `type:"string"`

@@ -13174,6 +14292,16 @@ type CompletedPart struct {

 // Part number that identifies the part. This is a positive integer between
 // 1 and 10,000.
+ //
+ // * General purpose buckets - In CompleteMultipartUpload, when an additional
+ // checksum (including x-amz-checksum-crc32, x-amz-checksum-crc32c, x-amz-checksum-sha1,
+ // or x-amz-checksum-sha256) is applied to each part, the PartNumber must
+ // start at 1 and the part numbers must be consecutive. Otherwise, Amazon
+ // S3 generates an HTTP 400 Bad Request status code and an InvalidPartOrder
+ // error code.
+ //
+ // * Directory buckets - In CompleteMultipartUpload, the PartNumber must
+ // start at 1 and the part numbers must be consecutive.
 PartNumber *int64 `type:"integer"`
}

@@ -13333,71 +14461,134 @@ func (s *ContinuationEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg even

type CopyObjectInput struct {
 _ struct{} `locationName:"CopyObjectRequest" type:"structure"`

- // The canned ACL to apply to the object.
+ // The canned access control list (ACL) to apply to the object.
 //
- // This action is not supported by Amazon S3 on Outposts.
+ // When you copy an object, the ACL metadata is not preserved and is set to
+ // private by default. Only the owner has full access control. To override the
+ // default ACL setting, specify a new ACL when you generate a copy request.
+ // For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html).
+ //
+ // If the destination bucket that you're copying objects to uses the bucket
+ // owner enforced setting for S3 Object Ownership, ACLs are disabled and no
+ // longer affect permissions. Buckets that use this setting only accept PUT
+ // requests that don't specify an ACL or PUT requests that specify bucket owner
+ // full control ACLs, such as the bucket-owner-full-control canned ACL or an
+ // equivalent form of this ACL expressed in the XML format. For more information,
+ // see Controlling ownership of objects and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
+ // in the Amazon S3 User Guide.
+ //
+ // * If your destination bucket uses the bucket owner enforced setting for
+ // Object Ownership, all objects written to the bucket by any account will
+ // be owned by the bucket owner.
+ //
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
 ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`

 // The name of the destination bucket.
 //
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com.
+ // Path-style requests are not supported. Directory bucket names must be unique
+ // in the chosen Availability Zone.
Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption - // with server-side encryption using AWS KMS (SSE-KMS). Setting this header - // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with - // SSE-KMS. + // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). + // If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the + // object. // - // Specifying this header with a COPY action doesn’t affect bucket-level settings - // for S3 Bucket Key. + // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for + // object encryption with SSE-KMS. Specifying this header with a COPY action + // doesn’t affect bucket-level settings for S3 Bucket Key. + // + // For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. 
BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` - // Specifies caching behavior along the request/reply chain. + // Specifies the caching behavior along the request/reply chain. CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - // Indicates the algorithm you want Amazon S3 to use to create the checksum + // Indicates the algorithm that you want Amazon S3 to use to create the checksum // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. + // + // When you copy an object, if the source object has a checksum, that checksum + // value will be copied to the new object by default. If the CopyObject request + // does not include this x-amz-checksum-algorithm header, the checksum algorithm + // will be copied from the source object to the destination object (if it's + // present on the source object). You can optionally specify a different checksum + // algorithm to use with the x-amz-checksum-algorithm header. Unrecognized or + // unsupported values will respond with the HTTP status code 400 Bad Request. + // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // Specifies presentational information for the object. + // Specifies presentational information for the object. Indicates whether an + // object should be displayed in a web browser or downloaded as a file. It allows + // specifying the desired filename for the downloaded file. ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` // Specifies what content encodings have been applied to the object and thus // what decoding mechanisms must be applied to obtain the media-type referenced // by the Content-Type header field. + // + // For directory buckets, only the aws-chunked value is supported in this header + // field. ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` // The language the content is in. ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - // A standard MIME type describing the format of the object data. + // A standard MIME type that describes the format of the object data. ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - // Specifies the source object for the copy operation. You specify the value - // in one of two formats, depending on whether you want to access the source - // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html): + // Specifies the source object for the copy operation. The source object can + // be up to 5 GB. If the source object is an object that was uploaded by using + // a multipart upload, the object copy will be a single part object after the + // source object is copied to the destination bucket. 
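The two x-amz-copy-source formats described above are easier to see in a call; a hedged sketch follows, using the CopyObjectInput type from this file. Bucket and key names are examples only.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Illustrative only; not part of the vendored SDK or this diff.
	svc := s3.New(session.Must(session.NewSession()))

	// Bucket-and-key form: "source-bucket/source-key", URL-encoded. For an
	// object reached through an access point, the URL-encoded access point
	// ARN followed by "/object/<key>" is used instead.
	src := "awsexamplebucket/reports/january.pdf"

	_, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:     aws.String("example-destination-bucket"),
		Key:        aws.String("reports/january-copy.pdf"),
		CopySource: aws.String(src),
	})
	if err != nil {
		fmt.Println("CopyObject failed:", err)
	}
}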
+ //
+ // You specify the value of the copy source in one of two formats, depending
+ // on whether you want to access the source object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html):
 //
 // * For objects not accessed through an access point, specify the name of
 // the source bucket and the key of the source object, separated by a slash
- // (/). For example, to copy the object reports/january.pdf from the bucket
- // awsexamplebucket, use awsexamplebucket/reports/january.pdf. The value
- // must be URL-encoded.
+ // (/). For example, to copy the object reports/january.pdf from the general
+ // purpose bucket awsexamplebucket, use awsexamplebucket/reports/january.pdf.
+ // The value must be URL-encoded. To copy the object reports/january.pdf
+ // from the directory bucket awsexamplebucket--use1-az5--x-s3, use awsexamplebucket--use1-az5--x-s3/reports/january.pdf.
+ // The value must be URL-encoded.
 //
 // * For objects accessed through access points, specify the Amazon Resource
 // Name (ARN) of the object as accessed through the access point, in the
@@ -13406,43 +14597,104 @@ type CopyObjectInput struct {
 // my-access-point owned by account 123456789012 in Region us-west-2, use
 // the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf.
 // The value must be URL encoded. Amazon S3 supports copy operations using
- // access points only when the source and destination buckets are in the
- // same Amazon Web Services Region. Alternatively, for objects accessed through
- // Amazon S3 on Outposts, specify the ARN of the object as accessed in the
- // format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>.
+ // Access points only when the source and destination buckets are in the
+ // same Amazon Web Services Region. Access points are not supported by directory
+ // buckets. Alternatively, for objects accessed through Amazon S3 on Outposts,
+ // specify the ARN of the object as accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>.
 // For example, to copy the object reports/january.pdf through outpost my-outpost
 // owned by account 123456789012 in Region us-west-2, use the URL encoding
 // of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf.
 // The value must be URL-encoded.
 //
- // To copy a specific version of an object, append ?versionId=<version-id> to
- // the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).
+ // If your source bucket versioning is enabled, the x-amz-copy-source header
+ // by default identifies the current version of an object to copy. If the current
+ // version is a delete marker, Amazon S3 behaves as if the object was deleted.
+ // To copy a different version, use the versionId query parameter. Specifically,
+ // append ?versionId=<version-id> to the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).
 // If you don't specify a version ID, Amazon S3 copies the latest version of
 // the source object.
 //
+ // If you enable versioning on the destination bucket, Amazon S3 generates a
+ // unique version ID for the copied object. This version ID is different from
+ // the version ID of the source object. Amazon S3 returns the version ID of
+ // the copied object in the x-amz-version-id response header in the response.
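A companion sketch for the versioning behavior just described: pinning the source to a specific version with the versionId query parameter and reading the copy's own version ID from the response. All values are illustrative.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Illustrative only; not part of the vendored SDK or this diff.
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket: aws.String("example-destination-bucket"),
		Key:    aws.String("reports/january.pdf"),
		// Without ?versionId=..., the current version of the source is copied.
		CopySource: aws.String("awsexamplebucket/reports/january.pdf" +
			"?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893"),
	})
	if err != nil {
		fmt.Println("CopyObject failed:", err)
		return
	}
	// Version ID of the copy (x-amz-version-id); nil unless the destination
	// bucket has versioning enabled.
	fmt.Println("new version:", aws.StringValue(out.VersionId))
}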
+ // + // If you do not enable versioning or suspend it on the destination bucket, + // the version ID that Amazon S3 generates in the x-amz-version-id response + // header is always null. + // + // Directory buckets - S3 Versioning isn't enabled and supported for directory + // buckets. + // // CopySource is a required field CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` // Copies the object if its entity tag (ETag) matches the specified tag. + // + // If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // headers are present in the request and evaluate as follows, Amazon S3 returns + // 200 OK and copies the data: + // + // * x-amz-copy-source-if-match condition evaluates to true + // + // * x-amz-copy-source-if-unmodified-since condition evaluates to false CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` // Copies the object if it has been modified since the specified time. + // + // If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since + // headers are present in the request and evaluate as follows, Amazon S3 returns + // the 412 Precondition Failed response code: + // + // * x-amz-copy-source-if-none-match condition evaluates to false + // + // * x-amz-copy-source-if-modified-since condition evaluates to true CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"` // Copies the object if its entity tag (ETag) is different than the specified // ETag. + // + // If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since + // headers are present in the request and evaluate as follows, Amazon S3 returns + // the 412 Precondition Failed response code: + // + // * x-amz-copy-source-if-none-match condition evaluates to false + // + // * x-amz-copy-source-if-modified-since condition evaluates to true CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` // Copies the object if it hasn't been modified since the specified time. + // + // If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // headers are present in the request and evaluate as follows, Amazon S3 returns + // 200 OK and copies the data: + // + // * x-amz-copy-source-if-match condition evaluates to true + // + // * x-amz-copy-source-if-unmodified-since condition evaluates to false CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` // Specifies the algorithm to use when decrypting the source object (for example, // AES256). + // + // If the source object for the copy is stored in Amazon S3 using SSE-C, you + // must provide the necessary encryption information in your request so that + // Amazon S3 can decrypt the object for copying. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt - // the source object. The encryption key provided in this header must be one - // that was used when the source object was created. + // the source object. 
The encryption key provided in this header must be the + // same one that was used when the source object was created. + // + // If the source object for the copy is stored in Amazon S3 using SSE-C, you + // must provide the necessary encryption information in your request so that + // Amazon S3 can decrypt the object for copying. + // + // This functionality is not supported when the source object is in a directory + // bucket. // // CopySourceSSECustomerKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CopyObjectInput's @@ -13452,16 +14704,23 @@ type CopyObjectInput struct { // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the // encryption key was transmitted without error. + // + // If the source object for the copy is stored in Amazon S3 using SSE-C, you + // must provide the necessary encryption information in your request so that + // Amazon S3 can decrypt the object for copying. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` - // The account ID of the expected destination bucket owner. If the destination - // bucket is owned by a different account, the request fails with the HTTP status - // code 403 Forbidden (access denied). + // The account ID of the expected destination bucket owner. If the account ID + // that you provide does not match the actual owner of the destination bucket, + // the request fails with the HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // The account ID of the expected source bucket owner. If the source bucket - // is owned by a different account, the request fails with the HTTP status code - // 403 Forbidden (access denied). + // The account ID of the expected source bucket owner. If the account ID that + // you provide does not match the actual owner of the source bucket, the request + // fails with the HTTP status code 403 Forbidden (access denied). ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"` // The date and time at which the object is no longer cacheable. @@ -13469,22 +14728,30 @@ type CopyObjectInput struct { // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. // - // This action is not supported by Amazon S3 on Outposts. + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` // Allows grantee to read the object data and its metadata. // - // This action is not supported by Amazon S3 on Outposts. + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` // Allows grantee to read the object ACL. // - // This action is not supported by Amazon S3 on Outposts. + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. 
GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` // Allows grantee to write the ACL for the applicable object. // - // This action is not supported by Amazon S3 on Outposts. + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` // The key of the destination object. @@ -13496,35 +14763,69 @@ type CopyObjectInput struct { Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` // Specifies whether the metadata is copied from the source object or replaced - // with metadata provided in the request. + // with metadata that's provided in the request. When copying an object, you + // can preserve all metadata (the default) or specify new metadata. If this + // header isn’t specified, COPY is the default behavior. + // + // General purpose bucket - For general purpose buckets, when you grant permissions, + // you can use the s3:x-amz-metadata-directive condition key to enforce certain + // metadata behavior when objects are uploaded. For more information, see Amazon + // S3 condition key examples (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) + // in the Amazon S3 User Guide. + // + // x-amz-website-redirect-location is unique to each object and is not copied + // when using the x-amz-metadata-directive header. To copy the value, you must + // specify x-amz-website-redirect-location in the request header. MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"` - // Specifies whether you want to apply a legal hold to the copied object. + // Specifies whether you want to apply a legal hold to the object copy. + // + // This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - // The Object Lock mode that you want to apply to the copied object. + // The Object Lock mode that you want to apply to the object copy. + // + // This functionality is not supported for directory buckets. ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - // The date and time when you want the copied object's Object Lock to expire. + // The date and time when you want the Object Lock of the object copy to expire. + // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. 
+ // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // When you perform a CopyObject operation, if you want to use a different type + // of encryption setting for the target object, you can specify appropriate + // encryption-related headers to encrypt the target object with an Amazon S3 + // managed key, a KMS key, or a customer-provided key. If the encryption setting + // in your request is different from the default encryption configuration of + // the destination bucket, the encryption setting in your request takes precedence. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon + // data. This value is used to store the object and then it is discarded. Amazon // S3 does not store the encryption key. The key must be appropriate for use // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. // + // This functionality is not supported when the destination bucket is a directory + // bucket. + // // SSECustomerKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CopyObjectInput's // String and GoString methods. @@ -13533,54 +14834,201 @@ type CopyObjectInput struct { // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the // encryption key was transmitted without error. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Specifies the Amazon Web Services KMS Encryption Context to use for object // encryption. The value of this header is a base64-encoded UTF-8 string holding - // JSON with the encryption context key-value pairs. + // JSON with the encryption context key-value pairs. This value must be explicitly + // added to specify encryption context for CopyObject requests. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. // // SSEKMSEncryptionContext is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CopyObjectInput's // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // Specifies the Amazon Web Services KMS key ID to use for object encryption. - // All GET and PUT requests for an object protected by Amazon Web Services KMS - // will fail if not made via SSL or using SigV4. 
For information about configuring - // using any of the officially supported Amazon Web Services SDKs and Amazon - // Web Services CLI, see Specifying the Signature Version in Request Authentication - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object encryption. + // All GET and PUT requests for an object protected by KMS will fail if they're + // not made via SSL or using SigV4. For information about configuring any of + // the officially supported Amazon Web Services SDKs and Amazon Web Services + // CLI, see Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) // in the Amazon S3 User Guide. // + // This functionality is not supported when the destination bucket is a directory + // bucket. + // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CopyObjectInput's // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms). + // S3 (for example, AES256, aws:kms, aws:kms:dsse). Unrecognized or unsupported + // values won’t write a destination object and will receive a 400 Bad Request + // response. + // + // Amazon S3 automatically encrypts all new objects that are copied to an S3 + // bucket. When copying an object, if you don't specify encryption information + // in your copy request, the encryption setting of the target object is set + // to the default encryption configuration of the destination bucket. By default, + // all buckets have a base level of encryption configuration that uses server-side + // encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket + // has a default encryption configuration that uses server-side encryption with + // Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption + // with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with + // customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding + // KMS key, or a customer-provided key to encrypt the target object copy. + // + // When you perform a CopyObject operation, if you want to use a different type + // of encryption setting for the target object, you can specify appropriate + // encryption-related headers to encrypt the target object with an Amazon S3 + // managed key, a KMS key, or a customer-provided key. If the encryption setting + // in your request is different from the default encryption configuration of + // the destination bucket, the encryption setting in your request takes precedence. + // + // With server-side encryption, Amazon S3 encrypts your data as it writes your + // data to disks in its data centers and decrypts the data when you access it. + // For more information about server-side encryption, see Using Server-Side + // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) + // in the Amazon S3 User Guide. + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. 
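The rewritten SSEKMSKeyId and ServerSideEncryption comments above boil down to one rule: encryption headers in the copy request take precedence over the destination bucket's default configuration. A sketch of such an override, reusing svc and the imports from the first example; the KMS key ARN is a placeholder.

func copyWithKMSOverride(svc *s3.S3) error {
	_, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:     aws.String("dest-bucket"),
		Key:        aws.String("dest-key"),
		CopySource: aws.String("src-bucket/src-key"),
		// "aws:kms" here wins over the destination bucket's default encryption.
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAwsKms),
		SSEKMSKeyId:          aws.String("arn:aws:kms:us-east-1:111122223333:key/placeholder"),
		BucketKeyEnabled:     aws.Bool(true), // an S3 Bucket Key reduces KMS request volume
	})
	return err
}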
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - // By default, Amazon S3 uses the STANDARD Storage Class to store newly created - // objects. The STANDARD storage class provides high durability and high availability. - // Depending on performance needs, you can specify a different Storage Class. - // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, - // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // If the x-amz-storage-class header is not used, the copied object will be + // stored in the STANDARD Storage Class by default. The STANDARD storage class + // provides high durability and high availability. Depending on performance + // needs, you can specify a different Storage Class. + // + // * Directory buckets - For directory buckets, only the S3 Express One Zone + // storage class is supported to store newly created objects. Unsupported + // storage class values won't write a destination object and will respond + // with the HTTP status code 400 Bad Request. + // + // * Amazon S3 on Outposts - S3 on Outposts only uses the OUTPOSTS Storage + // Class. + // + // You can use the CopyObject action to change the storage class of an object + // that is already stored in Amazon S3 by using the x-amz-storage-class header. + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // in the Amazon S3 User Guide. + // + // Before using an object as a source object for the copy operation, you must + // restore a copy of it if it meets any of the following conditions: + // + // * The storage class of the source object is GLACIER or DEEP_ARCHIVE. + // + // * The storage class of the source object is INTELLIGENT_TIERING and its + // S3 Intelligent-Tiering access tier (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition) + // is Archive Access or Deep Archive Access. + // + // For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) + // and Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html) // in the Amazon S3 User Guide. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - // The tag-set for the object destination object this value must be used in - // conjunction with the TaggingDirective. The tag-set must be encoded as URL - // Query parameters. + // The tag-set for the object copy in the destination bucket. This value must + // be used in conjunction with the x-amz-tagging-directive header if you choose + // REPLACE for the x-amz-tagging-directive. If you choose COPY for the x-amz-tagging-directive, + // you don't need to set the x-amz-tagging header, because the tag-set will + // be copied from the source object directly. The tag-set must be encoded as + // URL Query parameters. + // + // The default value is the empty value. + // + // Directory buckets - For directory buckets in a CopyObject operation, only + // the empty tag-set is supported. Any requests that attempt to write non-empty + // tags into directory buckets will receive a 501 Not Implemented status code.
+ // When the destination bucket is a directory bucket, you will receive a 501 + // Not Implemented response in any of the following situations: + // + // * When you attempt to COPY the tag-set from an S3 source object that has + // non-empty tags. + // + // * When you attempt to REPLACE the tag-set of a source object and set a + // non-empty value to x-amz-tagging. + // + // * When you don't set the x-amz-tagging-directive header and the source + // object has non-empty tags. This is because the default value of x-amz-tagging-directive + // is COPY. + // + // Because only the empty tag-set is supported for directory buckets in a CopyObject + // operation, the following situations are allowed: + // + // * When you attempt to COPY the tag-set from a directory bucket source + // object that has no tags to a general purpose bucket. It copies an empty + // tag-set to the destination object. + // + // * When you attempt to REPLACE the tag-set of a directory bucket source + // object and set the x-amz-tagging value of the directory bucket destination + // object to empty. + // + // * When you attempt to REPLACE the tag-set of a general purpose bucket + // source object that has non-empty tags and set the x-amz-tagging value + // of the directory bucket destination object to empty. + // + // * When you attempt to REPLACE the tag-set of a directory bucket source + // object and don't set the x-amz-tagging value of the directory bucket destination + // object. This is because the default value of x-amz-tagging is the empty + // value. Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` - // Specifies whether the object tag-set are copied from the source object or - // replaced with tag-set provided in the request. + // Specifies whether the object tag-set is copied from the source object or + // replaced with the tag-set that's provided in the request. + // + // The default value is COPY. + // + // Directory buckets - For directory buckets in a CopyObject operation, only + // the empty tag-set is supported. Any requests that attempt to write non-empty + // tags into directory buckets will receive a 501 Not Implemented status code. + // When the destination bucket is a directory bucket, you will receive a 501 + // Not Implemented response in any of the following situations: + // + // * When you attempt to COPY the tag-set from an S3 source object that has + // non-empty tags. + // + // * When you attempt to REPLACE the tag-set of a source object and set a + // non-empty value to x-amz-tagging. + // + // * When you don't set the x-amz-tagging-directive header and the source + // object has non-empty tags. This is because the default value of x-amz-tagging-directive + // is COPY. + // + // Because only the empty tag-set is supported for directory buckets in a CopyObject + // operation, the following situations are allowed: + // + // * When you attempt to COPY the tag-set from a directory bucket source + // object that has no tags to a general purpose bucket. It copies an empty + // tag-set to the destination object. + // + // * When you attempt to REPLACE the tag-set of a directory bucket source + // object and set the x-amz-tagging value of the directory bucket destination + // object to empty. + // + // * When you attempt to REPLACE the tag-set of a general purpose bucket + // source object that has non-empty tags and set the x-amz-tagging value + // of the directory bucket destination object to empty. 
+ // + // * When you attempt to REPLACE the tag-set of a directory bucket source + // object and don't set the x-amz-tagging value of the directory bucket destination + // object. This is because the default value of x-amz-tagging is the empty + // value. TaggingDirective *string `location:"header" locationName:"x-amz-tagging-directive" type:"string" enum:"TaggingDirective"` - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. + // If the destination bucket is configured as a website, redirects requests + // for this object copy to another object in the same bucket or to an external + // URL. Amazon S3 stores the value of this header in the object metadata. This + // value is unique to each object and is not copied when using the x-amz-metadata-directive + // header. Instead, you may opt to provide this header in combination with the + // x-amz-metadata-directive header. + // + // This functionality is not supported for directory buckets. WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } @@ -13925,55 +15373,76 @@ type CopyObjectOutput struct { _ struct{} `type:"structure" payload:"CopyObjectResult"` // Indicates whether the copied object uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). + // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Container for all response elements. CopyObjectResult *CopyObjectResult `type:"structure"` - // Version of the copied object in the destination bucket. + // Version ID of the source object that was copied. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` // If the object expiration is configured, the response includes this header. + // + // This functionality is not supported for directory buckets. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity + // the response will include this header to provide the round-trip message integrity // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. 
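Connecting the tag-set rules above with the CopyObjectOutput fields documented next: a sketch that replaces the destination tag-set and reads back the version IDs. It assumes general purpose buckets; per the lists above, a directory bucket destination would answer this non-empty tag-set with 501 Not Implemented. Same imports and svc as the first sketch.

func copyWithNewTags(svc *s3.S3) error {
	out, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:           aws.String("dest-bucket"),
		Key:              aws.String("dest-key"),
		CopySource:       aws.String("src-bucket/src-key"),
		TaggingDirective: aws.String(s3.TaggingDirectiveReplace),     // default is COPY
		Tagging:          aws.String("project=peridot&stage=release"), // URL-query-encoded
	})
	if err != nil {
		return err
	}
	// Both IDs are nil for unversioned buckets, and always for directory buckets.
	fmt.Println(aws.StringValue(out.VersionId), aws.StringValue(out.CopySourceVersionId))
	return nil
}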
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the Amazon Web Services KMS Encryption Context to use + // If present, indicates the Amazon Web Services KMS Encryption Context to use // for object encryption. The value of this header is a base64-encoded UTF-8 // string holding JSON with the encryption context key-value pairs. // + // This functionality is not supported for directory buckets. + // // SSEKMSEncryptionContext is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CopyObjectOutput's // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for - // the object. + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CopyObjectOutput's // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms). + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256, aws:kms, aws:kms:dsse). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Version ID of the newly created copy. + // + // This functionality is not supported for directory buckets. VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } @@ -14066,34 +15535,26 @@ type CopyObjectResult struct { _ struct{} `type:"structure"` // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumCRC32 *string `type:"string"` // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. 
For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumCRC32C *string `type:"string"` // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumSHA1 *string `type:"string"` // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumSHA256 *string `type:"string"` @@ -14164,34 +15625,42 @@ type CopyPartResult struct { _ struct{} `type:"structure"` // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string `type:"string"` // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. 
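The Checksum* fields in these hunks carry the base64 encoding of the raw big-endian checksum bytes. The helper below produces values of the same shape for a single-part object; for multipart objects, as the comments note, S3 reports a checksum computed over the per-part checksums instead. Only the standard library is involved, so nothing SDK-specific is assumed.

import (
	"encoding/base64"
	"encoding/binary"
	"hash/crc32"
)

// crc32Base64 encodes a CRC32 the way ChecksumCRC32/ChecksumCRC32C carry it:
// four big-endian bytes, base64-encoded.
func crc32Base64(data []byte, tab *crc32.Table) string {
	var buf [4]byte
	binary.BigEndian.PutUint32(buf[:], crc32.Checksum(data, tab))
	return base64.StdEncoding.EncodeToString(buf[:])
}

// crc32Base64(body, crc32.IEEETable)                   // ChecksumCRC32 (IEEE polynomial)
// crc32Base64(body, crc32.MakeTable(crc32.Castagnoli)) // ChecksumCRC32C (Castagnoli)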
For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string `type:"string"` // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string `type:"string"` // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string `type:"string"` @@ -14260,8 +15729,29 @@ func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult { type CreateBucketConfiguration struct { _ struct{} `type:"structure"` - // Specifies the Region where the bucket will be created. If you don't specify - // a Region, the bucket is created in the US East (N. Virginia) Region (us-east-1). + // Specifies the information about the bucket that will be created. + // + // This functionality is only supported by directory buckets. + Bucket *BucketInfo `type:"structure"` + + // Specifies the location where the bucket will be created. + // + // For directory buckets, the location type is Availability Zone. + // + // This functionality is only supported by directory buckets. + Location *LocationInfo `type:"structure"` + + // Specifies the Region where the bucket will be created. You might choose a + // Region to optimize latency, minimize costs, or address regulatory requirements. + // For example, if you reside in Europe, you will probably find it advantageous + // to create buckets in the Europe (Ireland) Region. 
For more information, see + // Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + // in the Amazon S3 User Guide. + // + // If you don't specify a Region, the bucket is created in the US East (N. Virginia) + // Region (us-east-1) by default. + // + // This functionality is not supported for directory buckets. LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` } @@ -14283,6 +15773,22 @@ func (s CreateBucketConfiguration) GoString() string { return s.String() } +// SetBucket sets the Bucket field's value. +func (s *CreateBucketConfiguration) SetBucket(v *BucketInfo) *CreateBucketConfiguration { + s.Bucket = v + return s +} + +func (s *CreateBucketConfiguration) getBucket() (v *BucketInfo) { + return s.Bucket +} + +// SetLocation sets the Location field's value. +func (s *CreateBucketConfiguration) SetLocation(v *LocationInfo) *CreateBucketConfiguration { + s.Location = v + return s +} + // SetLocationConstraint sets the LocationConstraint field's value. func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration { s.LocationConstraint = &v @@ -14293,10 +15799,25 @@ type CreateBucketInput struct { _ struct{} `locationName:"CreateBucketRequest" type:"structure" payload:"CreateBucketConfiguration"` // The canned ACL to apply to the bucket. + // + // This functionality is not supported for directory buckets. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` // The name of the bucket to create. // + // General purpose buckets - For information about bucket naming restrictions, + // see Bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) + // in the Amazon S3 User Guide. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name + // . Virtual-hosted-style requests aren't supported. Directory bucket names + // must be unique in the chosen Availability Zone. Bucket names must also follow + // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). + // For information about bucket naming restrictions, see Directory bucket naming + // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -14305,24 +15826,36 @@ type CreateBucketInput struct { // Allows grantee the read, write, read ACP, and write ACP permissions on the // bucket. + // + // This functionality is not supported for directory buckets. GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` // Allows grantee to list the objects in the bucket. + // + // This functionality is not supported for directory buckets. GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` // Allows grantee to read the bucket ACL. + // + // This functionality is not supported for directory buckets. GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` // Allows grantee to create new objects in the bucket. // // For the bucket and object owners of existing objects, also allows deletions // and overwrites of those objects. 
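The new Bucket and Location members above route CreateBucket between directory buckets and the classic LocationConstraint path. A sketch of both, reusing svc; the BucketInfo and LocationInfo field names and the wire strings ("Directory", "SingleAvailabilityZone", "AvailabilityZone") are taken from the S3 API model for directory buckets and should be treated as assumptions here, since those structures are defined elsewhere in this file. Bucket names are placeholders.

func createExampleBuckets(svc *s3.S3) error {
	// Directory bucket pinned to a single Availability Zone.
	if _, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String("doc-example-bucket--usw2-az1--x-s3"),
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
			Bucket: &s3.BucketInfo{
				DataRedundancy: aws.String("SingleAvailabilityZone"), // assumed wire value
				Type:           aws.String("Directory"),              // assumed wire value
			},
			Location: &s3.LocationInfo{
				Name: aws.String("usw2-az1"), // the Availability Zone ID
				Type: aws.String("AvailabilityZone"),
			},
		},
	}); err != nil {
		return err
	}
	// General purpose bucket outside us-east-1 via the classic constraint.
	_, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String("doc-example-bucket"),
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
			LocationConstraint: aws.String("eu-west-1"),
		},
	})
	return err
}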
+ // + // This functionality is not supported for directory buckets. GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` // Allows grantee to write the ACL for the applicable bucket. + // + // This functionality is not supported for directory buckets. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` // Specifies whether you want S3 Object Lock to be enabled for the new bucket. + // + // This functionality is not supported for directory buckets. ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"` // The container element for object ownership for a bucket's ownership controls. @@ -14337,8 +15870,19 @@ type CreateBucketInput struct { // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer // affect permissions. The bucket owner automatically owns and has full control // over every object in the bucket. The bucket only accepts PUT requests that - // don't specify an ACL or bucket owner full control ACLs, such as the bucket-owner-full-control - // canned ACL or an equivalent form of this ACL expressed in the XML format. + // don't specify an ACL or specify bucket owner full control ACLs (such as the + // predefined bucket-owner-full-control canned ACL or a custom ACL in XML format + // that grants the same permissions). + // + // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are disabled. + // We recommend keeping ACLs disabled, except in uncommon use cases where you + // must control access for each object individually. For more information about + // S3 Object Ownership, see Controlling ownership of objects and disabling ACLs + // for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. Directory buckets + // use the bucket owner enforced setting for S3 Object Ownership. ObjectOwnership *string `location:"header" locationName:"x-amz-object-ownership" type:"string" enum:"ObjectOwnership"` } @@ -14477,44 +16021,74 @@ func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput { type CreateMultipartUploadInput struct { _ struct{} `locationName:"CreateMultipartUploadRequest" type:"structure"` - // The canned ACL to apply to the object. + // The canned ACL to apply to the object. Amazon S3 supports a set of predefined + // ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees + // and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) + // in the Amazon S3 User Guide. // - // This action is not supported by Amazon S3 on Outposts. + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can grant access permissions to individual + // Amazon Web Services accounts or to predefined groups defined by Amazon S3. + // These permissions are then added to the access control list (ACL) on the + // new object. For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). + // One way to grant the permissions using the request headers is to specify + // a canned ACL with the x-amz-acl request header. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. 
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` - // The name of the bucket to which to initiate the upload + // The name of the bucket where the multipart upload is initiated and where + // the object is uploaded. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az_id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption - // with server-side encryption using AWS KMS (SSE-KMS). Setting this header - // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with - // SSE-KMS.
+ // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). + // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for + // object encryption with SSE-KMS. // // Specifying this header with an object action doesn’t affect bucket-level // settings for S3 Bucket Key. + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Specifies caching behavior along the request/reply chain. CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - // Indicates the algorithm you want Amazon S3 to use to create the checksum + // Indicates the algorithm that you want Amazon S3 to use to create the checksum // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` @@ -14525,40 +16099,175 @@ type CreateMultipartUploadInput struct { // Specifies what content encodings have been applied to the object and thus // what decoding mechanisms must be applied to obtain the media-type referenced // by the Content-Type header field. + // + // For directory buckets, only the aws-chunked value is supported in this header + // field. ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - // The language the content is in. + // The language that the content is in. ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` // A standard MIME type describing the format of the object data. ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The date and time at which the object is no longer cacheable. Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // Specify access permissions explicitly to give the grantee READ, READ_ACP, + // and WRITE_ACP permissions on the object. // - // This action is not supported by Amazon S3 on Outposts. + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups. This header + // maps to specific permissions that Amazon S3 supports in an ACL. For more + // information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // in the Amazon S3 User Guide. 
+ // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // + // * id – if the value specified is the canonical user ID of an Amazon + // Web Services account + // + // * uri – if you are granting permissions to a predefined group + // + // * emailAddress – if the value specified is the email address of an Amazon + // Web Services account Using email addresses to specify a grantee is only + // supported in the following Amazon Web Services Regions: US East (N. Virginia) + // US West (N. California) US West (Oregon) Asia Pacific (Singapore) Asia + // Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) South America (São + // Paulo) For a list of all the Amazon S3 supported Regions and endpoints, + // see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. + // + // For example, the following x-amz-grant-read header grants the Amazon Web + // Services accounts identified by account IDs permissions to read object data + // and its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - // Allows grantee to read the object data and its metadata. + // Specify access permissions explicitly to allow grantee to read the object + // data and its metadata. // - // This action is not supported by Amazon S3 on Outposts. + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups. This header + // maps to specific permissions that Amazon S3 supports in an ACL. For more + // information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // in the Amazon S3 User Guide. + // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // + // * id – if the value specified is the canonical user ID of an Amazon + // Web Services account + // + // * uri – if you are granting permissions to a predefined group + // + // * emailAddress – if the value specified is the email address of an Amazon + // Web Services account Using email addresses to specify a grantee is only + // supported in the following Amazon Web Services Regions: US East (N. Virginia) + // US West (N. California) US West (Oregon) Asia Pacific (Singapore) Asia + // Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) South America (São + // Paulo) For a list of all the Amazon S3 supported Regions and endpoints, + // see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. + // + // For example, the following x-amz-grant-read header grants the Amazon Web + // Services accounts identified by account IDs permissions to read object data + // and its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - // Allows grantee to read the object ACL. 
+ // Specify access permissions explicitly to allow the grantee to read the object + // ACL. // - // This action is not supported by Amazon S3 on Outposts. + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups. This header + // maps to specific permissions that Amazon S3 supports in an ACL. For more + // information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // in the Amazon S3 User Guide. + // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // + // * id – if the value specified is the canonical user ID of an Amazon + // Web Services account + // + // * uri – if you are granting permissions to a predefined group + // + // * emailAddress – if the value specified is the email address of an Amazon + // Web Services account Using email addresses to specify a grantee is only + // supported in the following Amazon Web Services Regions: US East (N. Virginia) + // US West (N. California) US West (Oregon) Asia Pacific (Singapore) Asia + // Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) South America (São + // Paulo) For a list of all the Amazon S3 supported Regions and endpoints, + // see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. + // + // For example, the following x-amz-grant-read header grants the Amazon Web + // Services accounts identified by account IDs permissions to read object data + // and its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - // Allows grantee to write the ACL for the applicable object. + // Specify access permissions explicitly to allow the grantee to write the + // ACL for the applicable object. // - // This action is not supported by Amazon S3 on Outposts. + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups. This header + // maps to specific permissions that Amazon S3 supports in an ACL. For more + // information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // in the Amazon S3 User Guide. + // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // + // * id – if the value specified is the canonical user ID of an Amazon + // Web Services account + // + // * uri – if you are granting permissions to a predefined group + // + // * emailAddress – if the value specified is the email address of an Amazon + // Web Services account Using email addresses to specify a grantee is only + // supported in the following Amazon Web Services Regions: US East (N. Virginia) + // US West (N.
California) US West (Oregon) Asia Pacific (Singapore) Asia + // Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) South America (São + // Paulo) For a list of all the Amazon S3 supported Regions and endpoints, + // see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. + // + // For example, the following x-amz-grant-read header grants the Amazon Web + // Services accounts identified by account IDs permissions to read object data + // and its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` // Object key for which the multipart upload is to be initiated. @@ -14570,23 +16279,34 @@ type CreateMultipartUploadInput struct { Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` // Specifies whether you want to apply a legal hold to the uploaded object. + // + // This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` // Specifies the Object Lock mode that you want to apply to the uploaded object. + // + // This functionality is not supported for directory buckets. ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` // Specifies the date and time when you want the Object Lock to expire. + // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting @@ -14595,56 +16315,70 @@ type CreateMultipartUploadInput struct { // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. // + // This functionality is not supported for directory buckets. 
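The SSE-C fields in this hunk pair a raw key with its RFC 1321 digest. A sketch of starting a multipart upload with a customer-provided key; the key bytes are an obvious placeholder, and the same three SSE-C values must accompany every later UploadPart call against the returned upload ID. Needs bytes, crypto/md5, and encoding/base64 on top of the first sketch's imports.

func startSSECUpload(svc *s3.S3) (*s3.CreateMultipartUploadOutput, error) {
	key := bytes.Repeat([]byte{0x3a}, 32) // placeholder 256-bit key; use real key material
	keyMD5 := md5.Sum(key)
	return svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket:               aws.String("my-bucket"),
		Key:                  aws.String("large-object.bin"),
		SSECustomerAlgorithm: aws.String("AES256"),
		// The marshal-as:"blob" tag above means the SDK base64-encodes these
		// raw key bytes into the customer-key header; only the MD5 is encoded
		// by hand here.
		SSECustomerKey:    aws.String(string(key)),
		SSECustomerKeyMD5: aws.String(base64.StdEncoding.EncodeToString(keyMD5[:])),
	})
}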
+ // // SSECustomerKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CreateMultipartUploadInput's // String and GoString methods. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. + // Specifies the 128-bit MD5 digest of the customer-provided encryption key + // according to RFC 1321. Amazon S3 uses this header for a message integrity + // check to ensure that the encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Specifies the Amazon Web Services KMS Encryption Context to use for object // encryption. The value of this header is a base64-encoded UTF-8 string holding // JSON with the encryption context key-value pairs. // + // This functionality is not supported for directory buckets. + // // SSEKMSEncryptionContext is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CreateMultipartUploadInput's // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // Specifies the ID of the symmetric customer managed key to use for object - // encryption. All GET and PUT requests for an object protected by Amazon Web - // Services KMS will fail if not made via SSL or using SigV4. For information - // about configuring using any of the officially supported Amazon Web Services - // SDKs and Amazon Web Services CLI, see Specifying the Signature Version in - // Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) - // in the Amazon S3 User Guide. + // Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption + // customer managed key to use for object encryption. + // + // This functionality is not supported for directory buckets. // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CreateMultipartUploadInput's // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The server-side encryption algorithm used when storing this object in Amazon + // The server-side encryption algorithm used when you store this object in Amazon // S3 (for example, AES256, aws:kms). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // By default, Amazon S3 uses the STANDARD Storage Class to store newly created // objects. The STANDARD storage class provides high durability and high availability. // Depending on performance needs, you can specify a different Storage Class. - // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. 
For more information, - // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) // in the Amazon S3 User Guide. + // + // * For directory buckets, only the S3 Express One Zone storage class is + // supported to store newly created objects. + // + // * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + // + // This functionality is not supported for directory buckets. Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` // If the bucket is configured as a website, redirects requests for this object // to another object in the same bucket or to an external URL. Amazon S3 stores // the value of this header in the object metadata. + // + // This functionality is not supported for directory buckets. WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } @@ -14917,38 +16651,32 @@ type CreateMultipartUploadOutput struct { // name in the request, the response includes this header. The header indicates // when the initiated multipart upload becomes eligible for an abort operation. // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). + // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // in the Amazon S3 User Guide. // // The response also includes the x-amz-abort-rule-id header that provides the - // ID of the lifecycle configuration rule that defines this action. + // ID of the lifecycle configuration rule that defines the abort action. + // + // This functionality is not supported for directory buckets. AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` // This header is returned along with the x-amz-abort-date header. It identifies // the applicable lifecycle configuration rule that defines the action to abort // incomplete multipart uploads. + // + // This functionality is not supported for directory buckets. AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` // The name of the bucket to which the multipart upload was initiated. Does // not return the access point ARN or access point alias if used. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // Access points are not supported by directory buckets. Bucket *string `locationName:"Bucket" type:"string"` // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). + // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // The algorithm that was used to create a checksum of the object. @@ -14959,38 +16687,50 @@ type CreateMultipartUploadOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity + // the response will include this header to provide the round-trip message integrity // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the Amazon Web Services KMS Encryption Context to use + // If present, indicates the Amazon Web Services KMS Encryption Context to use // for object encryption. The value of this header is a base64-encoded UTF-8 // string holding JSON with the encryption context key-value pairs. // + // This functionality is not supported for directory buckets. + // // SSEKMSEncryptionContext is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CreateMultipartUploadOutput's // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for - // the object. + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CreateMultipartUploadOutput's // String and GoString methods. 
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The server-side encryption algorithm used when storing this object in Amazon + // The server-side encryption algorithm used when you store this object in Amazon // S3 (for example, AES256, aws:kms). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // ID for the initiated multipart upload. @@ -15100,6 +16840,136 @@ func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUplo return s } +type CreateSessionInput struct { + _ struct{} `locationName:"CreateSessionRequest" type:"structure"` + + // The name of the bucket that you create a session for. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies the mode of the session that will be created, either ReadWrite + // or ReadOnly. By default, a ReadWrite session is created. A ReadWrite session + // is capable of executing all the Zonal endpoint APIs on a directory bucket. + // A ReadOnly session is constrained to execute the following Zonal endpoint + // APIs: GetObject, HeadObject, ListObjectsV2, GetObjectAttributes, ListParts, + // and ListMultipartUploads. + SessionMode *string `location:"header" locationName:"x-amz-create-session-mode" type:"string" enum:"SessionMode"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateSessionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateSessionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateSessionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSessionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *CreateSessionInput) SetBucket(v string) *CreateSessionInput { + s.Bucket = &v + return s +} + +func (s *CreateSessionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetSessionMode sets the SessionMode field's value. 
+func (s *CreateSessionInput) SetSessionMode(v string) *CreateSessionInput { + s.SessionMode = &v + return s +} + +func (s *CreateSessionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CreateSessionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s CreateSessionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type CreateSessionOutput struct { + _ struct{} `type:"structure"` + + // The established temporary security credentials for the created session. + // + // Credentials is a required field + Credentials *SessionCredentials `locationName:"Credentials" type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateSessionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateSessionOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *CreateSessionOutput) SetCredentials(v *SessionCredentials) *CreateSessionOutput { + s.Credentials = v + return s +} + // The container element for specifying the default Object Lock retention settings // for new objects placed in the specified bucket. // @@ -15163,7 +17033,12 @@ func (s *DefaultRetention) SetYears(v int64) *DefaultRetention { type Delete struct { _ struct{} `type:"structure"` - // The objects to delete. + // The object to delete. + // + // Directory buckets - For directory buckets, an object that's composed entirely + // of whitespace characters is not supported by the DeleteObjects API operation. + // The request will receive a 400 Bad Request error and none of the objects + // in the request will be deleted. // // Objects is a required field Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` @@ -15234,9 +17109,9 @@ type DeleteBucketAnalyticsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). 
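The CreateSessionInput/Output types added above back the new CreateSession operation for directory buckets. A minimal sketch of requesting read-only session credentials, given an *s3.S3 client like the one built in the sketch above; the directory bucket name is a placeholder, and s3.SessionModeReadOnly is assumed to be the generated constant for the SessionMode enum:

// createReadOnlySession asks for temporary credentials that are limited to
// the read-only Zonal endpoint APIs listed in the documentation above.
func createReadOnlySession(svc *s3.S3) (*s3.SessionCredentials, error) {
	out, err := svc.CreateSession(&s3.CreateSessionInput{
		Bucket:      aws.String("DOC-EXAMPLE-BUCKET--usw2-az1--x-s3"),
		SessionMode: aws.String(s3.SessionModeReadOnly), // default is ReadWrite
	})
	if err != nil {
		return nil, err
	}
	return out.Credentials, nil
}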
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The ID that identifies the analytics configuration. @@ -15364,9 +17239,9 @@ type DeleteBucketCorsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -15481,9 +17356,9 @@ type DeleteBucketEncryptionInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -15594,12 +17469,25 @@ type DeleteBucketInput struct { // Specifies the bucket being deleted. // + // Directory buckets - When you use this operation with a directory bucket, + // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name + // . Virtual-hosted-style requests aren't supported. Directory bucket names + // must be unique in the chosen Availability Zone. Bucket names must also follow + // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). + // For information about bucket naming restrictions, see Directory bucket naming + // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. + // If you specify this header, the request fails with the HTTP status code 501 + // Not Implemented. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -15811,9 +17699,9 @@ type DeleteBucketInventoryConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. 
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The ID used to identify the inventory configuration. @@ -15941,9 +17829,9 @@ type DeleteBucketLifecycleInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -16057,12 +17945,13 @@ type DeleteBucketMetricsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // The ID used to identify the metrics configuration. + // The ID used to identify the metrics configuration. The ID has a 64 character + // limit and can only contain letters, numbers, periods, dashes, and underscores. // // Id is a required field Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` @@ -16209,9 +18098,9 @@ type DeleteBucketOwnershipControlsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -16322,12 +18211,25 @@ type DeleteBucketPolicyInput struct { // The bucket name. // + // Directory buckets - When you use this operation with a directory bucket, + // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name + // . Virtual-hosted-style requests aren't supported. Directory bucket names + // must be unique in the chosen Availability Zone. Bucket names must also follow + // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). 
+ // For information about bucket naming restrictions, see Directory bucket naming + // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. + // If you specify this header, the request fails with the HTTP status code 501 + // Not Implemented. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -16441,9 +18343,9 @@ type DeleteBucketReplicationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -16557,9 +18459,9 @@ type DeleteBucketTaggingInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -16673,9 +18575,9 @@ type DeleteBucketWebsiteInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -16792,7 +18694,7 @@ type DeleteMarkerEntry struct { // The object key. Key *string `min:"1" type:"string"` - // Date and time the object was last modified. + // Date and time when the object was last modified. LastModified *time.Time `type:"timestamp"` // The account that created the delete marker.> @@ -16901,19 +18803,33 @@ type DeleteObjectInput struct { // The bucket name of the bucket containing the object. 
// - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -16922,11 +18838,13 @@ type DeleteObjectInput struct { // Indicates whether S3 Object Lock should bypass Governance-mode restrictions // to process this operation. To use this header, you must have the s3:BypassGovernanceRetention // permission. + // + // This functionality is not supported for directory buckets. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. 
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Key name of the object to delete. @@ -16938,16 +18856,25 @@ type DeleteObjectInput struct { // and the value that is displayed on your authentication device. Required to // permanently delete a versioned object if versioning is configured with MFA // delete enabled. + // + // This functionality is not supported for directory buckets. MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // VersionId used to reference a specific version of the object. + // Version ID used to reference a specific version of the object. + // + // For directory buckets in this API operation, only the null value of the version + // ID is supported. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -17070,16 +18997,24 @@ func (s DeleteObjectInput) updateArnableField(v string) (interface{}, error) { type DeleteObjectOutput struct { _ struct{} `type:"structure"` - // Specifies whether the versioned object that was permanently deleted was (true) - // or was not (false) a delete marker. + // Indicates whether the specified object version that was permanently deleted + // was (true) or was not (false) a delete marker before deletion. In a simple + // DELETE, this header indicates whether (true) or not (false) the current version + // of the object is a delete marker. + // + // This functionality is not supported for directory buckets. DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // Returns the version ID of the delete marker created as a result of the DELETE // operation. + // + // This functionality is not supported for directory buckets. VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } @@ -17124,27 +19059,30 @@ type DeleteObjectTaggingInput struct { // The bucket name containing the objects from which to remove the tags. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
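For the simple-DELETE path documented above, a sketch of deleting one object and reading back the delete-marker fields from DeleteObjectOutput (names are placeholders; the same call shape works for a directory bucket such as DOC-EXAMPLE-BUCKET--usw2-az1--x-s3, where only the null version ID is supported and these headers are absent):

// deleteOne issues a simple DELETE; on a versioned general purpose bucket
// this creates a delete marker rather than removing any data.
func deleteOne(svc *s3.S3) error {
	out, err := svc.DeleteObject(&s3.DeleteObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("logs/2024-07-01.json"),
	})
	if err != nil {
		return err
	}
	// VersionId is the ID of the delete marker created by this DELETE, if any.
	fmt.Println(aws.BoolValue(out.DeleteMarker), aws.StringValue(out.VersionId))
	return nil
}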
+ // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The key that identifies the object in the bucket from which to remove all @@ -17291,19 +19229,33 @@ type DeleteObjectsInput struct { // The bucket name containing the objects to delete. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). 
For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -17312,22 +19264,38 @@ type DeleteObjectsInput struct { // Specifies whether you want to delete this object even if it has a Governance-type // Object Lock in place. To use this header, you must have the s3:BypassGovernanceRetention // permission. + // + // This functionality is not supported for directory buckets. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum-algorithm or x-amz-trailer header sent. 
Otherwise, Amazon + // S3 fails the request with the HTTP status code 400 Bad Request. + // + // For the x-amz-checksum-algorithm header, replace algorithm with the supported + // algorithm from the following list: + // + // * CRC32 + // + // * CRC32C + // + // * SHA1 + // + // * SHA256 + // + // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // + // If the individual checksum value you provide through x-amz-checksum-algorithm + // doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, + // Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum + // algorithm that matches the provided value in x-amz-checksum-algorithm . + // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm // parameter. // - // This checksum algorithm must be the same for all parts and it match the checksum - // value supplied in the CreateMultipartUpload request. - // // The AWS SDK for Go v1 does not support automatic computing request payload // checksum. This feature is available in the AWS SDK for Go v2. If a value // is specified for this parameter, the matching algorithm's checksum member @@ -17343,22 +19311,37 @@ type DeleteObjectsInput struct { // Delete is a required field Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The concatenation of the authentication device's serial number, a space, // and the value that is displayed on your authentication device. Required to // permanently delete a versioned object if versioning is configured with MFA // delete enabled. + // + // When performing the DeleteObjects operation on an MFA delete enabled bucket, + // which attempts to delete the specified versioned objects, you must include + // an MFA token. If you don't provide an MFA token, the entire request will + // fail, even if there are non-versioned objects that you are trying to delete. + // If you provide an invalid token, whether there are versioned object keys + // in the request or not, the entire Multi-Object Delete request will fail. + // For information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. 
For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } @@ -17493,6 +19476,8 @@ type DeleteObjectsOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } @@ -17540,9 +19525,9 @@ type DeletePublicAccessBlockInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -17652,20 +19637,27 @@ func (s DeletePublicAccessBlockOutput) GoString() string { type DeletedObject struct { _ struct{} `type:"structure"` - // Specifies whether the versioned object that was permanently deleted was (true) - // or was not (false) a delete marker. In a simple DELETE, this header indicates - // whether (true) or not (false) a delete marker was created. + // Indicates whether the specified object version that was permanently deleted + // was (true) or was not (false) a delete marker before deletion. In a simple + // DELETE, this header indicates whether (true) or not (false) the current version + // of the object is a delete marker. + // + // This functionality is not supported for directory buckets. DeleteMarker *bool `type:"boolean"` // The version ID of the delete marker created as a result of the DELETE operation. // If you delete a specific object version, the value returned by this header // is the version ID of the object version deleted. + // + // This functionality is not supported for directory buckets. DeleteMarkerVersionId *string `type:"string"` // The name of the deleted object. Key *string `min:"1" type:"string"` // The version ID of the deleted object. + // + // This functionality is not supported for directory buckets. VersionId *string `type:"string"` } @@ -17871,9 +19863,9 @@ type Encryption struct { KMSContext *string `type:"string"` // If the encryption type is aws:kms, this optional value specifies the ID of - // the symmetric customer managed key to use for encryption of job results. - // Amazon S3 only supports symmetric keys. For more information, see Using symmetric - // and asymmetric keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // the symmetric encryption customer managed key to use for encryption of job + // results. Amazon S3 only supports symmetric encryption KMS keys. For more + // information, see Asymmetric keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) // in the Amazon Web Services Key Management Service Developer Guide. 
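Putting the DeleteObjects input and output documented above together, a sketch of a quiet multi-object delete (up to 1,000 keys per request) that surfaces per-key failures from the response; bucket and keys are placeholders, and on an MFA-delete-enabled versioned bucket the MFA field shown in the comment would also be required:

// deleteBatch removes the given keys in one request. With Quiet enabled,
// the response lists only the keys that failed to delete.
func deleteBatch(svc *s3.S3, bucket string, keys []string) error {
	ids := make([]*s3.ObjectIdentifier, 0, len(keys))
	for _, k := range keys {
		ids = append(ids, &s3.ObjectIdentifier{Key: aws.String(k)})
	}
	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String(bucket),
		Delete: &s3.Delete{Objects: ids, Quiet: aws.Bool(true)},
		// MFA: aws.String("arn:aws:iam::111122223333:mfa/user 123456"), // "device token"
	})
	if err != nil {
		return err
	}
	for _, e := range out.Errors {
		fmt.Printf("failed to delete %s: %s (%s)\n",
			aws.StringValue(e.Key), aws.StringValue(e.Code), aws.StringValue(e.Message))
	}
	return nil
}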
// // KMSKeyId is a sensitive parameter and its value will be @@ -17939,8 +19931,8 @@ type EncryptionConfiguration struct { // Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web // Services KMS key stored in Amazon Web Services Key Management Service (KMS) // for the destination bucket. Amazon S3 uses this key to encrypt replica objects. - // Amazon S3 only supports symmetric, customer managed KMS keys. For more information, - // see Using symmetric and asymmetric keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // Amazon S3 only supports symmetric encryption KMS keys. For more information, + // see Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) // in the Amazon Web Services Key Management Service Developer Guide. ReplicaKmsKeyID *string `type:"string"` } @@ -18019,9 +20011,8 @@ type Error struct { // The error code is a string that uniquely identifies an error condition. It // is meant to be read and understood by programs that detect and handle errors - // by type. - // - // Amazon S3 error codes + // by type. The following is a list of Amazon S3 error codes. For more information, + // see Error responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html). // // * Code: AccessDenied Description: Access Denied HTTP Status Code: 403 // Forbidden SOAP Fault Code Prefix: Client @@ -18341,8 +20332,8 @@ type Error struct { // and SOAP Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) // for details. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: Client // - // * Code: ServiceUnavailable Description: Reduce your request rate. HTTP - // Status Code: 503 Service Unavailable SOAP Fault Code Prefix: Server + // * Code: ServiceUnavailable Description: Service is unable to handle request. + // HTTP Status Code: 503 Service Unavailable SOAP Fault Code Prefix: Server // // * Code: SlowDown Description: Reduce your request rate. HTTP Status Code: // 503 Slow Down SOAP Fault Code Prefix: Server @@ -18382,6 +20373,8 @@ type Error struct { Message *string `type:"string"` // The version ID of the error. + // + // This functionality is not supported for directory buckets. VersionId *string `type:"string"` } @@ -18510,6 +20503,8 @@ func (s EventBridgeConfiguration) GoString() string { type ExistingObjectReplication struct { _ struct{} `type:"structure"` + // Specifies whether Amazon S3 replicates existing source bucket objects. + // // Status is a required field Status *string `type:"string" required:"true" enum:"ExistingObjectReplicationStatus"` } @@ -18551,8 +20546,15 @@ func (s *ExistingObjectReplication) SetStatus(v string) *ExistingObjectReplicati return s } -// Specifies the Amazon S3 object key name to filter on and whether to filter -// on the suffix or prefix of the key name. +// Specifies the Amazon S3 object key name to filter on. An object key name +// is the name assigned to an object in your Amazon S3 bucket. You specify whether +// to filter on the suffix or prefix of the object key name. A prefix is a specific +// string of characters at the beginning of an object key name, which you can +// use to organize objects. For example, you can start the key names of related +// objects with a prefix, such as 2023- or engineering/. Then, you can use FilterRule +// to find objects in a bucket with key names that have the same prefix. 
A suffix +// is similar to a prefix, but it is at the end of the object key name instead +// of at the beginning. type FilterRule struct { _ struct{} `type:"structure"` @@ -18605,10 +20607,21 @@ type GetBucketAccelerateConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } // String returns the string representation. @@ -18664,6 +20677,12 @@ func (s *GetBucketAccelerateConfigurationInput) SetExpectedBucketOwner(v string) return s } +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetBucketAccelerateConfigurationInput) SetRequestPayer(v string) *GetBucketAccelerateConfigurationInput { + s.RequestPayer = &v + return s +} + func (s *GetBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -18694,6 +20713,12 @@ func (s GetBucketAccelerateConfigurationInput) updateArnableField(v string) (int type GetBucketAccelerateConfigurationOutput struct { _ struct{} `type:"structure"` + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + // The accelerate configuration of the bucket. Status *string `type:"string" enum:"BucketAccelerateStatus"` } @@ -18716,6 +20741,12 @@ func (s GetBucketAccelerateConfigurationOutput) GoString() string { return s.String() } +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetBucketAccelerateConfigurationOutput) SetRequestCharged(v string) *GetBucketAccelerateConfigurationOutput { + s.RequestCharged = &v + return s +} + // SetStatus sets the Status field's value. func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketAccelerateConfigurationOutput { s.Status = &v @@ -18727,12 +20758,21 @@ type GetBucketAclInput struct { // Specifies the S3 bucket whose ACL is being requested. // + // When you use this API operation with an access point, provide the alias of + // the access point in place of the bucket name. 
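The FilterRule prefix/suffix semantics described above are what bucket notification filters are built from. A sketch wiring a prefix rule into a queue notification, assuming a placeholder bucket and SQS queue ARN and the generated s3.FilterRuleNamePrefix constant:

// notifyOnPrefix sends s3:ObjectCreated:* events to a queue, but only for
// object keys that begin with "2023-".
func notifyOnPrefix(svc *s3.S3) error {
	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("example-bucket"),
		NotificationConfiguration: &s3.NotificationConfiguration{
			QueueConfigurations: []*s3.QueueConfiguration{{
				QueueArn: aws.String("arn:aws:sqs:us-west-2:111122223333:example-queue"),
				Events:   []*string{aws.String("s3:ObjectCreated:*")},
				Filter: &s3.NotificationConfigurationFilter{
					Key: &s3.KeyFilter{
						FilterRules: []*s3.FilterRule{{
							Name:  aws.String(s3.FilterRuleNamePrefix), // or FilterRuleNameSuffix
							Value: aws.String("2023-"),
						}},
					},
				},
			}},
		},
	})
	return err
}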
+ // + // When you use this API operation with an Object Lambda access point, provide + // the alias of the Object Lambda access point in place of the bucket name. + // If the Object Lambda access point alias in a request is not valid, the error + // code InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -18864,9 +20904,9 @@ type GetBucketAnalyticsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The ID that identifies the analytics configuration. @@ -19000,12 +21040,21 @@ type GetBucketCorsInput struct { // The bucket name for which to get the cors configuration. // + // When you use this API operation with an access point, provide the alias of + // the access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide + // the alias of the Object Lambda access point in place of the bucket name. + // If the Object Lambda access point alias in a request is not valid, the error + // code InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -19130,9 +21179,9 @@ type GetBucketEncryptionInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). 
+ // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -19384,9 +21433,9 @@ type GetBucketInventoryConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The ID used to identify the inventory configuration. @@ -19523,9 +21572,9 @@ type GetBucketLifecycleConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -19648,9 +21697,9 @@ type GetBucketLifecycleInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -19770,12 +21819,21 @@ type GetBucketLocationInput struct { // The name of the bucket for which to get the location. // + // When you use this API operation with an access point, provide the alias of + // the access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide + // the alias of the Object Lambda access point in place of the bucket name. + // If the Object Lambda access point alias in a request is not valid, the error + // code InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. 
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -19900,9 +21958,9 @@ type GetBucketLoggingInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -20028,12 +22086,13 @@ type GetBucketMetricsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // The ID used to identify the metrics configuration. + // The ID used to identify the metrics configuration. The ID has a 64 character + // limit and can only contain letters, numbers, periods, dashes, and underscores. // // Id is a required field Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` @@ -20164,12 +22223,21 @@ type GetBucketNotificationConfigurationRequest struct { // The name of the bucket for which to get the notification configuration. // + // When you use this API operation with an access point, provide the alias of + // the access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide + // the alias of the Object Lambda access point in place of the bucket name. + // If the Object Lambda access point alias in a request is not valid, the error + // code InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -20261,9 +22329,9 @@ type GetBucketOwnershipControlsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -20382,14 +22450,40 @@ func (s *GetBucketOwnershipControlsOutput) SetOwnershipControls(v *OwnershipCont type GetBucketPolicyInput struct { _ struct{} `locationName:"GetBucketPolicyRequest" type:"structure"` - // The bucket name for which to get the bucket policy. + // The bucket name to get the bucket policy for. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name + // . Virtual-hosted-style requests aren't supported. Directory bucket names + // must be unique in the chosen Availability Zone. Bucket names must also follow + // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). + // For information about bucket naming restrictions, see Directory bucket naming + // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide + // + // Access points - When you use this API operation with an access point, provide + // the alias of the access point in place of the bucket name. + // + // Object Lambda access points - When you use this API operation with an Object + // Lambda access point, provide the alias of the Object Lambda access point + // in place of the bucket name. If the Object Lambda access point alias in a + // request is not valid, the error code InvalidAccessPointAliasError is returned. + // For more information about InvalidAccessPointAliasError, see List of Error + // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // + // Access points and Object Lambda access points are not supported by directory + // buckets. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. + // If you specify this header, the request fails with the HTTP status code 501 + // Not Implemented. 
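Given the directory-bucket constraints spelled out above, a sketch of fetching a bucket policy; ExpectedBucketOwner is deliberately omitted, since for directory buckets sending it fails with 501 Not Implemented (the bucket name is a placeholder, and it is assumed the client resolves directory-bucket names to the s3express-control path-style endpoint described above):

// getPolicy retrieves the bucket policy document as a JSON string.
func getPolicy(svc *s3.S3, bucket string) (string, error) {
	out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.Policy), nil
}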
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -20512,9 +22606,9 @@ type GetBucketPolicyStatusInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -20637,9 +22731,9 @@ type GetBucketReplicationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -20763,9 +22857,9 @@ type GetBucketRequestPaymentInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -20888,9 +22982,9 @@ type GetBucketTaggingInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -21015,9 +23109,9 @@ type GetBucketVersioningInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -21151,9 +23245,9 @@ type GetBucketWebsiteInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -21301,8 +23395,10 @@ type GetObjectAclInput struct { // The bucket name that contains the object for which to get the ACL information. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) @@ -21311,9 +23407,9 @@ type GetObjectAclInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The key of the object for which to get the ACL information. @@ -21322,13 +23418,19 @@ type GetObjectAclInput struct { Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. 
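A sketch of GetObjectAcl against a Requester Pays bucket, acknowledging the charge via the RequestPayer field documented above (names are placeholders; svc as in the first sketch):

func printObjectACL(svc *s3.S3) error {
	out, err := svc.GetObjectAcl(&s3.GetObjectAclInput{
		Bucket:       aws.String("my-bucket"),
		Key:          aws.String("photos/cat.jpg"),
		RequestPayer: aws.String(s3.RequestPayerRequester), // sends x-amz-request-payer: requester
	})
	if err != nil {
		return err
	}
	for _, grant := range out.Grants {
		fmt.Printf("%s -> %s\n", aws.StringValue(grant.Grantee.Type), aws.StringValue(grant.Permission))
	}
	return nil
}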
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // VersionId used to reference a specific version of the object. + // Version ID used to reference a specific version of the object. + // + // This functionality is not supported for directory buckets. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -21447,6 +23549,8 @@ type GetObjectAclOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } @@ -21491,27 +23595,41 @@ type GetObjectAttributesInput struct { // The name of the bucket that contains the object. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. 
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The object key. @@ -21522,8 +23640,8 @@ type GetObjectAttributesInput struct { // Sets the maximum number of parts to return. MaxParts *int64 `location:"header" locationName:"x-amz-max-parts" type:"integer"` - // An XML header that specifies the fields at the root level that you want returned - // in the response. Fields that you do not specify are not returned. + // Specifies the fields at the root level that you want returned in the response. + // Fields that you do not specify are not returned. // // ObjectAttributes is a required field ObjectAttributes []*string `location:"header" locationName:"x-amz-object-attributes" type:"list" required:"true" enum:"ObjectAttributes"` @@ -21533,13 +23651,19 @@ type GetObjectAttributesInput struct { PartNumberMarker *int64 `location:"header" locationName:"x-amz-part-number-marker" type:"integer"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting @@ -21548,6 +23672,8 @@ type GetObjectAttributesInput struct { // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. // + // This functionality is not supported for directory buckets. + // // SSECustomerKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by GetObjectAttributesInput's // String and GoString methods. @@ -21556,9 +23682,16 @@ type GetObjectAttributesInput struct { // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 
// Amazon S3 uses this header for a message integrity check to ensure that the // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // The version ID used to reference a specific version of the object. + // + // S3 Versioning isn't enabled and supported for directory buckets. For this + // API operation, only the null value of the version ID is supported by directory + // buckets. You can only specify null to the versionId query parameter in the + // request. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -21720,6 +23853,8 @@ type GetObjectAttributesOutput struct { // Specifies whether the object retrieved was (true) or was not (false) a delete // marker. If false, this response header does not appear in the response. + // + // This functionality is not supported for directory buckets. DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` // An ETag is an opaque identifier assigned by a web server to a specific version @@ -21737,15 +23872,22 @@ // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // Provides the storage class information of the object. Amazon S3 returns this // header for all objects except for S3 Standard storage class objects. // // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). + // + // Directory buckets - Only the S3 Express One Zone storage class is supported + // by directory buckets to store objects. StorageClass *string `type:"string" enum:"StorageClass"` // The version ID of the object. + // + // This functionality is not supported for directory buckets. VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } @@ -21843,6 +23985,15 @@ type GetObjectAttributesParts struct { // A container for elements related to a particular part. A response can contain // zero or more Parts elements. + // + // * General purpose buckets - For GetObjectAttributes, if an additional checksum + // (including x-amz-checksum-crc32, x-amz-checksum-crc32c, x-amz-checksum-sha1, + // or x-amz-checksum-sha256) isn't applied to the object specified in the + // request, the response doesn't return Part. + // + // * Directory buckets - For GetObjectAttributes, no matter whether an additional + // checksum is applied to the object specified in the request, the response + // returns Part. Parts []*ObjectPart `locationName:"Part" type:"list" flattened:"true"` // The total number of parts. @@ -21908,21 +24059,37 @@ type GetObjectInput struct { // The bucket name containing the object. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported.
Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using an Object Lambda access point the hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. + // Object Lambda access points - When you use this action with an Object Lambda + // access point, you must direct requests to the Object Lambda access point + // hostname. The Object Lambda access point hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -21934,25 +24101,50 @@ type GetObjectInput struct { // validation. This feature is available in the AWS SDK for Go v2. ChecksumMode *string `location:"header" locationName:"x-amz-checksum-mode" type:"string" enum:"ChecksumMode"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). 
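Pulling the GetObjectAttributes pieces from the preceding hunks together, a sketch that requests a few attributes via x-amz-object-attributes; note the caveat above that, for general purpose buckets, Part is only returned when the object carries an additional checksum. Names are placeholders.

func printObjectAttrs(svc *s3.S3) error {
	out, err := svc.GetObjectAttributes(&s3.GetObjectAttributesInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("big-object.bin"),
		ObjectAttributes: aws.StringSlice([]string{
			s3.ObjectAttributesEtag,
			s3.ObjectAttributesChecksum,
			s3.ObjectAttributesObjectParts,
		}),
		MaxParts: aws.Int64(100),
	})
	if err != nil {
		return err
	}
	fmt.Println("etag:", aws.StringValue(out.ETag))
	if out.ObjectParts != nil {
		fmt.Println("parts:", aws.Int64Value(out.ObjectParts.TotalPartsCount))
	}
	return nil
}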
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // Return the object only if its entity tag (ETag) is the same as the one specified; - // otherwise, return a 412 (precondition failed) error. + // Return the object only if its entity tag (ETag) is the same as the one specified + // in this header; otherwise, return a 412 Precondition Failed error. + // + // If both the If-Match and If-Unmodified-Since headers are present in the + // request, and the If-Match condition evaluates to true while the If-Unmodified-Since + // condition evaluates to false, S3 returns 200 OK and the data requested. + // + // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). IfMatch *string `location:"header" locationName:"If-Match" type:"string"` // Return the object only if it has been modified since the specified time; - // otherwise, return a 304 (not modified) error. + // otherwise, return a 304 Not Modified error. + // + // If both the If-None-Match and If-Modified-Since headers are present in + // the request, and the If-None-Match condition evaluates to false while the If-Modified-Since + // condition evaluates to true, S3 returns the 304 Not Modified status code. + // + // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` // Return the object only if its entity tag (ETag) is different from the one - // specified; otherwise, return a 304 (not modified) error. + // specified in this header; otherwise, return a 304 Not Modified error. + // + // If both the If-None-Match and If-Modified-Since headers are present in + // the request, and the If-None-Match condition evaluates to false while the If-Modified-Since + // condition evaluates to true, S3 returns the 304 Not Modified HTTP status + // code. + // + // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` // Return the object only if it has not been modified since the specified time; - // otherwise, return a 412 (precondition failed) error. + // otherwise, return a 412 Precondition Failed error. + // + // If both the If-Match and If-Unmodified-Since headers are present in the + // request, and the If-Match condition evaluates to true while the If-Unmodified-Since + // condition evaluates to false, S3 returns 200 OK and the data requested. + // + // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` // Key of the object to get. @@ -21965,24 +24157,28 @@ // Useful for downloading just a part of an object. PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` - // Downloads the specified range bytes of an object. For more information about - // the HTTP Range header, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 - // (https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35). + // Downloads the specified byte range of an object. For more information about - // the HTTP Range header, see https://www.rfc-editor.org/rfc/rfc9110.html#name-range + // the HTTP Range header, see https://www.rfc-editor.org/rfc/rfc9110.html#name-range + // (https://www.rfc-editor.org/rfc/rfc9110.html#name-range).
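The conditional-request semantics above in miniature: a GET that is suppressed when the caller's cached ETag still matches. How the 304 surfaces is SDK plumbing; the error code string below is an assumption to illustrate the shape of the check, not a documented constant.

func fetchIfChanged(svc *s3.S3, cachedETag string) error {
	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket:      aws.String("my-bucket"),
		Key:         aws.String("config.json"),
		IfNoneMatch: aws.String(cachedETag),
	})
	if err != nil {
		// HTTP 304 Not Modified is returned as an error by the v1 SDK.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotModified" { // assumed code
			return nil // cached copy is current
		}
		return err
	}
	defer out.Body.Close()
	// ... consume the refreshed object from out.Body ...
	return nil
}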
// // Amazon S3 doesn't support retrieving multiple ranges of data per GET request. Range *string `location:"header" locationName:"Range" type:"string"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Sets the Cache-Control header of the response. ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"` - // Sets the Content-Disposition header of the response + // Sets the Content-Disposition header of the response. ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"` // Sets the Content-Encoding header of the response. @@ -21997,27 +24193,92 @@ type GetObjectInput struct { // Sets the Expires header of the response. ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"rfc822"` - // Specifies the algorithm to use to when decrypting the object (for example, - // AES256). + // Specifies the algorithm to use when decrypting the object (for example, AES256). + // + // If you encrypt an object by using server-side encryption with customer-provided + // encryption keys (SSE-C) when you store the object in Amazon S3, then when + // you GET the object, you must use the following headers: + // + // * x-amz-server-side-encryption-customer-algorithm + // + // * x-amz-server-side-encryption-customer-key + // + // * x-amz-server-side-encryption-customer-key-MD5 + // + // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided + // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - // Specifies the customer-provided encryption key for Amazon S3 used to encrypt - // the data. This value is used to decrypt the object when recovering it and - // must match the one used when storing the data. The key must be appropriate - // for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm - // header. + // Specifies the customer-provided encryption key that you originally provided + // for Amazon S3 to encrypt the data before storing it. This value is used to + // decrypt the object when recovering it and must match the one used when storing + // the data. The key must be appropriate for use with the algorithm specified + // in the x-amz-server-side-encryption-customer-algorithm header. 
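A sketch combining the Range field with a response-header override, per the fields above; only a single range per request is honored, as noted. Assumes the standard library "io" in addition to the earlier imports.

func firstKiB(svc *s3.S3) ([]byte, error) {
	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket:                     aws.String("my-bucket"),
		Key:                        aws.String("logs/app.log"),
		Range:                      aws.String("bytes=0-1023"), // first 1 KiB only
		ResponseContentDisposition: aws.String(`attachment; filename="app.log"`),
	})
	if err != nil {
		return nil, err
	}
	defer out.Body.Close()
	return io.ReadAll(out.Body)
}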
+ // + // If you encrypt an object by using server-side encryption with customer-provided + // encryption keys (SSE-C) when you store the object in Amazon S3, then when + // you GET the object, you must use the following headers: + // + // * x-amz-server-side-encryption-customer-algorithm + // + // * x-amz-server-side-encryption-customer-key + // + // * x-amz-server-side-encryption-customer-key-MD5 + // + // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided + // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. // // SSECustomerKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by GetObjectInput's // String and GoString methods. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. + // Specifies the 128-bit MD5 digest of the customer-provided encryption key + // according to RFC 1321. Amazon S3 uses this header for a message integrity + // check to ensure that the encryption key was transmitted without error. + // + // If you encrypt an object by using server-side encryption with customer-provided + // encryption keys (SSE-C) when you store the object in Amazon S3, then when + // you GET the object, you must use the following headers: + // + // * x-amz-server-side-encryption-customer-algorithm + // + // * x-amz-server-side-encryption-customer-key + // + // * x-amz-server-side-encryption-customer-key-MD5 + // + // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided + // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // VersionId used to reference a specific version of the object. + // Version ID used to reference a specific version of the object. + // + // By default, the GetObject operation returns the current version of an object. + // To return a different version, use the versionId subresource. + // + // * If you include a versionId in your request header, you must have the + // s3:GetObjectVersion permission to access a specific version of an object. + // The s3:GetObject permission is not required in this scenario. + // + // * If you request the current version of an object without a specific versionId + // in the request header, only the s3:GetObject permission is required. The + // s3:GetObjectVersion permission is not required in this scenario. + // + // * Directory buckets - S3 Versioning isn't enabled and supported for directory + // buckets. For this API operation, only the null value of the version ID + // is supported by directory buckets. You can only specify null to the versionId + // query parameter in the request. + // + // For more information about versioning, see PutBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html). 
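SSE-C retrieval per the three-header contract above, as a sketch: the v1 SDK expects the raw key in SSECustomerKey (it handles the wire encoding), and the MD5 header is computed explicitly here for clarity; verify that behavior against the vendored SDK version before relying on it. Assumes "crypto/md5" and "encoding/base64" are imported.

func getSSEC(svc *s3.S3, rawKey []byte) (*s3.GetObjectOutput, error) {
	digest := md5.Sum(rawKey) // integrity check for the key itself, per the docs above
	return svc.GetObject(&s3.GetObjectInput{
		Bucket:               aws.String("my-bucket"),
		Key:                  aws.String("secret.bin"),
		SSECustomerAlgorithm: aws.String("AES256"),
		SSECustomerKey:       aws.String(string(rawKey)), // same 256-bit key used at upload
		SSECustomerKeyMD5:    aws.String(base64.StdEncoding.EncodeToString(digest[:])),
	})
}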
VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -22234,8 +24495,10 @@ type GetObjectLegalHoldInput struct { // The bucket name containing the object whose legal hold status you want to // retrieve. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) @@ -22244,9 +24507,9 @@ type GetObjectLegalHoldInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The key name for the object whose legal hold status you want to retrieve. @@ -22255,10 +24518,14 @@ type GetObjectLegalHoldInput struct { Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The version ID of the object whose legal hold status you want to retrieve. @@ -22373,7 +24640,7 @@ type GetObjectLegalHoldOutput struct { _ struct{} `type:"structure" payload:"LegalHold"` // The current legal hold status for the specified object. - LegalHold *ObjectLockLegalHold `type:"structure"` + LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure"` } // String returns the string representation. @@ -22405,8 +24672,10 @@ type GetObjectLockConfigurationInput struct { // The bucket whose Object Lock configuration you want to retrieve. 
// - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) @@ -22415,9 +24684,9 @@ type GetObjectLockConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -22535,55 +24804,49 @@ func (s *GetObjectLockConfigurationOutput) SetObjectLockConfiguration(v *ObjectL type GetObjectOutput struct { _ struct{} `type:"structure" payload:"Body"` - // Indicates that a range of bytes was specified. + // Indicates that a range of bytes was specified in the request. AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` // Object data. Body io.ReadCloser `type:"blob"` // Indicates whether the object uses an S3 Bucket Key for server-side encryption - // with Amazon Web Services KMS (SSE-KMS). + // with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Specifies caching behavior along the request/reply chain. CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. 
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` // Specifies presentational information for the object. ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - // Specifies what content encodings have been applied to the object and thus + // Indicates what content encodings have been applied to the object and thus // what decoding mechanisms must be applied to obtain the media-type referenced // by the Content-Type header field. ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` @@ -22600,23 +24863,40 @@ type GetObjectOutput struct { // A standard MIME type describing the format of the object data. ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Indicates whether the object retrieved was (true) or was not (false) a Delete // Marker. If false, this response header does not appear in the response. + // + // * If the current version of the object is a delete marker, Amazon S3 behaves + // as if the object was deleted and includes x-amz-delete-marker: true in + // the response. + // + // * If the specified version in the request is a delete marker, the response + // returns a 405 Method Not Allowed error and the Last-Modified: timestamp + // response header. 
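Reading the checksum headers documented above requires opting in with ChecksumMode; a sketch:

func printStoredChecksum(svc *s3.S3) error {
	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket:       aws.String("my-bucket"),
		Key:          aws.String("data.bin"),
		ChecksumMode: aws.String(s3.ChecksumModeEnabled), // ask S3 to echo stored checksums
	})
	if err != nil {
		return err
	}
	defer out.Body.Close()
	if v := aws.StringValue(out.ChecksumSHA256); v != "" {
		fmt.Println("sha256 (base64):", v)
	}
	return nil
}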
DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` // An entity tag (ETag) is an opaque identifier assigned by a web server to // a specific version of a resource found at a URL. ETag *string `location:"header" locationName:"ETag" type:"string"` - // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key-value pairs - // providing object expiration information. The value of the rule-id is URL-encoded. + // If the object expiration is configured (see PutBucketLifecycleConfiguration + // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)), + // the response includes this header. It includes the expiry-date and rule-id + // key-value pairs providing object expiration information. The value of the + // rule-id is URL-encoded. + // + // This functionality is not supported for directory buckets. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` // The date and time at which the object is no longer cacheable. Expires *string `location:"header" locationName:"Expires" type:"string"` - // Creation date of the object. + // Date and time when the object was last modified. + // + // General purpose buckets - When you specify a versionId of the object in your + // request, if the specified version in the request is a delete marker, the + // response returns a 405 Method Not Allowed error and the Last-Modified: timestamp + // response header. LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` // A map of metadata to store with the object in S3. @@ -22626,20 +24906,29 @@ type GetObjectOutput struct { // Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase. Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - // This is set to the number of metadata entries not returned in x-amz-meta - // headers. This can happen if you create metadata using an API like SOAP that - // supports more flexible metadata than the REST API. For example, using SOAP, - // you can create metadata whose values are not legal HTTP headers. + // This is set to the number of metadata entries not returned in the headers + // that are prefixed with x-amz-meta-. This can happen if you create metadata + // using an API like SOAP that supports more flexible metadata than the REST + // API. For example, using SOAP, you can create metadata whose values are not + // legal HTTP headers. + // + // This functionality is not supported for directory buckets. MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` // Indicates whether this object has an active legal hold. This field is only // returned if you have permission to view an object's legal hold status. + // + // This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - // The Object Lock mode currently in place for this object. + // The Object Lock mode that's currently in place for this object. + // + // This functionality is not supported for directory buckets. ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` // The date and time when this object's Object Lock will expire. 
+ // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // The count of parts this object has. This value is only returned if you specify @@ -22648,52 +24937,80 @@ type GetObjectOutput struct { // Amazon S3 can return this if your request involves a bucket that is either // a source or destination in a replication rule. + // + // This functionality is not supported for directory buckets. ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // Provides information about object restoration action and expiration time // of the restored object copy. + // + // This functionality is not supported for directory buckets. Only the S3 Express + // One Zone storage class is supported by directory buckets to store objects. Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity + // the response will include this header to provide the round-trip message integrity // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for - // the object. + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by GetObjectOutput's // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms). + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256, aws:kms, aws:kms:dsse). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Provides storage class information of the object. 
Amazon S3 returns this // header for all objects except for S3 Standard storage class objects. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported + // by directory buckets to store objects. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - // The number of tags, if any, on the object. + // The number of tags, if any, on the object, when you have the relevant permission + // to read object tags. + // + // You can use GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) + // to retrieve the tag set associated with an object. + // + // This functionality is not supported for directory buckets. TagCount *int64 `location:"header" locationName:"x-amz-tagging-count" type:"integer"` - // Version of the object. + // Version ID of the object. + // + // This functionality is not supported for directory buckets. VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` // If the bucket is configured as a website, redirects requests for this object // to another object in the same bucket or to an external URL. Amazon S3 stores // the value of this header in the object metadata. + // + // This functionality is not supported for directory buckets. WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } @@ -22937,8 +25254,10 @@ type GetObjectRetentionInput struct { // The bucket name containing the object whose retention settings you want to // retrieve. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) @@ -22947,9 +25266,9 @@ type GetObjectRetentionInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The key name for the object whose retention settings you want to retrieve. @@ -22958,10 +25277,14 @@ type GetObjectRetentionInput struct { Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. 
For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The version ID for the object whose retention settings you want to retrieve. @@ -23076,7 +25399,7 @@ type GetObjectRetentionOutput struct { _ struct{} `type:"structure" payload:"Retention"` // The container element for an object's retention settings. - Retention *ObjectLockRetention `type:"structure"` + Retention *ObjectLockRetention `locationName:"Retention" type:"structure"` } // String returns the string representation. @@ -23108,27 +25431,30 @@ type GetObjectTaggingInput struct { // The bucket name containing the object for which to get the tagging information. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. 
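A sketch of GetObjectTagging as documented above; add VersionId to read the tag set of a specific version (placeholders throughout):

func printObjectTags(svc *s3.S3) error {
	out, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("reports/q3.csv"),
	})
	if err != nil {
		return err
	}
	for _, tag := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
	}
	return nil
}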
// // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Object key for which to get the tagging information. @@ -23137,10 +25463,14 @@ type GetObjectTaggingInput struct { Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The versionId of the object for which to get the tagging information. @@ -23302,9 +25632,9 @@ type GetObjectTorrentInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The object key for which to get the information. @@ -23313,10 +25643,14 @@ type GetObjectTorrentInput struct { Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. 
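GetObjectTorrent, documented above, streams the .torrent file for an object; RequestPayer applies exactly as for GetObject. A sketch assuming "io" is imported:

func saveTorrent(svc *s3.S3, dst io.Writer) error {
	out, err := svc.GetObjectTorrent(&s3.GetObjectTorrentInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("big.iso"),
	})
	if err != nil {
		return err
	}
	defer out.Body.Close()
	_, err = io.Copy(dst, out.Body)
	return err
}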
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } @@ -23426,6 +25760,8 @@ type GetObjectTorrentOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } @@ -23468,9 +25804,9 @@ type GetPublicAccessBlockInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -23800,27 +26136,48 @@ type HeadBucketInput struct { // The bucket name. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Object Lambda access points - When you use this API operation with an Object + // Lambda access point, provide the alias of the Object Lambda access point + // in place of the bucket name. 
If the Object Lambda access point alias in a + // request is not valid, the error code InvalidAccessPointAliasError is returned. + // For more information about InvalidAccessPointAliasError, see List of Error + // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -23906,6 +26263,30 @@ func (s HeadBucketInput) updateArnableField(v string) (interface{}, error) { type HeadBucketOutput struct { _ struct{} `type:"structure"` + + // Indicates whether the bucket name used in the request is an access point + // alias. + // + // This functionality is not supported for directory buckets. + AccessPointAlias *bool `location:"header" locationName:"x-amz-access-point-alias" type:"boolean"` + + // The name of the location where the bucket will be created. + // + // For directory buckets, the AZ ID of the Availability Zone where the bucket + // is created. An example AZ ID value is usw2-az1. + // + // This functionality is only supported by directory buckets. + BucketLocationName *string `location:"header" locationName:"x-amz-bucket-location-name" type:"string"` + + // The type of location where the bucket is created. + // + // This functionality is only supported by directory buckets. + BucketLocationType *string `location:"header" locationName:"x-amz-bucket-location-type" type:"string" enum:"LocationType"` + + // The Region that the bucket is located. + // + // This functionality is not supported for directory buckets. + BucketRegion *string `location:"header" locationName:"x-amz-bucket-region" type:"string"` } // String returns the string representation. @@ -23926,24 +26307,62 @@ func (s HeadBucketOutput) GoString() string { return s.String() } +// SetAccessPointAlias sets the AccessPointAlias field's value. +func (s *HeadBucketOutput) SetAccessPointAlias(v bool) *HeadBucketOutput { + s.AccessPointAlias = &v + return s +} + +// SetBucketLocationName sets the BucketLocationName field's value. +func (s *HeadBucketOutput) SetBucketLocationName(v string) *HeadBucketOutput { + s.BucketLocationName = &v + return s +} + +// SetBucketLocationType sets the BucketLocationType field's value. 
+func (s *HeadBucketOutput) SetBucketLocationType(v string) *HeadBucketOutput { + s.BucketLocationType = &v + return s +} + +// SetBucketRegion sets the BucketRegion field's value. +func (s *HeadBucketOutput) SetBucketRegion(v string) *HeadBucketOutput { + s.BucketRegion = &v + return s +} + type HeadObjectInput struct { _ struct{} `locationName:"HeadObjectRequest" type:"structure"` - // The name of the bucket containing the object. + // The name of the bucket that contains the object. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -23956,25 +26375,69 @@ type HeadObjectInput struct { // must have permission to use the kms:Decrypt action for the request to succeed. 
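// ---------------------------------------------------------------------------
// Editor's aside: an illustrative sketch, not part of the vendored upstream
// diff. It reads the HeadBucketOutput response headers this diff adds
// (BucketRegion, AccessPointAlias, and the directory-bucket location fields).
// The bucket name is a placeholder.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-west-2"),
	})))

	out, err := svc.HeadBucket(&s3.HeadBucketInput{
		Bucket: aws.String("example-bucket"),
	})
	if err != nil {
		log.Fatal(err) // e.g. 403 on owner mismatch, 404 if the bucket doesn't exist
	}
	fmt.Printf("region=%s accessPointAlias=%t locationName=%s locationType=%s\n",
		aws.StringValue(out.BucketRegion),
		aws.BoolValue(out.AccessPointAlias),
		aws.StringValue(out.BucketLocationName), // AZ ID; directory buckets only
		aws.StringValue(out.BucketLocationType))
}
// ---------------------------------------------------------------------------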
ChecksumMode *string `location:"header" locationName:"x-amz-checksum-mode" type:"string" enum:"ChecksumMode"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Return the object only if its entity tag (ETag) is the same as the one specified; // otherwise, return a 412 (precondition failed) error. + // + // If both of the If-Match and If-Unmodified-Since headers are present in the + // request as follows: + // + // * If-Match condition evaluates to true, and; + // + // * If-Unmodified-Since condition evaluates to false; + // + // Then Amazon S3 returns 200 OK and the data requested. + // + // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). IfMatch *string `location:"header" locationName:"If-Match" type:"string"` // Return the object only if it has been modified since the specified time; // otherwise, return a 304 (not modified) error. + // + // If both of the If-None-Match and If-Modified-Since headers are present in + // the request as follows: + // + // * If-None-Match condition evaluates to false, and; + // + // * If-Modified-Since condition evaluates to true; + // + // Then Amazon S3 returns the 304 Not Modified response code. + // + // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` // Return the object only if its entity tag (ETag) is different from the one // specified; otherwise, return a 304 (not modified) error. + // + // If both of the If-None-Match and If-Modified-Since headers are present in + // the request as follows: + // + // * If-None-Match condition evaluates to false, and; + // + // * If-Modified-Since condition evaluates to true; + // + // Then Amazon S3 returns the 304 Not Modified response code. + // + // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` // Return the object only if it has not been modified since the specified time; // otherwise, return a 412 (precondition failed) error. + // + // If both of the If-Match and If-Unmodified-Since headers are present in the + // request as follows: + // + // * If-Match condition evaluates to true, and; + // + // * If-Unmodified-Since condition evaluates to false; + // + // Then Amazon S3 returns 200 OK and the data requested. + // + // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` // The object key. @@ -23988,19 +26451,43 @@ type HeadObjectInput struct { // object. PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` - // Because HeadObject returns only the metadata for an object, this parameter - // has no effect. + // HeadObject returns only the metadata for an object. If the Range is satisfiable, + // only the ContentLength is affected in the response. 
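// ---------------------------------------------------------------------------
// Editor's aside: an illustrative sketch, not part of the vendored upstream
// diff. It applies the conditional-request semantics documented above:
// HeadObject with If-None-Match, treating 304 Not Modified as "cached copy
// still valid". Bucket, key, and the ETag value are placeholders.
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"),
	})))

	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket:      aws.String("example-bucket"),
		Key:         aws.String("report.csv"),
		IfNoneMatch: aws.String(`"etag-from-a-previous-response"`),
	})
	if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == http.StatusNotModified {
		fmt.Println("object unchanged (304); reuse the cached copy")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("changed: etag=%s lastModified=%s\n",
		aws.StringValue(out.ETag), aws.TimeValue(out.LastModified))
}
// ---------------------------------------------------------------------------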
If the Range is not satisfiable, + // S3 returns a 416 - Requested Range Not Satisfiable error. Range *string `location:"header" locationName:"Range" type:"string"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Sets the Cache-Control header of the response. + ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"` + + // Sets the Content-Disposition header of the response. + ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"` + + // Sets the Content-Encoding header of the response. + ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"` + + // Sets the Content-Language header of the response. + ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"` + + // Sets the Content-Type header of the response. + ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"` + + // Sets the Expires header of the response. + ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"rfc822"` + + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting @@ -24009,6 +26496,8 @@ type HeadObjectInput struct { // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. // + // This functionality is not supported for directory buckets. + // // SSECustomerKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by HeadObjectInput's // String and GoString methods. @@ -24017,9 +26506,14 @@ type HeadObjectInput struct { // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // VersionId used to reference a specific version of the object. + // Version ID used to reference a specific version of the object. 
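// ---------------------------------------------------------------------------
// Editor's aside: an illustrative sketch, not part of the vendored upstream
// diff. It exercises the Range behavior newly documented above: for a
// satisfiable range only ContentLength reflects the range; an unsatisfiable
// range surfaces as a 416 error. Bucket and key are placeholders.
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"),
	})))

	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("report.csv"),
		Range:  aws.String("bytes=0-1023"),
	})
	if reqErr, ok := err.(awserr.RequestFailure); ok &&
		reqErr.StatusCode() == http.StatusRequestedRangeNotSatisfiable {
		log.Fatal("range not satisfiable (416)")
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("content length for the requested range:", aws.Int64Value(out.ContentLength))
}
// ---------------------------------------------------------------------------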
+ // + // For directory buckets in this API operation, only the null value of the version + // ID is supported. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -24136,6 +26630,42 @@ func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput { return s } +// SetResponseCacheControl sets the ResponseCacheControl field's value. +func (s *HeadObjectInput) SetResponseCacheControl(v string) *HeadObjectInput { + s.ResponseCacheControl = &v + return s +} + +// SetResponseContentDisposition sets the ResponseContentDisposition field's value. +func (s *HeadObjectInput) SetResponseContentDisposition(v string) *HeadObjectInput { + s.ResponseContentDisposition = &v + return s +} + +// SetResponseContentEncoding sets the ResponseContentEncoding field's value. +func (s *HeadObjectInput) SetResponseContentEncoding(v string) *HeadObjectInput { + s.ResponseContentEncoding = &v + return s +} + +// SetResponseContentLanguage sets the ResponseContentLanguage field's value. +func (s *HeadObjectInput) SetResponseContentLanguage(v string) *HeadObjectInput { + s.ResponseContentLanguage = &v + return s +} + +// SetResponseContentType sets the ResponseContentType field's value. +func (s *HeadObjectInput) SetResponseContentType(v string) *HeadObjectInput { + s.ResponseContentType = &v + return s +} + +// SetResponseExpires sets the ResponseExpires field's value. +func (s *HeadObjectInput) SetResponseExpires(v time.Time) *HeadObjectInput { + s.ResponseExpires = &v + return s +} + // SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput { s.SSECustomerAlgorithm = &v @@ -24201,51 +26731,63 @@ type HeadObjectOutput struct { AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` // The archive state of the head object. + // + // This functionality is not supported for directory buckets. ArchiveStatus *string `location:"header" locationName:"x-amz-archive-status" type:"string" enum:"ArchiveStatus"` // Indicates whether the object uses an S3 Bucket Key for server-side encryption - // with Amazon Web Services KMS (SSE-KMS). + // with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Specifies caching behavior along the request/reply chain. CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. 
For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. 
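// ---------------------------------------------------------------------------
// Editor's aside: an illustrative sketch, not part of the vendored upstream
// diff. It pairs a checksum-enabled upload with HeadObject ChecksumMode so
// the x-amz-checksum-* headers described above come back. As the docs note,
// for multipart uploads the value is derived from the per-part checksums
// rather than the full object. Bucket, key, and body are placeholders.
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"),
	})))

	if _, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:            aws.String("example-bucket"),
		Key:               aws.String("notes.txt"),
		Body:              strings.NewReader("hello, checksums"),
		ChecksumAlgorithm: aws.String(s3.ChecksumAlgorithmSha256),
	}); err != nil {
		log.Fatal(err)
	}

	head, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket:       aws.String("example-bucket"),
		Key:          aws.String("notes.txt"),
		ChecksumMode: aws.String(s3.ChecksumModeEnabled),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("sha256 checksum:", aws.StringValue(head.ChecksumSHA256))
}
// ---------------------------------------------------------------------------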
ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` // Specifies presentational information for the object. ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - // Specifies what content encodings have been applied to the object and thus + // Indicates what content encodings have been applied to the object and thus // what decoding mechanisms must be applied to obtain the media-type referenced // by the Content-Type header field. ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` @@ -24261,21 +26803,27 @@ type HeadObjectOutput struct { // Specifies whether the object retrieved was (true) or was not (false) a Delete // Marker. If false, this response header does not appear in the response. + // + // This functionality is not supported for directory buckets. DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` // An entity tag (ETag) is an opaque identifier assigned by a web server to // a specific version of a resource found at a URL. ETag *string `location:"header" locationName:"ETag" type:"string"` - // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key-value pairs - // providing object expiration information. The value of the rule-id is URL-encoded. + // If the object expiration is configured (see PutBucketLifecycleConfiguration + // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)), + // the response includes this header. It includes the expiry-date and rule-id + // key-value pairs providing object expiration information. The value of the + // rule-id is URL-encoded. + // + // This functionality is not supported for directory buckets. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` // The date and time at which the object is no longer cacheable. Expires *string `location:"header" locationName:"Expires" type:"string"` - // Creation date of the object. + // Date and time when the object was last modified. LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` // A map of metadata to store with the object in S3. @@ -24289,6 +26837,8 @@ type HeadObjectOutput struct { // headers. This can happen if you create metadata using an API like SOAP that // supports more flexible metadata than the REST API. For example, using SOAP, // you can create metadata whose values are not legal HTTP headers. + // + // This functionality is not supported for directory buckets. MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` // Specifies whether a legal hold is in effect for this object. This header @@ -24296,15 +26846,21 @@ type HeadObjectOutput struct { // This header is not returned if the specified version of this object has never // had a legal hold applied. For more information about S3 Object Lock, see // Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + // + // This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` // The Object Lock mode, if any, that's in effect for this object. This header // is only returned if the requester has the s3:GetObjectRetention permission. 
// For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + // + // This functionality is not supported for directory buckets. ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` // The date and time when the Object Lock retention period expires. This header // is only returned if the requester has the s3:GetObjectRetention permission. + // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // The count of parts this object has. This value is only returned if you specify @@ -24343,10 +26899,14 @@ type HeadObjectOutput struct { // header will return FAILED. // // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). + // + // This functionality is not supported for directory buckets. ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // If the object is an archived object (an object whose storage class is GLACIER), @@ -24364,45 +26924,61 @@ type HeadObjectOutput struct { // // For more information about archiving objects, see Transitioning Objects: // General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations). + // + // This functionality is not supported for directory buckets. Only the S3 Express + // One Zone storage class is supported by directory buckets to store objects. Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity + // the response will include this header to provide the round-trip message integrity // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for - // the object. + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. 
// // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by HeadObjectOutput's // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // If the object is stored using server-side encryption either with an Amazon - // Web Services KMS key or an Amazon S3-managed encryption key, the response - // includes this header with the value of the server-side encryption algorithm - // used when storing this object in Amazon S3 (for example, AES256, aws:kms). + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256, aws:kms, aws:kms:dsse). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Provides storage class information of the object. Amazon S3 returns this // header for all objects except for S3 Standard storage class objects. // // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). + // + // Directory buckets - Only the S3 Express One Zone storage class is supported + // by directory buckets to store objects. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - // Version of the object. + // Version ID of the object. + // + // This functionality is not supported for directory buckets. VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` // If the bucket is configured as a website, redirects requests for this object // to another object in the same bucket or to an external URL. Amazon S3 stores // the value of this header in the object metadata. + // + // This functionality is not supported for directory buckets. WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } @@ -24633,9 +27209,9 @@ type IndexDocument struct { _ struct{} `type:"structure"` // A suffix that is appended to a request that is for a directory on the website - // endpoint (for example,if the suffix is index.html and you make a request - // to samplebucket/images/ the data that is returned will be for the object - // with the key name images/index.html) The suffix must not be empty and must + // endpoint. (For example, if the suffix is index.html and you make a request + // to samplebucket/images/, the data that is returned will be for the object + // with the key name images/index.html.) The suffix must not be empty and must // not include a slash character. // // Replacement must be made for object keys containing special characters (such @@ -24688,10 +27264,16 @@ type Initiator struct { _ struct{} `type:"structure"` // Name of the Principal. + // + // This functionality is not supported for directory buckets. DisplayName *string `type:"string"` // If the principal is an Amazon Web Services account, it provides the Canonical // User ID. If the principal is an IAM User, it provides a user ARN value. + // + // Directory buckets - If the principal is an Amazon Web Services account, it + // provides the Amazon Web Services account ID. If the principal is an IAM User, + // it provides a user ARN value. 
ID *string `type:"string"` } @@ -25596,7 +28178,8 @@ type LambdaFunctionConfiguration struct { Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true" enum:"Event"` // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // filtering, see Configuring event notifications using object key name filtering + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) // in the Amazon S3 User Guide. Filter *NotificationConfigurationFilter `type:"structure"` @@ -25670,6 +28253,9 @@ func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunc } // Container for lifecycle rules. You can add as many as 1000 rules. +// +// For more information, see Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) +// in the Amazon S3 User Guide. type LifecycleConfiguration struct { _ struct{} `type:"structure"` @@ -25727,11 +28313,14 @@ func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration { } // Container for the expiration for the lifecycle of the object. +// +// For more information, see Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) +// in the Amazon S3 User Guide. type LifecycleExpiration struct { _ struct{} `type:"structure"` - // Indicates at what date the object is to be moved or deleted. Should be in - // GMT ISO 8601 Format. + // Indicates at what date the object is to be moved or deleted. The date value + // must conform to the ISO 8601 format. The time is always midnight UTC. Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` // Indicates the lifetime, in days, of the objects that are subject to the rule. @@ -25782,13 +28371,16 @@ func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExp } // A lifecycle rule for individual objects in an Amazon S3 bucket. +// +// For more information, see Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) +// in the Amazon S3 User Guide. type LifecycleRule struct { _ struct{} `type:"structure"` // Specifies the days since the initiation of an incomplete multipart upload // that Amazon S3 will wait before permanently removing all parts of the upload. // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) // in the Amazon S3 User Guide. AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` @@ -26011,7 +28603,9 @@ func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator { } // The Filter is used to identify objects that a Lifecycle Rule applies to. -// A Filter must have exactly one of Prefix, Tag, or And specified. +// A Filter can have exactly one of Prefix, Tag, ObjectSizeGreaterThan, ObjectSizeLessThan, +// or And specified. If the Filter element is left empty, the Lifecycle Rule +// applies to all objects in the bucket.
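// ---------------------------------------------------------------------------
// Editor's aside: an illustrative sketch, not part of the vendored upstream
// diff. It builds a lifecycle configuration exercising the rules documented
// above: a Filter with exactly one selector, an Expiration, and an
// AbortIncompleteMultipartUpload window. Bucket name and rule values are
// placeholders.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"),
	})))

	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"),
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:     aws.String("expire-old-logs"),
				Status: aws.String(s3.ExpirationStatusEnabled),
				// Exactly one of Prefix, Tag, ObjectSizeGreaterThan,
				// ObjectSizeLessThan, or And; an empty Filter applies the
				// rule to every object in the bucket.
				Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
				AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
					DaysAfterInitiation: aws.Int64(7),
				},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
// ---------------------------------------------------------------------------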
type LifecycleRuleFilter struct { _ struct{} `type:"structure"` @@ -26117,9 +28711,9 @@ type ListBucketAnalyticsConfigurationsInput struct { // should begin. ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -26443,9 +29037,9 @@ type ListBucketInventoryConfigurationsInput struct { // that Amazon S3 understands. ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -26612,9 +29206,9 @@ type ListBucketMetricsConfigurationsInput struct { // value that Amazon S3 understands. ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -26830,24 +29424,123 @@ func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { return s } +type ListDirectoryBucketsInput struct { + _ struct{} `locationName:"ListDirectoryBucketsRequest" type:"structure"` + + // ContinuationToken indicates to Amazon S3 that the list is being continued + // on this bucket with a token. ContinuationToken is obfuscated and is not a + // real key. You can use this ContinuationToken for pagination of the list results. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // Maximum number of buckets to be returned in response. When the number is + // more than the count of buckets that are owned by an Amazon Web Services account, + // return all the buckets in response. + MaxDirectoryBuckets *int64 `location:"querystring" locationName:"max-directory-buckets" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDirectoryBucketsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDirectoryBucketsInput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListDirectoryBucketsInput) SetContinuationToken(v string) *ListDirectoryBucketsInput { + s.ContinuationToken = &v + return s +} + +// SetMaxDirectoryBuckets sets the MaxDirectoryBuckets field's value. +func (s *ListDirectoryBucketsInput) SetMaxDirectoryBuckets(v int64) *ListDirectoryBucketsInput { + s.MaxDirectoryBuckets = &v + return s +} + +type ListDirectoryBucketsOutput struct { + _ struct{} `type:"structure"` + + // The list of buckets owned by the requester. + Buckets []*Bucket `locationNameList:"Bucket" type:"list"` + + // If ContinuationToken was sent with the request, it is included in the response. + // You can use the returned ContinuationToken for pagination of the list response. + ContinuationToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDirectoryBucketsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDirectoryBucketsOutput) GoString() string { + return s.String() +} + +// SetBuckets sets the Buckets field's value. +func (s *ListDirectoryBucketsOutput) SetBuckets(v []*Bucket) *ListDirectoryBucketsOutput { + s.Buckets = v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListDirectoryBucketsOutput) SetContinuationToken(v string) *ListDirectoryBucketsOutput { + s.ContinuationToken = &v + return s +} + type ListMultipartUploadsInput struct { _ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"` // The name of the bucket to which the multipart upload was initiated. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. 
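// ---------------------------------------------------------------------------
// Editor's aside: an illustrative sketch, not part of the vendored upstream
// diff. It pages through the new ListDirectoryBuckets operation using the
// ContinuationToken and MaxDirectoryBuckets parameters defined above. The
// region is a placeholder; assumes an S3 Express-capable SDK version, which
// the version this PR vendors is.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-west-2"),
	})))

	in := &s3.ListDirectoryBucketsInput{MaxDirectoryBuckets: aws.Int64(100)}
	for {
		out, err := svc.ListDirectoryBuckets(in)
		if err != nil {
			log.Fatal(err)
		}
		for _, b := range out.Buckets {
			fmt.Println(aws.StringValue(b.Name))
		}
		if out.ContinuationToken == nil {
			break // no more pages
		}
		in.ContinuationToken = out.ContinuationToken
	}
}
// ---------------------------------------------------------------------------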
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -26861,30 +29554,40 @@ type ListMultipartUploadsInput struct { // parameter, then the substring starts at the beginning of the key. The keys // that are grouped under CommonPrefixes result element are not returned elsewhere // in the response. + // + // Directory buckets - For directory buckets, / is the only supported delimiter. Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters + // the encoding method to use. An object key can contain any Unicode character; + // however, the XML 1.0 parser cannot parse some characters, such as characters // with an ASCII value from 0 to 10. For characters that are not supported in // XML 1.0, you can add this parameter to request that Amazon S3 encode the // keys in the response. EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // Together with upload-id-marker, this parameter specifies the multipart upload - // after which listing should begin. + // Specifies the multipart upload after which listing should begin. 
// - // If upload-id-marker is not specified, only the keys lexicographically greater - // than the specified key-marker will be included in the list. + // * General purpose buckets - For general purpose buckets, key-marker is + // an object key. Together with upload-id-marker, this parameter specifies + // the multipart upload after which listing should begin. If upload-id-marker + // is not specified, only the keys lexicographically greater than the specified + // key-marker will be included in the list. If upload-id-marker is specified, + // any multipart uploads for a key equal to the key-marker might also be + // included, provided those multipart uploads have upload IDs lexicographically + // greater than the specified upload-id-marker. // - // If upload-id-marker is specified, any multipart uploads for a key equal to - // the key-marker might also be included, provided those multipart uploads have - // upload IDs lexicographically greater than the specified upload-id-marker. + // * Directory buckets - For directory buckets, key-marker is obfuscated + // and isn't a real object key. The upload-id-marker parameter isn't supported + // by directory buckets. To list the additional multipart uploads, you only + // need to set the value of key-marker to the NextKeyMarker value from the + // previous response. In the ListMultipartUploads response, the multipart + // uploads aren't sorted lexicographically based on the object keys. KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` // Sets the maximum number of multipart uploads, from 1 to 1,000, to return @@ -26894,15 +29597,31 @@ type ListMultipartUploadsInput struct { // Lists in-progress uploads only for those keys that begin with the specified // prefix. You can use prefixes to separate a bucket into different grouping - // of keys. (You can think of using prefix to make groups in the same way you'd - // use a folder in a file system.) + // of keys. (You can think of using prefix to make groups in the same way that + // you'd use a folder in a file system.) + // + // Directory buckets - For directory buckets, only prefixes that end in a delimiter + // (/) are supported. Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // Together with key-marker, specifies the multipart upload after which listing // should begin. If key-marker is not specified, the upload-id-marker parameter // is ignored. Otherwise, any multipart uploads for a key equal to the key-marker // might be included in the list only if they have an upload ID lexicographically // greater than the specified upload-id-marker. + // + // This functionality is not supported for directory buckets. 
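// ---------------------------------------------------------------------------
// Editor's aside: an illustrative sketch, not part of the vendored upstream
// diff. It follows the key-marker pagination described in the bullets above:
// general purpose buckets page with NextKeyMarker plus NextUploadIdMarker,
// while directory buckets would set only KeyMarker from NextKeyMarker.
// The bucket name is a placeholder.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"),
	})))

	in := &s3.ListMultipartUploadsInput{
		Bucket:     aws.String("example-bucket"),
		MaxUploads: aws.Int64(100),
	}
	for {
		out, err := svc.ListMultipartUploads(in)
		if err != nil {
			log.Fatal(err)
		}
		for _, u := range out.Uploads {
			fmt.Printf("%s %s\n", aws.StringValue(u.Key), aws.StringValue(u.UploadId))
		}
		if !aws.BoolValue(out.IsTruncated) {
			break
		}
		in.KeyMarker = out.NextKeyMarker
		in.UploadIdMarker = out.NextUploadIdMarker // omit for directory buckets
	}
}
// ---------------------------------------------------------------------------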
UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` } @@ -26989,6 +29708,12 @@ func (s *ListMultipartUploadsInput) SetPrefix(v string) *ListMultipartUploadsInp return s } +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListMultipartUploadsInput) SetRequestPayer(v string) *ListMultipartUploadsInput { + s.RequestPayer = &v + return s +} + // SetUploadIdMarker sets the UploadIdMarker field's value. func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUploadsInput { s.UploadIdMarker = &v @@ -27032,17 +29757,22 @@ type ListMultipartUploadsOutput struct { // If you specify a delimiter in the request, then the result returns each distinct // key prefix containing the delimiter in a CommonPrefixes element. The distinct // key prefixes are returned in the Prefix child element. + // + // Directory buckets - For directory buckets, only prefixes that end in a delimiter + // (/) are supported. CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` // Contains the delimiter you specified in the request. If you don't specify // a delimiter in your request, this element is absent from the response. + // + // Directory buckets - For directory buckets, / is the only supported delimiter. Delimiter *string `type:"string"` // Encoding type used by Amazon S3 to encode object keys in the response. // - // If you specify encoding-type request parameter, Amazon S3 includes this element - // in the response, and returns encoded key name values in the following response - // elements: + // If you specify the encoding-type request parameter, Amazon S3 includes this + // element in the response, and returns encoded key name values in the following + // response elements: // // Delimiter, KeyMarker, Prefix, NextKeyMarker, Key. EncodingType *string `type:"string" enum:"EncodingType"` @@ -27066,13 +29796,30 @@ type ListMultipartUploadsOutput struct { // When a list is truncated, this element specifies the value that should be // used for the upload-id-marker request parameter in a subsequent request. + // + // This functionality is not supported for directory buckets. NextUploadIdMarker *string `type:"string"` // When a prefix is provided in the request, this field contains the specified // prefix. The result contains only keys starting with the specified prefix. + // + // Directory buckets - For directory buckets, only prefixes that end in a delimiter + // (/) are supported. Prefix *string `type:"string"` - // Upload ID after which listing began. + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter + // is ignored. Otherwise, any multipart uploads for a key equal to the key-marker + // might be included in the list only if they have an upload ID lexicographically + // greater than the specified upload-id-marker. + // + // This functionality is not supported for directory buckets. UploadIdMarker *string `type:"string"` // Container for elements related to a particular multipart upload. 
A response @@ -27165,6 +29912,12 @@ func (s *ListMultipartUploadsOutput) SetPrefix(v string) *ListMultipartUploadsOu return s } +// SetRequestCharged sets the RequestCharged field's value. +func (s *ListMultipartUploadsOutput) SetRequestCharged(v string) *ListMultipartUploadsOutput { + s.RequestCharged = &v + return s +} + // SetUploadIdMarker sets the UploadIdMarker field's value. func (s *ListMultipartUploadsOutput) SetUploadIdMarker(v string) *ListMultipartUploadsOutput { s.UploadIdMarker = &v @@ -27193,22 +29946,22 @@ type ListObjectVersionsInput struct { Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters + // the encoding method to use. An object key can contain any Unicode character; + // however, the XML 1.0 parser cannot parse some characters, such as characters // with an ASCII value from 0 to 10. For characters that are not supported in // XML 1.0, you can add this parameter to request that Amazon S3 encode the // keys in the response. EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Specifies the key to start with when listing objects in a bucket. KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` - // Sets the maximum number of keys returned in the response. By default the + // Sets the maximum number of keys returned in the response. By default, the // action returns up to 1,000 key names. The response might contain fewer keys // but will never contain more. If additional keys satisfy the search criteria, // but were not returned because max-keys was exceeded, the response contains @@ -27216,13 +29969,28 @@ type ListObjectVersionsInput struct { // and version-id-marker. MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + // Specifies the optional fields that you want returned in the response. Fields + // that you do not specify are not returned. + OptionalObjectAttributes []*string `location:"header" locationName:"x-amz-optional-object-attributes" type:"list" enum:"OptionalObjectAttributes"` + // Use this parameter to select only those keys that begin with the specified // prefix. You can use prefixes to separate a bucket into different groupings - // of keys. (You can think of using prefix to make groups in the same way you'd - // use a folder in a file system.) You can use prefix with delimiter to roll - // up numerous objects into a single result under CommonPrefixes. + // of keys. (You can think of using prefix to make groups in the same way that + // you'd use a folder in a file system.) You can use prefix with delimiter to + // roll up numerous objects into a single result under CommonPrefixes. 
Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // Specifies the object version you want to start listing from. VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` } @@ -27304,12 +30072,24 @@ func (s *ListObjectVersionsInput) SetMaxKeys(v int64) *ListObjectVersionsInput { return s } +// SetOptionalObjectAttributes sets the OptionalObjectAttributes field's value. +func (s *ListObjectVersionsInput) SetOptionalObjectAttributes(v []*string) *ListObjectVersionsInput { + s.OptionalObjectAttributes = v + return s +} + // SetPrefix sets the Prefix field's value. func (s *ListObjectVersionsInput) SetPrefix(v string) *ListObjectVersionsInput { s.Prefix = &v return s } +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListObjectVersionsInput) SetRequestPayer(v string) *ListObjectVersionsInput { + s.RequestPayer = &v + return s +} + // SetVersionIdMarker sets the VersionIdMarker field's value. func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersionsInput { s.VersionIdMarker = &v @@ -27363,16 +30143,16 @@ type ListObjectVersionsOutput struct { // Encoding type used by Amazon S3 to encode object key names in the XML response. // - // If you specify encoding-type request parameter, Amazon S3 includes this element - // in the response, and returns encoded key name values in the following response - // elements: + // If you specify the encoding-type request parameter, Amazon S3 includes this + // element in the response, and returns encoded key name values in the following + // response elements: // // KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter. EncodingType *string `type:"string" enum:"EncodingType"` // A flag that indicates whether Amazon S3 returned all of the results that // satisfied the search criteria. If your results were truncated, you can make - // a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker + // a follow-up paginated request by using the NextKeyMarker and NextVersionIdMarker // response parameters as a starting place in another request to return the // rest of the results. IsTruncated *bool `type:"boolean"` @@ -27400,6 +30180,12 @@ type ListObjectVersionsOutput struct { // Selects objects that start with the value supplied by this parameter. Prefix *string `type:"string"` + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + // Marks the last version of the key returned in a truncated response. 
VersionIdMarker *string `type:"string"` @@ -27491,6 +30277,12 @@ func (s *ListObjectVersionsOutput) SetPrefix(v string) *ListObjectVersionsOutput return s } +// SetRequestCharged sets the RequestCharged field's value. +func (s *ListObjectVersionsOutput) SetRequestCharged(v string) *ListObjectVersionsOutput { + s.RequestCharged = &v + return s +} + // SetVersionIdMarker sets the VersionIdMarker field's value. func (s *ListObjectVersionsOutput) SetVersionIdMarker(v string) *ListObjectVersionsOutput { s.VersionIdMarker = &v @@ -27508,49 +30300,67 @@ type ListObjectsInput struct { // The name of the bucket containing the objects. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. 
// // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // A delimiter is a character you use to group keys. + // A delimiter is a character that you use to group keys. Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters + // the encoding method to use. An object key can contain any Unicode character; + // however, the XML 1.0 parser cannot parse some characters, such as characters // with an ASCII value from 0 to 10. For characters that are not supported in // XML 1.0, you can add this parameter to request that Amazon S3 encode the // keys in the response. EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Marker is where you want Amazon S3 to start listing from. Amazon S3 starts // listing after this specified key. Marker can be any key in the bucket. Marker *string `location:"querystring" locationName:"marker" type:"string"` - // Sets the maximum number of keys returned in the response. By default the + // Sets the maximum number of keys returned in the response. By default, the // action returns up to 1,000 key names. The response might contain fewer keys // but will never contain more. MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + // Specifies the optional fields that you want returned in the response. Fields + // that you do not specify are not returned. + OptionalObjectAttributes []*string `location:"header" locationName:"x-amz-optional-object-attributes" type:"list" enum:"OptionalObjectAttributes"` + // Limits the response to keys that begin with the specified prefix. Prefix *string `location:"querystring" locationName:"prefix" type:"string"` @@ -27637,6 +30447,12 @@ func (s *ListObjectsInput) SetMaxKeys(v int64) *ListObjectsInput { return s } +// SetOptionalObjectAttributes sets the OptionalObjectAttributes field's value. +func (s *ListObjectsInput) SetOptionalObjectAttributes(v []*string) *ListObjectsInput { + s.OptionalObjectAttributes = v + return s +} + // SetPrefix sets the Prefix field's value. func (s *ListObjectsInput) SetPrefix(v string) *ListObjectsInput { s.Prefix = &v @@ -27690,7 +30506,7 @@ type ListObjectsOutput struct { // CommonPrefixes lists keys that act like subdirectories in the directory specified // by Prefix. // - // For example, if the prefix is notes/ and the delimiter is a slash (/) as + // For example, if the prefix is notes/ and the delimiter is a slash (/), as // in notes/summer/july, the common prefix is notes/summer/. All of the keys // that roll up into a common prefix count as a single return when calculating // the number of returns. @@ -27706,7 +30522,9 @@ type ListObjectsOutput struct { // the MaxKeys value. 
Delimiter *string `type:"string"` - // Encoding type used by Amazon S3 to encode object keys in the response. + // Encoding type used by Amazon S3 to encode object keys in the response. If + // using url, non-ASCII characters used in an object's key name will be URL + // encoded. For example, the object test_file(3).png will appear as test_file%283%29.png. EncodingType *string `type:"string" enum:"EncodingType"` // A flag that indicates whether Amazon S3 returned all of the results that @@ -27723,17 +30541,26 @@ type ListObjectsOutput struct { // The bucket name. Name *string `type:"string"` - // When response is truncated (the IsTruncated element value in the response - // is true), you can use the key name in this field as marker in the subsequent - // request to get next set of objects. Amazon S3 lists objects in alphabetical - // order Note: This element is returned only if you have delimiter request parameter - // specified. If response does not include the NextMarker and it is truncated, - // you can use the value of the last Key in the response as the marker in the - // subsequent request to get the next set of object keys. + // When the response is truncated (the IsTruncated element value in the response + // is true), you can use the key name in this field as the marker parameter + // in the subsequent request to get the next set of objects. Amazon S3 lists + // objects in alphabetical order. + // + // This element is returned only if you have the delimiter request parameter + // specified. If the response does not include the NextMarker element and it + // is truncated, you can use the value of the last Key element in the response + // as the marker parameter in the subsequent request to get the next set of + // object keys. NextMarker *string `type:"string"` // Keys that begin with the indicated prefix. Prefix *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } // String returns the string representation. @@ -27814,65 +30641,110 @@ func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput { return s } +// SetRequestCharged sets the RequestCharged field's value. +func (s *ListObjectsOutput) SetRequestCharged(v string) *ListObjectsOutput { + s.RequestCharged = &v + return s +} + type ListObjectsV2Input struct { _ struct{} `locationName:"ListObjectsV2Request" type:"structure"` - // Bucket name to list. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
+ // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // ContinuationToken indicates Amazon S3 that the list is being continued on - // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key. + // ContinuationToken indicates to Amazon S3 that the list is being continued + // on this bucket with a token. ContinuationToken is obfuscated and is not a + // real key. You can use this ContinuationToken for pagination of the list results. ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` - // A delimiter is a character you use to group keys. + // A delimiter is a character that you use to group keys. + // + // * Directory buckets - For directory buckets, / is the only supported delimiter. + // + // * Directory buckets - When you query ListObjectsV2 with a delimiter during + // in-progress multipart uploads, the CommonPrefixes response parameter contains + // the prefixes that are associated with the in-progress multipart uploads. + // For more information about multipart uploads, see Multipart Upload Overview + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in + // the Amazon S3 User Guide. Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - // Encoding type used by Amazon S3 to encode object keys in the response. + // Encoding type used by Amazon S3 to encode object keys in the response. 
If + // using url, non-ASCII characters used in an object's key name will be URL + // encoded. For example, the object test_file(3).png will appear as test_file%283%29.png. EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // The owner field is not present in listV2 by default, if you want to return - // owner field with each key in the result then set the fetch owner field to - // true. + // The owner field is not present in ListObjectsV2 by default. If you want to + // return the owner field with each key in the result, then set the FetchOwner + // field to true. + // + // Directory buckets - For directory buckets, the bucket owner is returned as + // the object owner for all objects. FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"` - // Sets the maximum number of keys returned in the response. By default the + // Sets the maximum number of keys returned in the response. By default, the // action returns up to 1,000 key names. The response might contain fewer keys // but will never contain more. MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + // Specifies the optional fields that you want returned in the response. Fields + // that you do not specify are not returned. + // + // This functionality is not supported for directory buckets. + OptionalObjectAttributes []*string `location:"header" locationName:"x-amz-optional-object-attributes" type:"list" enum:"OptionalObjectAttributes"` + // Limits the response to keys that begin with the specified prefix. + // + // Directory buckets - For directory buckets, only prefixes that end in a delimiter + // (/) are supported. Prefix *string `location:"querystring" locationName:"prefix" type:"string"` // Confirms that the requester knows that she or he will be charged for the // list objects request in V2 style. Bucket owners need not specify this parameter // in their requests. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts // listing after this specified key. StartAfter can be any key in the bucket. + // + // This functionality is not supported for directory buckets. StartAfter *string `location:"querystring" locationName:"start-after" type:"string"` } @@ -27959,6 +30831,12 @@ func (s *ListObjectsV2Input) SetMaxKeys(v int64) *ListObjectsV2Input { return s } +// SetOptionalObjectAttributes sets the OptionalObjectAttributes field's value. +func (s *ListObjectsV2Input) SetOptionalObjectAttributes(v []*string) *ListObjectsV2Input { + s.OptionalObjectAttributes = v + return s +} + // SetPrefix sets the Prefix field's value. 
func (s *ListObjectsV2Input) SetPrefix(v string) *ListObjectsV2Input { s.Prefix = &v @@ -28007,8 +30885,9 @@ func (s ListObjectsV2Input) updateArnableField(v string) (interface{}, error) { type ListObjectsV2Output struct { _ struct{} `type:"structure"` - // All of the keys (up to 1,000) rolled up into a common prefix count as a single - // return when calculating the number of returns. + // All of the keys (up to 1,000) that share the same prefix are grouped together. + // When counting the total numbers of returns by this API operation, this group + // of keys is considered as one item. // // A response can contain CommonPrefixes only if you specify a delimiter. // @@ -28022,12 +30901,24 @@ type ListObjectsV2Output struct { // in notes/summer/july, the common prefix is notes/summer/. All of the keys // that roll up into a common prefix count as a single return when calculating // the number of returns. + // + // * Directory buckets - For directory buckets, only prefixes that end in + // a delimiter (/) are supported. + // + // * Directory buckets - When you query ListObjectsV2 with a delimiter during + // in-progress multipart uploads, the CommonPrefixes response parameter contains + // the prefixes that are associated with the in-progress multipart uploads. + // For more information about multipart uploads, see Multipart Upload Overview + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in + // the Amazon S3 User Guide. CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` // Metadata about each object returned. Contents []*Object `type:"list" flattened:"true"` // If ContinuationToken was sent with the request, it is included in the response. + // You can use the returned ContinuationToken for pagination of the list response. + // You can use this ContinuationToken for pagination of the list results. ContinuationToken *string `type:"string"` // Causes keys that contain the same string between the prefix and the first @@ -28035,6 +30926,8 @@ type ListObjectsV2Output struct { // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere // in the response. Each rolled-up result counts as only one return against // the MaxKeys value. + // + // Directory buckets - For directory buckets, / is the only supported delimiter. Delimiter *string `type:"string"` // Encoding type used by Amazon S3 to encode object key names in the XML response. @@ -28052,31 +30945,16 @@ type ListObjectsV2Output struct { IsTruncated *bool `type:"boolean"` // KeyCount is the number of keys returned with this request. KeyCount will - // always be less than or equals to MaxKeys field. Say you ask for 50 keys, - // your result will include less than equals 50 keys + // always be less than or equal to the MaxKeys field. For example, if you ask + // for 50 keys, your result will include 50 keys or fewer. KeyCount *int64 `type:"integer"` - // Sets the maximum number of keys returned in the response. By default the + // Sets the maximum number of keys returned in the response. By default, the // action returns up to 1,000 key names. The response might contain fewer keys // but will never contain more. MaxKeys *int64 `type:"integer"` // The bucket name. - // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
- // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. Name *string `type:"string"` // NextContinuationToken is sent when isTruncated is true, which means there @@ -28086,9 +30964,20 @@ type ListObjectsV2Output struct { NextContinuationToken *string `type:"string"` // Keys that begin with the indicated prefix. + // + // Directory buckets - For directory buckets, only prefixes that end in a delimiter + // (/) are supported. Prefix *string `type:"string"` + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + // If StartAfter was sent with the request, it is included in the response. + // + // This functionality is not supported for directory buckets. StartAfter *string `type:"string"` } @@ -28176,6 +31065,12 @@ func (s *ListObjectsV2Output) SetPrefix(v string) *ListObjectsV2Output { return s } +// SetRequestCharged sets the RequestCharged field's value. +func (s *ListObjectsV2Output) SetRequestCharged(v string) *ListObjectsV2Output { + s.RequestCharged = &v + return s +} + // SetStartAfter sets the StartAfter field's value. func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { s.StartAfter = &v @@ -28187,27 +31082,41 @@ type ListPartsInput struct { // The name of the bucket to which the parts are being uploaded. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. 
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Object key for which the multipart upload was initiated. @@ -28223,16 +31132,22 @@ type ListPartsInput struct { PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The server-side encryption (SSE) algorithm used to encrypt the object. 
This // parameter is needed only when the object was created using a checksum algorithm. // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // The server-side encryption (SSE) customer managed key. This parameter is @@ -28240,6 +31155,8 @@ type ListPartsInput struct { // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) // in the Amazon S3 User Guide. // + // This functionality is not supported for directory buckets. + // // SSECustomerKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by ListPartsInput's // String and GoString methods. @@ -28249,6 +31166,8 @@ type ListPartsInput struct { // is needed only when the object was created using a checksum algorithm. For // more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Upload ID identifying the multipart upload whose parts are being listed. @@ -28409,15 +31328,19 @@ type ListPartsOutput struct { // name in the request, then the response includes this header indicating when // the initiated multipart upload will become eligible for abort operation. // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). + // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). // // The response will also include the x-amz-abort-rule-id header that will provide // the ID of the lifecycle configuration rule that defines this action. + // + // This functionality is not supported for directory buckets. AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` // This header is returned along with the x-amz-abort-date header. It identifies // applicable lifecycle configuration rule that defines the action to abort // incomplete multipart uploads. + // + // This functionality is not supported for directory buckets. AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` // The name of the bucket to which the multipart upload was initiated. Does @@ -28452,11 +31375,13 @@ type ListPartsOutput struct { // Container element that identifies the object owner, after the object is created. // If multipart upload is initiated by an IAM user, this element provides the // parent account ID and display name. + // + // Directory buckets - The bucket owner is returned as the object owner for + // all the parts. Owner *Owner `type:"structure"` - // When a list is truncated, this element specifies the last part in the list, - // as well as the value to use for the part-number-marker request parameter - // in a subsequent request. + // Specifies the part after which listing should begin. 
Only parts with higher + // part numbers will be listed. PartNumberMarker *int64 `type:"integer"` // Container for elements related to a particular part. A response can contain @@ -28465,10 +31390,14 @@ type ListPartsOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - // Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded - // object. + // The class of storage used to store the uploaded object. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported + // by directory buckets to store objects. StorageClass *string `type:"string" enum:"StorageClass"` // Upload ID identifying the multipart upload whose parts are being listed. @@ -28726,6 +31655,56 @@ func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location { return s } +// Specifies the location where the bucket will be created. +// +// For directory buckets, the location type is Availability Zone. For more information +// about directory buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) +// in the Amazon S3 User Guide. +// +// This functionality is only supported by directory buckets. +type LocationInfo struct { + _ struct{} `type:"structure"` + + // The name of the location where the bucket will be created. + // + // For directory buckets, the name of the location is the AZ ID of the Availability + // Zone where the bucket will be created. An example AZ ID value is usw2-az1. + Name *string `type:"string"` + + // The type of location where the bucket will be created. + Type *string `type:"string" enum:"LocationType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LocationInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LocationInfo) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *LocationInfo) SetName(v string) *LocationInfo { + s.Name = &v + return s +} + +// SetType sets the Type field's value. +func (s *LocationInfo) SetType(v string) *LocationInfo { + s.Type = &v + return s +} + // Describes where logs are stored and the prefix that Amazon S3 assigns to // all log object keys for a bucket. For more information, see PUT Bucket logging // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) @@ -28751,6 +31730,9 @@ type LoggingEnabled struct { // in the Amazon S3 User Guide. TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` + // Amazon S3 key format for log objects. + TargetObjectKeyFormat *TargetObjectKeyFormat `type:"structure"` + // A prefix for all log object keys. If you store log files from multiple Amazon // S3 buckets in a single bucket, you can use a prefix to distinguish which // log files came from which bucket. 
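The hunk above adds the TargetObjectKeyFormat knob to LoggingEnabled; together with the PartitionedPrefix type added further down in this diff, it lets server access logs be keyed by date partitions instead of the flat default. A minimal sketch of wiring it through PutBucketLogging follows. It assumes the upstream v1.54 shape in which TargetObjectKeyFormat carries a PartitionedPrefix member and the generated constant s3.PartitionDateSourceEventTime exists for the PartitionDateSource enum; bucket names are placeholders.

// Sketch only: enable server access logging with the new partitioned
// log-object key format. Assumes aws-sdk-go v1.54.x; bucket names are
// hypothetical.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
		Bucket: aws.String("example-source-bucket"), // placeholder
		BucketLoggingStatus: &s3.BucketLoggingStatus{
			LoggingEnabled: &s3.LoggingEnabled{
				TargetBucket: aws.String("example-log-bucket"), // placeholder
				TargetPrefix: aws.String("access-logs/"),
				// New in this upgrade: partition log keys as
				// [prefix][account]/[region]/[bucket]/[yyyy]/[mm]/[dd]/...
				TargetObjectKeyFormat: &s3.TargetObjectKeyFormat{
					PartitionedPrefix: &s3.PartitionedPrefix{
						PartitionDateSource: aws.String(s3.PartitionDateSourceEventTime),
					},
				},
			},
		},
	})
	if err != nil {
		log.Fatalf("PutBucketLogging: %v", err)
	}
}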
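Several hunks earlier in this section thread requester-pays and optional-attribute plumbing through the List* shapes: RequestPayer on the inputs, RequestCharged on the outputs, OptionalObjectAttributes, and the new per-object RestoreStatus. The sketch below exercises them through the ListObjectsV2 ContinuationToken/NextContinuationToken handshake documented above. The bucket name is a placeholder, and the enum constants (s3.RequestPayerRequester, s3.OptionalObjectAttributesRestoreStatus) are assumed to follow the SDK's generated naming.

// Sketch only: paginate ListObjectsV2 with requester-pays accounting and
// the RestoreStatus optional attribute. Placeholder bucket; minimal error
// handling.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	input := &s3.ListObjectsV2Input{
		Bucket:       aws.String("example-bucket"), // placeholder
		MaxKeys:      aws.Int64(1000),
		RequestPayer: aws.String(s3.RequestPayerRequester),
		OptionalObjectAttributes: []*string{
			aws.String(s3.OptionalObjectAttributesRestoreStatus),
		},
	}

	for {
		page, err := svc.ListObjectsV2(input)
		if err != nil {
			log.Fatalf("ListObjectsV2: %v", err)
		}
		for _, obj := range page.Contents {
			// RestoreStatus is populated only when requested via
			// OptionalObjectAttributes and the object is archived.
			fmt.Printf("%s\t%d bytes\trestore status present: %t\n",
				aws.StringValue(obj.Key), aws.Int64Value(obj.Size),
				obj.RestoreStatus != nil)
		}
		if page.RequestCharged != nil {
			fmt.Println("requester was charged for this page")
		}
		if !aws.BoolValue(page.IsTruncated) {
			break
		}
		// Feed NextContinuationToken back in, per the doc comments above.
		input.ContinuationToken = page.NextContinuationToken
	}
}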
@@ -28815,6 +31797,12 @@ func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled { return s } +// SetTargetObjectKeyFormat sets the TargetObjectKeyFormat field's value. +func (s *LoggingEnabled) SetTargetObjectKeyFormat(v *TargetObjectKeyFormat) *LoggingEnabled { + s.TargetObjectKeyFormat = v + return s +} + // SetTargetPrefix sets the TargetPrefix field's value. func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { s.TargetPrefix = &v @@ -28825,10 +31813,10 @@ func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { type MetadataEntry struct { _ struct{} `type:"structure"` - // Name of the Object. + // Name of the object. Name *string `type:"string"` - // Value of the Object. + // Value of the object. Value *string `type:"string"` } @@ -29006,7 +31994,8 @@ type MetricsConfiguration struct { // prefix, an object tag, an access point ARN, or a conjunction (MetricsAndOperator). Filter *MetricsFilter `type:"structure"` - // The ID used to identify the metrics configuration. + // The ID used to identify the metrics configuration. The ID has a 64 character + // limit and can only contain letters, numbers, periods, dashes, and underscores. // // Id is a required field Id *string `type:"string" required:"true"` @@ -29161,9 +32150,15 @@ type MultipartUpload struct { Key *string `min:"1" type:"string"` // Specifies the owner of the object that is part of the multipart upload. + // + // Directory buckets - The bucket owner is returned as the object owner for + // all the objects. Owner *Owner `type:"structure"` // The class of storage used to store the object. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported + // by directory buckets to store objects. StorageClass *string `type:"string" enum:"StorageClass"` // Upload ID that identifies the multipart upload. @@ -29238,9 +32233,10 @@ func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload { type NoncurrentVersionExpiration struct { _ struct{} `type:"structure"` - // Specifies how many noncurrent versions Amazon S3 will retain. If there are - // this many more recent noncurrent versions, Amazon S3 will take the associated - // action. For more information about noncurrent versions, see Lifecycle configuration + // Specifies how many noncurrent versions Amazon S3 will retain. You can specify + // up to 100 noncurrent versions to retain. Amazon S3 will permanently delete + // any additional noncurrent versions beyond the specified number to retain. + // For more information about noncurrent versions, see Lifecycle configuration // elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) // in the Amazon S3 User Guide. NewerNoncurrentVersions *int64 `type:"integer"` @@ -29293,10 +32289,11 @@ func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVers type NoncurrentVersionTransition struct { _ struct{} `type:"structure"` - // Specifies how many noncurrent versions Amazon S3 will retain. If there are - // this many more recent noncurrent versions, Amazon S3 will take the associated - // action. For more information about noncurrent versions, see Lifecycle configuration - // elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) + // Specifies how many noncurrent versions Amazon S3 will retain in the same + // storage class before transitioning objects. You can specify up to 100 noncurrent + // versions to retain. 
Amazon S3 will transition any additional noncurrent versions + // beyond the specified number to retain. For more information about noncurrent + // versions, see Lifecycle configuration elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) // in the Amazon S3 User Guide. NewerNoncurrentVersions *int64 `type:"integer"` @@ -29504,7 +32501,8 @@ func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConf } // Specifies object key name filtering rules. For information about key name -// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// filtering, see Configuring event notifications using object key name filtering +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) // in the Amazon S3 User Guide. type NotificationConfigurationFilter struct { _ struct{} `type:"structure"` @@ -29564,6 +32562,8 @@ type Object struct { // encryption. If an object is larger than 16 MB, the Amazon Web Services // Management Console will upload or copy that object as a Multipart Upload, // and therefore the ETag will not be an MD5 digest. + // + // Directory buckets - MD5 is not supported by directory buckets. ETag *string `type:"string"` // The name that you assign to an object. You use the object key to retrieve @@ -29574,12 +32574,27 @@ type Object struct { LastModified *time.Time `type:"timestamp"` // The owner of the object + // + // Directory buckets - The bucket owner is returned as the object owner. Owner *Owner `type:"structure"` + // Specifies the restoration status of an object. Objects in certain storage + // classes must be restored before they can be retrieved. For more information + // about these storage classes and how to work with archived objects, see Working + // with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. Only the S3 Express + // One Zone storage class is supported by directory buckets to store objects. + RestoreStatus *RestoreStatus `type:"structure"` + // Size in bytes of the object - Size *int64 `type:"integer"` + Size *int64 `type:"long"` // The class of storage used to store the object. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported + // by directory buckets to store objects. StorageClass *string `type:"string" enum:"ObjectStorageClass"` } @@ -29631,6 +32646,12 @@ func (s *Object) SetOwner(v *Owner) *Object { return s } +// SetRestoreStatus sets the RestoreStatus field's value. +func (s *Object) SetRestoreStatus(v *RestoreStatus) *Object { + s.RestoreStatus = v + return s +} + // SetSize sets the Size field's value. func (s *Object) SetSize(v int64) *Object { s.Size = &v @@ -29656,7 +32677,9 @@ type ObjectIdentifier struct { // Key is a required field Key *string `min:"1" type:"string" required:"true"` - // VersionId for the specific version of the object to delete. + // Version ID for the specific version of the object to delete. + // + // This functionality is not supported for directory buckets. VersionId *string `type:"string"` } @@ -29871,26 +32894,32 @@ type ObjectPart struct { ChecksumCRC32 *string `type:"string"` // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. 
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string `type:"string"` // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string `type:"string"` // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string `type:"string"` @@ -29899,7 +32928,7 @@ type ObjectPart struct { PartNumber *int64 `type:"integer"` // The size of the uploaded part in bytes. - Size *int64 `type:"integer"` + Size *int64 `type:"long"` } // String returns the string representation. @@ -29973,14 +33002,21 @@ type ObjectVersion struct { // The object key. Key *string `min:"1" type:"string"` - // Date and time the object was last modified. + // Date and time when the object was last modified. LastModified *time.Time `type:"timestamp"` // Specifies the owner of the object. Owner *Owner `type:"structure"` + // Specifies the restoration status of an object. 
Objects in certain storage + // classes must be restored before they can be retrieved. For more information + // about these storage classes and how to work with archived objects, see Working + // with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) + // in the Amazon S3 User Guide. + RestoreStatus *RestoreStatus `type:"structure"` + // Size in bytes of the object. - Size *int64 `type:"integer"` + Size *int64 `type:"long"` // The class of storage used to store the object. StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` @@ -30043,6 +33079,12 @@ func (s *ObjectVersion) SetOwner(v *Owner) *ObjectVersion { return s } +// SetRestoreStatus sets the RestoreStatus field's value. +func (s *ObjectVersion) SetRestoreStatus(v *RestoreStatus) *ObjectVersion { + s.RestoreStatus = v + return s +} + // SetSize sets the Size field's value. func (s *ObjectVersion) SetSize(v int64) *ObjectVersion { s.Size = &v @@ -30153,7 +33195,26 @@ func (s *OutputSerialization) SetJSON(v *JSONOutput) *OutputSerialization { type Owner struct { _ struct{} `type:"structure"` - // Container for the display name of the owner. + // Container for the display name of the owner. This value is only supported + // in the following Amazon Web Services Regions: + // + // * US East (N. Virginia) + // + // * US West (N. California) + // + // * US West (Oregon) + // + // * Asia Pacific (Singapore) + // + // * Asia Pacific (Sydney) + // + // * Asia Pacific (Tokyo) + // + // * Europe (Ireland) + // + // * South America (São Paulo) + // + // This functionality is not supported for directory buckets. DisplayName *string `type:"string"` // Container for the ID of the owner. @@ -30263,8 +33324,19 @@ type OwnershipControlsRule struct { // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer // affect permissions. The bucket owner automatically owns and has full control // over every object in the bucket. The bucket only accepts PUT requests that - // don't specify an ACL or bucket owner full control ACLs, such as the bucket-owner-full-control - // canned ACL or an equivalent form of this ACL expressed in the XML format. + // don't specify an ACL or specify bucket owner full control ACLs (such as the + // predefined bucket-owner-full-control canned ACL or a custom ACL in XML format + // that grants the same permissions). + // + // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are disabled. + // We recommend keeping ACLs disabled, except in uncommon use cases where you + // must control access for each object individually. For more information about + // S3 Object Ownership, see Controlling ownership of objects and disabling ACLs + // for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. Directory buckets + // use the bucket owner enforced setting for S3 Object Ownership. // // ObjectOwnership is a required field ObjectOwnership *string `type:"string" required:"true" enum:"ObjectOwnership"` @@ -30342,18 +33414,22 @@ type Part struct { ChecksumCRC32 *string `type:"string"` // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. 
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string `type:"string"` // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string `type:"string"` @@ -30375,7 +33451,7 @@ type Part struct { PartNumber *int64 `type:"integer"` // Size in bytes of the uploaded part data. - Size *int64 `type:"integer"` + Size *int64 `type:"long"` } // String returns the string representation. @@ -30444,6 +33520,44 @@ func (s *Part) SetSize(v int64) *Part { return s } +// Amazon S3 keys for log objects are partitioned in the following format: +// +// [DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] +// +// PartitionedPrefix defaults to EventTime delivery when server access logs +// are delivered. +type PartitionedPrefix struct { + _ struct{} `locationName:"PartitionedPrefix" type:"structure"` + + // Specifies the partition date source for the partitioned prefix. PartitionDateSource + // can be EventTime or DeliveryTime. + PartitionDateSource *string `type:"string" enum:"PartitionDateSource"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PartitionedPrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PartitionedPrefix) GoString() string { + return s.String() +} + +// SetPartitionDateSource sets the PartitionDateSource field's value. 
+func (s *PartitionedPrefix) SetPartitionDateSource(v string) *PartitionedPrefix { + s.PartitionDateSource = &v + return s +} + // The container element for a bucket's policy status. type PolicyStatus struct { _ struct{} `type:"structure"` @@ -30691,12 +33805,12 @@ type PutBucketAccelerateConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -30708,9 +33822,9 @@ type PutBucketAccelerateConfigurationInput struct { // must be populated with the algorithm's checksum of the request payload. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -30845,12 +33959,12 @@ type PutBucketAclInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. 
For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -30866,9 +33980,9 @@ type PutBucketAclInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Allows grantee the read, write, read ACP, and write ACP permissions on the @@ -31059,9 +34173,9 @@ type PutBucketAnalyticsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The ID that identifies the analytics configuration. @@ -31211,12 +34325,12 @@ type PutBucketCorsInput struct { // CORSConfiguration is a required field CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -31232,9 +34346,9 @@ type PutBucketCorsInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. 
If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -31364,20 +34478,23 @@ type PutBucketEncryptionInput struct { _ struct{} `locationName:"PutBucketEncryptionRequest" type:"structure" payload:"ServerSideEncryptionConfiguration"` // Specifies default encryption for a bucket using server-side encryption with - // Amazon S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). For information - // about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket - // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) + // different key options. By default, all buckets have a default encryption + // configuration that uses server-side encryption with Amazon S3 managed keys + // (SSE-S3). You can optionally configure default encryption for a bucket by + // using server-side encryption with an Amazon Web Services KMS key (SSE-KMS) + // or a customer-provided key (SSE-C). For information about the bucket default + // encryption feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -31393,9 +34510,9 @@ type PutBucketEncryptionInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). 
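The reworded ExpectedBucketOwner contract above is identical across all of these inputs: pass the account ID you believe owns the bucket and treat a 403 as an ownership mismatch. A minimal sketch with placeholder account and bucket:

package main

import (
	"log"
	"net/http"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.GetBucketAcl(&s3.GetBucketAclInput{
		Bucket:              aws.String("amzn-s3-demo-bucket"),
		ExpectedBucketOwner: aws.String("111122223333"),
	})
	// On an owner mismatch, S3 rejects the call outright with 403 Forbidden.
	if rf, ok := err.(awserr.RequestFailure); ok && rf.StatusCode() == http.StatusForbidden {
		log.Fatal("bucket is not owned by the expected account")
	}
	if err != nil {
		log.Fatal(err)
	}
}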
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Specifies the default server-side-encryption configuration. @@ -31673,9 +34790,9 @@ type PutBucketInventoryConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The ID used to identify the inventory configuration. @@ -31822,12 +34939,12 @@ type PutBucketLifecycleConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -31843,9 +34960,9 @@ type PutBucketLifecycleConfigurationInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Container for lifecycle rules. You can add as many as 1,000 rules. @@ -31977,12 +35094,12 @@ type PutBucketLifecycleInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. 
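Per the expanded PutBucketEncryption description above, every bucket now starts with SSE-S3 default encryption, and this call swaps in another option such as SSE-KMS. A sketch with a placeholder KMS key ARN:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm:   aws.String(s3.ServerSideEncryptionAwsKms),
					KMSMasterKeyID: aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"),
				},
				// Reuse bucket-level data keys to cut KMS request costs.
				BucketKeyEnabled: aws.Bool(true),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}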
Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -31998,12 +35115,15 @@ type PutBucketLifecycleInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Container for lifecycle rules. You can add as many as 1000 rules. + // + // For more information see, Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) + // in the Amazon S3 User Guide. LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -32139,12 +35259,12 @@ type PutBucketLoggingInput struct { // BucketLoggingStatus is a required field BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -32160,9 +35280,9 @@ type PutBucketLoggingInput struct { // to be used. 
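The lifecycle inputs above accept up to 1,000 rules per configuration (see the Managing your storage lifecycle link the diff adds). A sketch with a single expiration rule:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// One rule out of the up-to-1,000 a configuration may carry:
	// expire objects under logs/ after 30 days.
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:         aws.String("expire-logs"),
				Status:     aws.String(s3.ExpirationStatusEnabled),
				Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}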
ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -32296,12 +35416,13 @@ type PutBucketMetricsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // The ID used to identify the metrics configuration. + // The ID used to identify the metrics configuration. The ID has a 64 character + // limit and can only contain letters, numbers, periods, dashes, and underscores. // // Id is a required field Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` @@ -32445,9 +35566,9 @@ type PutBucketNotificationConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // A container for specifying the notification configuration of the bucket. @@ -32591,12 +35712,12 @@ type PutBucketNotificationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. 
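The metrics configuration ID constraint spelled out above (64-character limit; letters, numbers, periods, dashes, underscores) applies to both the query-string Id and the configuration body, and the two must match. A sketch:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	const id = "whole-bucket.metrics-v1" // letters, numbers, periods, dashes, underscores only
	_, err := svc.PutBucketMetricsConfiguration(&s3.PutBucketMetricsConfigurationInput{
		Bucket:               aws.String("amzn-s3-demo-bucket"),
		Id:                   aws.String(id),
		MetricsConfiguration: &s3.MetricsConfiguration{Id: aws.String(id)},
	})
	if err != nil {
		log.Fatal(err)
	}
}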
For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -32612,9 +35733,9 @@ type PutBucketNotificationInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The container for the configuration. @@ -32748,9 +35869,9 @@ type PutBucketOwnershipControlsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter) @@ -32881,19 +36002,45 @@ type PutBucketPolicyInput struct { // The name of the bucket. // + // Directory buckets - When you use this operation with a directory bucket, + // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name + // . Virtual-hosted-style requests aren't supported. Directory bucket names + // must be unique in the chosen Availability Zone. Bucket names must also follow + // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). + // For information about bucket naming restrictions, see Directory bucket naming + // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum-algorithm or x-amz-trailer header sent. 
Otherwise, Amazon + // S3 fails the request with the HTTP status code 400 Bad Request. + // + // For the x-amz-checksum-algorithm header, replace algorithm with the supported + // algorithm from the following list: + // + // * CRC32 + // + // * CRC32C + // + // * SHA1 + // + // * SHA256 + // + // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. + // If the individual checksum value you provide through x-amz-checksum-algorithm + // doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, + // Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum + // algorithm that matches the provided value in x-amz-checksum-algorithm . + // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. // // The AWS SDK for Go v1 does not support automatic computing request payload // checksum. This feature is available in the AWS SDK for Go v2. If a value @@ -32907,15 +36054,24 @@ type PutBucketPolicyInput struct { // Set this parameter to true to confirm that you want to remove your permissions // to change this bucket policy in the future. + // + // This functionality is not supported for directory buckets. ConfirmRemoveSelfBucketAccess *bool `location:"header" locationName:"x-amz-confirm-remove-self-bucket-access" type:"boolean"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. + // If you specify this header, the request fails with the HTTP status code 501 + // Not Implemented. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The bucket policy as a JSON document. // + // For directory buckets, the only IAM action supported in the bucket policy + // is s3express:CreateSession. + // // Policy is a required field Policy *string `type:"string" required:"true"` } @@ -33051,12 +36207,12 @@ type PutBucketReplicationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. 
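Tying together the PutBucketPolicyInput fields above for a general purpose bucket; per the new directory-bucket notes, a directory bucket policy could only grant s3express:CreateSession and would have to omit ExpectedBucketOwner. Account ID and bucket name are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	const bucket = "amzn-s3-demo-bucket"
	policy := fmt.Sprintf(`{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Effect": "Allow",
	    "Principal": {"AWS": "arn:aws:iam::111122223333:root"},
	    "Action": "s3:GetObject",
	    "Resource": "arn:aws:s3:::%s/*"
	  }]
	}`, bucket)

	_, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
		Bucket:              aws.String(bucket),
		Policy:              aws.String(policy),
		ExpectedBucketOwner: aws.String("111122223333"),
	})
	if err != nil {
		log.Fatal(err)
	}
}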
Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -33072,9 +36228,9 @@ type PutBucketReplicationInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // A container for replication rules. You can add up to 1,000 rules. The maximum @@ -33223,12 +36379,12 @@ type PutBucketRequestPaymentInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -33244,9 +36400,9 @@ type PutBucketRequestPaymentInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Container for Payer. @@ -33385,12 +36541,12 @@ type PutBucketTaggingInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. 
This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -33406,9 +36562,9 @@ type PutBucketTaggingInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Container for the TagSet and Tag elements. @@ -33547,12 +36703,12 @@ type PutBucketVersioningInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -33568,9 +36724,9 @@ type PutBucketVersioningInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. 
If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The concatenation of the authentication device's serial number, a space, @@ -33714,12 +36870,12 @@ type PutBucketWebsiteInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -33735,9 +36891,9 @@ type PutBucketWebsiteInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Container for the request. @@ -33881,22 +37037,33 @@ type PutObjectAclInput struct { // The bucket name that contains the object to which you want to attach the // ACL. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. 
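The MFA field described above is the device serial (or MFA device ARN), a space, and the currently displayed code, and it only matters when MFA delete is being toggled. A sketch with a hypothetical device:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
		// Device serial number or ARN, a space, then the displayed token.
		MFA: aws.String("arn:aws:iam::111122223333:mfa/root-account-mfa-device 123456"),
		VersioningConfiguration: &s3.VersioningConfiguration{
			Status:    aws.String(s3.BucketVersioningStatusEnabled),
			MFADelete: aws.String(s3.MFADeleteEnabled),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}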
For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -33912,25 +37079,25 @@ type PutObjectAclInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Allows grantee the read, write, read ACP, and write ACP permissions on the // bucket. // - // This action is not supported by Amazon S3 on Outposts. + // This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` // Allows grantee to list the objects in the bucket. // - // This action is not supported by Amazon S3 on Outposts. + // This functionality is not supported for Amazon S3 on Outposts. GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` // Allows grantee to read the bucket ACL. // - // This action is not supported by Amazon S3 on Outposts. 
+ // This functionality is not supported for Amazon S3 on Outposts. GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` // Allows grantee to create new objects in the bucket. @@ -33941,37 +37108,28 @@ type PutObjectAclInput struct { // Allows grantee to write the ACL for the applicable bucket. // - // This action is not supported by Amazon S3 on Outposts. + // This functionality is not supported for Amazon S3 on Outposts. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` // Key for which the PUT action was initiated. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. - // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. - // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // VersionId used to reference a specific version of the object. + // Version ID used to reference a specific version of the object. + // + // This functionality is not supported for directory buckets. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -34137,6 +37295,8 @@ type PutObjectAclOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. 
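A sketch of PutObjectAcl combining the fields discussed above: a canned ACL on a specific object version, with the requester absorbing charges on a Requester Pays bucket (none of which applies to directory buckets). The version ID is a placeholder:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
		Bucket:       aws.String("amzn-s3-demo-bucket"),
		Key:          aws.String("reports/q2.csv"),
		ACL:          aws.String(s3.ObjectCannedACLBucketOwnerFullControl),
		VersionId:    aws.String("EXAMPLE-version-id"),
		RequestPayer: aws.String(s3.RequestPayerRequester),
	})
	if err != nil {
		log.Fatal(err)
	}
	// RequestCharged is echoed back when the requester was billed.
	log.Println(aws.StringValue(out.RequestCharged))
}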
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } @@ -34168,9 +37328,32 @@ type PutObjectInput struct { _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"` // The canned ACL to apply to the object. For more information, see Canned ACL - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) + // in the Amazon S3 User Guide. // - // This action is not supported by Amazon S3 on Outposts. + // When adding a new object, you can use headers to grant ACL-based permissions + // to individual Amazon Web Services accounts or to predefined groups defined + // by Amazon S3. These permissions are then added to the ACL on the object. + // By default, all objects are private. Only the owner has full access control. + // For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html) + // in the Amazon S3 User Guide. + // + // If the bucket that you're uploading objects to uses the bucket owner enforced + // setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. + // Buckets that use this setting only accept PUT requests that don't specify + // an ACL or PUT requests that specify bucket owner full control ACLs, such + // as the bucket-owner-full-control canned ACL or an equivalent form of this + // ACL expressed in the XML format. PUT requests that contain other ACLs (for + // example, custom grants to certain Amazon Web Services accounts) fail and + // return a 400 error with the error code AccessControlListNotSupported. For + // more information, see Controlling ownership of objects and disabling ACLs + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` // Object data. @@ -34178,31 +37361,47 @@ type PutObjectInput struct { // The bucket name to which the PUT action was initiated. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. 
When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption - // with server-side encryption using AWS KMS (SSE-KMS). Setting this header - // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with - // SSE-KMS. + // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). + // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for + // object encryption with SSE-KMS. // // Specifying this header with a PUT action doesn’t affect bucket-level settings // for S3 Bucket Key. + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Can be used to specify caching behavior along the request/reply chain. For @@ -34210,16 +37409,33 @@ type PutObjectInput struct { // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. 
For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon + // S3 fails the request with the HTTP status code 400 Bad Request. + // + // For the x-amz-checksum-algorithm header, replace algorithm with the supported + // algorithm from the following list: + // + // * CRC32 + // + // * CRC32C + // + // * SHA1 + // + // * SHA256 + // + // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. + // If the individual checksum value you provide through x-amz-checksum-algorithm + // doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, + // Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum + // algorithm that matches the provided value in x-amz-checksum-algorithm . + // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. // // The AWS SDK for Go v1 does not support automatic computing request payload // checksum. This feature is available in the AWS SDK for Go v2. If a value @@ -34256,21 +37472,21 @@ type PutObjectInput struct { ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` // Specifies presentational information for the object. For more information, - // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). + // see https://www.rfc-editor.org/rfc/rfc6266#section-4 (https://www.rfc-editor.org/rfc/rfc6266#section-4). ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` // Specifies what content encodings have been applied to the object and thus // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 - // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11). + // by the Content-Type header field. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding + // (https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding). ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` // The language the content is in. ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` // Size of the body in bytes. This parameter is useful when the size of the - // body cannot be determined automatically. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13 - // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13). + // body cannot be determined automatically. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length + // (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length). 
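Pulling together the RFC 9110 content headers and the checksum caveat above: the SDK for Go v1 does not compute payload checksums, so when ChecksumAlgorithm is set, the matching checksum member must be pre-populated by the caller. A sketch:

package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/base64"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	body := []byte(`{"hello":"world"}`)
	sum := sha256.Sum256(body)

	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:             aws.String("amzn-s3-demo-bucket"),
		Key:                aws.String("data/hello.json"),
		Body:               bytes.NewReader(body),
		ContentType:        aws.String("application/json"),
		ContentDisposition: aws.String(`attachment; filename="hello.json"`),
		CacheControl:       aws.String("max-age=3600"),
		// v1 does not auto-compute checksums, so supply the value yourself.
		ChecksumAlgorithm: aws.String(s3.ChecksumAlgorithmSha256),
		ChecksumSHA256:    aws.String(base64.StdEncoding.EncodeToString(sum[:])),
	})
	if err != nil {
		log.Fatal(err)
	}
}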
ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` // The base64-encoded 128-bit MD5 digest of the message (without the headers) @@ -34279,39 +37495,54 @@ type PutObjectInput struct { // it is optional, we recommend using the Content-MD5 mechanism as an end-to-end // integrity check. For more information about REST request authentication, // see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). + // + // The Content-MD5 header is required for any request to upload an object with + // a retention period configured using Amazon S3 Object Lock. For more information + // about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` // A standard MIME type describing the format of the contents. For more information, - // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17). + // see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type). ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The date and time at which the object is no longer cacheable. For more information, - // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21). + // see https://www.rfc-editor.org/rfc/rfc7234#section-5.3 (https://www.rfc-editor.org/rfc/rfc7234#section-5.3). Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. // - // This action is not supported by Amazon S3 on Outposts. + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` // Allows grantee to read the object data and its metadata. // - // This action is not supported by Amazon S3 on Outposts. + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` // Allows grantee to read the object ACL. // - // This action is not supported by Amazon S3 on Outposts. + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` // Allows grantee to write the ACL for the applicable object. // - // This action is not supported by Amazon S3 on Outposts. 
+ // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` // Object key for which the PUT action was initiated. @@ -34323,25 +37554,37 @@ type PutObjectInput struct { Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` // Specifies whether a legal hold will be applied to this object. For more information - // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` // The Object Lock mode that you want to apply to this object. + // + // This functionality is not supported for directory buckets. ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` // The date and time when you want this object's Object Lock to expire. Must // be formatted as a timestamp parameter. + // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting @@ -34350,6 +37593,8 @@ type PutObjectInput struct { // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. // + // This functionality is not supported for directory buckets. + // // SSECustomerKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by PutObjectInput's // String and GoString methods. @@ -34358,51 +37603,81 @@ type PutObjectInput struct { // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 
// Amazon S3 uses this header for a message integrity check to ensure that the // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Specifies the Amazon Web Services KMS Encryption Context to use for object // encryption. The value of this header is a base64-encoded UTF-8 string holding - // JSON with the encryption context key-value pairs. + // JSON with the encryption context key-value pairs. This value is stored as + // object metadata and automatically gets passed on to Amazon Web Services KMS + // for future GetObject or CopyObject operations on this object. This value + // must be explicitly added during CopyObject operations. + // + // This functionality is not supported for directory buckets. // // SSEKMSEncryptionContext is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by PutObjectInput's // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // If x-amz-server-side-encryption is present and has the value of aws:kms, - // this header specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetrical customer managed key that was used - // for the object. If you specify x-amz-server-side-encryption:aws:kms, but - // do not providex-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses - // the Amazon Web Services managed key to protect the data. If the KMS key does - // not exist in the same account issuing the command, you must use the full - // ARN and not just the ID. + // If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, + // this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key Management + // Service (KMS) symmetric encryption customer managed key that was used for + // the object. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, + // but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 + // uses the Amazon Web Services managed key (aws/s3) to protect the data. If + // the KMS key does not exist in the same account that's issuing the command, + // you must use the full ARN and not just the ID. + // + // This functionality is not supported for directory buckets. // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by PutObjectInput's // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms). + // The server-side encryption algorithm used when you store this object + // in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse). + // + // General purpose buckets - You have four mutually exclusive options to protect + // data using server-side encryption in Amazon S3, depending on how you choose + // to manage the encryption keys. Specifically, the encryption key options are + // Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or + // DSSE-KMS), and customer-provided keys (SSE-C).
Amazon S3 encrypts data with + // server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. + // You can optionally tell Amazon S3 to encrypt data at rest by using server-side + // encryption with other key options. For more information, see Using Server-Side + // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) + // in the Amazon S3 User Guide. + // + // Directory buckets - For directory buckets, only the server-side encryption + // with Amazon S3 managed keys (SSE-S3) (AES256) value is supported. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // By default, Amazon S3 uses the STANDARD Storage Class to store newly created // objects. The STANDARD storage class provides high durability and high availability. // Depending on performance needs, you can specify a different Storage Class. - // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, - // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) // in the Amazon S3 User Guide. + // + // * For directory buckets, only the S3 Express One Zone storage class is + // supported to store newly created objects. + // + // * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` // The tag-set for the object. The tag-set must be encoded as URL Query parameters. // (For example, "Key1=Value1") + // + // This functionality is not supported for directory buckets. Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` // If the bucket is configured as a website, redirects requests for this object // to another object in the same bucket or to an external URL. Amazon S3 stores // the value of this header in the object metadata. For information about object - // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html). + // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html) + // in the Amazon S3 User Guide. // // In the following example, the request header sets the redirect to an object // (anotherPage.html) in the same bucket: @@ -34416,7 +37691,10 @@ type PutObjectInput struct { // // For more information about website hosting in Amazon S3, see Hosting Websites // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) - // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html). + // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } @@ -34729,8 +38007,10 @@ type PutObjectLegalHoldInput struct { // The bucket name containing the object that you want to place a legal hold // on. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
+ // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) @@ -34739,12 +38019,12 @@ type PutObjectLegalHoldInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -34760,9 +38040,9 @@ type PutObjectLegalHoldInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The key name for the object that you want to place a legal hold on. @@ -34775,10 +38055,14 @@ type PutObjectLegalHoldInput struct { LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. 
For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The version ID of the object that you want to place a legal hold on. @@ -34906,6 +38190,8 @@ type PutObjectLegalHoldOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } @@ -34941,12 +38227,12 @@ type PutObjectLockConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -34962,19 +38248,23 @@ type PutObjectLockConfigurationInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The Object Lock configuration that you want to apply to the specified bucket. ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. 
If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // A token to allow Object Lock to be enabled for an existing bucket. @@ -35090,6 +38380,8 @@ type PutObjectLockConfigurationOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } @@ -35121,91 +38413,134 @@ type PutObjectOutput struct { _ struct{} `type:"structure"` // Indicates whether the uploaded object uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). + // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. 
For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` // Entity tag for the uploaded object. + // + // General purpose buckets - To ensure that data is not corrupted traversing + // the network, for objects where the ETag is the MD5 digest of the object, + // you can calculate the MD5 while putting an object to Amazon S3 and compare + // the returned ETag to the calculated MD5 value. + // + // Directory buckets - The ETag for the object in a directory bucket isn't the + // MD5 digest of the object. ETag *string `location:"header" locationName:"ETag" type:"string"` // If the expiration is configured for the object (see PutBucketLifecycleConfiguration - // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)), - // the response includes this header. It includes the expiry-date and rule-id - // key-value pairs that provide information about object expiration. The value - // of the rule-id is URL-encoded. 
+ // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)) + // in the Amazon S3 User Guide, the response includes this header. It includes + // the expiry-date and rule-id key-value pairs that provide information about + // object expiration. The value of the rule-id is URL-encoded. + // + // This functionality is not supported for directory buckets. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity + // the response will include this header to provide the round-trip message integrity // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the Amazon Web Services KMS Encryption Context to use + // If present, indicates the Amazon Web Services KMS Encryption Context to use // for object encryption. The value of this header is a base64-encoded UTF-8 - // string holding JSON with the encryption context key-value pairs. + // string holding JSON with the encryption context key-value pairs. This value + // is stored as object metadata and automatically gets passed on to Amazon Web + // Services KMS for future GetObject or CopyObject operations on this object. + // + // This functionality is not supported for directory buckets. // // SSEKMSEncryptionContext is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by PutObjectOutput's // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // If x-amz-server-side-encryption is present and has the value of aws:kms, - // this header specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for - // the object. + // If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, + // this header indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by PutObjectOutput's // String and GoString methods. 
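A minimal sketch of how the PutObject encryption request fields and the corresponding response fields documented above fit together with this SDK; the bucket, key, and KMS key ARN are placeholders, and the client setup assumes default credentials:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := s3.New(sess)

	// x-amz-server-side-encryption-aws-kms-key-id is optional; without it,
	// Amazon S3 falls back to the aws/s3 managed key.
	out, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:               aws.String("amzn-s3-demo-bucket"), // placeholder
		Key:                  aws.String("reports/january.pdf"), // placeholder
		Body:                 strings.NewReader("example payload"),
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAwsKms),
		SSEKMSKeyId:          aws.String("arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	// The response echoes the algorithm and KMS key that were used and, on a
	// versioning-enabled bucket, the generated version ID.
	fmt.Println(aws.StringValue(out.ServerSideEncryption),
		aws.StringValue(out.SSEKMSKeyId),
		aws.StringValue(out.VersionId))
}
```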
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // If you specified server-side encryption either with an Amazon Web Services - // KMS key or Amazon S3-managed encryption key in your PUT request, the response - // includes this header. It confirms the encryption algorithm that Amazon S3 - // used to encrypt the object. + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256, aws:kms, aws:kms:dsse). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - // Version of the object. + // Version ID of the object. + // + // If you enable versioning for a bucket, Amazon S3 automatically generates + // a unique version ID for the object being stored. Amazon S3 returns this ID + // in the response. When you enable versioning for a bucket, if Amazon S3 receives + // multiple write requests for the same object simultaneously, it stores all + // of the objects. For more information about versioning, see Adding Objects + // to Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html) + // in the Amazon S3 User Guide. For information about returning the versioning + // state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). + // + // This functionality is not supported for directory buckets. VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } @@ -35317,8 +38652,10 @@ type PutObjectRetentionInput struct { // The bucket name that contains the object you want to apply this Object Retention // configuration to. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) @@ -35330,12 +38667,12 @@ type PutObjectRetentionInput struct { // Indicates whether this action should bypass Governance-mode restrictions. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. 
For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -35351,9 +38688,9 @@ type PutObjectRetentionInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The key name for the object that you want to apply this Object Retention @@ -35363,10 +38700,14 @@ type PutObjectRetentionInput struct { Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The container element for the Object Retention configuration. @@ -35504,6 +38845,8 @@ type PutObjectRetentionOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } @@ -35536,30 +38879,33 @@ type PutObjectTaggingInput struct { // The bucket name containing the object. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. 
When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -35575,9 +38921,9 @@ type PutObjectTaggingInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. 
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Name of the object key. @@ -35586,10 +38932,14 @@ type PutObjectTaggingInput struct { Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Container for the TagSet and Tag elements @@ -35765,12 +39115,12 @@ type PutPublicAccessBlockInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -35786,9 +39136,9 @@ type PutPublicAccessBlockInput struct { // to be used. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). 
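As a usage sketch for the PutObjectTaggingInput fields above (not part of the generated file; bucket and key are placeholders), the URL-query form "Key1=Value1" corresponds to this structured tag set:

```go
package examples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// tagObject applies the tag set equivalent to the URL-encoded form "Key1=Value1".
func tagObject(svc *s3.S3) error {
	_, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
		Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
		Key:    aws.String("reports/january.pdf"), // placeholder
		Tagging: &s3.Tagging{TagSet: []*s3.Tag{
			{Key: aws.String("Key1"), Value: aws.String("Value1")},
		}},
	})
	return err
}
```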
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The PublicAccessBlock configuration that you want to apply to this Amazon @@ -35929,7 +39279,8 @@ type QueueConfiguration struct { Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true" enum:"Event"` // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // filtering, see Configuring event notifications using object key name filtering + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) // in the Amazon S3 User Guide. Filter *NotificationConfigurationFilter `type:"structure"` @@ -36922,30 +40273,33 @@ type RestoreObjectInput struct { // The bucket name containing the object to restore. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. 
For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -36957,9 +40311,9 @@ type RestoreObjectInput struct { // must be populated with the algorithm's checksum of the request payload. ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Object key for which the action was initiated. @@ -36968,10 +40322,14 @@ type RestoreObjectInput struct { Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Container for restore job parameters. @@ -37107,6 +40465,8 @@ type RestoreObjectOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // Indicates the path in the provided S3 output location where Select results @@ -37260,6 +40620,67 @@ func (s *RestoreRequest) SetType(v string) *RestoreRequest { return s } +// Specifies the restoration status of an object. Objects in certain storage +// classes must be restored before they can be retrieved. 
For more information +// about these storage classes and how to work with archived objects, see Working +// with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) +// in the Amazon S3 User Guide. +// +// This functionality is not supported for directory buckets. Only the S3 Express +// One Zone storage class is supported by directory buckets to store objects. +type RestoreStatus struct { + _ struct{} `type:"structure"` + + // Specifies whether the object is currently being restored. If the object restoration + // is in progress, the header returns the value TRUE. For example: + // + // x-amz-optional-object-attributes: IsRestoreInProgress="true" + // + // If the object restoration has completed, the header returns the value FALSE. + // For example: + // + // x-amz-optional-object-attributes: IsRestoreInProgress="false", RestoreExpiryDate="2012-12-21T00:00:00.000Z" + // + // If the object hasn't been restored, there is no header response. + IsRestoreInProgress *bool `type:"boolean"` + + // Indicates when the restored copy will expire. This value is populated only + // if the object has already been restored. For example: + // + // x-amz-optional-object-attributes: IsRestoreInProgress="false", RestoreExpiryDate="2012-12-21T00:00:00.000Z" + RestoreExpiryDate *time.Time `type:"timestamp"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreStatus) GoString() string { + return s.String() +} + +// SetIsRestoreInProgress sets the IsRestoreInProgress field's value. +func (s *RestoreStatus) SetIsRestoreInProgress(v bool) *RestoreStatus { + s.IsRestoreInProgress = &v + return s +} + +// SetRestoreExpiryDate sets the RestoreExpiryDate field's value. +func (s *RestoreStatus) SetRestoreExpiryDate(v time.Time) *RestoreStatus { + s.RestoreExpiryDate = &v + return s +} + // Specifies the redirect behavior and when a redirect is applied. For more // information about routing rules, see Configuring advanced conditional redirects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) @@ -37334,7 +40755,7 @@ type Rule struct { // Specifies the days since the initiation of an incomplete multipart upload // that Amazon S3 will wait before permanently removing all parts of the upload. // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) // in the Amazon S3 User Guide. 
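A sketch of consuming the RestoreStatus type defined above from a listing; this assumes an SDK build that also exposes the matching OptionalObjectAttributes request field, and the bucket name is a placeholder:

```go
package examples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// printRestoreStatus requests the optional RestoreStatus attribute on a
// listing and reports per-object restore progress.
func printRestoreStatus(svc *s3.S3) error {
	out, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
		Bucket:                   aws.String("amzn-s3-demo-bucket"), // placeholder
		OptionalObjectAttributes: aws.StringSlice([]string{s3.OptionalObjectAttributesRestoreStatus}),
	})
	if err != nil {
		return err
	}
	for _, obj := range out.Contents {
		rs := obj.RestoreStatus
		if rs == nil {
			continue // no header response: the object has not been restored
		}
		if aws.BoolValue(rs.IsRestoreInProgress) {
			fmt.Printf("%s: restore in progress\n", aws.StringValue(obj.Key))
		} else {
			fmt.Printf("%s: restored copy expires %s\n",
				aws.StringValue(obj.Key), aws.TimeValue(rs.RestoreExpiryDate))
		}
	}
	return nil
}
```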
AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` @@ -37469,9 +40890,8 @@ func (s *Rule) SetTransition(v *Transition) *Rule { type SSEKMS struct { _ struct{} `locationName:"SSE-KMS" type:"structure"` - // Specifies the ID of the Amazon Web Services Key Management Service (Amazon - // Web Services KMS) symmetric customer managed key to use for encrypting inventory - // reports. + // Specifies the ID of the Key Management Service (KMS) symmetric encryption + // customer managed key to use for encrypting inventory reports. // // KeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by SSEKMS's @@ -37775,9 +41195,9 @@ type SelectObjectContentInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The expression that is used to query the object. @@ -38154,21 +41574,26 @@ type ServerSideEncryptionByDefault struct { // Amazon Web Services Key Management Service (KMS) customer Amazon Web Services // KMS key ID to use for the default encryption. This parameter is allowed if - // and only if SSEAlgorithm is set to aws:kms. + // and only if SSEAlgorithm is set to aws:kms or aws:kms:dsse. // - // You can specify the key ID or the Amazon Resource Name (ARN) of the KMS key. - // However, if you are using encryption with cross-account or Amazon Web Services - // service operations you must use a fully qualified KMS key ARN. For more information, - // see Using encryption for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy). - // - // For example: + // You can specify the key ID, key alias, or the Amazon Resource Name (ARN) + // of the KMS key. // // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab // // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // - // Amazon S3 only supports symmetric KMS keys and not asymmetric KMS keys. For - // more information, see Using symmetric and asymmetric keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // * Key Alias: alias/alias-name + // + // If you use a key ID, you can run into a LogDestination undeliverable error + // when creating a VPC flow log. + // + // If you are using encryption with cross-account or Amazon Web Services service + // operations you must use a fully qualified KMS key ARN. For more information, + // see Using encryption for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy). + // + // Amazon S3 only supports symmetric encryption KMS keys. For more information, + // see Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) // in the Amazon Web Services Key Management Service Developer Guide. 
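A sketch of setting the KMSMasterKeyID discussed above as a bucket's default encryption, passing the key as a fully qualified ARN (the safe choice for cross-account use); all names are placeholders:

```go
package examples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// setDefaultKMSEncryption makes SSE-KMS the default for new objects in a bucket.
func setDefaultKMSEncryption(svc *s3.S3) error {
	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm:   aws.String(s3.ServerSideEncryptionAwsKms),
					KMSMasterKeyID: aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder
				},
				BucketKeyEnabled: aws.Bool(true), // reduces KMS request costs; optional
			}},
		},
	})
	return err
}
```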
// // KMSMasterKeyID is a sensitive parameter and its value will be @@ -38347,6 +41772,118 @@ func (s *ServerSideEncryptionRule) SetBucketKeyEnabled(v bool) *ServerSideEncryp return s } +// The established temporary security credentials of the session. +// +// Directory buckets - These session credentials are only supported for the +// authentication and authorization of Zonal endpoint APIs on directory buckets. +type SessionCredentials struct { + _ struct{} `type:"structure"` + + // A unique identifier that's associated with a secret access key. The access + // key ID and the secret access key are used together to sign programmatic Amazon + // Web Services requests cryptographically. + // + // AccessKeyId is a required field + AccessKeyId *string `locationName:"AccessKeyId" type:"string" required:"true"` + + // Temporary security credentials expire after a specified interval. After temporary + // credentials expire, any calls that you make with those credentials will fail. + // So you must generate a new set of temporary credentials. Temporary credentials + // cannot be extended or refreshed beyond the original specified interval. + // + // Expiration is a required field + Expiration *time.Time `locationName:"Expiration" type:"timestamp" required:"true"` + + // A key that's used with the access key ID to cryptographically sign programmatic + // Amazon Web Services requests. Signing a request identifies the sender and + // prevents the request from being altered. + // + // SecretAccessKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SessionCredentials's + // String and GoString methods. + // + // SecretAccessKey is a required field + SecretAccessKey *string `locationName:"SecretAccessKey" type:"string" required:"true" sensitive:"true"` + + // A part of the temporary security credentials. The session token is used to + // validate the temporary security credentials. + // + // SessionToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SessionCredentials's + // String and GoString methods. + // + // SessionToken is a required field + SessionToken *string `locationName:"SessionToken" type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SessionCredentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SessionCredentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *SessionCredentials) SetAccessKeyId(v string) *SessionCredentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *SessionCredentials) SetExpiration(v time.Time) *SessionCredentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *SessionCredentials) SetSecretAccessKey(v string) *SessionCredentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. 
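A sketch of obtaining the SessionCredentials defined above; this assumes an SDK build that exposes the directory-bucket CreateSession operation with a Credentials field on its output, and the bucket name is a placeholder:

```go
package examples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// fetchSessionCredentials requests temporary credentials for Zonal endpoint
// calls on a directory bucket.
func fetchSessionCredentials(svc *s3.S3) error {
	out, err := svc.CreateSession(&s3.CreateSessionInput{
		Bucket: aws.String("amzn-s3-demo-bucket--usw2-az1--x-s3"), // placeholder
	})
	if err != nil {
		return err
	}
	creds := out.Credentials
	// SecretAccessKey and SessionToken are printed as "sensitive" by
	// String/GoString; read the struct fields directly when signing requests.
	fmt.Println(aws.StringValue(creds.AccessKeyId), aws.TimeValue(creds.Expiration))
	return nil
}
```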
+func (s *SessionCredentials) SetSessionToken(v string) *SessionCredentials { + s.SessionToken = &v + return s +} + +// To use simple format for S3 keys for log objects, set SimplePrefix to an +// empty object. +// +// [DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] +type SimplePrefix struct { + _ struct{} `locationName:"SimplePrefix" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SimplePrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SimplePrefix) GoString() string { + return s.String() +} + // A container that describes additional filters for identifying the source // objects that you want to replicate. You can choose to enable or disable the // replication of these objects. Currently, Amazon S3 supports only the filter @@ -38883,6 +42420,49 @@ func (s *TargetGrant) SetPermission(v string) *TargetGrant { return s } +// Amazon S3 key format for log objects. Only one format, PartitionedPrefix +// or SimplePrefix, is allowed. +type TargetObjectKeyFormat struct { + _ struct{} `type:"structure"` + + // Partitioned S3 key for log objects. + PartitionedPrefix *PartitionedPrefix `locationName:"PartitionedPrefix" type:"structure"` + + // To use the simple format for S3 keys for log objects. To specify SimplePrefix + // format, set SimplePrefix to {}. + SimplePrefix *SimplePrefix `locationName:"SimplePrefix" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TargetObjectKeyFormat) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TargetObjectKeyFormat) GoString() string { + return s.String() +} + +// SetPartitionedPrefix sets the PartitionedPrefix field's value. +func (s *TargetObjectKeyFormat) SetPartitionedPrefix(v *PartitionedPrefix) *TargetObjectKeyFormat { + s.PartitionedPrefix = v + return s +} + +// SetSimplePrefix sets the SimplePrefix field's value. +func (s *TargetObjectKeyFormat) SetSimplePrefix(v *SimplePrefix) *TargetObjectKeyFormat { + s.SimplePrefix = v + return s +} + // The S3 Intelligent-Tiering storage class is designed to optimize storage // costs by automatically moving data to the most cost-effective storage access // tier, without additional operational overhead. @@ -38966,7 +42546,8 @@ type TopicConfiguration struct { Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true" enum:"Event"` // Specifies object key name filtering rules. 
For information about key name - // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // filtering, see Configuring event notifications using object key name filtering + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) // in the Amazon S3 User Guide. Filter *NotificationConfigurationFilter `type:"structure"` @@ -39166,19 +42747,33 @@ type UploadPartCopyInput struct { // The bucket name. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. 
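// For orientation, a hedged sketch of where UploadPartCopy sits in the
// multipart flow: CreateMultipartUpload against the destination bucket
// yields the UploadId that every part-copy request below must carry
// (bucket and key names are illustrative):
//
//	mpu, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
//		Bucket: aws.String("dest-bucket"),
//		Key:    aws.String("reports/combined.pdf"),
//	})
//	if err != nil {
//		return err
//	}
//	uploadID := aws.StringValue(mpu.UploadId)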
// // Bucket is a required field @@ -39200,34 +42795,81 @@ type UploadPartCopyInput struct { // my-access-point owned by account 123456789012 in Region us-west-2, use // the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. // The value must be URL encoded. Amazon S3 supports copy operations using - // access points only when the source and destination buckets are in the - // same Amazon Web Services Region. Alternatively, for objects accessed through - // Amazon S3 on Outposts, specify the ARN of the object as accessed in the - // format arn:aws:s3-outposts:::outpost//object/. + // Access points only when the source and destination buckets are in the + // same Amazon Web Services Region. Access points are not supported by directory + // buckets. Alternatively, for objects accessed through Amazon S3 on Outposts, + // specify the ARN of the object as accessed in the format arn:aws:s3-outposts:::outpost//object/. // For example, to copy the object reports/january.pdf through outpost my-outpost // owned by account 123456789012 in Region us-west-2, use the URL encoding // of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. // The value must be URL-encoded. // - // To copy a specific version of an object, append ?versionId= to - // the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). - // If you don't specify a version ID, Amazon S3 copies the latest version of - // the source object. + // If your bucket has versioning enabled, you could have multiple versions of + // the same object. By default, x-amz-copy-source identifies the current version + // of the source object to copy. To copy a specific version of the source object + // to copy, append ?versionId= to the x-amz-copy-source request + // header (for example, x-amz-copy-source: /awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). + // + // If the current version is a delete marker and you don't specify a versionId + // in the x-amz-copy-source request header, Amazon S3 returns a 404 Not Found + // error, because the object does not exist. If you specify versionId in the + // x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns + // an HTTP 400 Bad Request error, because you are not allowed to specify a delete + // marker as a version for the x-amz-copy-source. + // + // Directory buckets - S3 Versioning isn't enabled and supported for directory + // buckets. // // CopySource is a required field CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` // Copies the object if its entity tag (ETag) matches the specified tag. + // + // If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // headers are present in the request as follows: + // + // x-amz-copy-source-if-match condition evaluates to true, and; + // + // x-amz-copy-source-if-unmodified-since condition evaluates to false; + // + // Amazon S3 returns 200 OK and copies the data. CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` // Copies the object if it has been modified since the specified time. 
+ // + // If both of the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since + // headers are present in the request as follows: + // + // x-amz-copy-source-if-none-match condition evaluates to false, and; + // + // x-amz-copy-source-if-modified-since condition evaluates to true; + // + // Amazon S3 returns 412 Precondition Failed response code. CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"` // Copies the object if its entity tag (ETag) is different than the specified // ETag. + // + // If both of the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since + // headers are present in the request as follows: + // + // x-amz-copy-source-if-none-match condition evaluates to false, and; + // + // x-amz-copy-source-if-modified-since condition evaluates to true; + // + // Amazon S3 returns 412 Precondition Failed response code. CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` // Copies the object if it hasn't been modified since the specified time. + // + // If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // headers are present in the request as follows: + // + // x-amz-copy-source-if-match condition evaluates to true, and; + // + // x-amz-copy-source-if-unmodified-since condition evaluates to false; + // + // Amazon S3 returns 200 OK and copies the data. CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` // The range of bytes to copy from the source object. The range value must use @@ -39239,12 +42881,18 @@ type UploadPartCopyInput struct { // Specifies the algorithm to use when decrypting the source object (for example, // AES256). + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one // that was used when the source object was created. // + // This functionality is not supported when the source object is in a directory + // bucket. + // // CopySourceSSECustomerKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by UploadPartCopyInput's // String and GoString methods. @@ -39253,16 +42901,19 @@ type UploadPartCopyInput struct { // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the // encryption key was transmitted without error. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` - // The account ID of the expected destination bucket owner. If the destination - // bucket is owned by a different account, the request fails with the HTTP status - // code 403 Forbidden (access denied). + // The account ID of the expected destination bucket owner. If the account ID + // that you provide does not match the actual owner of the destination bucket, + // the request fails with the HTTP status code 403 Forbidden (access denied). 
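// A hedged sketch that combines the conditional headers documented here
// with a versioned CopySource; the ETag is illustrative and uploadID comes
// from the earlier CreateMultipartUpload sketch:
//
//	out, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
//		Bucket:            aws.String("dest-bucket"),
//		Key:               aws.String("reports/combined.pdf"),
//		UploadId:          aws.String(uploadID),
//		PartNumber:        aws.Int64(1),
//		CopySource:        aws.String("awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893"),
//		CopySourceIfMatch: aws.String(`"686897696a7c876b7e"`),
//		CopySourceRange:   aws.String("bytes=0-5242879"),
//	})
//	// on success, out.CopyPartResult carries the new part's ETag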
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // The account ID of the expected source bucket owner. If the source bucket - // is owned by a different account, the request fails with the HTTP status code - // 403 Forbidden (access denied). + // The account ID of the expected source bucket owner. If the account ID that + // you provide does not match the actual owner of the source bucket, the request + // fails with the HTTP status code 403 Forbidden (access denied). ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"` // Object key for which the multipart upload was initiated. @@ -39277,14 +42928,20 @@ type UploadPartCopyInput struct { PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting @@ -39294,6 +42951,9 @@ type UploadPartCopyInput struct { // header. This must be the same encryption key specified in the initiate multipart // upload request. // + // This functionality is not supported when the destination bucket is a directory + // bucket. + // // SSECustomerKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by UploadPartCopyInput's // String and GoString methods. @@ -39302,6 +42962,9 @@ type UploadPartCopyInput struct { // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the // encryption key was transmitted without error. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Upload ID identifying the multipart upload whose part is being copied. @@ -39525,7 +43188,9 @@ type UploadPartCopyOutput struct { _ struct{} `type:"structure" payload:"CopyPartResult"` // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). 
+ // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Container for all response elements. @@ -39533,33 +43198,46 @@ type UploadPartCopyOutput struct { // The version of the source object that was copied, if you have enabled versioning // on the source bucket. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity + // the response will include this header to provide the round-trip message integrity // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for - // the object. + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by UploadPartCopyOutput's // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The server-side encryption algorithm used when storing this object in Amazon + // The server-side encryption algorithm used when you store this object in Amazon // S3 (for example, AES256, aws:kms). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` } @@ -39637,30 +43315,44 @@ type UploadPartInput struct { // The name of the bucket to which the multipart upload was initiated. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
+ // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. 
When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm @@ -39710,11 +43402,13 @@ type UploadPartInput struct { // The base64-encoded 128-bit MD5 digest of the part data. This parameter is // auto-populated when using the command from the CLI. This parameter is required // if object lock parameters are specified. + // + // This functionality is not supported for directory buckets. ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // Object key for which the multipart upload was initiated. @@ -39729,14 +43423,19 @@ type UploadPartInput struct { PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting @@ -39746,6 +43445,8 @@ type UploadPartInput struct { // header. This must be the same encryption key specified in the initiate multipart // upload request. // + // This functionality is not supported for directory buckets. + // // SSECustomerKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by UploadPartInput's // String and GoString methods. @@ -39754,6 +43455,8 @@ type UploadPartInput struct { // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 
// Amazon S3 uses this header for a message integrity check to ensure that the // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Upload ID identifying the multipart upload whose part is being uploaded. @@ -39955,38 +43658,48 @@ type UploadPartOutput struct { _ struct{} `type:"structure"` // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). + // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` // The base64-encoded, 32-bit CRC32C checksum of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. 
When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` // The base64-encoded, 256-bit SHA-256 digest of the object. This will only - // be present if it was uploaded with the object. With multipart uploads, this - // may not be a checksum value of the object. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` @@ -39995,29 +43708,39 @@ type UploadPartOutput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity + // the response will include this header to provide the round-trip message integrity // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key was used for the - // object. + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by UploadPartOutput's // String and GoString methods. 
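// A hedged sketch of round-tripping one of the part checksums described
// above: precompute the SHA-256 of the part body, send it as
// ChecksumSHA256 on UploadPartInput, and compare it against the value
// echoed back in this output type (assumes the "bytes", "crypto/sha256",
// and "encoding/base64" imports; names are illustrative):
//
//	sum := sha256.Sum256(part)
//	want := base64.StdEncoding.EncodeToString(sum[:])
//	out, err := svc.UploadPart(&s3.UploadPartInput{
//		Bucket:         aws.String("dest-bucket"),
//		Key:            aws.String("reports/combined.pdf"),
//		UploadId:       aws.String(uploadID),
//		PartNumber:     aws.Int64(1),
//		Body:           bytes.NewReader(part),
//		ChecksumSHA256: aws.String(want),
//	})
//	// on success, aws.StringValue(out.ChecksumSHA256) == want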
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The server-side encryption algorithm used when storing this object in Amazon + // The server-side encryption algorithm used when you store this object in Amazon // S3 (for example, AES256, aws:kms). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` } @@ -40401,6 +44124,8 @@ type WriteGetObjectResponseInput struct { // If present, indicates that the requester was successfully charged for the // request. + // + // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-fwd-header-x-amz-request-charged" type:"string" enum:"RequestCharged"` // Route prefix to the HTTP URL generated. @@ -40427,9 +44152,9 @@ type WriteGetObjectResponseInput struct { // server-side encryption with customer-provided encryption keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html). SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for - // stored in Amazon S3 object. + // If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the Amazon + // Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption + // customer managed key that was used for stored in Amazon S3 object. // // SSEKMSKeyId is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by WriteGetObjectResponseInput's @@ -40441,9 +44166,7 @@ type WriteGetObjectResponseInput struct { ServerSideEncryption *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // The integer status code for an HTTP response of a corresponding GetObject - // request. - // - // Status Codes + // request. The following is a list of status codes. 
// // * 200 - OK // @@ -40879,6 +44602,9 @@ const ( // BucketLocationConstraintApSouth1 is a BucketLocationConstraint enum value BucketLocationConstraintApSouth1 = "ap-south-1" + // BucketLocationConstraintApSouth2 is a BucketLocationConstraint enum value + BucketLocationConstraintApSouth2 = "ap-south-2" + // BucketLocationConstraintApSoutheast1 is a BucketLocationConstraint enum value BucketLocationConstraintApSoutheast1 = "ap-southeast-1" @@ -40909,6 +44635,9 @@ const ( // BucketLocationConstraintEuSouth1 is a BucketLocationConstraint enum value BucketLocationConstraintEuSouth1 = "eu-south-1" + // BucketLocationConstraintEuSouth2 is a BucketLocationConstraint enum value + BucketLocationConstraintEuSouth2 = "eu-south-2" + // BucketLocationConstraintEuWest1 is a BucketLocationConstraint enum value BucketLocationConstraintEuWest1 = "eu-west-1" @@ -40949,6 +44678,7 @@ func BucketLocationConstraint_Values() []string { BucketLocationConstraintApNortheast2, BucketLocationConstraintApNortheast3, BucketLocationConstraintApSouth1, + BucketLocationConstraintApSouth2, BucketLocationConstraintApSoutheast1, BucketLocationConstraintApSoutheast2, BucketLocationConstraintApSoutheast3, @@ -40959,6 +44689,7 @@ func BucketLocationConstraint_Values() []string { BucketLocationConstraintEuCentral1, BucketLocationConstraintEuNorth1, BucketLocationConstraintEuSouth1, + BucketLocationConstraintEuSouth2, BucketLocationConstraintEuWest1, BucketLocationConstraintEuWest2, BucketLocationConstraintEuWest3, @@ -40992,6 +44723,18 @@ func BucketLogsPermission_Values() []string { } } +const ( + // BucketTypeDirectory is a BucketType enum value + BucketTypeDirectory = "Directory" +) + +// BucketType_Values returns all elements of the BucketType enum +func BucketType_Values() []string { + return []string{ + BucketTypeDirectory, + } +} + const ( // BucketVersioningStatusEnabled is a BucketVersioningStatus enum value BucketVersioningStatusEnabled = "Enabled" @@ -41064,6 +44807,18 @@ func CompressionType_Values() []string { } } +const ( + // DataRedundancySingleAvailabilityZone is a DataRedundancy enum value + DataRedundancySingleAvailabilityZone = "SingleAvailabilityZone" +) + +// DataRedundancy_Values returns all elements of the DataRedundancy enum +func DataRedundancy_Values() []string { + return []string{ + DataRedundancySingleAvailabilityZone, + } +} + const ( // DeleteMarkerReplicationStatusEnabled is a DeleteMarkerReplicationStatus enum value DeleteMarkerReplicationStatusEnabled = "Enabled" @@ -41081,8 +44836,8 @@ func DeleteMarkerReplicationStatus_Values() []string { } // Requests Amazon S3 to encode the object keys in the response and specifies -// the encoding method to use. An object key may contain any Unicode character; -// however, XML 1.0 parser cannot parse some characters, such as characters +// the encoding method to use. An object key can contain any Unicode character; +// however, the XML 1.0 parser cannot parse some characters, such as characters // with an ASCII value from 0 to 10. For characters that are not supported in // XML 1.0, you can add this parameter to request that Amazon S3 encode the // keys in the response. 
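// A minimal hedged sketch of the EncodingType behavior described above:
// request URL-encoded keys in a listing and decode them client-side.
// Assumes the aws, s3, "fmt", and "net/url" imports; the bucket name is
// illustrative.
func listWithURLEncodedKeys(svc *s3.S3) error {
	out, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
		Bucket:       aws.String("my-bucket"),
		EncodingType: aws.String(s3.EncodingTypeUrl),
	})
	if err != nil {
		return err
	}
	for _, obj := range out.Contents {
		// S3 returned the key percent-encoded; undo that here.
		key, uerr := url.QueryUnescape(aws.StringValue(obj.Key))
		if uerr != nil {
			return uerr
		}
		fmt.Println(key)
	}
	return nil
}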
@@ -41418,6 +45173,12 @@ const ( // InventoryOptionalFieldChecksumAlgorithm is a InventoryOptionalField enum value InventoryOptionalFieldChecksumAlgorithm = "ChecksumAlgorithm" + + // InventoryOptionalFieldObjectAccessControlList is a InventoryOptionalField enum value + InventoryOptionalFieldObjectAccessControlList = "ObjectAccessControlList" + + // InventoryOptionalFieldObjectOwner is a InventoryOptionalField enum value + InventoryOptionalFieldObjectOwner = "ObjectOwner" ) // InventoryOptionalField_Values returns all elements of the InventoryOptionalField enum @@ -41436,6 +45197,8 @@ func InventoryOptionalField_Values() []string { InventoryOptionalFieldIntelligentTieringAccessTier, InventoryOptionalFieldBucketKeyStatus, InventoryOptionalFieldChecksumAlgorithm, + InventoryOptionalFieldObjectAccessControlList, + InventoryOptionalFieldObjectOwner, } } @@ -41455,6 +45218,18 @@ func JSONType_Values() []string { } } +const ( + // LocationTypeAvailabilityZone is a LocationType enum value + LocationTypeAvailabilityZone = "AvailabilityZone" +) + +// LocationType_Values returns all elements of the LocationType enum +func LocationType_Values() []string { + return []string{ + LocationTypeAvailabilityZone, + } +} + const ( // MFADeleteEnabled is a MFADelete enum value MFADeleteEnabled = "Enabled" @@ -41655,8 +45430,19 @@ func ObjectLockRetentionMode_Values() []string { // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer // affect permissions. The bucket owner automatically owns and has full control // over every object in the bucket. The bucket only accepts PUT requests that -// don't specify an ACL or bucket owner full control ACLs, such as the bucket-owner-full-control -// canned ACL or an equivalent form of this ACL expressed in the XML format. +// don't specify an ACL or specify bucket owner full control ACLs (such as the +// predefined bucket-owner-full-control canned ACL or a custom ACL in XML format +// that grants the same permissions). +// +// By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are disabled. +// We recommend keeping ACLs disabled, except in uncommon use cases where you +// must control access for each object individually. For more information about +// S3 Object Ownership, see Controlling ownership of objects and disabling ACLs +// for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. +// +// This functionality is not supported for directory buckets. Directory buckets +// use the bucket owner enforced setting for S3 Object Ownership. 
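// A minimal hedged sketch of pinning the recommended setting described
// above via PutBucketOwnershipControls (bucket name illustrative):
//
//	_, err := svc.PutBucketOwnershipControls(&s3.PutBucketOwnershipControlsInput{
//		Bucket: aws.String("my-bucket"),
//		OwnershipControls: &s3.OwnershipControls{
//			Rules: []*s3.OwnershipControlsRule{
//				{ObjectOwnership: aws.String(s3.ObjectOwnershipBucketOwnerEnforced)},
//			},
//		},
//	})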
const ( // ObjectOwnershipBucketOwnerPreferred is a ObjectOwnership enum value ObjectOwnershipBucketOwnerPreferred = "BucketOwnerPreferred" @@ -41704,6 +45490,12 @@ const ( // ObjectStorageClassGlacierIr is a ObjectStorageClass enum value ObjectStorageClassGlacierIr = "GLACIER_IR" + + // ObjectStorageClassSnow is a ObjectStorageClass enum value + ObjectStorageClassSnow = "SNOW" + + // ObjectStorageClassExpressOnezone is a ObjectStorageClass enum value + ObjectStorageClassExpressOnezone = "EXPRESS_ONEZONE" ) // ObjectStorageClass_Values returns all elements of the ObjectStorageClass enum @@ -41718,6 +45510,8 @@ func ObjectStorageClass_Values() []string { ObjectStorageClassDeepArchive, ObjectStorageClassOutposts, ObjectStorageClassGlacierIr, + ObjectStorageClassSnow, + ObjectStorageClassExpressOnezone, } } @@ -41733,6 +45527,18 @@ func ObjectVersionStorageClass_Values() []string { } } +const ( + // OptionalObjectAttributesRestoreStatus is a OptionalObjectAttributes enum value + OptionalObjectAttributesRestoreStatus = "RestoreStatus" +) + +// OptionalObjectAttributes_Values returns all elements of the OptionalObjectAttributes enum +func OptionalObjectAttributes_Values() []string { + return []string{ + OptionalObjectAttributesRestoreStatus, + } +} + const ( // OwnerOverrideDestination is a OwnerOverride enum value OwnerOverrideDestination = "Destination" @@ -41745,6 +45551,22 @@ func OwnerOverride_Values() []string { } } +const ( + // PartitionDateSourceEventTime is a PartitionDateSource enum value + PartitionDateSourceEventTime = "EventTime" + + // PartitionDateSourceDeliveryTime is a PartitionDateSource enum value + PartitionDateSourceDeliveryTime = "DeliveryTime" +) + +// PartitionDateSource_Values returns all elements of the PartitionDateSource enum +func PartitionDateSource_Values() []string { + return []string{ + PartitionDateSourceEventTime, + PartitionDateSourceDeliveryTime, + } +} + const ( // PayerRequester is a Payer enum value PayerRequester = "Requester" @@ -41865,6 +45687,9 @@ const ( // ReplicationStatusReplica is a ReplicationStatus enum value ReplicationStatusReplica = "REPLICA" + + // ReplicationStatusCompleted is a ReplicationStatus enum value + ReplicationStatusCompleted = "COMPLETED" ) // ReplicationStatus_Values returns all elements of the ReplicationStatus enum @@ -41874,6 +45699,7 @@ func ReplicationStatus_Values() []string { ReplicationStatusPending, ReplicationStatusFailed, ReplicationStatusReplica, + ReplicationStatusCompleted, } } @@ -41895,6 +45721,8 @@ func ReplicationTimeStatus_Values() []string { // If present, indicates that the requester was successfully charged for the // request. +// +// This functionality is not supported for directory buckets. const ( // RequestChargedRequester is a RequestCharged enum value RequestChargedRequester = "requester" @@ -41908,10 +45736,14 @@ func RequestCharged_Values() []string { } // Confirms that the requester knows that they will be charged for the request. -// Bucket owners need not specify this parameter in their requests. For information -// about downloading objects from Requester Pays buckets, see Downloading Objects +// Bucket owners need not specify this parameter in their requests. If either +// the source or destination S3 bucket has Requester Pays enabled, the requester +// will pay for corresponding charges to copy the object. 
For information about +// downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. +// +// This functionality is not supported for directory buckets. const ( // RequestPayerRequester is a RequestPayer enum value RequestPayerRequester = "requester" @@ -41942,6 +45774,9 @@ const ( // ServerSideEncryptionAwsKms is a ServerSideEncryption enum value ServerSideEncryptionAwsKms = "aws:kms" + + // ServerSideEncryptionAwsKmsDsse is a ServerSideEncryption enum value + ServerSideEncryptionAwsKmsDsse = "aws:kms:dsse" ) // ServerSideEncryption_Values returns all elements of the ServerSideEncryption enum @@ -41949,6 +45784,23 @@ func ServerSideEncryption_Values() []string { return []string{ ServerSideEncryptionAes256, ServerSideEncryptionAwsKms, + ServerSideEncryptionAwsKmsDsse, + } +} + +const ( + // SessionModeReadOnly is a SessionMode enum value + SessionModeReadOnly = "ReadOnly" + + // SessionModeReadWrite is a SessionMode enum value + SessionModeReadWrite = "ReadWrite" +) + +// SessionMode_Values returns all elements of the SessionMode enum +func SessionMode_Values() []string { + return []string{ + SessionModeReadOnly, + SessionModeReadWrite, } } @@ -41995,6 +45847,12 @@ const ( // StorageClassGlacierIr is a StorageClass enum value StorageClassGlacierIr = "GLACIER_IR" + + // StorageClassSnow is a StorageClass enum value + StorageClassSnow = "SNOW" + + // StorageClassExpressOnezone is a StorageClass enum value + StorageClassExpressOnezone = "EXPRESS_ONEZONE" ) // StorageClass_Values returns all elements of the StorageClass enum @@ -42009,6 +45867,8 @@ func StorageClass_Values() []string { StorageClassDeepArchive, StorageClassOutposts, StorageClassGlacierIr, + StorageClassSnow, + StorageClassExpressOnezone, } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go index cd6a2e8a..8a67333a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go @@ -25,6 +25,15 @@ const ( // "InvalidObjectState". // // Object is archived and inaccessible until restored. + // + // If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval + // storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering + // Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, + // before you can retrieve the object you must first restore a copy using RestoreObject + // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). + // Otherwise, this operation returns an InvalidObjectState error. For information + // about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) + // in the Amazon S3 User Guide. 
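// A hedged sketch of reacting to this error code from a GetObject call:
// detect it via awserr and start a restore (the tier, day count, bucket,
// and key are illustrative; assumes "github.com/aws/aws-sdk-go/aws/awserr"):
//
//	_, err := svc.GetObject(&s3.GetObjectInput{
//		Bucket: aws.String(bucket),
//		Key:    aws.String(key),
//	})
//	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeInvalidObjectState {
//		_, err = svc.RestoreObject(&s3.RestoreObjectInput{
//			Bucket: aws.String(bucket),
//			Key:    aws.String(key),
//			RestoreRequest: &s3.RestoreRequest{
//				Days:                 aws.Int64(1),
//				GlacierJobParameters: &s3.GlacierJobParameters{Tier: aws.String(s3.TierStandard)},
//			},
//		})
//	}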
ErrCodeInvalidObjectState = "InvalidObjectState" // ErrCodeNoSuchBucket for service response error code diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go index 339019d3..70feffab 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go @@ -25,5 +25,5 @@ func add100Continue(r *request.Request) { return } - r.HTTPRequest.Header.Set("Expect", "100-Continue") + r.HTTPRequest.Header.Set("Expect", "100-continue") } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go index 6d679a29..d13b4617 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go @@ -80,6 +80,10 @@ type S3API interface { CreateMultipartUploadWithContext(aws.Context, *s3.CreateMultipartUploadInput, ...request.Option) (*s3.CreateMultipartUploadOutput, error) CreateMultipartUploadRequest(*s3.CreateMultipartUploadInput) (*request.Request, *s3.CreateMultipartUploadOutput) + CreateSession(*s3.CreateSessionInput) (*s3.CreateSessionOutput, error) + CreateSessionWithContext(aws.Context, *s3.CreateSessionInput, ...request.Option) (*s3.CreateSessionOutput, error) + CreateSessionRequest(*s3.CreateSessionInput) (*request.Request, *s3.CreateSessionOutput) + DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error) DeleteBucketWithContext(aws.Context, *s3.DeleteBucketInput, ...request.Option) (*s3.DeleteBucketOutput, error) DeleteBucketRequest(*s3.DeleteBucketInput) (*request.Request, *s3.DeleteBucketOutput) @@ -300,6 +304,13 @@ type S3API interface { ListBucketsWithContext(aws.Context, *s3.ListBucketsInput, ...request.Option) (*s3.ListBucketsOutput, error) ListBucketsRequest(*s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput) + ListDirectoryBuckets(*s3.ListDirectoryBucketsInput) (*s3.ListDirectoryBucketsOutput, error) + ListDirectoryBucketsWithContext(aws.Context, *s3.ListDirectoryBucketsInput, ...request.Option) (*s3.ListDirectoryBucketsOutput, error) + ListDirectoryBucketsRequest(*s3.ListDirectoryBucketsInput) (*request.Request, *s3.ListDirectoryBucketsOutput) + + ListDirectoryBucketsPages(*s3.ListDirectoryBucketsInput, func(*s3.ListDirectoryBucketsOutput, bool) bool) error + ListDirectoryBucketsPagesWithContext(aws.Context, *s3.ListDirectoryBucketsInput, func(*s3.ListDirectoryBucketsOutput, bool) bool, ...request.Option) error + ListMultipartUploads(*s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) ListMultipartUploadsWithContext(aws.Context, *s3.ListMultipartUploadsInput, ...request.Option) (*s3.ListMultipartUploadsOutput, error) ListMultipartUploadsRequest(*s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go index 1cd115f4..f9c6e786 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go @@ -23,9 +23,32 @@ type UploadInput struct { _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"` // The canned ACL to apply to the object. 
For more information, see Canned ACL - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) + // in the Amazon S3 User Guide. // - // This action is not supported by Amazon S3 on Outposts. + // When adding a new object, you can use headers to grant ACL-based permissions + // to individual Amazon Web Services accounts or to predefined groups defined + // by Amazon S3. These permissions are then added to the ACL on the object. + // By default, all objects are private. Only the owner has full access control. + // For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html) + // in the Amazon S3 User Guide. + // + // If the bucket that you're uploading objects to uses the bucket owner enforced + // setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. + // Buckets that use this setting only accept PUT requests that don't specify + // an ACL or PUT requests that specify bucket owner full control ACLs, such + // as the bucket-owner-full-control canned ACL or an equivalent form of this + // ACL expressed in the XML format. PUT requests that contain other ACLs (for + // example, custom grants to certain Amazon Web Services accounts) fail and + // return a 400 error with the error code AccessControlListNotSupported. For + // more information, see Controlling ownership of objects and disabling ACLs + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` // The readable body payload to send to S3. @@ -33,31 +56,47 @@ type UploadInput struct { // The bucket name to which the PUT action was initiated. // - // When using this action with an access point, you must direct requests to - // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. 
For more // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // - // When using this action with Amazon S3 on Outposts, you must direct requests - // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this action with S3 on Outposts through the Amazon Web Services SDKs, - // you provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption - // with server-side encryption using AWS KMS (SSE-KMS). Setting this header - // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with - // SSE-KMS. + // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). + // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for + // object encryption with SSE-KMS. // // Specifying this header with a PUT action doesn’t affect bucket-level settings // for S3 Bucket Key. + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Can be used to specify caching behavior along the request/reply chain. For @@ -65,16 +104,33 @@ type UploadInput struct { // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not - // using the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with - // the HTTP status code 400 Bad Request. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon + // S3 fails the request with the HTTP status code 400 Bad Request. 
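// A minimal hedged sketch with a precomputed SHA-256 value (per the note
// further below, the v1 SDK does not compute payload checksums itself;
// assumes "bytes", "crypto/sha256", and "encoding/base64"; names are
// illustrative):
//
//	sum := sha256.Sum256(body)
//	_, err := uploader.Upload(&s3manager.UploadInput{
//		Bucket:            aws.String("my-bucket"),
//		Key:               aws.String("object-key"),
//		Body:              bytes.NewReader(body),
//		ChecksumAlgorithm: aws.String(s3.ChecksumAlgorithmSha256),
//		ChecksumSHA256:    aws.String(base64.StdEncoding.EncodeToString(sum[:])),
//	})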
+ // + // For the x-amz-checksum-algorithm header, replace algorithm with the supported + // algorithm from the following list: + // + // * CRC32 + // + // * CRC32C + // + // * SHA1 + // + // * SHA256 + // + // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. // - // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm - // parameter. + // If the individual checksum value you provide through x-amz-checksum-algorithm + // doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, + // Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum + // algorithm that matches the provided value in x-amz-checksum-algorithm . + // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. // // The AWS SDK for Go v1 does not support automatic computing request payload // checksum. This feature is available in the AWS SDK for Go v2. If a value @@ -111,13 +167,13 @@ type UploadInput struct { ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` // Specifies presentational information for the object. For more information, - // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). + // see https://www.rfc-editor.org/rfc/rfc6266#section-4 (https://www.rfc-editor.org/rfc/rfc6266#section-4). ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` // Specifies what content encodings have been applied to the object and thus // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 - // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11). + // by the Content-Type header field. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding + // (https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding). ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` // The language the content is in. @@ -130,41 +186,56 @@ type UploadInput struct { // integrity check. For more information about REST request authentication, // see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). // + // The Content-MD5 header is required for any request to upload an object with + // a retention period configured using Amazon S3 Object Lock. For more information + // about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // // If the ContentMD5 is provided for a multipart upload, it will be ignored. // Objects that will be uploaded in a single part, the ContentMD5 will be used. ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` // A standard MIME type describing the format of the contents. For more information, - // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17). 
+ // see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type). ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The date and time at which the object is no longer cacheable. For more information, - // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21). + // see https://www.rfc-editor.org/rfc/rfc7234#section-5.3 (https://www.rfc-editor.org/rfc/rfc7234#section-5.3). Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. // - // This action is not supported by Amazon S3 on Outposts. + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` // Allows grantee to read the object data and its metadata. // - // This action is not supported by Amazon S3 on Outposts. + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` // Allows grantee to read the object ACL. // - // This action is not supported by Amazon S3 on Outposts. + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` // Allows grantee to write the ACL for the applicable object. // - // This action is not supported by Amazon S3 on Outposts. + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` // Object key for which the PUT action was initiated. @@ -176,25 +247,37 @@ type UploadInput struct { Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` // Specifies whether a legal hold will be applied to this object. For more information - // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` // The Object Lock mode that you want to apply to this object. + // + // This functionality is not supported for directory buckets. 
ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` // The date and time when you want this object's Object Lock to expire. Must // be formatted as a timestamp parameter. + // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting @@ -202,48 +285,80 @@ type UploadInput struct { // S3 does not store the encryption key. The key must be appropriate for use // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. + // + // This functionality is not supported for directory buckets. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Specifies the Amazon Web Services KMS Encryption Context to use for object // encryption. The value of this header is a base64-encoded UTF-8 string holding - // JSON with the encryption context key-value pairs. + // JSON with the encryption context key-value pairs. This value is stored as + // object metadata and automatically gets passed on to Amazon Web Services KMS + // for future GetObject or CopyObject operations on this object. This value + // must be explicitly added during CopyObject operations. + // + // This functionality is not supported for directory buckets. 
SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // If x-amz-server-side-encryption is present and has the value of aws:kms, - // this header specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetrical customer managed key that was used - // for the object. If you specify x-amz-server-side-encryption:aws:kms, but - // do not providex-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses - // the Amazon Web Services managed key to protect the data. If the KMS key does - // not exist in the same account issuing the command, you must use the full - // ARN and not just the ID. + // If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, + // this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key Management + // Service (KMS) symmetric encryption customer managed key that was used for + // the object. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, + // but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 + // uses the Amazon Web Services managed key (aws/s3) to protect the data. If + // the KMS key does not exist in the same account that's issuing the command, + // you must use the full ARN and not just the ID. + // + // This functionality is not supported for directory buckets. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The server-side encryption algorithm used when storing this object in Amazon - // S3 (for example, AES256, aws:kms). + // The server-side encryption algorithm that was used when you store this object + // in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse). + // + // General purpose buckets - You have four mutually exclusive options to protect + // data using server-side encryption in Amazon S3, depending on how you choose + // to manage the encryption keys. Specifically, the encryption key options are + // Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or + // DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with + // server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. + // You can optionally tell Amazon S3 to encrypt data at rest by using server-side + // encryption with other key options. For more information, see Using Server-Side + // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) + // in the Amazon S3 User Guide. + // + // Directory buckets - For directory buckets, only the server-side encryption + // with Amazon S3 managed keys (SSE-S3) (AES256) value is supported. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // By default, Amazon S3 uses the STANDARD Storage Class to store newly created // objects. The STANDARD storage class provides high durability and high availability. // Depending on performance needs, you can specify a different Storage Class. - // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, - // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) // in the Amazon S3 User Guide.
+ // + // * For directory buckets, only the S3 Express One Zone storage class is + // supported to store newly created objects. + // + // * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` // The tag-set for the object. The tag-set must be encoded as URL Query parameters. // (For example, "Key1=Value1") + // + // This functionality is not supported for directory buckets. Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` // If the bucket is configured as a website, redirects requests for this object // to another object in the same bucket or to an external URL. Amazon S3 stores // the value of this header in the object metadata. For information about object - // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html). + // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html) + // in the Amazon S3 User Guide. // // In the following example, the request header sets the redirect to an object // (anotherPage.html) in the same bucket: @@ -257,6 +372,9 @@ type UploadInput struct { // // For more information about website hosting in Amazon S3, see Hosting Websites // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) - // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html). + // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } diff --git a/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/api.go b/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/api.go index c84e8c6d..b744f45c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/api.go @@ -13,6 +13,189 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) +const opBatchGetSecretValue = "BatchGetSecretValue" + +// BatchGetSecretValueRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetSecretValue operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchGetSecretValue for more information on using the BatchGetSecretValue +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the BatchGetSecretValueRequest method. 
+// req, resp := client.BatchGetSecretValueRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/BatchGetSecretValue +func (c *SecretsManager) BatchGetSecretValueRequest(input *BatchGetSecretValueInput) (req *request.Request, output *BatchGetSecretValueOutput) { + op := &request.Operation{ + Name: opBatchGetSecretValue, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &BatchGetSecretValueInput{} + } + + output = &BatchGetSecretValueOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchGetSecretValue API operation for AWS Secrets Manager. +// +// Retrieves the contents of the encrypted fields SecretString or SecretBinary +// for up to 20 secrets. To retrieve a single secret, call GetSecretValue. +// +// To choose which secrets to retrieve, you can specify a list of secrets by +// name or ARN, or you can use filters. If Secrets Manager encounters errors +// such as AccessDeniedException while attempting to retrieve any of the secrets, +// you can see the errors in Errors in the response. +// +// Secrets Manager generates CloudTrail GetSecretValue log entries for each +// secret you request when you call this action. Do not include sensitive information +// in request parameters because it might be logged. For more information, see +// Logging Secrets Manager events with CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html). +// +// Required permissions: secretsmanager:BatchGetSecretValue, and you must have +// secretsmanager:GetSecretValue for each secret. If you use filters, you must +// also have secretsmanager:ListSecrets. If the secrets are encrypted using +// customer-managed keys instead of the Amazon Web Services managed key aws/secretsmanager, +// then you also need kms:Decrypt permissions for the keys. For more information, +// see IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) +// and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Secrets Manager's +// API operation BatchGetSecretValue for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// Secrets Manager can't find the resource that you asked for. +// +// - InvalidParameterException +// The parameter name or value is invalid. +// +// - InvalidRequestException +// A parameter value is not valid for the current state of the resource. +// +// Possible causes: +// +// - The secret is scheduled for deletion. +// +// - You tried to enable rotation on a secret that doesn't already have a +// Lambda function ARN configured and you didn't include such an ARN as a +// parameter in this call. +// +// - The secret is managed by another service, and you must use that service +// to update it. 
For more information, see Secrets managed by other Amazon +// Web Services services (https://docs.aws.amazon.com/secretsmanager/latest/userguide/service-linked-secrets.html). +// +// - DecryptionFailure +// Secrets Manager can't decrypt the protected secret text using the provided +// KMS key. +// +// - InternalServiceError +// An error occurred on the server side. +// +// - InvalidNextTokenException +// The NextToken value is invalid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/BatchGetSecretValue +func (c *SecretsManager) BatchGetSecretValue(input *BatchGetSecretValueInput) (*BatchGetSecretValueOutput, error) { + req, out := c.BatchGetSecretValueRequest(input) + return out, req.Send() +} + +// BatchGetSecretValueWithContext is the same as BatchGetSecretValue with the addition of +// the ability to pass a context and additional request options. +// +// See BatchGetSecretValue for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecretsManager) BatchGetSecretValueWithContext(ctx aws.Context, input *BatchGetSecretValueInput, opts ...request.Option) (*BatchGetSecretValueOutput, error) { + req, out := c.BatchGetSecretValueRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// BatchGetSecretValuePages iterates over the pages of a BatchGetSecretValue operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See BatchGetSecretValue method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a BatchGetSecretValue operation. +// pageNum := 0 +// err := client.BatchGetSecretValuePages(params, +// func(page *secretsmanager.BatchGetSecretValueOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *SecretsManager) BatchGetSecretValuePages(input *BatchGetSecretValueInput, fn func(*BatchGetSecretValueOutput, bool) bool) error { + return c.BatchGetSecretValuePagesWithContext(aws.BackgroundContext(), input, fn) +} + +// BatchGetSecretValuePagesWithContext same as BatchGetSecretValuePages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecretsManager) BatchGetSecretValuePagesWithContext(ctx aws.Context, input *BatchGetSecretValueInput, fn func(*BatchGetSecretValueOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *BatchGetSecretValueInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.BatchGetSecretValueRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*BatchGetSecretValueOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opCancelRotateSecret = "CancelRotateSecret" // CancelRotateSecretRequest generates a "aws/request.Request" representing the @@ -184,6 +367,10 @@ func (c *SecretsManager) CreateSecretRequest(input *CreateSecretInput) (req *req // protected secret data and the important information needed to manage the // secret. // +// For secrets that use managed rotation, you need to create the secret through +// the managing service. For more information, see Secrets Manager secrets managed +// by other Amazon Web Services services (https://docs.aws.amazon.com/secretsmanager/latest/userguide/service-linked-secrets.html). +// // For information about creating a secret in the console, see Create a secret // (https://docs.aws.amazon.com/secretsmanager/latest/userguide/manage_create-basic-secret.html). // @@ -214,7 +401,8 @@ func (c *SecretsManager) CreateSecretRequest(input *CreateSecretInput) (req *req // Secrets Manager events with CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html). // // Required permissions: secretsmanager:CreateSecret. If you include tags in -// the secret, you also need secretsmanager:TagResource. For more information, +// the secret, you also need secretsmanager:TagResource. To add replica Regions, +// you must also have secretsmanager:ReplicateSecretToRegions. For more information, // see IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) // and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html). // @@ -686,12 +874,10 @@ func (c *SecretsManager) GetRandomPasswordRequest(input *GetRandomPasswordInput) // // Generates a random password. We recommend that you specify the maximum length // and include every character type that the system you are generating a password -// for can support. +// for can support. By default, Secrets Manager uses uppercase and lowercase +// letters, numbers, and the following characters in passwords: !\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ // // Secrets Manager generates a CloudTrail log entry when you call this action. -// Do not include sensitive information in request parameters because it might -// be logged. For more information, see Logging Secrets Manager events with -// CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html). // // Required permissions: secretsmanager:GetRandomPassword. For more information, // see IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) @@ -906,6 +1092,8 @@ func (c *SecretsManager) GetSecretValueRequest(input *GetSecretValueInput) (req // Retrieves the contents of the encrypted fields SecretString or SecretBinary // from the specified version of a secret, whichever contains content. // +// To retrieve the values for a group of secrets, call BatchGetSecretValue. +// // We recommend that you cache your secret values by using client-side caching. // Caching secrets improves speed and reduces your costs. For more information, // see Cache secrets for your applications (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieving-secrets.html). 
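The hunks above wire BatchGetSecretValue into the vendored Secrets Manager client as a paginated companion to GetSecretValue. For reference, a minimal sketch (not part of this diff) of how both could be called through this vendored SDK; the region, the prod/example secret name, and the prod/ name-prefix filter are placeholder values:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/secretsmanager"
)

func main() {
	// Shared session; the region is a placeholder.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := secretsmanager.New(sess)

	// Single secret: GetSecretValue, as the documentation above recommends.
	one, err := svc.GetSecretValue(&secretsmanager.GetSecretValueInput{
		SecretId: aws.String("prod/example"), // placeholder name
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(one.Name))

	// Group of secrets: BatchGetSecretValue with a name-prefix filter,
	// paged through the generated Pages helper added in this diff.
	input := &secretsmanager.BatchGetSecretValueInput{
		Filters: []*secretsmanager.Filter{{
			Key:    aws.String("name"),
			Values: []*string{aws.String("prod/")},
		}},
	}
	err = svc.BatchGetSecretValuePages(input,
		func(page *secretsmanager.BatchGetSecretValueOutput, lastPage bool) bool {
			for _, v := range page.SecretValues {
				fmt.Println(aws.StringValue(v.Name))
			}
			// Per-secret failures arrive in page.Errors, not as a request error.
			for _, e := range page.Errors {
				fmt.Println(aws.StringValue(e.SecretId), aws.StringValue(e.ErrorCode))
			}
			return true // keep paging
		})
	if err != nil {
		log.Fatal(err)
	}
}

Note the design of the batch operation: individual retrieval failures are reported per secret in the Errors list of each page rather than failing the whole call, so callers should inspect both SecretValues and Errors.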
@@ -1195,13 +1383,13 @@ func (c *SecretsManager) ListSecretsRequest(input *ListSecretsInput) (req *reque // account, not including secrets that are marked for deletion. To see secrets // marked for deletion, use the Secrets Manager console. // -// ListSecrets is eventually consistent, however it might not reflect changes -// from the last five minutes. To get the latest information for a specific -// secret, use DescribeSecret. +// All Secrets Manager operations are eventually consistent. ListSecrets might +// not reflect changes from the last five minutes. You can get more recent information +// for a specific secret by calling DescribeSecret. // // To list the versions of a secret, use ListSecretVersionIds. // -// To get the secret value from SecretString or SecretBinary, call GetSecretValue. +// To retrieve the values for the secrets, call BatchGetSecretValue or GetSecretValue. // // For information about finding secrets in the console, see Find secrets in // Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/manage_search-secret.html). @@ -1227,6 +1415,21 @@ func (c *SecretsManager) ListSecretsRequest(input *ListSecretsInput) (req *reque // - InvalidParameterException // The parameter name or value is invalid. // +// - InvalidRequestException +// A parameter value is not valid for the current state of the resource. +// +// Possible causes: +// +// - The secret is scheduled for deletion. +// +// - You tried to enable rotation on a secret that doesn't already have a +// Lambda function ARN configured and you didn't include such an ARN as a +// parameter in this call. +// +// - The secret is managed by another service, and you must use that service +// to update it. For more information, see Secrets managed by other Amazon +// Web Services services (https://docs.aws.amazon.com/secretsmanager/latest/userguide/service-linked-secrets.html). +// // - InvalidNextTokenException // The NextToken value is invalid. // @@ -1498,9 +1701,9 @@ func (c *SecretsManager) PutSecretValueRequest(input *PutSecretValueInput) (req // version; you can only create new ones. // // Secrets Manager generates a CloudTrail log entry when you call this action. -// Do not include sensitive information in request parameters except SecretBinary -// or SecretString because it might be logged. For more information, see Logging -// Secrets Manager events with CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html). +// Do not include sensitive information in request parameters except SecretBinary, +// SecretString, or RotationToken because it might be logged. For more information, +// see Logging Secrets Manager events with CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html). // // Required permissions: secretsmanager:PutSecretValue. For more information, // see IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) @@ -1736,8 +1939,12 @@ func (c *SecretsManager) ReplicateSecretToRegionsRequest(input *ReplicateSecretT // be logged. For more information, see Logging Secrets Manager events with // CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html). // -// Required permissions: secretsmanager:ReplicateSecretToRegions. 
For more information, -// see IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) +// Required permissions: secretsmanager:ReplicateSecretToRegions. If the primary +// secret is encrypted with a KMS key other than aws/secretsmanager, you also +// need kms:Decrypt permission to the key. To encrypt the replicated secret +// with a KMS key other than aws/secretsmanager, you need kms:GenerateDataKey +// and kms:Encrypt to the key. For more information, see IAM policy actions +// for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) // and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1949,38 +2156,21 @@ func (c *SecretsManager) RotateSecretRequest(input *RotateSecretInput) (req *req // RotateSecret API operation for AWS Secrets Manager. // // Configures and starts the asynchronous process of rotating the secret. For -// more information about rotation, see Rotate secrets (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets.html). -// -// If you include the configuration parameters, the operation sets the values -// for the secret and then immediately starts a rotation. If you don't include -// the configuration parameters, the operation starts a rotation with the values -// already stored in the secret. -// -// For database credentials you want to rotate, for Secrets Manager to be able -// to rotate the secret, you must make sure the secret value is in the JSON -// structure of a database secret (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_secret_json_structure.html). -// In particular, if you want to use the alternating users strategy (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets_strategies.html#rotating-secrets-two-users), -// your secret must contain the ARN of a superuser secret. -// -// To configure rotation, you also need the ARN of an Amazon Web Services Lambda -// function and the schedule for the rotation. The Lambda rotation function -// creates a new version of the secret and creates or updates the credentials -// on the database or service to match. After testing the new credentials, the -// function marks the new secret version with the staging label AWSCURRENT. -// Then anyone who retrieves the secret gets the new version. For more information, -// see How rotation works (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_how.html). -// -// You can create the Lambda rotation function based on the rotation function -// templates (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_available-rotation-templates.html) -// that Secrets Manager provides. Choose a template that matches your Rotation -// strategy (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets_strategies.html). +// information about rotation, see Rotate secrets (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets.html) +// in the Secrets Manager User Guide. If you include the configuration parameters, +// the operation sets the values for the secret and then immediately starts +// a rotation. 
If you don't include the configuration parameters, the operation +// starts a rotation with the values already stored in the secret. // // When rotation is successful, the AWSPENDING staging label might be attached // to the same version as the AWSCURRENT version, or it might not be attached // to any version. If the AWSPENDING staging label is present but not attached // to the same version as AWSCURRENT, then any later invocation of RotateSecret // assumes that a previous rotation request is still in progress and returns -// an error. +// an error. When rotation is unsuccessful, the AWSPENDING staging label might +// be attached to an empty secret version. For more information, see Troubleshoot +// rotation (https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot_rotation.html) +// in the Secrets Manager User Guide. // // Secrets Manager generates a CloudTrail log entry when you call this action. // Do not include sensitive information in request parameters because it might @@ -2209,25 +2399,8 @@ func (c *SecretsManager) TagResourceRequest(input *TagResourceInput) (req *reque // part of the secret's metadata. They are not associated with specific versions // of the secret. This operation appends tags to the existing list of tags. // -// The following restrictions apply to tags: -// -// - Maximum number of tags per secret: 50 -// -// - Maximum key length: 127 Unicode characters in UTF-8 -// -// - Maximum value length: 255 Unicode characters in UTF-8 -// -// - Tag keys and values are case sensitive. -// -// - Do not use the aws: prefix in your tag names or values because Amazon -// Web Services reserves it for Amazon Web Services use. You can't edit or -// delete tag names or values with this prefix. Tags with this prefix do -// not count against your tags per secret limit. -// -// - If you use your tagging schema across multiple services and resources, -// other services might have restrictions on allowed characters. Generally -// allowed characters: letters, spaces, and numbers representable in UTF-8, -// plus the following special characters: + - = . _ : / @. +// For tag quotas and naming restrictions, see Service quotas for Tagging (https://docs.aws.amazon.com/general/latest/gr/arg.html#taged-reference-quotas) +// in the Amazon Web Services General Reference guide. // // If you use tags as part of your security strategy, then adding or removing // a tag can change permissions. If successfully completing this operation would @@ -2464,6 +2637,10 @@ func (c *SecretsManager) UpdateSecretRequest(input *UpdateSecretInput) (req *req // // To change the rotation configuration of a secret, use RotateSecret instead. // +// To change a secret so that it is managed by another service, you need to +// recreate the secret in that service. See Secrets Manager secrets managed +// by other Amazon Web Services services (https://docs.aws.amazon.com/secretsmanager/latest/userguide/service-linked-secrets.html). +// // We recommend you avoid calling UpdateSecret at a sustained rate of more than // once every 10 minutes. When you call UpdateSecret to update the secret value, // Secrets Manager creates a new version of the secret. Secrets Manager removes @@ -2490,9 +2667,11 @@ func (c *SecretsManager) UpdateSecretRequest(input *UpdateSecretInput) (req *req // Required permissions: secretsmanager:UpdateSecret. 
For more information, // see IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) // and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html). -// If you use a customer managed key, you must also have kms:GenerateDataKey -// and kms:Decrypt permissions on the key. For more information, see Secret -// encryption and decryption (https://docs.aws.amazon.com/secretsmanager/latest/userguide/security-encryption.html). +// If you use a customer managed key, you must also have kms:GenerateDataKey, +// kms:Encrypt, and kms:Decrypt permissions on the key. If you change the KMS +// key and you don't have kms:Encrypt permission to the new key, Secrets Manager +// does not re-encrypt existing secret versions with the new key. For more information, +// see Secret encryption and decryption (https://docs.aws.amazon.com/secretsmanager/latest/userguide/security-encryption.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2763,8 +2942,8 @@ func (c *SecretsManager) ValidateResourcePolicyRequest(input *ValidateResourcePo // be logged. For more information, see Logging Secrets Manager events with // CloudTrail (https://docs.aws.amazon.com/secretsmanager/latest/userguide/retrieve-ct-entries.html). // -// Required permissions: secretsmanager:ValidateResourcePolicy. For more information, -// see IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) +// Required permissions: secretsmanager:ValidateResourcePolicy and secretsmanager:PutResourcePolicy. +// For more information, see IAM policy actions for Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#reference_iam-permissions_actions) // and Authentication and access control in Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2825,6 +3004,209 @@ func (c *SecretsManager) ValidateResourcePolicyWithContext(ctx aws.Context, inpu return out, req.Send() } +// The error Secrets Manager encountered while retrieving an individual secret +// as part of BatchGetSecretValue. +type APIErrorType struct { + _ struct{} `type:"structure"` + + // The error Secrets Manager encountered while retrieving an individual secret + // as part of BatchGetSecretValue, for example ResourceNotFoundException, InvalidParameterException, + // InvalidRequestException, DecryptionFailure, or AccessDeniedException. + ErrorCode *string `type:"string"` + + // A message describing the error. + Message *string `type:"string"` + + // The ARN or name of the secret. + SecretId *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s APIErrorType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation.
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s APIErrorType) GoString() string { + return s.String() +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *APIErrorType) SetErrorCode(v string) *APIErrorType { + s.ErrorCode = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *APIErrorType) SetMessage(v string) *APIErrorType { + s.Message = &v + return s +} + +// SetSecretId sets the SecretId field's value. +func (s *APIErrorType) SetSecretId(v string) *APIErrorType { + s.SecretId = &v + return s +} + +type BatchGetSecretValueInput struct { + _ struct{} `type:"structure"` + + // The filters to choose which secrets to retrieve. You must include Filters + // or SecretIdList, but not both. + Filters []*Filter `type:"list"` + + // The number of results to include in the response. + // + // If there are more results available, in the response, Secrets Manager includes + // NextToken. To get the next results, call BatchGetSecretValue again with the + // value from NextToken. To use this parameter, you must also use the Filters + // parameter. + MaxResults *int64 `min:"1" type:"integer"` + + // A token that indicates where the output should continue from, if a previous + // call did not show all results. To get the next results, call BatchGetSecretValue + // again with this value. + NextToken *string `min:"1" type:"string"` + + // The ARN or names of the secrets to retrieve. You must include Filters or + // SecretIdList, but not both. + SecretIdList []*string `min:"1" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchGetSecretValueInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchGetSecretValueInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetSecretValueInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetSecretValueInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.SecretIdList != nil && len(s.SecretIdList) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecretIdList", 1)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *BatchGetSecretValueInput) SetFilters(v []*Filter) *BatchGetSecretValueInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *BatchGetSecretValueInput) SetMaxResults(v int64) *BatchGetSecretValueInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *BatchGetSecretValueInput) SetNextToken(v string) *BatchGetSecretValueInput { + s.NextToken = &v + return s +} + +// SetSecretIdList sets the SecretIdList field's value. +func (s *BatchGetSecretValueInput) SetSecretIdList(v []*string) *BatchGetSecretValueInput { + s.SecretIdList = v + return s +} + +type BatchGetSecretValueOutput struct { + _ struct{} `type:"structure"` + + // A list of errors Secrets Manager encountered while attempting to retrieve + // individual secrets. + Errors []*APIErrorType `type:"list"` + + // Secrets Manager includes this value if there's more output available than + // what is included in the current response. This can occur even when the response + // includes no values at all, such as when you ask for a filtered view of a + // long list. To get the next results, call BatchGetSecretValue again with this + // value. + NextToken *string `min:"1" type:"string"` + + // A list of secret values. + SecretValues []*SecretValueEntry `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchGetSecretValueOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchGetSecretValueOutput) GoString() string { + return s.String() +} + +// SetErrors sets the Errors field's value. +func (s *BatchGetSecretValueOutput) SetErrors(v []*APIErrorType) *BatchGetSecretValueOutput { + s.Errors = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *BatchGetSecretValueOutput) SetNextToken(v string) *BatchGetSecretValueOutput { + s.NextToken = &v + return s +} + +// SetSecretValues sets the SecretValues field's value. +func (s *BatchGetSecretValueOutput) SetSecretValues(v []*SecretValueEntry) *BatchGetSecretValueOutput { + s.SecretValues = v + return s +} + type CancelRotateSecretInput struct { _ struct{} `type:"structure"` @@ -2943,10 +3325,10 @@ type CreateSecretInput struct { // If you use the Amazon Web Services CLI or one of the Amazon Web Services // SDKs to call this operation, then you can leave this parameter empty. The // CLI or SDK generates a random UUID for you and includes it as the value for - // this parameter in the request. If you don't use the SDK and instead generate - // a raw HTTP request to the Secrets Manager service endpoint, then you must - // generate a ClientRequestToken yourself for the new version and include the - // value in the request. + // this parameter in the request. + // + // If you generate a raw HTTP request to the Secrets Manager service endpoint, + // then you must generate a ClientRequestToken and include it in the request. // // This value helps ensure idempotency. 
Secrets Manager uses this value to prevent // the accidental creation of duplicate versions if there are failures and retries @@ -2972,7 +3354,7 @@ type CreateSecretInput struct { Description *string `type:"string"` // Specifies whether to overwrite a secret with the same name in the destination - // Region. + // Region. By default, secrets aren't overwritten. ForceOverwriteReplicaSecret *bool `type:"boolean"` // The ARN, key ID, or alias of the KMS key that Secrets Manager uses to encrypt @@ -3012,12 +3394,16 @@ type CreateSecretInput struct { // // This parameter is not available in the Secrets Manager console. // + // Sensitive: This field contains sensitive information, so the service does + // not include it in CloudTrail log entries. If you create your own log entries, + // you must also avoid logging the information in this field. + // // SecretBinary is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CreateSecretInput's // String and GoString methods. // // SecretBinary is automatically base64 encoded/decoded by the SDK. - SecretBinary []byte `type:"blob" sensitive:"true"` + SecretBinary []byte `min:"1" type:"blob" sensitive:"true"` // The text data to encrypt and store in this new version of the secret. We // recommend you use a JSON structure of key/value pairs for your secret value. @@ -3029,10 +3415,14 @@ type CreateSecretInput struct { // The Secrets Manager console stores the information as a JSON structure of // key/value pairs that a Lambda rotation function can parse. // + // Sensitive: This field contains sensitive information, so the service does + // not include it in CloudTrail log entries. If you create your own log entries, + // you must also avoid logging the information in this field. + // // SecretString is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CreateSecretInput's // String and GoString methods. - SecretString *string `type:"string" sensitive:"true"` + SecretString *string `min:"1" type:"string" sensitive:"true"` // A list of tags to attach to the secret. Each tag is a key and value pair // of strings in a JSON text string, for example: @@ -3055,25 +3445,8 @@ type CreateSecretInput struct { // you should use single quotes to avoid confusion with the double quotes required // in the JSON text. // - // The following restrictions apply to tags: - // - // * Maximum number of tags per secret: 50 - // - // * Maximum key length: 127 Unicode characters in UTF-8 - // - // * Maximum value length: 255 Unicode characters in UTF-8 - // - // * Tag keys and values are case sensitive. - // - // * Do not use the aws: prefix in your tag names or values because Amazon - // Web Services reserves it for Amazon Web Services use. You can't edit or - // delete tag names or values with this prefix. Tags with this prefix do - // not count against your tags per secret limit. - // - // * If you use your tagging schema across multiple services and resources, - // other services might have restrictions on allowed characters. Generally - // allowed characters: letters, spaces, and numbers representable in UTF-8, - // plus the following special characters: + - = . _ : / @. + // For tag quotas and naming restrictions, see Service quotas for Tagging (https://docs.aws.amazon.com/general/latest/gr/arg.html#taged-reference-quotas) + // in the Amazon Web Services General Reference guide. 
Tags []*Tag `type:"list"` } @@ -3110,6 +3483,12 @@ func (s *CreateSecretInput) Validate() error { if s.Name != nil && len(*s.Name) < 1 { invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } + if s.SecretBinary != nil && len(s.SecretBinary) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecretBinary", 1)) + } + if s.SecretString != nil && len(*s.SecretString) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecretString", 1)) + } if s.AddReplicaRegions != nil { for i, v := range s.AddReplicaRegions { if v == nil { @@ -3422,24 +3801,28 @@ type DeleteSecretInput struct { // Specifies whether to delete the secret without any recovery window. You can't // use both this parameter and RecoveryWindowInDays in the same call. If you - // don't use either, then Secrets Manager defaults to a 30 day recovery window. + // don't use either, then by default Secrets Manager uses a 30 day recovery + // window. // // Secrets Manager performs the actual deletion with an asynchronous background // process, so there might be a short delay before the secret is permanently // deleted. If you delete a secret and then immediately create a secret with // the same name, use appropriate back off and retry logic. // + // If you forcibly delete an already deleted or nonexistent secret, the operation + // does not return ResourceNotFoundException. + // // Use this parameter with caution. This parameter causes the operation to skip // the normal recovery window before the permanent deletion that Secrets Manager // would normally impose with the RecoveryWindowInDays parameter. If you delete - // a secret with the ForceDeleteWithouRecovery parameter, then you have no opportunity - // to recover the secret. You lose the secret permanently. + // a secret with the ForceDeleteWithoutRecovery parameter, then you have no + // opportunity to recover the secret. You lose the secret permanently. ForceDeleteWithoutRecovery *bool `type:"boolean"` // The number of days from 7 to 30 that Secrets Manager waits before permanently // deleting the secret. You can't use both this parameter and ForceDeleteWithoutRecovery - // in the same call. If you don't use either, then Secrets Manager defaults - // to a 30 day recovery window. + // in the same call. If you don't use either, then by default Secrets Manager + // uses a 30 day recovery window. RecoveryWindowInDays *int64 `type:"long"` // The ARN or name of the secret to delete. @@ -3643,12 +4026,25 @@ type DescribeSecretOutput struct { LastChangedDate *time.Time `type:"timestamp"` // The last date and time that Secrets Manager rotated the secret. If the secret - // isn't configured for rotation, Secrets Manager returns null. + // isn't configured for rotation or rotation has been disabled, Secrets Manager + // returns null. LastRotatedDate *time.Time `type:"timestamp"` // The name of the secret. Name *string `min:"1" type:"string"` + // The next rotation is scheduled to occur on or before this date. If the secret + // isn't configured for rotation or rotation has been disabled, Secrets Manager + // returns null. If rotation fails, Secrets Manager retries the entire rotation + // process multiple times. If rotation is unsuccessful, this date may be in + // the past. + // + // This date represents the latest date that rotation will occur, but it is + // not an approximate rotation date. In some cases, for example if you turn + // off automatic rotation and then turn it back on, the next rotation may occur + // much sooner than this date. 
+ NextRotationDate *time.Time `type:"timestamp"` + // The ID of the service that created this secret. For more information, see // Secrets managed by other Amazon Web Services services (https://docs.aws.amazon.com/secretsmanager/latest/userguide/service-linked-secrets.html). OwningService *string `min:"1" type:"string"` @@ -3667,7 +4063,8 @@ type DescribeSecretOutput struct { // * InSync, which indicates that the replica was created. ReplicationStatus []*ReplicationStatusType `type:"list"` - // Specifies whether automatic rotation is turned on for this secret. + // Specifies whether automatic rotation is turned on for this secret. If the + // secret has never been configured for rotation, Secrets Manager returns null. // // To turn on rotation, use RotateSecret. To turn off rotation, use CancelRotateSecret. RotationEnabled *bool `type:"boolean"` @@ -3781,6 +4178,12 @@ func (s *DescribeSecretOutput) SetName(v string) *DescribeSecretOutput { return s } +// SetNextRotationDate sets the NextRotationDate field's value. +func (s *DescribeSecretOutput) SetNextRotationDate(v time.Time) *DescribeSecretOutput { + s.NextRotationDate = &v + return s +} + // SetOwningService sets the OwningService field's value. func (s *DescribeSecretOutput) SetOwningService(v string) *DescribeSecretOutput { s.OwningService = &v @@ -3912,6 +4315,8 @@ type Filter struct { // // * primary-region: Prefix match, case-sensitive. // + // * owning-service: Prefix match, case-sensitive. + // // * all: Breaks the filter value string into words and then searches all // attributes for matches. Not case-sensitive. Key *string `type:"string" enum:"FilterNameStringType"` @@ -4224,7 +4629,8 @@ func (s *GetResourcePolicyOutput) SetResourcePolicy(v string) *GetResourcePolicy type GetSecretValueInput struct { _ struct{} `type:"structure"` - // The ARN or name of the secret to retrieve. + // The ARN or name of the secret to retrieve. To retrieve a secret from another + // account, you must use an ARN. // // For an ARN, we recommend that you specify a complete ARN rather than a partial // ARN. See Finding a secret from a partial ARN (https://docs.aws.amazon.com/secretsmanager/latest/userguide/troubleshoot.html#ARN_secretnamehyphen). @@ -4324,20 +4730,24 @@ type GetSecretValueOutput struct { Name *string `min:"1" type:"string"` // The decrypted secret value, if the secret value was originally provided as - // binary data in the form of a byte array. The response parameter represents - // the binary data as a base64-encoded (https://tools.ietf.org/html/rfc4648#section-4) - // string. + // binary data in the form of a byte array. When you retrieve a SecretBinary + // using the HTTP API, the Python SDK, or the Amazon Web Services CLI, the value + // is Base64-encoded. Otherwise, it is not encoded. // // If the secret was created by using the Secrets Manager console, or if the // secret value was originally provided as a string, then this field is omitted. // The secret value appears in SecretString instead. // + // Sensitive: This field contains sensitive information, so the service does + // not include it in CloudTrail log entries. If you create your own log entries, + // you must also avoid logging the information in this field. + // // SecretBinary is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by GetSecretValueOutput's // String and GoString methods. // // SecretBinary is automatically base64 encoded/decoded by the SDK. 
- SecretBinary []byte `type:"blob" sensitive:"true"` + SecretBinary []byte `min:"1" type:"blob" sensitive:"true"` // The decrypted secret value, if the secret value was originally provided as // a string or through the Secrets Manager console. @@ -4345,10 +4755,14 @@ type GetSecretValueOutput struct { // If this secret was created by using the console, then Secrets Manager stores // the information as a JSON structure of key/value pairs. // + // Sensitive: This field contains sensitive information, so the service does + // not include it in CloudTrail log entries. If you create your own log entries, + // you must also avoid logging the information in this field. + // // SecretString is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by GetSecretValueOutput's // String and GoString methods. - SecretString *string `type:"string" sensitive:"true"` + SecretString *string `min:"1" type:"string" sensitive:"true"` // The unique identifier of this version of the secret. VersionId *string `min:"32" type:"string"` @@ -4755,7 +5169,8 @@ type ListSecretVersionIdsInput struct { // Specifies whether to include versions of secrets that don't have any staging // labels attached to them. Versions without staging labels are considered deprecated - // and are subject to deletion by Secrets Manager. + // and are subject to deletion by Secrets Manager. By default, versions without + // staging labels aren't included. IncludeDeprecated *bool `type:"boolean"` // The number of results to include in the response. @@ -4911,6 +5326,10 @@ type ListSecretsInput struct { // The filters to apply to the list of secrets. Filters []*Filter `type:"list"` + // Specifies whether to include secrets scheduled for deletion. By default, + // secrets scheduled for deletion aren't included. + IncludePlannedDeletion *bool `type:"boolean"` + // The number of results to include in the response. // // If there are more results available, in the response, Secrets Manager includes @@ -4923,7 +5342,7 @@ type ListSecretsInput struct { // again with this value. NextToken *string `min:"1" type:"string"` - // Lists secrets in the requested order. + // Secrets are listed by CreatedDate. SortOrder *string `type:"string" enum:"SortOrderType"` } @@ -4977,6 +5396,12 @@ func (s *ListSecretsInput) SetFilters(v []*Filter) *ListSecretsInput { return s } +// SetIncludePlannedDeletion sets the IncludePlannedDeletion field's value. +func (s *ListSecretsInput) SetIncludePlannedDeletion(v bool) *ListSecretsInput { + s.IncludePlannedDeletion = &v + return s +} + // SetMaxResults sets the MaxResults field's value. func (s *ListSecretsInput) SetMaxResults(v int64) *ListSecretsInput { s.MaxResults = &v @@ -5235,7 +5660,23 @@ type PutResourcePolicyInput struct { _ struct{} `type:"structure"` // Specifies whether to block resource-based policies that allow broad access - // to the secret, for example those that use a wildcard for the principal. + // to the secret, for example those that use a wildcard for the principal. By + // default, public policies aren't blocked. + // + // Resource policy validation and the BlockPublicPolicy parameter help protect + // your resources by preventing public access from being granted through the + // resource policies that are directly attached to your secrets. 
In addition + // to using these features, carefully inspect the following policies to confirm + // that they do not grant public access: + // + // * Identity-based policies attached to associated Amazon Web Services principals + // (for example, IAM roles) + // + // * Resource-based policies attached to associated Amazon Web Services resources + // (for example, Key Management Service (KMS) keys) + // + // To review permissions to your secrets, see Determine who has permissions + // to your secrets (https://docs.aws.amazon.com/secretsmanager/latest/userguide/determine-acccess_examine-iam-policies.html). BlockPublicPolicy *bool `type:"boolean"` // A JSON-formatted string for an Amazon Web Services resource-based policy. @@ -5357,17 +5798,17 @@ type PutSecretValueInput struct { // A unique identifier for the new version of the secret. // // If you use the Amazon Web Services CLI or one of the Amazon Web Services - // SDKs to call this operation, then you can leave this parameter empty because - // they generate a random UUID for you. If you don't use the SDK and instead - // generate a raw HTTP request to the Secrets Manager service endpoint, then - // you must generate a ClientRequestToken yourself for new versions and include - // that value in the request. + // SDKs to call this operation, then you can leave this parameter empty. The + // CLI or SDK generates a random UUID for you and includes it as the value for + // this parameter in the request. + // + // If you generate a raw HTTP request to the Secrets Manager service endpoint, + // then you must generate a ClientRequestToken and include it in the request. // // This value helps ensure idempotency. Secrets Manager uses this value to prevent // the accidental creation of duplicate versions if there are failures and retries - // during the Lambda rotation function processing. We recommend that you generate - // a UUID-type (https://wikipedia.org/wiki/Universally_unique_identifier) value - // to ensure uniqueness within the specified secret. + // during a rotation. We recommend that you generate a UUID-type (https://wikipedia.org/wiki/Universally_unique_identifier) + // value to ensure uniqueness of your versions within the specified secret. // // * If the ClientRequestToken value isn't already associated with a version // of the secret, then a new version of the secret is created. @@ -5384,6 +5825,21 @@ type PutSecretValueInput struct { // This value becomes the VersionId of the new version. ClientRequestToken *string `min:"32" type:"string" idempotencyToken:"true"` + // A unique identifier that indicates the source of the request. In cross-account + // rotation (when you rotate a secret in one account by using a Lambda rotation + // function in another account), the Lambda rotation function assumes an + // IAM role to call Secrets Manager, and Secrets Manager validates the identity + // with the rotation token. For more information, see How rotation works (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets.html). + // + // Sensitive: This field contains sensitive information, so the service does + // not include it in CloudTrail log entries. If you create your own log entries, + // you must also avoid logging the information in this field. + // + // RotationToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by PutSecretValueInput's + // String and GoString methods.
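A sketch of writing a new version with the parameters documented above, reusing the svc client from the earlier sketch (an assumption). ClientRequestToken is left empty so the SDK's idempotency-token handling supplies a random UUID, and RotationToken is omitted because it is only passed by cross-account Lambda rotation functions:

_, err := svc.PutSecretValue(&secretsmanager.PutSecretValueInput{
	SecretId:     aws.String("prod/db/credentials"), // hypothetical secret name
	SecretString: aws.String(`{"username":"admin","password":"example-only"}`),
	// ClientRequestToken omitted: the SDK fills in a random UUID via the idempotencyToken tag.
})
if err != nil {
	log.Fatal(err)
}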
+ RotationToken *string `min:"36" type:"string" sensitive:"true"` + // The binary data to encrypt and store in the new version of the secret. To // use this parameter in the command-line tools, we recommend that you store // your binary data in a file and then pass the contents of the file as a parameter. @@ -5392,12 +5848,16 @@ type PutSecretValueInput struct { // // You can't access this value from the Secrets Manager console. // + // Sensitive: This field contains sensitive information, so the service does + // not include it in CloudTrail log entries. If you create your own log entries, + // you must also avoid logging the information in this field. + // // SecretBinary is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by PutSecretValueInput's // String and GoString methods. // // SecretBinary is automatically base64 encoded/decoded by the SDK. - SecretBinary []byte `type:"blob" sensitive:"true"` + SecretBinary []byte `min:"1" type:"blob" sensitive:"true"` // The ARN or name of the secret to add a new version to. // @@ -5416,10 +5876,14 @@ type PutSecretValueInput struct { // We recommend you create the secret string as JSON key/value pairs, as shown // in the example. // + // Sensitive: This field contains sensitive information, so the service does + // not include it in CloudTrail log entries. If you create your own log entries, + // you must also avoid logging the information in this field. + // // SecretString is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by PutSecretValueInput's // String and GoString methods. - SecretString *string `type:"string" sensitive:"true"` + SecretString *string `min:"1" type:"string" sensitive:"true"` // A list of staging labels to attach to this version of the secret. Secrets // Manager uses staging labels to track versions of a secret through the rotation @@ -5461,12 +5925,21 @@ func (s *PutSecretValueInput) Validate() error { if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 32 { invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 32)) } + if s.RotationToken != nil && len(*s.RotationToken) < 36 { + invalidParams.Add(request.NewErrParamMinLen("RotationToken", 36)) + } + if s.SecretBinary != nil && len(s.SecretBinary) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecretBinary", 1)) + } if s.SecretId == nil { invalidParams.Add(request.NewErrParamRequired("SecretId")) } if s.SecretId != nil && len(*s.SecretId) < 1 { invalidParams.Add(request.NewErrParamMinLen("SecretId", 1)) } + if s.SecretString != nil && len(*s.SecretString) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecretString", 1)) + } if s.VersionStages != nil && len(s.VersionStages) < 1 { invalidParams.Add(request.NewErrParamMinLen("VersionStages", 1)) } @@ -5483,6 +5956,12 @@ func (s *PutSecretValueInput) SetClientRequestToken(v string) *PutSecretValueInp return s } +// SetRotationToken sets the RotationToken field's value. +func (s *PutSecretValueInput) SetRotationToken(v string) *PutSecretValueInput { + s.RotationToken = &v + return s +} + // SetSecretBinary sets the SecretBinary field's value. func (s *PutSecretValueInput) SetSecretBinary(v []byte) *PutSecretValueInput { s.SecretBinary = v @@ -5737,7 +6216,7 @@ type ReplicateSecretToRegionsInput struct { AddReplicaRegions []*ReplicaRegionType `min:"1" type:"list" required:"true"` // Specifies whether to overwrite a secret with the same name in the destination - // Region. + // Region. 
By default, secrets aren't overwritten. ForceOverwriteReplicaSecret *bool `type:"boolean"` // The ARN or name of the secret to replicate. @@ -6147,38 +6626,42 @@ func (s *RestoreSecretOutput) SetName(v string) *RestoreSecretOutput { type RotateSecretInput struct { _ struct{} `type:"structure"` - // A unique identifier for the new version of the secret that helps ensure idempotency. - // Secrets Manager uses this value to prevent the accidental creation of duplicate - // versions if there are failures and retries during rotation. This value becomes - // the VersionId of the new version. + // A unique identifier for the new version of the secret. You only need to specify + // this value if you implement your own retry logic and you want to ensure that + // Secrets Manager doesn't attempt to create a secret version twice. // // If you use the Amazon Web Services CLI or one of the Amazon Web Services - // SDK to call this operation, then you can leave this parameter empty. The - // CLI or SDK generates a random UUID for you and includes that in the request - // for this parameter. If you don't use the SDK and instead generate a raw HTTP - // request to the Secrets Manager service endpoint, then you must generate a - // ClientRequestToken yourself for new versions and include that value in the - // request. + // SDKs to call this operation, then you can leave this parameter empty. The + // CLI or SDK generates a random UUID for you and includes it as the value for + // this parameter in the request. // - // You only need to specify this value if you implement your own retry logic - // and you want to ensure that Secrets Manager doesn't attempt to create a secret - // version twice. We recommend that you generate a UUID-type (https://wikipedia.org/wiki/Universally_unique_identifier) - // value to ensure uniqueness within the specified secret. + // If you generate a raw HTTP request to the Secrets Manager service endpoint, + // then you must generate a ClientRequestToken and include it in the request. + // + // This value helps ensure idempotency. Secrets Manager uses this value to prevent + // the accidental creation of duplicate versions if there are failures and retries + // during a rotation. We recommend that you generate a UUID-type (https://wikipedia.org/wiki/Universally_unique_identifier) + // value to ensure uniqueness of your versions within the specified secret. ClientRequestToken *string `min:"32" type:"string" idempotencyToken:"true"` // Specifies whether to rotate the secret immediately or wait until the next // scheduled rotation window. The rotation schedule is defined in RotateSecretRequest$RotationRules. // - // If you don't immediately rotate the secret, Secrets Manager tests the rotation - // configuration by running the testSecret step (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_how.html) + // For secrets that use a Lambda rotation function to rotate, if you don't immediately + // rotate the secret, Secrets Manager tests the rotation configuration by running + // the testSecret step (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_lambda-functions.html#rotate-secrets_lambda-functions-code) // of the Lambda rotation function. The test creates an AWSPENDING version of // the secret and then removes it. // - // If you don't specify this value, then by default, Secrets Manager rotates - // the secret immediately. + // By default, Secrets Manager rotates the secret immediately. 
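A sketch of the rotation parameters documented here, under the same client assumption; the Lambda ARN is hypothetical and would be omitted for managed rotation. RotateImmediately is set to false, so Secrets Manager only runs the testSecret step now and performs the first rotation in the next scheduled window:

_, err := svc.RotateSecret(&secretsmanager.RotateSecretInput{
	SecretId:          aws.String("prod/db/credentials"),                                      // hypothetical
	RotationLambdaARN: aws.String("arn:aws:lambda:us-east-1:123456789012:function:rotate-db"), // hypothetical; omit for managed rotation
	RotationRules: &secretsmanager.RotationRulesType{
		ScheduleExpression: aws.String("rate(10 days)"),
		Duration:           aws.String("3h"),
	},
	RotateImmediately: aws.Bool(false),
})
if err != nil {
	log.Fatal(err)
}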
RotateImmediately *bool `type:"boolean"` - // The ARN of the Lambda rotation function that can rotate the secret. + // For secrets that use a Lambda rotation function to rotate, the ARN of the + // Lambda rotation function. + // + // For secrets that use managed rotation, omit this field. For more information, + // see Managed rotation (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_managed.html) + // in the Secrets Manager User Guide. RotationLambdaARN *string `type:"string"` // A structure that defines the rotation configuration for this secret. @@ -6318,41 +6801,52 @@ func (s *RotateSecretOutput) SetVersionId(v string) *RotateSecretOutput { type RotationRulesType struct { _ struct{} `type:"structure"` - // The number of days between automatic scheduled rotations of the secret. You - // can use this value to check that your secret meets your compliance guidelines - // for how often secrets must be rotated. + // The number of days between rotations of the secret. You can use this value + // to check that your secret meets your compliance guidelines for how often + // secrets must be rotated. If you use this field to set the rotation schedule, + // Secrets Manager calculates the next rotation date based on the previous rotation. + // Manually updating the secret value by calling PutSecretValue or UpdateSecret + // is considered a valid rotation. // // In DescribeSecret and ListSecrets, this value is calculated from the rotation // schedule after every successful rotation. In RotateSecret, you can set the // rotation schedule in RotationRules with AutomaticallyAfterDays or ScheduleExpression, - // but not both. + // but not both. To set a rotation schedule in hours, use ScheduleExpression. AutomaticallyAfterDays *int64 `min:"1" type:"long"` // The length of the rotation window in hours, for example 3h for a three-hour // window. Secrets Manager rotates your secret at any time during this window. - // The window must not go into the next UTC day. If you don't specify this value, - // the window automatically ends at the end of the UTC day. The window begins - // according to the ScheduleExpression. For more information, including examples, - // see Schedule expressions in Secrets Manager rotation (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_schedule.html). + // The window must not extend into the next rotation window or the next UTC + // day. The window starts according to the ScheduleExpression. If you don't + // specify a Duration, for a ScheduleExpression in hours, the window automatically + // closes after one hour. For a ScheduleExpression in days, the window automatically + // closes at the end of the UTC day. For more information, including examples, + // see Schedule expressions in Secrets Manager rotation (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_schedule.html) + // in the Secrets Manager User Guide. Duration *string `min:"2" type:"string"` // A cron() or rate() expression that defines the schedule for rotating your - // secret. Secrets Manager rotation schedules use UTC time zone. + // secret. Secrets Manager rotation schedules use UTC time zone. Secrets Manager + // rotates your secret any time during a rotation window. // - // Secrets Manager rate() expressions represent the interval in days that you - // want to rotate your secret, for example rate(10 days).
If you use a rate() - // expression, the rotation window opens at midnight, and Secrets Manager rotates - // your secret any time that day after midnight. You can set a Duration to shorten - // the rotation window. + // Secrets Manager rate() expressions represent the interval in hours or days + // that you want to rotate your secret, for example rate(12 hours) or rate(10 + // days). You can rotate a secret as often as every four hours. If you use a + // rate() expression, the rotation window starts at midnight. For a rate in + // hours, the default rotation window closes after one hour. For a rate in days, + // the default rotation window closes at the end of the day. You can set the + // Duration to change the rotation window. The rotation window must not extend + // into the next UTC day or into the next rotation window. // - // You can use a cron() expression to create rotation schedules that are more + // You can use a cron() expression to create a rotation schedule that is more // detailed than a rotation interval. For more information, including examples, - // see Schedule expressions in Secrets Manager rotation (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_schedule.html). - // If you use a cron() expression, Secrets Manager rotates your secret any time - // during that day after the window opens. For example, cron(0 8 1 * ? *) represents - // a rotation window that occurs on the first day of every month beginning at - // 8:00 AM UTC. Secrets Manager rotates the secret any time that day after 8:00 - // AM. You can set a Duration to shorten the rotation window. + // see Schedule expressions in Secrets Manager rotation (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_schedule.html) + // in the Secrets Manager User Guide. For a cron expression that represents + // a schedule in hours, the default rotation window closes after one hour. For + // a cron expression that represents a schedule in days, the default rotation + // window closes at the end of the day. You can set the Duration to change the + // rotation window. The rotation window must not extend into the next UTC day + // or into the next rotation window. ScheduleExpression *string `min:"1" type:"string"` } @@ -6449,12 +6943,14 @@ type SecretListEntry struct { // successfully completed. This value is null if the secret hasn't ever rotated. LastRotatedDate *time.Time `type:"timestamp"` - // The friendly name of the secret. You can use forward slashes in the name - // to represent a path hierarchy. For example, /prod/databases/dbserver1 could - // represent the secret for a server named dbserver1 in the folder databases - // in the folder prod. + // The friendly name of the secret. Name *string `min:"1" type:"string"` + // The next rotation is scheduled to occur on or before this date. If the secret + // isn't configured for rotation or rotation has been disabled, Secrets Manager + // returns null. + NextRotationDate *time.Time `type:"timestamp"` + // Returns the name of the service that created the secret. OwningService *string `min:"1" type:"string"` @@ -6558,6 +7054,12 @@ func (s *SecretListEntry) SetName(v string) *SecretListEntry { return s } +// SetNextRotationDate sets the NextRotationDate field's value. +func (s *SecretListEntry) SetNextRotationDate(v time.Time) *SecretListEntry { + s.NextRotationDate = &v + return s +} + // SetOwningService sets the OwningService field's value.
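The new NextRotationDate field pairs naturally with ListSecrets and the owning-service filter added in this change; a sketch under the same client assumption (the "rds" prefix is illustrative):

out, err := svc.ListSecrets(&secretsmanager.ListSecretsInput{
	IncludePlannedDeletion: aws.Bool(false), // the default: exclude secrets scheduled for deletion
	Filters: []*secretsmanager.Filter{{
		Key:    aws.String(secretsmanager.FilterNameStringTypeOwningService),
		Values: []*string{aws.String("rds")}, // illustrative prefix match
	}},
})
if err != nil {
	log.Fatal(err)
}
for _, entry := range out.SecretList {
	if entry.NextRotationDate != nil {
		fmt.Println(aws.StringValue(entry.Name), "rotates on or before", entry.NextRotationDate)
	}
}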
func (s *SecretListEntry) SetOwningService(v string) *SecretListEntry { s.OwningService = &v @@ -6600,6 +7102,107 @@ func (s *SecretListEntry) SetTags(v []*Tag) *SecretListEntry { return s } +// A structure that contains the secret value and other details for a secret. +type SecretValueEntry struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the secret. + ARN *string `min:"20" type:"string"` + + // The date the secret was created. + CreatedDate *time.Time `type:"timestamp"` + + // The friendly name of the secret. + Name *string `min:"1" type:"string"` + + // The decrypted secret value, if the secret value was originally provided as + // binary data in the form of a byte array. The parameter represents the binary + // data as a base64-encoded (https://tools.ietf.org/html/rfc4648#section-4) + // string. + // + // SecretBinary is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SecretValueEntry's + // String and GoString methods. + // + // SecretBinary is automatically base64 encoded/decoded by the SDK. + SecretBinary []byte `min:"1" type:"blob" sensitive:"true"` + + // The decrypted secret value, if the secret value was originally provided as + // a string or through the Secrets Manager console. + // + // SecretString is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SecretValueEntry's + // String and GoString methods. + SecretString *string `min:"1" type:"string" sensitive:"true"` + + // The unique version identifier of this version of the secret. + VersionId *string `min:"32" type:"string"` + + // A list of all of the staging labels currently attached to this version of + // the secret. + VersionStages []*string `min:"1" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SecretValueEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SecretValueEntry) GoString() string { + return s.String() +} + +// SetARN sets the ARN field's value. +func (s *SecretValueEntry) SetARN(v string) *SecretValueEntry { + s.ARN = &v + return s +} + +// SetCreatedDate sets the CreatedDate field's value. +func (s *SecretValueEntry) SetCreatedDate(v time.Time) *SecretValueEntry { + s.CreatedDate = &v + return s +} + +// SetName sets the Name field's value. +func (s *SecretValueEntry) SetName(v string) *SecretValueEntry { + s.Name = &v + return s +} + +// SetSecretBinary sets the SecretBinary field's value. +func (s *SecretValueEntry) SetSecretBinary(v []byte) *SecretValueEntry { + s.SecretBinary = v + return s +} + +// SetSecretString sets the SecretString field's value. +func (s *SecretValueEntry) SetSecretString(v string) *SecretValueEntry { + s.SecretString = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *SecretValueEntry) SetVersionId(v string) *SecretValueEntry { + s.VersionId = &v + return s +} + +// SetVersionStages sets the VersionStages field's value. 
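SecretValueEntry appears to be the element type returned by the BatchGetSecretValue operation from the same SDK update; that operation and its field names are not shown in this hunk, so the following is an assumption-laden sketch:

// BatchGetSecretValue, SecretIdList, and SecretValues are assumed from the
// corresponding API in this SDK release; they do not appear in this hunk.
out, err := svc.BatchGetSecretValue(&secretsmanager.BatchGetSecretValueInput{
	SecretIdList: []*string{
		aws.String("prod/db/credentials"), // hypothetical
		aws.String("prod/api/key"),        // hypothetical
	},
})
if err != nil {
	log.Fatal(err)
}
for _, v := range out.SecretValues {
	fmt.Println(aws.StringValue(v.Name), "version", aws.StringValue(v.VersionId))
}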
+func (s *SecretValueEntry) SetVersionStages(v []*string) *SecretValueEntry { + s.VersionStages = v + return s +} + // A structure that contains information about one version of a secret. type SecretVersionsListEntry struct { _ struct{} `type:"structure"` @@ -7016,12 +7619,15 @@ type UpdateSecretInput struct { // If you use the Amazon Web Services CLI or one of the Amazon Web Services // SDKs to call this operation, then you can leave this parameter empty. The // CLI or SDK generates a random UUID for you and includes it as the value for - // this parameter in the request. If you don't use the SDK and instead generate - // a raw HTTP request to the Secrets Manager service endpoint, then you must - // generate a ClientRequestToken yourself for the new version and include the - // value in the request. + // this parameter in the request. // - // This value becomes the VersionId of the new version. + // If you generate a raw HTTP request to the Secrets Manager service endpoint, + // then you must generate a ClientRequestToken and include it in the request. + // + // This value helps ensure idempotency. Secrets Manager uses this value to prevent + // the accidental creation of duplicate versions if there are failures and retries + // during a rotation. We recommend that you generate a UUID-type (https://wikipedia.org/wiki/Universally_unique_identifier) + // value to ensure uniqueness of your versions within the specified secret. ClientRequestToken *string `min:"32" type:"string" idempotencyToken:"true"` // The description of the secret. @@ -7029,8 +7635,10 @@ // The ARN, key ID, or alias of the KMS key that Secrets Manager uses to encrypt // new secret versions as well as any existing versions with the staging labels - // AWSCURRENT, AWSPENDING, or AWSPREVIOUS. For more information about versions - // and staging labels, see Concepts: Version (https://docs.aws.amazon.com/secretsmanager/latest/userguide/getting-started.html#term_version). + // AWSCURRENT, AWSPENDING, or AWSPREVIOUS. If you don't have kms:Encrypt permission + // to the new key, Secrets Manager does not re-encrypt existing secret versions + // with the new key. For more information about versions and staging labels, + // see Concepts: Version (https://docs.aws.amazon.com/secretsmanager/latest/userguide/getting-started.html#term_version). // // A key alias is always prefixed by alias/, for example alias/aws/secretsmanager. // For more information, see About aliases (https://docs.aws.amazon.com/kms/latest/developerguide/alias-about.html). @@ -7058,12 +7666,16 @@ type UpdateSecretInput struct { // // You can't access this parameter in the Secrets Manager console. // + // Sensitive: This field contains sensitive information, so the service does + // not include it in CloudTrail log entries. If you create your own log entries, + // you must also avoid logging the information in this field. + // // SecretBinary is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by UpdateSecretInput's // String and GoString methods. // // SecretBinary is automatically base64 encoded/decoded by the SDK. - SecretBinary []byte `type:"blob" sensitive:"true"` + SecretBinary []byte `min:"1" type:"blob" sensitive:"true"` // The ARN or name of the secret. // @@ -7078,10 +7690,14 @@ type UpdateSecretInput struct { // // Either SecretBinary or SecretString must have a value, but not both.
// + // Sensitive: This field contains sensitive information, so the service does + // not include it in CloudTrail log entries. If you create your own log entries, + // you must also avoid logging the information in this field. + // // SecretString is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by UpdateSecretInput's // String and GoString methods. - SecretString *string `type:"string" sensitive:"true"` + SecretString *string `min:"1" type:"string" sensitive:"true"` } // String returns the string representation. @@ -7108,12 +7724,18 @@ func (s *UpdateSecretInput) Validate() error { if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 32 { invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 32)) } + if s.SecretBinary != nil && len(s.SecretBinary) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecretBinary", 1)) + } if s.SecretId == nil { invalidParams.Add(request.NewErrParamRequired("SecretId")) } if s.SecretId != nil && len(*s.SecretId) < 1 { invalidParams.Add(request.NewErrParamMinLen("SecretId", 1)) } + if s.SecretString != nil && len(*s.SecretString) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecretString", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -7360,7 +7982,8 @@ type ValidateResourcePolicyInput struct { // ResourcePolicy is a required field ResourcePolicy *string `min:"1" type:"string" required:"true"` - // This field is reserved for internal use. + // The ARN or name of the secret with the resource-based policy you want to + // validate. SecretId *string `min:"1" type:"string"` } @@ -7511,6 +8134,9 @@ const ( // FilterNameStringTypePrimaryRegion is a FilterNameStringType enum value FilterNameStringTypePrimaryRegion = "primary-region" + // FilterNameStringTypeOwningService is a FilterNameStringType enum value + FilterNameStringTypeOwningService = "owning-service" + // FilterNameStringTypeAll is a FilterNameStringType enum value FilterNameStringTypeAll = "all" ) @@ -7523,6 +8149,7 @@ func FilterNameStringType_Values() []string { FilterNameStringTypeTagKey, FilterNameStringTypeTagValue, FilterNameStringTypePrimaryRegion, + FilterNameStringTypeOwningService, FilterNameStringTypeAll, } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/doc.go b/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/doc.go index e4a98386..402a2dd6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/doc.go @@ -15,6 +15,9 @@ // This version of the Secrets Manager API Reference documents the Secrets Manager // API version 2017-10-17. // +// For a list of endpoints, see Amazon Web Services Secrets Manager endpoints +// (https://docs.aws.amazon.com/secretsmanager/latest/userguide/asm_access.html#endpoints). +// // # Support and Feedback for Amazon Web Services Secrets Manager // // We welcome your feedback. 
Send your comments to awssecretsmanager-feedback@amazon.com diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/BUILD.bazel new file mode 100644 index 00000000..8774a29b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/BUILD.bazel @@ -0,0 +1,25 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "ssooidc", + srcs = [ + "api.go", + "doc.go", + "errors.go", + "service.go", + ], + importmap = "peridot.resf.org/vendor/github.com/aws/aws-sdk-go/service/ssooidc", + importpath = "github.com/aws/aws-sdk-go/service/ssooidc", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/aws/aws-sdk-go/aws", + "//vendor/github.com/aws/aws-sdk-go/aws/awsutil", + "//vendor/github.com/aws/aws-sdk-go/aws/client", + "//vendor/github.com/aws/aws-sdk-go/aws/client/metadata", + "//vendor/github.com/aws/aws-sdk-go/aws/credentials", + "//vendor/github.com/aws/aws-sdk-go/aws/request", + "//vendor/github.com/aws/aws-sdk-go/aws/signer/v4:signer", + "//vendor/github.com/aws/aws-sdk-go/private/protocol", + "//vendor/github.com/aws/aws-sdk-go/private/protocol/restjson", + ], +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go new file mode 100644 index 00000000..827bd519 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go @@ -0,0 +1,2406 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ssooidc + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" +) + +const opCreateToken = "CreateToken" + +// CreateTokenRequest generates a "aws/request.Request" representing the +// client's request for the CreateToken operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateToken for more information on using the CreateToken +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateTokenRequest method. +// req, resp := client.CreateTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateToken +func (c *SSOOIDC) CreateTokenRequest(input *CreateTokenInput) (req *request.Request, output *CreateTokenOutput) { + op := &request.Operation{ + Name: opCreateToken, + HTTPMethod: "POST", + HTTPPath: "/token", + } + + if input == nil { + input = &CreateTokenInput{} + } + + output = &CreateTokenOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// CreateToken API operation for AWS SSO OIDC. +// +// Creates and returns access and refresh tokens for clients that are authenticated +// using client secrets. 
The access token can be used to fetch short-term credentials +// for the assigned AWS accounts or to access application APIs using bearer +// authentication. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SSO OIDC's +// API operation CreateToken for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - InvalidClientException +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. +// +// - InvalidGrantException +// Indicates that a request contains an invalid grant. This can occur if a client +// makes a CreateToken request with an invalid grant type. +// +// - UnauthorizedClientException +// Indicates that the client is not currently authorized to make the request. +// This can happen when a clientId is not issued for a public client. +// +// - UnsupportedGrantTypeException +// Indicates that the grant type in the request is not supported by the service. +// +// - InvalidScopeException +// Indicates that the scope provided in the request is invalid. +// +// - AuthorizationPendingException +// Indicates that a request to authorize a client with an access user session +// token is pending. +// +// - SlowDownException +// Indicates that the client is making the request too frequently and is more +// than the service can handle. +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. +// +// - ExpiredTokenException +// Indicates that the token issued by the service is expired and is no longer +// valid. +// +// - InternalServerException +// Indicates that an error from the service occurred while trying to process +// a request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateToken +func (c *SSOOIDC) CreateToken(input *CreateTokenInput) (*CreateTokenOutput, error) { + req, out := c.CreateTokenRequest(input) + return out, req.Send() +} + +// CreateTokenWithContext is the same as CreateToken with the addition of +// the ability to pass a context and additional request options. +// +// See CreateToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSOOIDC) CreateTokenWithContext(ctx aws.Context, input *CreateTokenInput, opts ...request.Option) (*CreateTokenOutput, error) { + req, out := c.CreateTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateTokenWithIAM = "CreateTokenWithIAM" + +// CreateTokenWithIAMRequest generates a "aws/request.Request" representing the +// client's request for the CreateTokenWithIAM operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See CreateTokenWithIAM for more information on using the CreateTokenWithIAM +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateTokenWithIAMRequest method. +// req, resp := client.CreateTokenWithIAMRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateTokenWithIAM +func (c *SSOOIDC) CreateTokenWithIAMRequest(input *CreateTokenWithIAMInput) (req *request.Request, output *CreateTokenWithIAMOutput) { + op := &request.Operation{ + Name: opCreateTokenWithIAM, + HTTPMethod: "POST", + HTTPPath: "/token?aws_iam=t", + } + + if input == nil { + input = &CreateTokenWithIAMInput{} + } + + output = &CreateTokenWithIAMOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateTokenWithIAM API operation for AWS SSO OIDC. +// +// Creates and returns access and refresh tokens for clients and applications +// that are authenticated using IAM entities. The access token can be used to +// fetch short-term credentials for the assigned Amazon Web Services accounts +// or to access application APIs using bearer authentication. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SSO OIDC's +// API operation CreateTokenWithIAM for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - InvalidClientException +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. +// +// - InvalidGrantException +// Indicates that a request contains an invalid grant. This can occur if a client +// makes a CreateToken request with an invalid grant type. +// +// - UnauthorizedClientException +// Indicates that the client is not currently authorized to make the request. +// This can happen when a clientId is not issued for a public client. +// +// - UnsupportedGrantTypeException +// Indicates that the grant type in the request is not supported by the service. +// +// - InvalidScopeException +// Indicates that the scope provided in the request is invalid. +// +// - AuthorizationPendingException +// Indicates that a request to authorize a client with an access user session +// token is pending. +// +// - SlowDownException +// Indicates that the client is making the request too frequently and is more +// than the service can handle. +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. +// +// - ExpiredTokenException +// Indicates that the token issued by the service is expired and is no longer +// valid. +// +// - InternalServerException +// Indicates that an error from the service occurred while trying to process +// a request. 
+// +// - InvalidRequestRegionException +// Indicates that a token provided as input to the request was issued by and +// is only usable by calling IAM Identity Center endpoints in another region. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateTokenWithIAM +func (c *SSOOIDC) CreateTokenWithIAM(input *CreateTokenWithIAMInput) (*CreateTokenWithIAMOutput, error) { + req, out := c.CreateTokenWithIAMRequest(input) + return out, req.Send() +} + +// CreateTokenWithIAMWithContext is the same as CreateTokenWithIAM with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTokenWithIAM for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSOOIDC) CreateTokenWithIAMWithContext(ctx aws.Context, input *CreateTokenWithIAMInput, opts ...request.Option) (*CreateTokenWithIAMOutput, error) { + req, out := c.CreateTokenWithIAMRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRegisterClient = "RegisterClient" + +// RegisterClientRequest generates a "aws/request.Request" representing the +// client's request for the RegisterClient operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RegisterClient for more information on using the RegisterClient +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the RegisterClientRequest method. +// req, resp := client.RegisterClientRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient +func (c *SSOOIDC) RegisterClientRequest(input *RegisterClientInput) (req *request.Request, output *RegisterClientOutput) { + op := &request.Operation{ + Name: opRegisterClient, + HTTPMethod: "POST", + HTTPPath: "/client/register", + } + + if input == nil { + input = &RegisterClientInput{} + } + + output = &RegisterClientOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// RegisterClient API operation for AWS SSO OIDC. +// +// Registers a client with IAM Identity Center. This allows clients to initiate +// device authorization. The output should be persisted for reuse through many +// authentication requests. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SSO OIDC's +// API operation RegisterClient for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. 
+// +// - InvalidScopeException +// Indicates that the scope provided in the request is invalid. +// +// - InvalidClientMetadataException +// Indicates that the client information sent in the request during registration +// is invalid. +// +// - InternalServerException +// Indicates that an error from the service occurred while trying to process +// a request. +// +// - InvalidRedirectUriException +// Indicates that one or more redirect URI in the request is not supported for +// this operation. +// +// - UnsupportedGrantTypeException +// Indicates that the grant type in the request is not supported by the service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient +func (c *SSOOIDC) RegisterClient(input *RegisterClientInput) (*RegisterClientOutput, error) { + req, out := c.RegisterClientRequest(input) + return out, req.Send() +} + +// RegisterClientWithContext is the same as RegisterClient with the addition of +// the ability to pass a context and additional request options. +// +// See RegisterClient for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSOOIDC) RegisterClientWithContext(ctx aws.Context, input *RegisterClientInput, opts ...request.Option) (*RegisterClientOutput, error) { + req, out := c.RegisterClientRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartDeviceAuthorization = "StartDeviceAuthorization" + +// StartDeviceAuthorizationRequest generates a "aws/request.Request" representing the +// client's request for the StartDeviceAuthorization operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartDeviceAuthorization for more information on using the StartDeviceAuthorization +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the StartDeviceAuthorizationRequest method. +// req, resp := client.StartDeviceAuthorizationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/StartDeviceAuthorization +func (c *SSOOIDC) StartDeviceAuthorizationRequest(input *StartDeviceAuthorizationInput) (req *request.Request, output *StartDeviceAuthorizationOutput) { + op := &request.Operation{ + Name: opStartDeviceAuthorization, + HTTPMethod: "POST", + HTTPPath: "/device_authorization", + } + + if input == nil { + input = &StartDeviceAuthorizationInput{} + } + + output = &StartDeviceAuthorizationOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// StartDeviceAuthorization API operation for AWS SSO OIDC. +// +// Initiates device authorization by requesting a pair of verification codes +// from the authorization service. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SSO OIDC's +// API operation StartDeviceAuthorization for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - InvalidClientException +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. +// +// - UnauthorizedClientException +// Indicates that the client is not currently authorized to make the request. +// This can happen when a clientId is not issued for a public client. +// +// - SlowDownException +// Indicates that the client is making the request too frequently and is more +// than the service can handle. +// +// - InternalServerException +// Indicates that an error from the service occurred while trying to process +// a request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/StartDeviceAuthorization +func (c *SSOOIDC) StartDeviceAuthorization(input *StartDeviceAuthorizationInput) (*StartDeviceAuthorizationOutput, error) { + req, out := c.StartDeviceAuthorizationRequest(input) + return out, req.Send() +} + +// StartDeviceAuthorizationWithContext is the same as StartDeviceAuthorization with the addition of +// the ability to pass a context and additional request options. +// +// See StartDeviceAuthorization for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSOOIDC) StartDeviceAuthorizationWithContext(ctx aws.Context, input *StartDeviceAuthorizationInput, opts ...request.Option) (*StartDeviceAuthorizationOutput, error) { + req, out := c.StartDeviceAuthorizationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// You do not have sufficient access to perform this action. +type AccessDeniedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be access_denied. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccessDeniedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s AccessDeniedException) GoString() string { + return s.String() +} + +func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { + return &AccessDeniedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AccessDeniedException) Code() string { + return "AccessDeniedException" +} + +// Message returns the exception's message. +func (s *AccessDeniedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AccessDeniedException) OrigErr() error { + return nil +} + +func (s *AccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that a request to authorize a client with an access user session +// token is pending. +type AuthorizationPendingException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be authorization_pending. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AuthorizationPendingException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AuthorizationPendingException) GoString() string { + return s.String() +} + +func newErrorAuthorizationPendingException(v protocol.ResponseMetadata) error { + return &AuthorizationPendingException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AuthorizationPendingException) Code() string { + return "AuthorizationPendingException" +} + +// Message returns the exception's message. +func (s *AuthorizationPendingException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AuthorizationPendingException) OrigErr() error { + return nil +} + +func (s *AuthorizationPendingException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AuthorizationPendingException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
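Because these exceptions are concrete structs, callers can branch on them with a type switch instead of matching error codes as strings; a sketch assuming an *ssooidc.SSOOIDC client and an in-flight device-code CreateToken call (SlowDownException is declared elsewhere in this file):

_, err := client.CreateToken(input) // client and input assembled elsewhere
if err != nil {
	switch err.(type) {
	case *ssooidc.AuthorizationPendingException:
		// The user has not approved the device yet: wait for the advertised
		// polling interval, then retry.
	case *ssooidc.SlowDownException:
		// Polling too fast: back off before the next attempt.
	default:
		log.Fatal(err)
	}
}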
+func (s *AuthorizationPendingException) RequestID() string { + return s.RespMetadata.RequestID +} + +type CreateTokenInput struct { + _ struct{} `type:"structure"` + + // The unique identifier string for the client or application. This value comes + // from the result of the RegisterClient API. + // + // ClientId is a required field + ClientId *string `locationName:"clientId" type:"string" required:"true"` + + // A secret string generated for the client. This value should come from the + // persisted result of the RegisterClient API. + // + // ClientSecret is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenInput's + // String and GoString methods. + // + // ClientSecret is a required field + ClientSecret *string `locationName:"clientSecret" type:"string" required:"true" sensitive:"true"` + + // Used only when calling this API for the Authorization Code grant type. The + // short-term code is used to identify this authorization request. This grant + // type is currently unsupported for the CreateToken API. + Code *string `locationName:"code" type:"string"` + + // Used only when calling this API for the Authorization Code grant type. This + // value is generated by the client and presented to validate the original code + // challenge value the client passed at authorization time. + // + // CodeVerifier is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenInput's + // String and GoString methods. + CodeVerifier *string `locationName:"codeVerifier" type:"string" sensitive:"true"` + + // Used only when calling this API for the Device Code grant type. This short-term + // code is used to identify this authorization request. This comes from the + // result of the StartDeviceAuthorization API. + DeviceCode *string `locationName:"deviceCode" type:"string"` + + // Supports the following OAuth grant types: Device Code and Refresh Token. + // Specify either of the following values, depending on the grant type that + // you want: + // + // * Device Code - urn:ietf:params:oauth:grant-type:device_code + // + // * Refresh Token - refresh_token + // + // For information about how to obtain the device code, see the StartDeviceAuthorization + // topic. + // + // GrantType is a required field + GrantType *string `locationName:"grantType" type:"string" required:"true"` + + // Used only when calling this API for the Authorization Code grant type. This + // value specifies the location of the client or application that has registered + // to receive the authorization code. + RedirectUri *string `locationName:"redirectUri" type:"string"` + + // Used only when calling this API for the Refresh Token grant type. This token + // is used to refresh short-term tokens, such as the access token, that might + // expire. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide + // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // RefreshToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenInput's + // String and GoString methods. + RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"` + + // The list of scopes for which authorization is requested. 
The access token + // that is issued is limited to the scopes that are granted. If this value is + // not specified, IAM Identity Center authorizes all scopes that are configured + // for the client during the call to RegisterClient. + Scope []*string `locationName:"scope" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTokenInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientSecret == nil { + invalidParams.Add(request.NewErrParamRequired("ClientSecret")) + } + if s.GrantType == nil { + invalidParams.Add(request.NewErrParamRequired("GrantType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientId sets the ClientId field's value. +func (s *CreateTokenInput) SetClientId(v string) *CreateTokenInput { + s.ClientId = &v + return s +} + +// SetClientSecret sets the ClientSecret field's value. +func (s *CreateTokenInput) SetClientSecret(v string) *CreateTokenInput { + s.ClientSecret = &v + return s +} + +// SetCode sets the Code field's value. +func (s *CreateTokenInput) SetCode(v string) *CreateTokenInput { + s.Code = &v + return s +} + +// SetCodeVerifier sets the CodeVerifier field's value. +func (s *CreateTokenInput) SetCodeVerifier(v string) *CreateTokenInput { + s.CodeVerifier = &v + return s +} + +// SetDeviceCode sets the DeviceCode field's value. +func (s *CreateTokenInput) SetDeviceCode(v string) *CreateTokenInput { + s.DeviceCode = &v + return s +} + +// SetGrantType sets the GrantType field's value. +func (s *CreateTokenInput) SetGrantType(v string) *CreateTokenInput { + s.GrantType = &v + return s +} + +// SetRedirectUri sets the RedirectUri field's value. +func (s *CreateTokenInput) SetRedirectUri(v string) *CreateTokenInput { + s.RedirectUri = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenInput) SetRefreshToken(v string) *CreateTokenInput { + s.RefreshToken = &v + return s +} + +// SetScope sets the Scope field's value. +func (s *CreateTokenInput) SetScope(v []*string) *CreateTokenInput { + s.Scope = v + return s +} + +type CreateTokenOutput struct { + _ struct{} `type:"structure"` + + // A bearer token to access Amazon Web Services accounts and applications assigned + // to a user. + // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenOutput's + // String and GoString methods. + AccessToken *string `locationName:"accessToken" type:"string" sensitive:"true"` + + // Indicates the time in seconds when an access token will expire. + ExpiresIn *int64 `locationName:"expiresIn" type:"integer"` + + // The idToken is not implemented or supported. 
For more information about the + // features and limitations of the current IAM Identity Center OIDC implementation, + // see Considerations for Using this Guide in the IAM Identity Center OIDC API + // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // A JSON Web Token (JWT) that identifies who is associated with the issued + // access token. + // + // IdToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenOutput's + // String and GoString methods. + IdToken *string `locationName:"idToken" type:"string" sensitive:"true"` + + // A token that, if present, can be used to refresh a previously issued access + // token that might have expired. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide + // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // RefreshToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenOutput's + // String and GoString methods. + RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"` + + // Used to notify the client that the returned token is an access token. The + // supported token type is Bearer. + TokenType *string `locationName:"tokenType" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenOutput) GoString() string { + return s.String() +} + +// SetAccessToken sets the AccessToken field's value. +func (s *CreateTokenOutput) SetAccessToken(v string) *CreateTokenOutput { + s.AccessToken = &v + return s +} + +// SetExpiresIn sets the ExpiresIn field's value. +func (s *CreateTokenOutput) SetExpiresIn(v int64) *CreateTokenOutput { + s.ExpiresIn = &v + return s +} + +// SetIdToken sets the IdToken field's value. +func (s *CreateTokenOutput) SetIdToken(v string) *CreateTokenOutput { + s.IdToken = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenOutput) SetRefreshToken(v string) *CreateTokenOutput { + s.RefreshToken = &v + return s +} + +// SetTokenType sets the TokenType field's value. +func (s *CreateTokenOutput) SetTokenType(v string) *CreateTokenOutput { + s.TokenType = &v + return s +} + +type CreateTokenWithIAMInput struct { + _ struct{} `type:"structure"` + + // Used only when calling this API for the JWT Bearer grant type. This value + // specifies the JSON Web Token (JWT) issued by a trusted token issuer. To authorize + // a trusted token issuer, configure the JWT Bearer GrantOptions for the application. + // + // Assertion is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's + // String and GoString methods. 
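For orientation, here is a minimal sketch (not part of the generated file) of how a caller might drive the Device Code grant against CreateToken, using the input/output shapes above together with the AuthorizationPending and SlowDown error codes defined later in this change. It assumes results from earlier RegisterClient and StartDeviceAuthorization calls; pollForToken is an illustrative name.

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/ssooidc"
)

// pollForToken repeatedly calls CreateToken until the user approves the
// device, a terminal error is returned, or the device code expires.
func pollForToken(svc *ssooidc.SSOOIDC, reg *ssooidc.RegisterClientOutput, da *ssooidc.StartDeviceAuthorizationOutput) (*ssooidc.CreateTokenOutput, error) {
	// Honor the server-suggested polling interval, defaulting to 5 seconds.
	interval := time.Duration(aws.Int64Value(da.Interval)) * time.Second
	if interval <= 0 {
		interval = 5 * time.Second
	}
	for {
		out, err := svc.CreateToken(&ssooidc.CreateTokenInput{
			ClientId:     reg.ClientId,
			ClientSecret: reg.ClientSecret,
			DeviceCode:   da.DeviceCode,
			GrantType:    aws.String("urn:ietf:params:oauth:grant-type:device_code"),
		})
		if err == nil {
			return out, nil
		}
		aerr, ok := err.(awserr.Error)
		if !ok {
			return nil, err
		}
		switch aerr.Code() {
		case ssooidc.ErrCodeAuthorizationPendingException:
			// The user has not completed the verification step yet; keep polling.
		case ssooidc.ErrCodeSlowDownException:
			// The service asked us to back off; widen the interval.
			interval += 5 * time.Second
		default:
			return nil, fmt.Errorf("device authorization failed: %w", err)
		}
		time.Sleep(interval)
	}
}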
+ Assertion *string `locationName:"assertion" type:"string" sensitive:"true"` + + // The unique identifier string for the client or application. This value is + // an application ARN that has OAuth grants configured. + // + // ClientId is a required field + ClientId *string `locationName:"clientId" type:"string" required:"true"` + + // Used only when calling this API for the Authorization Code grant type. This + // short-term code is used to identify this authorization request. The code + // is obtained through a redirect from IAM Identity Center to a redirect URI + // persisted in the Authorization Code GrantOptions for the application. + Code *string `locationName:"code" type:"string"` + + // Used only when calling this API for the Authorization Code grant type. This + // value is generated by the client and presented to validate the original code + // challenge value the client passed at authorization time. + // + // CodeVerifier is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's + // String and GoString methods. + CodeVerifier *string `locationName:"codeVerifier" type:"string" sensitive:"true"` + + // Supports the following OAuth grant types: Authorization Code, Refresh Token, + // JWT Bearer, and Token Exchange. Specify one of the following values, depending + // on the grant type that you want: + // + // * Authorization Code - authorization_code + // + // * Refresh Token - refresh_token + // + // * JWT Bearer - urn:ietf:params:oauth:grant-type:jwt-bearer + // + // * Token Exchange - urn:ietf:params:oauth:grant-type:token-exchange + // + // GrantType is a required field + GrantType *string `locationName:"grantType" type:"string" required:"true"` + + // Used only when calling this API for the Authorization Code grant type. This + // value specifies the location of the client or application that has registered + // to receive the authorization code. + RedirectUri *string `locationName:"redirectUri" type:"string"` + + // Used only when calling this API for the Refresh Token grant type. This token + // is used to refresh short-term tokens, such as the access token, that might + // expire. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide + // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // RefreshToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's + // String and GoString methods. + RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"` + + // Used only when calling this API for the Token Exchange grant type. This value + // specifies the type of token that the requester can receive. The following + // values are supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + // + // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token + RequestedTokenType *string `locationName:"requestedTokenType" type:"string"` + + // The list of scopes for which authorization is requested. The access token + // that is issued is limited to the scopes that are granted. If the value is + // not specified, IAM Identity Center authorizes all scopes configured for the + // application, including the following default scopes: openid, aws, sts:identity_context. 
+ Scope []*string `locationName:"scope" type:"list"` + + // Used only when calling this API for the Token Exchange grant type. This value + // specifies the subject of the exchange. The value of the subject token must + // be an access token issued by IAM Identity Center to a different client or + // application. The access token must have authorized scopes that indicate the + // requested application as a target audience. + // + // SubjectToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's + // String and GoString methods. + SubjectToken *string `locationName:"subjectToken" type:"string" sensitive:"true"` + + // Used only when calling this API for the Token Exchange grant type. This value + // specifies the type of token that is passed as the subject of the exchange. + // The following value is supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + SubjectTokenType *string `locationName:"subjectTokenType" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenWithIAMInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenWithIAMInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTokenWithIAMInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTokenWithIAMInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.GrantType == nil { + invalidParams.Add(request.NewErrParamRequired("GrantType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssertion sets the Assertion field's value. +func (s *CreateTokenWithIAMInput) SetAssertion(v string) *CreateTokenWithIAMInput { + s.Assertion = &v + return s +} + +// SetClientId sets the ClientId field's value. +func (s *CreateTokenWithIAMInput) SetClientId(v string) *CreateTokenWithIAMInput { + s.ClientId = &v + return s +} + +// SetCode sets the Code field's value. +func (s *CreateTokenWithIAMInput) SetCode(v string) *CreateTokenWithIAMInput { + s.Code = &v + return s +} + +// SetCodeVerifier sets the CodeVerifier field's value. +func (s *CreateTokenWithIAMInput) SetCodeVerifier(v string) *CreateTokenWithIAMInput { + s.CodeVerifier = &v + return s +} + +// SetGrantType sets the GrantType field's value. +func (s *CreateTokenWithIAMInput) SetGrantType(v string) *CreateTokenWithIAMInput { + s.GrantType = &v + return s +} + +// SetRedirectUri sets the RedirectUri field's value. +func (s *CreateTokenWithIAMInput) SetRedirectUri(v string) *CreateTokenWithIAMInput { + s.RedirectUri = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenWithIAMInput) SetRefreshToken(v string) *CreateTokenWithIAMInput { + s.RefreshToken = &v + return s +} + +// SetRequestedTokenType sets the RequestedTokenType field's value. 
+func (s *CreateTokenWithIAMInput) SetRequestedTokenType(v string) *CreateTokenWithIAMInput { + s.RequestedTokenType = &v + return s +} + +// SetScope sets the Scope field's value. +func (s *CreateTokenWithIAMInput) SetScope(v []*string) *CreateTokenWithIAMInput { + s.Scope = v + return s +} + +// SetSubjectToken sets the SubjectToken field's value. +func (s *CreateTokenWithIAMInput) SetSubjectToken(v string) *CreateTokenWithIAMInput { + s.SubjectToken = &v + return s +} + +// SetSubjectTokenType sets the SubjectTokenType field's value. +func (s *CreateTokenWithIAMInput) SetSubjectTokenType(v string) *CreateTokenWithIAMInput { + s.SubjectTokenType = &v + return s +} + +type CreateTokenWithIAMOutput struct { + _ struct{} `type:"structure"` + + // A bearer token to access Amazon Web Services accounts and applications assigned + // to a user. + // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's + // String and GoString methods. + AccessToken *string `locationName:"accessToken" type:"string" sensitive:"true"` + + // Indicates the time in seconds when an access token will expire. + ExpiresIn *int64 `locationName:"expiresIn" type:"integer"` + + // A JSON Web Token (JWT) that identifies the user associated with the issued + // access token. + // + // IdToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's + // String and GoString methods. + IdToken *string `locationName:"idToken" type:"string" sensitive:"true"` + + // Indicates the type of tokens that are issued by IAM Identity Center. The + // following values are supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + // + // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token + IssuedTokenType *string `locationName:"issuedTokenType" type:"string"` + + // A token that, if present, can be used to refresh a previously issued access + // token that might have expired. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide + // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // RefreshToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's + // String and GoString methods. + RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"` + + // The list of scopes for which authorization is granted. The access token that + // is issued is limited to the scopes that are granted. + Scope []*string `locationName:"scope" type:"list"` + + // Used to notify the requester that the returned token is an access token. + // The supported token type is Bearer. + TokenType *string `locationName:"tokenType" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenWithIAMOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenWithIAMOutput) GoString() string { + return s.String() +} + +// SetAccessToken sets the AccessToken field's value. +func (s *CreateTokenWithIAMOutput) SetAccessToken(v string) *CreateTokenWithIAMOutput { + s.AccessToken = &v + return s +} + +// SetExpiresIn sets the ExpiresIn field's value. +func (s *CreateTokenWithIAMOutput) SetExpiresIn(v int64) *CreateTokenWithIAMOutput { + s.ExpiresIn = &v + return s +} + +// SetIdToken sets the IdToken field's value. +func (s *CreateTokenWithIAMOutput) SetIdToken(v string) *CreateTokenWithIAMOutput { + s.IdToken = &v + return s +} + +// SetIssuedTokenType sets the IssuedTokenType field's value. +func (s *CreateTokenWithIAMOutput) SetIssuedTokenType(v string) *CreateTokenWithIAMOutput { + s.IssuedTokenType = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenWithIAMOutput) SetRefreshToken(v string) *CreateTokenWithIAMOutput { + s.RefreshToken = &v + return s +} + +// SetScope sets the Scope field's value. +func (s *CreateTokenWithIAMOutput) SetScope(v []*string) *CreateTokenWithIAMOutput { + s.Scope = v + return s +} + +// SetTokenType sets the TokenType field's value. +func (s *CreateTokenWithIAMOutput) SetTokenType(v string) *CreateTokenWithIAMOutput { + s.TokenType = &v + return s +} + +// Indicates that the token issued by the service is expired and is no longer +// valid. +type ExpiredTokenException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be expired_token. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExpiredTokenException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExpiredTokenException) GoString() string { + return s.String() +} + +func newErrorExpiredTokenException(v protocol.ResponseMetadata) error { + return &ExpiredTokenException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ExpiredTokenException) Code() string { + return "ExpiredTokenException" +} + +// Message returns the exception's message. +func (s *ExpiredTokenException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ExpiredTokenException) OrigErr() error { + return nil +} + +func (s *ExpiredTokenException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. 
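The Token Exchange grant is the least self-evident of the four, so a hedged sketch (not part of the generated file) may help: an access token issued to one client is swapped for one scoped to another application. It assumes a client whose requests are signed with IAM credentials, an application ARN with OAuth grants configured, and a subject token whose authorized scopes target that application; exchangeAccessToken is an illustrative name.

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ssooidc"
)

// exchangeAccessToken swaps an IAM Identity Center access token issued to a
// different client for one that targets the application identified by appArn.
func exchangeAccessToken(svc *ssooidc.SSOOIDC, appArn, subjectToken string) (*ssooidc.CreateTokenWithIAMOutput, error) {
	return svc.CreateTokenWithIAM(&ssooidc.CreateTokenWithIAMInput{
		ClientId:           aws.String(appArn), // an application ARN with OAuth grants configured
		GrantType:          aws.String("urn:ietf:params:oauth:grant-type:token-exchange"),
		SubjectToken:       aws.String(subjectToken),
		SubjectTokenType:   aws.String("urn:ietf:params:oauth:token-type:access_token"),
		RequestedTokenType: aws.String("urn:ietf:params:oauth:token-type:access_token"),
	})
}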
+func (s *ExpiredTokenException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ExpiredTokenException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that an error from the service occurred while trying to process +// a request. +type InternalServerException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be server_error. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InternalServerException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InternalServerException) GoString() string { + return s.String() +} + +func newErrorInternalServerException(v protocol.ResponseMetadata) error { + return &InternalServerException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InternalServerException) Code() string { + return "InternalServerException" +} + +// Message returns the exception's message. +func (s *InternalServerException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InternalServerException) OrigErr() error { + return nil +} + +func (s *InternalServerException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InternalServerException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InternalServerException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. +type InvalidClientException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be invalid_client. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidClientException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidClientException) GoString() string { + return s.String() +} + +func newErrorInvalidClientException(v protocol.ResponseMetadata) error { + return &InvalidClientException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidClientException) Code() string { + return "InvalidClientException" +} + +// Message returns the exception's message. +func (s *InvalidClientException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidClientException) OrigErr() error { + return nil +} + +func (s *InvalidClientException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidClientException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidClientException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the client information sent in the request during registration +// is invalid. +type InvalidClientMetadataException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be invalid_client_metadata. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidClientMetadataException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidClientMetadataException) GoString() string { + return s.String() +} + +func newErrorInvalidClientMetadataException(v protocol.ResponseMetadata) error { + return &InvalidClientMetadataException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidClientMetadataException) Code() string { + return "InvalidClientMetadataException" +} + +// Message returns the exception's message. +func (s *InvalidClientMetadataException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *InvalidClientMetadataException) OrigErr() error { + return nil +} + +func (s *InvalidClientMetadataException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidClientMetadataException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidClientMetadataException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that a request contains an invalid grant. This can occur if a client +// makes a CreateToken request with an invalid grant type. +type InvalidGrantException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be invalid_grant. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidGrantException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidGrantException) GoString() string { + return s.String() +} + +func newErrorInvalidGrantException(v protocol.ResponseMetadata) error { + return &InvalidGrantException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidGrantException) Code() string { + return "InvalidGrantException" +} + +// Message returns the exception's message. +func (s *InvalidGrantException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidGrantException) OrigErr() error { + return nil +} + +func (s *InvalidGrantException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidGrantException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidGrantException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that one or more redirect URI in the request is not supported for +// this operation. +type InvalidRedirectUriException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be invalid_redirect_uri. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. 
+ Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRedirectUriException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRedirectUriException) GoString() string { + return s.String() +} + +func newErrorInvalidRedirectUriException(v protocol.ResponseMetadata) error { + return &InvalidRedirectUriException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidRedirectUriException) Code() string { + return "InvalidRedirectUriException" +} + +// Message returns the exception's message. +func (s *InvalidRedirectUriException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRedirectUriException) OrigErr() error { + return nil +} + +func (s *InvalidRedirectUriException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRedirectUriException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRedirectUriException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +type InvalidRequestException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be invalid_request. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestException) GoString() string { + return s.String() +} + +func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { + return &InvalidRequestException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
+func (s *InvalidRequestException) Code() string { + return "InvalidRequestException" +} + +// Message returns the exception's message. +func (s *InvalidRequestException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRequestException) OrigErr() error { + return nil +} + +func (s *InvalidRequestException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that a token provided as input to the request was issued by and +// is only usable by calling IAM Identity Center endpoints in another region. +type InvalidRequestRegionException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Indicates the IAM Identity Center endpoint which the requester may call with + // this token. + Endpoint *string `locationName:"endpoint" type:"string"` + + // Single error code. For this exception the value will be invalid_request. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` + + // Indicates the region which the requester may call with this token. + Region *string `locationName:"region" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestRegionException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestRegionException) GoString() string { + return s.String() +} + +func newErrorInvalidRequestRegionException(v protocol.ResponseMetadata) error { + return &InvalidRequestRegionException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidRequestRegionException) Code() string { + return "InvalidRequestRegionException" +} + +// Message returns the exception's message. +func (s *InvalidRequestRegionException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRequestRegionException) OrigErr() error { + return nil +} + +func (s *InvalidRequestRegionException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRequestRegionException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
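All of the modeled exceptions in this file carry the OAuth-style error and error_description members in addition to the generic message, which is worth surfacing when debugging token requests. A small sketch (not part of the generated file; describeOIDCError is an illustrative name) of pulling them out with errors.As:

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ssooidc"
)

// describeOIDCError extracts the OAuth error code and description from two of
// the exception types that commonly surface during token requests, falling
// back to the plain error string for anything else.
func describeOIDCError(err error) string {
	var ire *ssooidc.InvalidRequestException
	if errors.As(err, &ire) {
		return fmt.Sprintf("%s: %s", aws.StringValue(ire.Error_), aws.StringValue(ire.Error_description))
	}
	var ige *ssooidc.InvalidGrantException
	if errors.As(err, &ige) {
		return fmt.Sprintf("%s: %s", aws.StringValue(ige.Error_), aws.StringValue(ige.Error_description))
	}
	return err.Error()
}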
+func (s *InvalidRequestRegionException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the scope provided in the request is invalid. +type InvalidScopeException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be invalid_scope. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidScopeException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidScopeException) GoString() string { + return s.String() +} + +func newErrorInvalidScopeException(v protocol.ResponseMetadata) error { + return &InvalidScopeException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidScopeException) Code() string { + return "InvalidScopeException" +} + +// Message returns the exception's message. +func (s *InvalidScopeException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidScopeException) OrigErr() error { + return nil +} + +func (s *InvalidScopeException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidScopeException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidScopeException) RequestID() string { + return s.RespMetadata.RequestID +} + +type RegisterClientInput struct { + _ struct{} `type:"structure"` + + // The friendly name of the client. + // + // ClientName is a required field + ClientName *string `locationName:"clientName" type:"string" required:"true"` + + // The type of client. The service supports only public as a client type. Anything + // other than public will be rejected by the service. + // + // ClientType is a required field + ClientType *string `locationName:"clientType" type:"string" required:"true"` + + // This IAM Identity Center application ARN is used to define administrator-managed + // configuration for public client access to resources. At authorization, the + // scopes, grants, and redirect URI available to this client will be restricted + // by this application resource. + EntitledApplicationArn *string `locationName:"entitledApplicationArn" type:"string"` + + // The list of OAuth 2.0 grant types that are defined by the client. This list + // is used to restrict the token granting flows available to the client. 
+ GrantTypes []*string `locationName:"grantTypes" type:"list"` + + // The IAM Identity Center Issuer URL associated with an instance of IAM Identity + // Center. This value is needed for user access to resources through the client. + IssuerUrl *string `locationName:"issuerUrl" type:"string"` + + // The list of redirect URI that are defined by the client. At completion of + // authorization, this list is used to restrict what locations the user agent + // can be redirected back to. + RedirectUris []*string `locationName:"redirectUris" type:"list"` + + // The list of scopes that are defined by the client. Upon authorization, this + // list is used to restrict permissions when granting an access token. + Scopes []*string `locationName:"scopes" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegisterClientInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegisterClientInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegisterClientInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterClientInput"} + if s.ClientName == nil { + invalidParams.Add(request.NewErrParamRequired("ClientName")) + } + if s.ClientType == nil { + invalidParams.Add(request.NewErrParamRequired("ClientType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientName sets the ClientName field's value. +func (s *RegisterClientInput) SetClientName(v string) *RegisterClientInput { + s.ClientName = &v + return s +} + +// SetClientType sets the ClientType field's value. +func (s *RegisterClientInput) SetClientType(v string) *RegisterClientInput { + s.ClientType = &v + return s +} + +// SetEntitledApplicationArn sets the EntitledApplicationArn field's value. +func (s *RegisterClientInput) SetEntitledApplicationArn(v string) *RegisterClientInput { + s.EntitledApplicationArn = &v + return s +} + +// SetGrantTypes sets the GrantTypes field's value. +func (s *RegisterClientInput) SetGrantTypes(v []*string) *RegisterClientInput { + s.GrantTypes = v + return s +} + +// SetIssuerUrl sets the IssuerUrl field's value. +func (s *RegisterClientInput) SetIssuerUrl(v string) *RegisterClientInput { + s.IssuerUrl = &v + return s +} + +// SetRedirectUris sets the RedirectUris field's value. +func (s *RegisterClientInput) SetRedirectUris(v []*string) *RegisterClientInput { + s.RedirectUris = v + return s +} + +// SetScopes sets the Scopes field's value. +func (s *RegisterClientInput) SetScopes(v []*string) *RegisterClientInput { + s.Scopes = v + return s +} + +type RegisterClientOutput struct { + _ struct{} `type:"structure"` + + // An endpoint that the client can use to request authorization. + AuthorizationEndpoint *string `locationName:"authorizationEndpoint" type:"string"` + + // The unique identifier string for each client. This client uses this identifier + // to get authenticated by the service in subsequent calls. 
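Since RegisterClient is the entry point for everything above, a short sketch (not part of the generated file) of a typical first call may be useful. The client name is illustrative, and a real caller would persist ClientId and ClientSecret until ClientSecretExpiresAt rather than re-registering on every run:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssooidc"
)

// registerPublicClient registers a new public client with IAM Identity Center
// and returns the credentials needed by StartDeviceAuthorization and CreateToken.
func registerPublicClient() (*ssooidc.RegisterClientOutput, error) {
	svc := ssooidc.New(session.Must(session.NewSession()))
	return svc.RegisterClient(&ssooidc.RegisterClientInput{
		ClientName: aws.String("example-cli"), // illustrative friendly name
		ClientType: aws.String("public"),      // the only type the service accepts
	})
}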
+ ClientId *string `locationName:"clientId" type:"string"` + + // Indicates the time at which the clientId and clientSecret were issued. + ClientIdIssuedAt *int64 `locationName:"clientIdIssuedAt" type:"long"` + + // A secret string generated for the client. The client will use this string + // to get authenticated by the service in subsequent calls. + // + // ClientSecret is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by RegisterClientOutput's + // String and GoString methods. + ClientSecret *string `locationName:"clientSecret" type:"string" sensitive:"true"` + + // Indicates the time at which the clientId and clientSecret will become invalid. + ClientSecretExpiresAt *int64 `locationName:"clientSecretExpiresAt" type:"long"` + + // An endpoint that the client can use to create tokens. + TokenEndpoint *string `locationName:"tokenEndpoint" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegisterClientOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegisterClientOutput) GoString() string { + return s.String() +} + +// SetAuthorizationEndpoint sets the AuthorizationEndpoint field's value. +func (s *RegisterClientOutput) SetAuthorizationEndpoint(v string) *RegisterClientOutput { + s.AuthorizationEndpoint = &v + return s +} + +// SetClientId sets the ClientId field's value. +func (s *RegisterClientOutput) SetClientId(v string) *RegisterClientOutput { + s.ClientId = &v + return s +} + +// SetClientIdIssuedAt sets the ClientIdIssuedAt field's value. +func (s *RegisterClientOutput) SetClientIdIssuedAt(v int64) *RegisterClientOutput { + s.ClientIdIssuedAt = &v + return s +} + +// SetClientSecret sets the ClientSecret field's value. +func (s *RegisterClientOutput) SetClientSecret(v string) *RegisterClientOutput { + s.ClientSecret = &v + return s +} + +// SetClientSecretExpiresAt sets the ClientSecretExpiresAt field's value. +func (s *RegisterClientOutput) SetClientSecretExpiresAt(v int64) *RegisterClientOutput { + s.ClientSecretExpiresAt = &v + return s +} + +// SetTokenEndpoint sets the TokenEndpoint field's value. +func (s *RegisterClientOutput) SetTokenEndpoint(v string) *RegisterClientOutput { + s.TokenEndpoint = &v + return s +} + +// Indicates that the client is making the request too frequently and is more +// than the service can handle. +type SlowDownException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be slow_down. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s SlowDownException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SlowDownException) GoString() string { + return s.String() +} + +func newErrorSlowDownException(v protocol.ResponseMetadata) error { + return &SlowDownException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *SlowDownException) Code() string { + return "SlowDownException" +} + +// Message returns the exception's message. +func (s *SlowDownException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *SlowDownException) OrigErr() error { + return nil +} + +func (s *SlowDownException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *SlowDownException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *SlowDownException) RequestID() string { + return s.RespMetadata.RequestID +} + +type StartDeviceAuthorizationInput struct { + _ struct{} `type:"structure"` + + // The unique identifier string for the client that is registered with IAM Identity + // Center. This value should come from the persisted result of the RegisterClient + // API operation. + // + // ClientId is a required field + ClientId *string `locationName:"clientId" type:"string" required:"true"` + + // A secret string that is generated for the client. This value should come + // from the persisted result of the RegisterClient API operation. + // + // ClientSecret is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by StartDeviceAuthorizationInput's + // String and GoString methods. + // + // ClientSecret is a required field + ClientSecret *string `locationName:"clientSecret" type:"string" required:"true" sensitive:"true"` + + // The URL for the Amazon Web Services access portal. For more information, + // see Using the Amazon Web Services access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) + // in the IAM Identity Center User Guide. + // + // StartUrl is a required field + StartUrl *string `locationName:"startUrl" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *StartDeviceAuthorizationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartDeviceAuthorizationInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientSecret == nil { + invalidParams.Add(request.NewErrParamRequired("ClientSecret")) + } + if s.StartUrl == nil { + invalidParams.Add(request.NewErrParamRequired("StartUrl")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientId sets the ClientId field's value. +func (s *StartDeviceAuthorizationInput) SetClientId(v string) *StartDeviceAuthorizationInput { + s.ClientId = &v + return s +} + +// SetClientSecret sets the ClientSecret field's value. +func (s *StartDeviceAuthorizationInput) SetClientSecret(v string) *StartDeviceAuthorizationInput { + s.ClientSecret = &v + return s +} + +// SetStartUrl sets the StartUrl field's value. +func (s *StartDeviceAuthorizationInput) SetStartUrl(v string) *StartDeviceAuthorizationInput { + s.StartUrl = &v + return s +} + +type StartDeviceAuthorizationOutput struct { + _ struct{} `type:"structure"` + + // The short-lived code that is used by the device when polling for a session + // token. + DeviceCode *string `locationName:"deviceCode" type:"string"` + + // Indicates the number of seconds in which the verification code will become + // invalid. + ExpiresIn *int64 `locationName:"expiresIn" type:"integer"` + + // Indicates the number of seconds the client must wait between attempts when + // polling for a session. + Interval *int64 `locationName:"interval" type:"integer"` + + // A one-time user verification code. This is needed to authorize an in-use + // device. + UserCode *string `locationName:"userCode" type:"string"` + + // The URI of the verification page that takes the userCode to authorize the + // device. + VerificationUri *string `locationName:"verificationUri" type:"string"` + + // An alternate URL that the client can use to automatically launch a browser. + // This process skips the manual step in which the user visits the verification + // page and enters their code. + VerificationUriComplete *string `locationName:"verificationUriComplete" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationOutput) GoString() string { + return s.String() +} + +// SetDeviceCode sets the DeviceCode field's value. +func (s *StartDeviceAuthorizationOutput) SetDeviceCode(v string) *StartDeviceAuthorizationOutput { + s.DeviceCode = &v + return s +} + +// SetExpiresIn sets the ExpiresIn field's value. +func (s *StartDeviceAuthorizationOutput) SetExpiresIn(v int64) *StartDeviceAuthorizationOutput { + s.ExpiresIn = &v + return s +} + +// SetInterval sets the Interval field's value. +func (s *StartDeviceAuthorizationOutput) SetInterval(v int64) *StartDeviceAuthorizationOutput { + s.Interval = &v + return s +} + +// SetUserCode sets the UserCode field's value. 
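To round out the device flow, a sketch (not part of the generated file; startDeviceFlow is an illustrative name) of kicking off device authorization and pointing the user at the verification page, assuming a prior RegisterClient result and the access portal start URL:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ssooidc"
)

// startDeviceFlow begins device authorization for a registered public client
// and tells the user where to approve the request before polling begins.
func startDeviceFlow(svc *ssooidc.SSOOIDC, reg *ssooidc.RegisterClientOutput, startUrl string) (*ssooidc.StartDeviceAuthorizationOutput, error) {
	da, err := svc.StartDeviceAuthorization(&ssooidc.StartDeviceAuthorizationInput{
		ClientId:     reg.ClientId,
		ClientSecret: reg.ClientSecret,
		StartUrl:     aws.String(startUrl),
	})
	if err != nil {
		return nil, err
	}
	// Prefer verificationUriComplete so the user does not have to type the code.
	fmt.Printf("Approve this device at %s (code: %s)\n",
		aws.StringValue(da.VerificationUriComplete),
		aws.StringValue(da.UserCode))
	return da, nil
}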
+func (s *StartDeviceAuthorizationOutput) SetUserCode(v string) *StartDeviceAuthorizationOutput { + s.UserCode = &v + return s +} + +// SetVerificationUri sets the VerificationUri field's value. +func (s *StartDeviceAuthorizationOutput) SetVerificationUri(v string) *StartDeviceAuthorizationOutput { + s.VerificationUri = &v + return s +} + +// SetVerificationUriComplete sets the VerificationUriComplete field's value. +func (s *StartDeviceAuthorizationOutput) SetVerificationUriComplete(v string) *StartDeviceAuthorizationOutput { + s.VerificationUriComplete = &v + return s +} + +// Indicates that the client is not currently authorized to make the request. +// This can happen when a clientId is not issued for a public client. +type UnauthorizedClientException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be unauthorized_client. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnauthorizedClientException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnauthorizedClientException) GoString() string { + return s.String() +} + +func newErrorUnauthorizedClientException(v protocol.ResponseMetadata) error { + return &UnauthorizedClientException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnauthorizedClientException) Code() string { + return "UnauthorizedClientException" +} + +// Message returns the exception's message. +func (s *UnauthorizedClientException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *UnauthorizedClientException) OrigErr() error { + return nil +} + +func (s *UnauthorizedClientException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *UnauthorizedClientException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *UnauthorizedClientException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the grant type in the request is not supported by the service. +type UnsupportedGrantTypeException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be unsupported_grant_type. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. 
+ Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnsupportedGrantTypeException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnsupportedGrantTypeException) GoString() string { + return s.String() +} + +func newErrorUnsupportedGrantTypeException(v protocol.ResponseMetadata) error { + return &UnsupportedGrantTypeException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnsupportedGrantTypeException) Code() string { + return "UnsupportedGrantTypeException" +} + +// Message returns the exception's message. +func (s *UnsupportedGrantTypeException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *UnsupportedGrantTypeException) OrigErr() error { + return nil +} + +func (s *UnsupportedGrantTypeException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *UnsupportedGrantTypeException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *UnsupportedGrantTypeException) RequestID() string { + return s.RespMetadata.RequestID +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go new file mode 100644 index 00000000..083568c6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go @@ -0,0 +1,67 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package ssooidc provides the client and types for making API +// requests to AWS SSO OIDC. +// +// IAM Identity Center OpenID Connect (OIDC) is a web service that enables a +// client (such as CLI or a native application) to register with IAM Identity +// Center. The service also enables the client to fetch the user’s access +// token upon successful authentication and authorization with IAM Identity +// Center. +// +// IAM Identity Center uses the sso and identitystore API namespaces. +// +// # Considerations for Using This Guide +// +// Before you begin using this guide, we recommend that you first review the +// following important information about how the IAM Identity Center OIDC service +// works. +// +// - The IAM Identity Center OIDC service currently implements only the portions +// of the OAuth 2.0 Device Authorization Grant standard (https://tools.ietf.org/html/rfc8628 +// (https://tools.ietf.org/html/rfc8628)) that are necessary to enable single +// sign-on authentication with the CLI. +// +// - With older versions of the CLI, the service only emits OIDC access tokens, +// so to obtain a new token, users must explicitly re-authenticate. 
To access +// the OIDC flow that supports token refresh and doesn’t require re-authentication, +// update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI +// V2) with support for OIDC token refresh and configurable IAM Identity +// Center session durations. For more information, see Configure Amazon Web +// Services access portal session duration (https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html). +// +// - The access tokens provided by this service grant access to all Amazon +// Web Services account entitlements assigned to an IAM Identity Center user, +// not just a particular application. +// +// - The documentation in this guide does not describe the mechanism to convert +// the access token into Amazon Web Services Auth (“sigv4”) credentials +// for use with IAM-protected Amazon Web Services service endpoints. For +// more information, see GetRoleCredentials (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html) +// in the IAM Identity Center Portal API Reference Guide. +// +// For general information about IAM Identity Center, see What is IAM Identity +// Center? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) +// in the IAM Identity Center User Guide. +// +// See https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10 for more information on this service. +// +// See ssooidc package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/ssooidc/ +// +// # Using the Client +// +// To contact AWS SSO OIDC with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS SSO OIDC client SSOOIDC for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/ssooidc/#New +package ssooidc diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go new file mode 100644 index 00000000..cadf4584 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go @@ -0,0 +1,123 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ssooidc + +import ( + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + + // ErrCodeAccessDeniedException for service response error code + // "AccessDeniedException". + // + // You do not have sufficient access to perform this action. + ErrCodeAccessDeniedException = "AccessDeniedException" + + // ErrCodeAuthorizationPendingException for service response error code + // "AuthorizationPendingException". + // + // Indicates that a request to authorize a client with an access user session + // token is pending. + ErrCodeAuthorizationPendingException = "AuthorizationPendingException" + + // ErrCodeExpiredTokenException for service response error code + // "ExpiredTokenException". + // + // Indicates that the token issued by the service is expired and is no longer + // valid. + ErrCodeExpiredTokenException = "ExpiredTokenException" + + // ErrCodeInternalServerException for service response error code + // "InternalServerException". 
+	//
+	// Indicates that an error from the service occurred while trying to process
+	// a request.
+	ErrCodeInternalServerException = "InternalServerException"
+
+	// ErrCodeInvalidClientException for service response error code
+	// "InvalidClientException".
+	//
+	// Indicates that the clientId or clientSecret in the request is invalid. For
+	// example, this can occur when a client sends an incorrect clientId or an expired
+	// clientSecret.
+	ErrCodeInvalidClientException = "InvalidClientException"
+
+	// ErrCodeInvalidClientMetadataException for service response error code
+	// "InvalidClientMetadataException".
+	//
+	// Indicates that the client information sent in the request during registration
+	// is invalid.
+	ErrCodeInvalidClientMetadataException = "InvalidClientMetadataException"
+
+	// ErrCodeInvalidGrantException for service response error code
+	// "InvalidGrantException".
+	//
+	// Indicates that a request contains an invalid grant. This can occur if a client
+	// makes a CreateToken request with an invalid grant type.
+	ErrCodeInvalidGrantException = "InvalidGrantException"
+
+	// ErrCodeInvalidRedirectUriException for service response error code
+	// "InvalidRedirectUriException".
+	//
+	// Indicates that one or more redirect URIs in the request are not supported
+	// for this operation.
+	ErrCodeInvalidRedirectUriException = "InvalidRedirectUriException"
+
+	// ErrCodeInvalidRequestException for service response error code
+	// "InvalidRequestException".
+	//
+	// Indicates that something is wrong with the input to the request. For example,
+	// a required parameter might be missing or out of range.
+	ErrCodeInvalidRequestException = "InvalidRequestException"
+
+	// ErrCodeInvalidRequestRegionException for service response error code
+	// "InvalidRequestRegionException".
+	//
+	// Indicates that a token provided as input to the request was issued by and
+	// is only usable by calling IAM Identity Center endpoints in another region.
+	ErrCodeInvalidRequestRegionException = "InvalidRequestRegionException"
+
+	// ErrCodeInvalidScopeException for service response error code
+	// "InvalidScopeException".
+	//
+	// Indicates that the scope provided in the request is invalid.
+	ErrCodeInvalidScopeException = "InvalidScopeException"
+
+	// ErrCodeSlowDownException for service response error code
+	// "SlowDownException".
+	//
+	// Indicates that the client is making requests more frequently than the
+	// service can handle.
+	ErrCodeSlowDownException = "SlowDownException"
+
+	// ErrCodeUnauthorizedClientException for service response error code
+	// "UnauthorizedClientException".
+	//
+	// Indicates that the client is not currently authorized to make the request.
+	// This can happen when a clientId is not issued for a public client.
+	ErrCodeUnauthorizedClientException = "UnauthorizedClientException"
+
+	// ErrCodeUnsupportedGrantTypeException for service response error code
+	// "UnsupportedGrantTypeException".
+	//
+	// Indicates that the grant type in the request is not supported by the service.
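+	// (On the wire this corresponds to the OAuth 2.0 error code
+	// "unsupported_grant_type" carried in the response's error field.)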
+	ErrCodeUnsupportedGrantTypeException = "UnsupportedGrantTypeException"
+)
+
+var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
+	"AccessDeniedException":          newErrorAccessDeniedException,
+	"AuthorizationPendingException":  newErrorAuthorizationPendingException,
+	"ExpiredTokenException":          newErrorExpiredTokenException,
+	"InternalServerException":        newErrorInternalServerException,
+	"InvalidClientException":         newErrorInvalidClientException,
+	"InvalidClientMetadataException": newErrorInvalidClientMetadataException,
+	"InvalidGrantException":          newErrorInvalidGrantException,
+	"InvalidRedirectUriException":    newErrorInvalidRedirectUriException,
+	"InvalidRequestException":        newErrorInvalidRequestException,
+	"InvalidRequestRegionException":  newErrorInvalidRequestRegionException,
+	"InvalidScopeException":          newErrorInvalidScopeException,
+	"SlowDownException":              newErrorSlowDownException,
+	"UnauthorizedClientException":    newErrorUnauthorizedClientException,
+	"UnsupportedGrantTypeException":  newErrorUnsupportedGrantTypeException,
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go
new file mode 100644
index 00000000..782bae36
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go
@@ -0,0 +1,106 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package ssooidc
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+// SSOOIDC provides the API operation methods for making requests to
+// AWS SSO OIDC. See this package's package overview docs
+// for details on the service.
+//
+// SSOOIDC methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type SSOOIDC struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "SSO OIDC" // Name of service.
+	EndpointsID = "oidc"     // ID to look up a service endpoint with.
+	ServiceID   = "SSO OIDC" // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the SSOOIDC client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//
+//	mySession := session.Must(session.NewSession())
+//
+//	// Create a SSOOIDC client from just a session.
+//	svc := ssooidc.New(mySession)
+//
+//	// Create a SSOOIDC client with additional configuration
+//	svc := ssooidc.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSOOIDC {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+	if c.SigningNameDerived || len(c.SigningName) == 0 {
+		c.SigningName = "sso-oauth"
+	}
+	return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
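+//
+// The restjson unmarshal-error handler registered below is seeded with the
+// exceptionFromCode table from errors.go, so service failures surface as the
+// typed exception values defined in api.go. A hedged, illustrative sketch of
+// what that enables for a caller (svc and input are placeholders, and
+// CreateToken stands in for any operation; assumes the aws/awserr package):
+//
+//	if _, err := svc.CreateToken(input); err != nil {
+//		if aerr, ok := err.(awserr.Error); ok {
+//			switch aerr.Code() {
+//			case ssooidc.ErrCodeSlowDownException:
+//				// back off before retrying
+//			case ssooidc.ErrCodeUnauthorizedClientException:
+//				// the client registration is not authorized
+//			}
+//		}
+//	}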
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *SSOOIDC { + svc := &SSOOIDC{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2019-06-10", + ResolvedRegion: resolvedRegion, + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed( + protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), + ) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SSOOIDC operation and runs any +// custom request initialization. +func (c *SSOOIDC) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 2b7e675a..2c395f5f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -56,12 +56,11 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // AssumeRole API operation for AWS Security Token Service. // // Returns a set of temporary security credentials that you can use to access -// Amazon Web Services resources that you might not normally have access to. -// These temporary credentials consist of an access key ID, a secret access -// key, and a security token. Typically, you use AssumeRole within your account -// or for cross-account access. For a comparison of AssumeRole with other API -// operations that produce temporary credentials, see Requesting Temporary Security -// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// Amazon Web Services resources. These temporary credentials consist of an +// access key ID, a secret access key, and a security token. Typically, you +// use AssumeRole within your account or for cross-account access. For a comparison +// of AssumeRole with other API operations that produce temporary credentials, +// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) // and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // @@ -74,21 +73,21 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. 
The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// Amazon Web Services API calls to access resources in the account that owns -// the role. You cannot use session policies to grant more permissions than -// those allowed by the identity-based policy of the role that is being assumed. -// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's identity-based +// policy and the session policies. You can use the role's temporary credentials +// in subsequent Amazon Web Services API calls to access resources in the account +// that owns the role. You cannot use session policies to grant more permissions +// than those allowed by the identity-based policy of the role that is being +// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // -// When you create a role, you create two policies: A role trust policy that -// specifies who can assume the role and a permissions policy that specifies -// what can be done with the role. You specify the trusted principal who is +// When you create a role, you create two policies: a role trust policy that +// specifies who can assume the role, and a permissions policy that specifies +// what can be done with the role. You specify the trusted principal that is // allowed to assume the role in the role trust policy. // // To assume a role from a different account, your Amazon Web Services account @@ -97,9 +96,9 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // are allowed to delegate that access to users in the account. // // A user who wants to access a role in a different account must also have permissions -// that are delegated from the user account administrator. The administrator -// must attach a policy that allows the user to call AssumeRole for the ARN -// of the role in the other account. +// that are delegated from the account administrator. The administrator must +// attach a policy that allows the user to call AssumeRole for the ARN of the +// role in the other account. // // To allow a user to assume a role in the same account, you can do either of // the following: @@ -307,16 +306,16 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. 
Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// Amazon Web Services API calls to access resources in the account that owns -// the role. You cannot use session policies to grant more permissions than -// those allowed by the identity-based policy of the role that is being assumed. -// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's identity-based +// policy and the session policies. You can use the role's temporary credentials +// in subsequent Amazon Web Services API calls to access resources in the account +// that owns the role. You cannot use session policies to grant more permissions +// than those allowed by the identity-based policy of the role that is being +// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // // Calling AssumeRoleWithSAML does not require the use of Amazon Web Services @@ -343,11 +342,12 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // -// An Amazon Web Services conversion compresses the passed session policies -// and session tags into a packed binary format that has a separate limit. Your -// request can fail for this limit even if your plaintext meets the other requirements. -// The PackedPolicySize response element indicates by percentage how close the -// policies and tags for your request are to the upper size limit. +// An Amazon Web Services conversion compresses the passed inline session policy, +// managed policy ARNs, and session tags into a packed binary format that has +// a separate limit. Your request can fail for this limit even if your plaintext +// meets the other requirements. The PackedPolicySize response element indicates +// by percentage how close the policies and tags for your request are to the +// upper size limit. // // You can pass a session tag with the same key as a tag that is attached to // the role. When you do, session tags override the role's tags with the same @@ -517,10 +517,8 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // a user. You can also supply the user with a consistent identity throughout // the lifetime of an application. // -// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) -// in Amazon Web Services SDK for Android Developer Guide and Amazon Cognito -// Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) -// in the Amazon Web Services SDK for iOS Developer Guide. 
+// To learn more about Amazon Cognito, see Amazon Cognito identity pools (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html)
+// in the Amazon Cognito Developer Guide.
 //
 // Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web
 // Services security credentials. Therefore, you can distribute an application
@@ -563,16 +561,16 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
 //
 // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
 // to this operation. You can pass a single JSON policy document to use as an
-// inline session policy. You can also specify up to 10 managed policies to
-// use as managed session policies. The plaintext that you use for both inline
-// and managed session policies can't exceed 2,048 characters. Passing policies
-// to this operation returns new temporary credentials. The resulting session's
-// permissions are the intersection of the role's identity-based policy and
-// the session policies. You can use the role's temporary credentials in subsequent
-// Amazon Web Services API calls to access resources in the account that owns
-// the role. You cannot use session policies to grant more permissions than
-// those allowed by the identity-based policy of the role that is being assumed.
-// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// inline session policy. You can also specify up to 10 managed policy Amazon
+// Resource Names (ARNs) to use as managed session policies. The plaintext that
+// you use for both inline and managed session policies can't exceed 2,048 characters.
+// Passing policies to this operation returns new temporary credentials. The
+// resulting session's permissions are the intersection of the role's identity-based
+// policy and the session policies. You can use the role's temporary credentials
+// in subsequent Amazon Web Services API calls to access resources in the account
+// that owns the role. You cannot use session policies to grant more permissions
+// than those allowed by the identity-based policy of the role that is being
+// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
 // in the IAM User Guide.
 //
 // # Tags
@@ -588,11 +586,12 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
 // and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
 // in the IAM User Guide.
 //
-// An Amazon Web Services conversion compresses the passed session policies
-// and session tags into a packed binary format that has a separate limit. Your
-// request can fail for this limit even if your plaintext meets the other requirements.
-// The PackedPolicySize response element indicates by percentage how close the
-// policies and tags for your request are to the upper size limit.
+// An Amazon Web Services conversion compresses the passed inline session policy,
+// managed policy ARNs, and session tags into a packed binary format that has
+// a separate limit. Your request can fail for this limit even if your plaintext
+// meets the other requirements.
The PackedPolicySize response element indicates +// by percentage how close the policies and tags for your request are to the +// upper size limit. // // You can pass a session tag with the same key as a tag that is attached to // the role. When you do, the session tag overrides the role tag with the same @@ -983,11 +982,11 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ // call the operation. // // No permissions are required to perform this operation. If an administrator -// adds a policy to your IAM user or role that explicitly denies access to the -// sts:GetCallerIdentity action, you can still perform this operation. Permissions -// are not required because the same information is returned when an IAM user -// or role is denied access. To view an example response, see I Am Not Authorized -// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) +// attaches a policy to your identity that explicitly denies access to the sts:GetCallerIdentity +// action, you can still perform this operation. Permissions are not required +// because the same information is returned when access is denied. To view an +// example response, see I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) // in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1062,18 +1061,26 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // GetFederationToken API operation for AWS Security Token Service. // // Returns a set of temporary security credentials (consisting of an access -// key ID, a secret access key, and a security token) for a federated user. -// A typical use is in a proxy application that gets temporary security credentials -// on behalf of distributed applications inside a corporate network. You must -// call the GetFederationToken operation using the long-term security credentials -// of an IAM user. As a result, this call is appropriate in contexts where those -// credentials can be safely stored, usually in a server-based application. +// key ID, a secret access key, and a security token) for a user. A typical +// use is in a proxy application that gets temporary security credentials on +// behalf of distributed applications inside a corporate network. +// +// You must call the GetFederationToken operation using the long-term security +// credentials of an IAM user. As a result, this call is appropriate in contexts +// where those credentials can be safeguarded, usually in a server-based application. // For a comparison of GetFederationToken with the other API operations that // produce temporary credentials, see Requesting Temporary Security Credentials // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) // and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // +// Although it is possible to call GetFederationToken using the security credentials +// of an Amazon Web Services account root user rather than an IAM user that +// you create for the purpose of a proxy application, we do not recommend it. 
+// For more information, see Safeguard your root user credentials and don't +// use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) +// in the IAM User Guide. +// // You can create a mobile-based or browser-based app that can authenticate // users using a web identity provider like Login with Amazon, Facebook, Google, // or an OpenID Connect-compatible identity provider. In this case, we recommend @@ -1082,37 +1089,31 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) // in the IAM User Guide. // -// You can also call GetFederationToken using the security credentials of an -// Amazon Web Services account root user, but we do not recommend it. Instead, -// we recommend that you create an IAM user for the purpose of the proxy application. -// Then attach a policy to the IAM user that limits federated users to only -// the actions and resources that they need to access. For more information, -// see IAM Best Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) -// in the IAM User Guide. -// // # Session duration // // The temporary credentials are valid for the specified duration, from 900 // seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default // session duration is 43,200 seconds (12 hours). Temporary credentials obtained -// by using the Amazon Web Services account root user credentials have a maximum -// duration of 3,600 seconds (1 hour). +// by using the root user credentials have a maximum duration of 3,600 seconds +// (1 hour). // // # Permissions // // You can use the temporary credentials created by GetFederationToken in any -// Amazon Web Services service except the following: +// Amazon Web Services service with the following exceptions: // // - You cannot call any IAM operations using the CLI or the Amazon Web Services -// API. +// API. This limitation does not apply to console sessions. // // - You cannot call any STS operations except GetCallerIdentity. // +// You can use temporary credentials for single sign-on (SSO) to the console. +// // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. // // Though the session policy parameters are optional, if you do not pass a policy, // then the resulting federated user session has no permissions. When you pass @@ -1264,12 +1265,13 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // or IAM user. The credentials consist of an access key ID, a secret access // key, and a security token. Typically, you use GetSessionToken if you want // to use MFA to protect programmatic calls to specific Amazon Web Services -// API operations like Amazon EC2 StopInstances. 
MFA-enabled IAM users would -// need to call GetSessionToken and submit an MFA code that is associated with -// their MFA device. Using the temporary security credentials that are returned -// from the call, IAM users can then make programmatic calls to API operations -// that require MFA authentication. If you do not supply a correct MFA code, -// then the API returns an access denied error. For a comparison of GetSessionToken +// API operations like Amazon EC2 StopInstances. +// +// MFA-enabled IAM users must call GetSessionToken and submit an MFA code that +// is associated with their MFA device. Using the temporary security credentials +// that the call returns, IAM users can then make programmatic calls to API +// operations that require MFA authentication. An incorrect MFA code causes +// the API to return an access denied error. For a comparison of GetSessionToken // with the other API operations that produce temporary credentials, see Requesting // Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) // and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) @@ -1284,13 +1286,12 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // # Session Duration // // The GetSessionToken operation must be called by using the long-term Amazon -// Web Services security credentials of the Amazon Web Services account root -// user or an IAM user. Credentials that are created by IAM users are valid -// for the duration that you specify. This duration can range from 900 seconds -// (15 minutes) up to a maximum of 129,600 seconds (36 hours), with a default -// of 43,200 seconds (12 hours). Credentials based on account credentials can -// range from 900 seconds (15 minutes) up to 3,600 seconds (1 hour), with a -// default of 1 hour. +// Web Services security credentials of an IAM user. Credentials that are created +// by IAM users are valid for the duration that you specify. This duration can +// range from 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36 +// hours), with a default of 43,200 seconds (12 hours). Credentials based on +// account credentials can range from 900 seconds (15 minutes) up to 3,600 seconds +// (1 hour), with a default of 1 hour. // // # Permissions // @@ -1302,20 +1303,20 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // // - You cannot call any STS API except AssumeRole or GetCallerIdentity. // -// We recommend that you do not call GetSessionToken with Amazon Web Services -// account root user credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) -// by creating one or more IAM users, giving them the necessary permissions, -// and using IAM users for everyday interaction with Amazon Web Services. +// The credentials that GetSessionToken returns are based on permissions associated +// with the IAM user whose credentials were used to call the operation. The +// temporary credentials have the same permissions as the IAM user. // -// The credentials that are returned by GetSessionToken are based on permissions -// associated with the user whose credentials were used to call the operation. -// If GetSessionToken is called using Amazon Web Services account root user -// credentials, the temporary credentials have root user permissions. 
Similarly,
-// if GetSessionToken is called using the credentials of an IAM user, the temporary
-// credentials have the same permissions as the IAM user.
+// Although it is possible to call GetSessionToken using the security credentials
+// of an Amazon Web Services account root user rather than an IAM user, we do
+// not recommend it. If GetSessionToken is called using root user credentials,
+// the temporary credentials have root user permissions. For more information,
+// see Safeguard your root user credentials and don't use them for everyday
+// tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials)
+// in the IAM User Guide.
 //
 // For more information about using GetSessionToken to create temporary credentials,
-// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
+// see Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
 // in the IAM User Guide.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -1424,11 +1425,12 @@ type AssumeRoleInput struct {
 	// \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
 	// return (\u000D) characters.
 	//
-	// An Amazon Web Services conversion compresses the passed session policies
-	// and session tags into a packed binary format that has a separate limit. Your
-	// request can fail for this limit even if your plaintext meets the other requirements.
-	// The PackedPolicySize response element indicates by percentage how close the
-	// policies and tags for your request are to the upper size limit.
+	// An Amazon Web Services conversion compresses the passed inline session policy,
+	// managed policy ARNs, and session tags into a packed binary format that has
+	// a separate limit. Your request can fail for this limit even if your plaintext
+	// meets the other requirements. The PackedPolicySize response element indicates
+	// by percentage how close the policies and tags for your request are to the
+	// upper size limit.
 	Policy *string `min:"1" type:"string"`
 
 	// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
@@ -1441,11 +1443,12 @@ type AssumeRoleInput struct {
 	// Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
 	// in the Amazon Web Services General Reference.
 	//
-	// An Amazon Web Services conversion compresses the passed session policies
-	// and session tags into a packed binary format that has a separate limit. Your
-	// request can fail for this limit even if your plaintext meets the other requirements.
-	// The PackedPolicySize response element indicates by percentage how close the
-	// policies and tags for your request are to the upper size limit.
+	// An Amazon Web Services conversion compresses the passed inline session policy,
+	// managed policy ARNs, and session tags into a packed binary format that has
+	// a separate limit. Your request can fail for this limit even if your plaintext
+	// meets the other requirements. The PackedPolicySize response element indicates
+	// by percentage how close the policies and tags for your request are to the
+	// upper size limit.
 	//
 	// Passing policies to this operation returns new temporary credentials.
The // resulting session's permissions are the intersection of the role's identity-based @@ -1457,6 +1460,17 @@ type AssumeRoleInput struct { // in the IAM User Guide. PolicyArns []*PolicyDescriptorType `type:"list"` + // A list of previously acquired trusted context assertions in the format of + // a JSON array. The trusted context assertion is signed and encrypted by Amazon + // Web Services STS. + // + // The following is an example of a ProvidedContext value that includes a single + // trusted context assertion and the ARN of the context provider from which + // the trusted context assertion was generated. + // + // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}] + ProvidedContexts []*ProvidedContext `type:"list"` + // The Amazon Resource Name (ARN) of the role to assume. // // RoleArn is a required field @@ -1520,11 +1534,12 @@ type AssumeRoleInput struct { // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. // // You can pass a session tag with the same key as a tag that is already attached // to the role. When you do, session tags override a role tag with the same @@ -1629,6 +1644,16 @@ func (s *AssumeRoleInput) Validate() error { } } } + if s.ProvidedContexts != nil { + for i, v := range s.ProvidedContexts { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ProvidedContexts", i), err.(request.ErrInvalidParams)) + } + } + } if s.Tags != nil { for i, v := range s.Tags { if v == nil { @@ -1670,6 +1695,12 @@ func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleIn return s } +// SetProvidedContexts sets the ProvidedContexts field's value. +func (s *AssumeRoleInput) SetProvidedContexts(v []*ProvidedContext) *AssumeRoleInput { + s.ProvidedContexts = v + return s +} + // SetRoleArn sets the RoleArn field's value. func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { s.RoleArn = &v @@ -1843,11 +1874,12 @@ type AssumeRoleWithSAMLInput struct { // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. 
+ // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -1860,11 +1892,12 @@ type AssumeRoleWithSAMLInput struct { // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the Amazon Web Services General Reference. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -1892,8 +1925,12 @@ type AssumeRoleWithSAMLInput struct { // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) // in the IAM User Guide. // + // SAMLAssertion is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by AssumeRoleWithSAMLInput's + // String and GoString methods. + // // SAMLAssertion is a required field - SAMLAssertion *string `min:"4" type:"string" required:"true"` + SAMLAssertion *string `min:"4" type:"string" required:"true" sensitive:"true"` } // String returns the string representation. @@ -2028,7 +2065,7 @@ type AssumeRoleWithSAMLOutput struct { // IAM. // // The combination of NameQualifier and Subject can be used to uniquely identify - // a federated user. + // a user. // // The following pseudocode shows how the hash value is calculated: // @@ -2190,11 +2227,12 @@ type AssumeRoleWithWebIdentityInput struct { // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. 
Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -2207,11 +2245,12 @@ type AssumeRoleWithWebIdentityInput struct { // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the Amazon Web Services General Reference. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -2254,10 +2293,15 @@ type AssumeRoleWithWebIdentityInput struct { // The OAuth 2.0 access token or OpenID Connect ID token that is provided by // the identity provider. Your application must get this token by authenticating // the user who is using your application with a web identity provider before - // the application makes an AssumeRoleWithWebIdentity call. + // the application makes an AssumeRoleWithWebIdentity call. Only tokens with + // RSA algorithms (RS256) are supported. + // + // WebIdentityToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by AssumeRoleWithWebIdentityInput's + // String and GoString methods. // // WebIdentityToken is a required field - WebIdentityToken *string `min:"4" type:"string" required:"true"` + WebIdentityToken *string `min:"4" type:"string" required:"true" sensitive:"true"` } // String returns the string representation. @@ -2563,8 +2607,12 @@ type Credentials struct { // The secret access key that can be used to sign requests. // + // SecretAccessKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by Credentials's + // String and GoString methods. + // // SecretAccessKey is a required field - SecretAccessKey *string `type:"string" required:"true"` + SecretAccessKey *string `type:"string" required:"true" sensitive:"true"` // The token that users must pass to the service API to use the temporary credentials. // @@ -2912,10 +2960,9 @@ type GetFederationTokenInput struct { // The duration, in seconds, that the session should last. Acceptable durations // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained - // using Amazon Web Services account root user credentials are restricted to - // a maximum of 3,600 seconds (one hour). 
If the specified duration is longer - // than one hour, the session obtained by using root user credentials defaults - // to one hour. + // using root user credentials are restricted to a maximum of 3,600 seconds + // (one hour). If the specified duration is longer than one hour, the session + // obtained by using root user credentials defaults to one hour. DurationSeconds *int64 `min:"900" type:"integer"` // The name of the federated user. The name is used as an identifier for the @@ -2934,8 +2981,8 @@ type GetFederationTokenInput struct { // // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policies to - // use as managed session policies. + // inline session policy. You can also specify up to 10 managed policy Amazon + // Resource Names (ARNs) to use as managed session policies. // // This parameter is optional. However, if you do not pass any session policies, // then the resulting federated user session has no permissions. @@ -2960,11 +3007,12 @@ type GetFederationTokenInput struct { // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -2973,11 +3021,12 @@ type GetFederationTokenInput struct { // // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policies to - // use as managed session policies. The plaintext that you use for both inline - // and managed session policies can't exceed 2,048 characters. You can provide - // up to 10 managed policy ARNs. For more information about ARNs, see Amazon - // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // inline session policy. You can also specify up to 10 managed policy Amazon + // Resource Names (ARNs) to use as managed session policies. The plaintext that + // you use for both inline and managed session policies can't exceed 2,048 characters. + // You can provide up to 10 managed policy ARNs. 
For more information about + // ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the Amazon Web Services General Reference. // // This parameter is optional. However, if you do not pass any session policies, @@ -2997,11 +3046,12 @@ type GetFederationTokenInput struct { // by the policy. These permissions are granted in addition to the permissions // that are granted by the session policies. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. PolicyArns []*PolicyDescriptorType `type:"list"` // A list of session tags. Each session tag consists of a key name and an associated @@ -3015,11 +3065,12 @@ type GetFederationTokenInput struct { // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. // // You can pass a session tag with the same key as a tag that is already attached // to the user you are federating. When you do, session tags override a user @@ -3362,6 +3413,67 @@ func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { return s } +// Contains information about the provided context. This includes the signed +// and encrypted trusted context assertion and the context provider ARN from +// which the trusted context assertion was generated. +type ProvidedContext struct { + _ struct{} `type:"structure"` + + // The signed and encrypted trusted context assertion generated by the context + // provider. The trusted context assertion is signed and encrypted by Amazon + // Web Services STS. + ContextAssertion *string `min:"4" type:"string"` + + // The context provider ARN from which the trusted context assertion was generated. + ProviderArn *string `min:"20" type:"string"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvidedContext) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvidedContext) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ProvidedContext) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProvidedContext"} + if s.ContextAssertion != nil && len(*s.ContextAssertion) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ContextAssertion", 4)) + } + if s.ProviderArn != nil && len(*s.ProviderArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContextAssertion sets the ContextAssertion field's value. +func (s *ProvidedContext) SetContextAssertion(v string) *ProvidedContext { + s.ContextAssertion = &v + return s +} + +// SetProviderArn sets the ProviderArn field's value. +func (s *ProvidedContext) SetProviderArn(v string) *ProvidedContext { + s.ProviderArn = &v + return s +} + // You can pass custom key-value pair attributes when you assume a role or federate // a user. These are called session tags. You can then use the session tags // to control access to resources. For more information, see Tagging Amazon diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go index c40f5a2a..ea1d9eb0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -4,10 +4,9 @@ // requests to AWS Security Token Service. // // Security Token Service (STS) enables you to request temporary, limited-privilege -// credentials for Identity and Access Management (IAM) users or for users that -// you authenticate (federated users). This guide provides descriptions of the -// STS API. For more information about using this service, see Temporary Security -// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// credentials for users. This guide provides descriptions of the STS API. For +// more information about using this service, see Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). // // See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. 
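+//
+// A hedged, illustrative sketch of requesting temporary credentials with
+// AssumeRole follows (the role ARN, session name, and policy ARN are
+// placeholder values; error handling is elided):
+//
+//	svc := sts.New(session.Must(session.NewSession()))
+//	out, _ := svc.AssumeRole(&sts.AssumeRoleInput{
+//		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"),
+//		RoleSessionName: aws.String("example-session"),
+//		PolicyArns: []*sts.PolicyDescriptorType{
+//			{Arn: aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess")},
+//		},
+//	})
+//	_ = out.Credentials // AccessKeyId, SecretAccessKey, SessionToken, Expiration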
// diff --git a/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/BUILD.bazel b/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/BUILD.bazel index 2017b3ce..fc26dcce 100644 --- a/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/BUILD.bazel +++ b/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/BUILD.bazel @@ -4,6 +4,8 @@ go_library( name = "gitdiff", srcs = [ "apply.go", + "apply_binary.go", + "apply_text.go", "base85.go", "binary.go", "file_header.go", @@ -11,6 +13,7 @@ go_library( "io.go", "parser.go", "patch_header.go", + "patch_identity.go", "text.go", ], importmap = "peridot.resf.org/vendor/github.com/bluekeyes/go-gitdiff/gitdiff", diff --git a/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/apply.go b/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/apply.go index 4397d995..44bbcca8 100644 --- a/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/apply.go +++ b/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/apply.go @@ -13,10 +13,9 @@ import ( // Users can test if an error was caused by a conflict by using errors.Is with // an empty Conflict: // -// if errors.Is(err, &Conflict{}) { -// // handle conflict -// } -// +// if errors.Is(err, &Conflict{}) { +// // handle conflict +// } type Conflict struct { msg string } @@ -89,85 +88,36 @@ func applyError(err error, args ...interface{}) error { var ( errApplyInProgress = errors.New("gitdiff: incompatible apply in progress") + errApplierClosed = errors.New("gitdiff: applier is closed") ) -const ( - applyInitial = iota - applyText - applyBinary - applyFile -) - -// Apply is a convenience function that creates an Applier for src with default -// settings and applies the changes in f, writing the result to dst. +// Apply applies the changes in f to src, writing the result to dst. It can +// apply both text and binary changes. +// +// If an error occurs while applying, Apply returns an *ApplyError that +// annotates the error with additional information. If the error is because of +// a conflict with the source, the wrapped error will be a *Conflict. func Apply(dst io.Writer, src io.ReaderAt, f *File) error { - return NewApplier(src).ApplyFile(dst, f) -} - -// Applier applies changes described in fragments to source data. If changes -// are described in multiple fragments, those fragments must be applied in -// order, usually by calling ApplyFile. -// -// By default, Applier operates in "strict" mode, where fragment content and -// positions must exactly match those of the source. -// -// If an error occurs while applying, methods on Applier return instances of -// *ApplyError that annotate the wrapped error with additional information -// when available. If the error is because of a conflict between a fragment and -// the source, the wrapped error will be a *Conflict. -// -// While an Applier can apply both text and binary fragments, only one fragment -// type can be used without resetting the Applier. The first fragment applied -// sets the type for the Applier. Mixing fragment types or mixing -// fragment-level and file-level applies results in an error. -type Applier struct { - src io.ReaderAt - lineSrc LineReaderAt - nextLine int64 - applyType int -} - -// NewApplier creates an Applier that reads data from src. If src is a -// LineReaderAt, it is used directly to apply text fragments. -func NewApplier(src io.ReaderAt) *Applier { - a := new(Applier) - a.Reset(src) - return a -} - -// Reset resets the input and internal state of the Applier. If src is nil, the -// existing source is reused. 
-func (a *Applier) Reset(src io.ReaderAt) { - if src != nil { - a.src = src - if lineSrc, ok := src.(LineReaderAt); ok { - a.lineSrc = lineSrc - } else { - a.lineSrc = &lineReaderAt{r: src} + if f.IsBinary { + if len(f.TextFragments) > 0 { + return applyError(errors.New("binary file contains text fragments")) + } + if f.BinaryFragment == nil { + return applyError(errors.New("binary file does not contain a binary fragment")) + } + } else { + if f.BinaryFragment != nil { + return applyError(errors.New("text file contains a binary fragment")) } - } - a.nextLine = 0 - a.applyType = applyInitial -} - -// ApplyFile applies the changes in all of the fragments of f and writes the -// result to dst. -func (a *Applier) ApplyFile(dst io.Writer, f *File) error { - if a.applyType != applyInitial { - return applyError(errApplyInProgress) - } - defer func() { a.applyType = applyFile }() - - if f.IsBinary && len(f.TextFragments) > 0 { - return applyError(errors.New("binary file contains text fragments")) - } - if !f.IsBinary && f.BinaryFragment != nil { - return applyError(errors.New("text file contains binary fragment")) } switch { case f.BinaryFragment != nil: - return a.ApplyBinaryFragment(dst, f.BinaryFragment) + applier := NewBinaryApplier(dst, src) + if err := applier.ApplyFragment(f.BinaryFragment); err != nil { + return err + } + return applier.Close() case len(f.TextFragments) > 0: frags := make([]*TextFragment, len(f.TextFragments)) @@ -181,274 +131,17 @@ func (a *Applier) ApplyFile(dst io.Writer, f *File) error { // right now, the application fails if fragments overlap, but it should be // possible to precompute the result of applying them in order + applier := NewTextApplier(dst, src) for i, frag := range frags { - if err := a.ApplyTextFragment(dst, frag); err != nil { + if err := applier.ApplyFragment(frag); err != nil { return applyError(err, fragNum(i)) } } - } + return applier.Close() - return applyError(a.Flush(dst)) -} - -// ApplyTextFragment applies the changes in the fragment f and writes unwritten -// data before the start of the fragment and the result to dst. If multiple -// text fragments apply to the same source, ApplyTextFragment must be called in -// order of increasing start position. As a result, each fragment can be -// applied at most once before a call to Reset. 
-func (a *Applier) ApplyTextFragment(dst io.Writer, f *TextFragment) error { - if a.applyType != applyInitial && a.applyType != applyText { - return applyError(errApplyInProgress) - } - defer func() { a.applyType = applyText }() - - // application code assumes fragment fields are consistent - if err := f.Validate(); err != nil { - return applyError(err) - } - - // lines are 0-indexed, positions are 1-indexed (but new files have position = 0) - fragStart := f.OldPosition - 1 - if fragStart < 0 { - fragStart = 0 - } - fragEnd := fragStart + f.OldLines - - start := a.nextLine - if fragStart < start { - return applyError(&Conflict{"fragment overlaps with an applied fragment"}) - } - - if f.OldPosition == 0 { - ok, err := isLen(a.src, 0) - if err != nil { - return applyError(err) - } - if !ok { - return applyError(&Conflict{"cannot create new file from non-empty src"}) - } - } - - preimage := make([][]byte, fragEnd-start) - n, err := a.lineSrc.ReadLinesAt(preimage, start) - switch { - case err == nil: - case err == io.EOF && n == len(preimage): // last line of frag has no newline character default: - return applyError(err, lineNum(start+int64(n))) - } - - // copy leading data before the fragment starts - for i, line := range preimage[:fragStart-start] { - if _, err := dst.Write(line); err != nil { - a.nextLine = start + int64(i) - return applyError(err, lineNum(a.nextLine)) - } - } - preimage = preimage[fragStart-start:] - - // apply the changes in the fragment - used := int64(0) - for i, line := range f.Lines { - if err := applyTextLine(dst, line, preimage, used); err != nil { - a.nextLine = fragStart + used - return applyError(err, lineNum(a.nextLine), fragLineNum(i)) - } - if line.Old() { - used++ - } - } - a.nextLine = fragStart + used - - // new position of +0,0 mean a full delete, so check for leftovers - if f.NewPosition == 0 && f.NewLines == 0 { - var b [1][]byte - n, err := a.lineSrc.ReadLinesAt(b[:], a.nextLine) - if err != nil && err != io.EOF { - return applyError(err, lineNum(a.nextLine)) - } - if n > 0 { - return applyError(&Conflict{"src still has content after full delete"}, lineNum(a.nextLine)) - } - } - - return nil -} - -func applyTextLine(dst io.Writer, line Line, preimage [][]byte, i int64) (err error) { - if line.Old() && string(preimage[i]) != line.Line { - return &Conflict{"fragment line does not match src line"} - } - if line.New() { - _, err = io.WriteString(dst, line.Line) - } - return err -} - -// Flush writes any data following the last applied fragment to dst. -func (a *Applier) Flush(dst io.Writer) (err error) { - switch a.applyType { - case applyInitial: - _, err = copyFrom(dst, a.src, 0) - case applyText: - _, err = copyLinesFrom(dst, a.lineSrc, a.nextLine) - case applyBinary: - // nothing to flush, binary apply "consumes" full source - } - return err -} - -// ApplyBinaryFragment applies the changes in the fragment f and writes the -// result to dst. At most one binary fragment can be applied before a call to -// Reset. 
-func (a *Applier) ApplyBinaryFragment(dst io.Writer, f *BinaryFragment) error { - if a.applyType != applyInitial { - return applyError(errApplyInProgress) - } - defer func() { a.applyType = applyBinary }() - - if f == nil { - return applyError(errors.New("nil fragment")) - } - - switch f.Method { - case BinaryPatchLiteral: - if _, err := dst.Write(f.Data); err != nil { - return applyError(err) - } - case BinaryPatchDelta: - if err := applyBinaryDeltaFragment(dst, a.src, f.Data); err != nil { - return applyError(err) - } - default: - return applyError(fmt.Errorf("unsupported binary patch method: %v", f.Method)) - } - return nil -} - -func applyBinaryDeltaFragment(dst io.Writer, src io.ReaderAt, frag []byte) error { - srcSize, delta := readBinaryDeltaSize(frag) - if err := checkBinarySrcSize(src, srcSize); err != nil { + // nothing to apply, just copy all the data + _, err := copyFrom(dst, src, 0) return err } - - dstSize, delta := readBinaryDeltaSize(delta) - - for len(delta) > 0 { - op := delta[0] - if op == 0 { - return errors.New("invalid delta opcode 0") - } - - var n int64 - var err error - switch op & 0x80 { - case 0x80: - n, delta, err = applyBinaryDeltaCopy(dst, op, delta[1:], src) - case 0x00: - n, delta, err = applyBinaryDeltaAdd(dst, op, delta[1:]) - } - if err != nil { - return err - } - dstSize -= n - } - - if dstSize != 0 { - return errors.New("corrupt binary delta: insufficient or extra data") - } - return nil -} - -// readBinaryDeltaSize reads a variable length size from a delta-encoded binary -// fragment, returing the size and the unused data. Data is encoded as: -// -// [[1xxxxxxx]...] [0xxxxxxx] -// -// in little-endian order, with 7 bits of the value per byte. -func readBinaryDeltaSize(d []byte) (size int64, rest []byte) { - shift := uint(0) - for i, b := range d { - size |= int64(b&0x7F) << shift - shift += 7 - if b <= 0x7F { - return size, d[i+1:] - } - } - return size, nil -} - -// applyBinaryDeltaAdd applies an add opcode in a delta-encoded binary -// fragment, returning the amount of data written and the usused part of the -// fragment. An add operation takes the form: -// -// [0xxxxxx][[data1]...] -// -// where the lower seven bits of the opcode is the number of data bytes -// following the opcode. See also pack-format.txt in the Git source. -func applyBinaryDeltaAdd(w io.Writer, op byte, delta []byte) (n int64, rest []byte, err error) { - size := int(op) - if len(delta) < size { - return 0, delta, errors.New("corrupt binary delta: incomplete add") - } - _, err = w.Write(delta[:size]) - return int64(size), delta[size:], err -} - -// applyBinaryDeltaCopy applies a copy opcode in a delta-encoded binary -// fragment, returing the amount of data written and the unused part of the -// fragment. A copy operation takes the form: -// -// [1xxxxxxx][offset1][offset2][offset3][offset4][size1][size2][size3] -// -// where the lower seven bits of the opcode determine which non-zero offset and -// size bytes are present in little-endian order: if bit 0 is set, offset1 is -// present, etc. If no offset or size bytes are present, offset is 0 and size -// is 0x10000. See also pack-format.txt in the Git source. 
-func applyBinaryDeltaCopy(w io.Writer, op byte, delta []byte, src io.ReaderAt) (n int64, rest []byte, err error) { - const defaultSize = 0x10000 - - unpack := func(start, bits uint) (v int64) { - for i := uint(0); i < bits; i++ { - mask := byte(1 << (i + start)) - if op&mask > 0 { - if len(delta) == 0 { - err = errors.New("corrupt binary delta: incomplete copy") - return - } - v |= int64(delta[0]) << (8 * i) - delta = delta[1:] - } - } - return - } - - offset := unpack(0, 4) - size := unpack(4, 3) - if err != nil { - return 0, delta, err - } - if size == 0 { - size = defaultSize - } - - // TODO(bkeyes): consider pooling these buffers - b := make([]byte, size) - if _, err := src.ReadAt(b, offset); err != nil { - return 0, delta, err - } - - _, err = w.Write(b) - return size, delta, err -} - -func checkBinarySrcSize(r io.ReaderAt, size int64) error { - ok, err := isLen(r, size) - if err != nil { - return err - } - if !ok { - return &Conflict{"fragment src size does not match actual src size"} - } - return nil -} diff --git a/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/apply_binary.go b/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/apply_binary.go new file mode 100644 index 00000000..b34772d5 --- /dev/null +++ b/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/apply_binary.go @@ -0,0 +1,206 @@ +package gitdiff + +import ( + "errors" + "fmt" + "io" +) + +// BinaryApplier applies binary changes described in a fragment to source data. +// The applier must be closed after use. +type BinaryApplier struct { + dst io.Writer + src io.ReaderAt + + closed bool + dirty bool +} + +// NewBinaryApplier creates a BinaryApplier that reads data from src and +// writes modified data to dst. +func NewBinaryApplier(dst io.Writer, src io.ReaderAt) *BinaryApplier { + a := BinaryApplier{ + dst: dst, + src: src, + } + return &a +} + +// ApplyFragment applies the changes in the fragment f and writes the result to +// dst. ApplyFragment can be called at most once. +// +// If an error occurs while applying, ApplyFragment returns an *ApplyError that +// annotates the error with additional information. If the error is because of +// a conflict between a fragment and the source, the wrapped error will be a +// *Conflict. +func (a *BinaryApplier) ApplyFragment(f *BinaryFragment) error { + if f == nil { + return applyError(errors.New("nil fragment")) + } + if a.closed { + return applyError(errApplierClosed) + } + if a.dirty { + return applyError(errApplyInProgress) + } + + // mark an apply as in progress, even if it fails before making changes + a.dirty = true + + switch f.Method { + case BinaryPatchLiteral: + if _, err := a.dst.Write(f.Data); err != nil { + return applyError(err) + } + case BinaryPatchDelta: + if err := applyBinaryDeltaFragment(a.dst, a.src, f.Data); err != nil { + return applyError(err) + } + default: + return applyError(fmt.Errorf("unsupported binary patch method: %v", f.Method)) + } + return nil +} + +// Close writes any data following the last applied fragment and prevents +// future calls to ApplyFragment.
+func (a *BinaryApplier) Close() (err error) { + if a.closed { + return nil + } + + a.closed = true + if !a.dirty { + _, err = copyFrom(a.dst, a.src, 0) + } else { + // do nothing, applying a binary fragment copies all data + } + return err +} + +func applyBinaryDeltaFragment(dst io.Writer, src io.ReaderAt, frag []byte) error { + srcSize, delta := readBinaryDeltaSize(frag) + if err := checkBinarySrcSize(src, srcSize); err != nil { + return err + } + + dstSize, delta := readBinaryDeltaSize(delta) + + for len(delta) > 0 { + op := delta[0] + if op == 0 { + return errors.New("invalid delta opcode 0") + } + + var n int64 + var err error + switch op & 0x80 { + case 0x80: + n, delta, err = applyBinaryDeltaCopy(dst, op, delta[1:], src) + case 0x00: + n, delta, err = applyBinaryDeltaAdd(dst, op, delta[1:]) + } + if err != nil { + return err + } + dstSize -= n + } + + if dstSize != 0 { + return errors.New("corrupt binary delta: insufficient or extra data") + } + return nil +} + +// readBinaryDeltaSize reads a variable length size from a delta-encoded binary +// fragment, returning the size and the unused data. Data is encoded as: +// +// [[1xxxxxxx]...] [0xxxxxxx] +// +// in little-endian order, with 7 bits of the value per byte. +func readBinaryDeltaSize(d []byte) (size int64, rest []byte) { + shift := uint(0) + for i, b := range d { + size |= int64(b&0x7F) << shift + shift += 7 + if b <= 0x7F { + return size, d[i+1:] + } + } + return size, nil +} + +// applyBinaryDeltaAdd applies an add opcode in a delta-encoded binary +// fragment, returning the amount of data written and the unused part of the +// fragment. An add operation takes the form: +// +// [0xxxxxxx][[data1]...] +// +// where the lower seven bits of the opcode are the number of data bytes +// following the opcode. See also pack-format.txt in the Git source. +func applyBinaryDeltaAdd(w io.Writer, op byte, delta []byte) (n int64, rest []byte, err error) { + size := int(op) + if len(delta) < size { + return 0, delta, errors.New("corrupt binary delta: incomplete add") + } + _, err = w.Write(delta[:size]) + return int64(size), delta[size:], err +} + +// applyBinaryDeltaCopy applies a copy opcode in a delta-encoded binary +// fragment, returning the amount of data written and the unused part of the +// fragment. A copy operation takes the form: +// +// [1xxxxxxx][offset1][offset2][offset3][offset4][size1][size2][size3] +// +// where the lower seven bits of the opcode determine which non-zero offset and +// size bytes are present in little-endian order: if bit 0 is set, offset1 is +// present, etc. If no offset or size bytes are present, offset is 0 and size +// is 0x10000. See also pack-format.txt in the Git source.
+func applyBinaryDeltaCopy(w io.Writer, op byte, delta []byte, src io.ReaderAt) (n int64, rest []byte, err error) { + const defaultSize = 0x10000 + + unpack := func(start, bits uint) (v int64) { + for i := uint(0); i < bits; i++ { + mask := byte(1 << (i + start)) + if op&mask > 0 { + if len(delta) == 0 { + err = errors.New("corrupt binary delta: incomplete copy") + return + } + v |= int64(delta[0]) << (8 * i) + delta = delta[1:] + } + } + return + } + + offset := unpack(0, 4) + size := unpack(4, 3) + if err != nil { + return 0, delta, err + } + if size == 0 { + size = defaultSize + } + + // TODO(bkeyes): consider pooling these buffers + b := make([]byte, size) + if _, err := src.ReadAt(b, offset); err != nil { + return 0, delta, err + } + + _, err = w.Write(b) + return size, delta, err +} + +func checkBinarySrcSize(r io.ReaderAt, size int64) error { + ok, err := isLen(r, size) + if err != nil { + return err + } + if !ok { + return &Conflict{"fragment src size does not match actual src size"} + } + return nil +} diff --git a/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/apply_text.go b/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/apply_text.go new file mode 100644 index 00000000..43af83a1 --- /dev/null +++ b/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/apply_text.go @@ -0,0 +1,152 @@ +package gitdiff + +import ( + "io" +) + +// TextApplier applies changes described in text fragments to source data. If +// changes are described in multiple fragments, those fragments must be applied +// in order. The applier must be closed after use. +// +// By default, TextApplier operates in "strict" mode, where fragment content +// and positions must exactly match those of the source. +type TextApplier struct { + dst io.Writer + src io.ReaderAt + lineSrc LineReaderAt + nextLine int64 + + closed bool + dirty bool +} + +// NewTextApplier creates a TextApplier that reads data from src and writes +// modified data to dst. If src implements LineReaderAt, it is used directly. +func NewTextApplier(dst io.Writer, src io.ReaderAt) *TextApplier { + a := TextApplier{ + dst: dst, + src: src, + } + + if lineSrc, ok := src.(LineReaderAt); ok { + a.lineSrc = lineSrc + } else { + a.lineSrc = &lineReaderAt{r: src} + } + + return &a +} + +// ApplyFragment applies the changes in the fragment f, writing unwritten data +// before the start of the fragment and any changes from the fragment. If +// multiple text fragments apply to the same content, ApplyFragment must be +// called in order of increasing start position. As a result, each fragment can +// be applied at most once. +// +// If an error occurs while applying, ApplyFragment returns an *ApplyError that +// annotates the error with additional information. If the error is because of +// a conflict between the fragment and the source, the wrapped error will be a +// *Conflict. 
+func (a *TextApplier) ApplyFragment(f *TextFragment) error { + if a.closed { + return applyError(errApplierClosed) + } + + // mark an apply as in progress, even if it fails before making changes + a.dirty = true + + // application code assumes fragment fields are consistent + if err := f.Validate(); err != nil { + return applyError(err) + } + + // lines are 0-indexed, positions are 1-indexed (but new files have position = 0) + fragStart := f.OldPosition - 1 + if fragStart < 0 { + fragStart = 0 + } + fragEnd := fragStart + f.OldLines + + start := a.nextLine + if fragStart < start { + return applyError(&Conflict{"fragment overlaps with an applied fragment"}) + } + + if f.OldPosition == 0 { + ok, err := isLen(a.src, 0) + if err != nil { + return applyError(err) + } + if !ok { + return applyError(&Conflict{"cannot create new file from non-empty src"}) + } + } + + preimage := make([][]byte, fragEnd-start) + n, err := a.lineSrc.ReadLinesAt(preimage, start) + if err != nil { + return applyError(err, lineNum(start+int64(n))) + } + + // copy leading data before the fragment starts + for i, line := range preimage[:fragStart-start] { + if _, err := a.dst.Write(line); err != nil { + a.nextLine = start + int64(i) + return applyError(err, lineNum(a.nextLine)) + } + } + preimage = preimage[fragStart-start:] + + // apply the changes in the fragment + used := int64(0) + for i, line := range f.Lines { + if err := applyTextLine(a.dst, line, preimage, used); err != nil { + a.nextLine = fragStart + used + return applyError(err, lineNum(a.nextLine), fragLineNum(i)) + } + if line.Old() { + used++ + } + } + a.nextLine = fragStart + used + + // new position of +0,0 means a full delete, so check for leftovers + if f.NewPosition == 0 && f.NewLines == 0 { + var b [1][]byte + n, err := a.lineSrc.ReadLinesAt(b[:], a.nextLine) + if err != nil && err != io.EOF { + return applyError(err, lineNum(a.nextLine)) + } + if n > 0 { + return applyError(&Conflict{"src still has content after full delete"}, lineNum(a.nextLine)) + } + } + + return nil +} + +func applyTextLine(dst io.Writer, line Line, preimage [][]byte, i int64) (err error) { + if line.Old() && string(preimage[i]) != line.Line { + return &Conflict{"fragment line does not match src line"} + } + if line.New() { + _, err = io.WriteString(dst, line.Line) + } + return err +} + +// Close writes any data following the last applied fragment and prevents +// future calls to ApplyFragment. +func (a *TextApplier) Close() (err error) { + if a.closed { + return nil + } + + a.closed = true + if !a.dirty { + _, err = copyFrom(a.dst, a.src, 0) + } else { + _, err = copyLinesFrom(a.dst, a.lineSrc, a.nextLine) + } + return err +} diff --git a/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/file_header.go b/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/file_header.go index fadfc4ee..77ceb5d8 100644 --- a/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/file_header.go +++ b/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/file_header.go @@ -172,24 +172,88 @@ func (p *parser) ParseTraditionalFileHeader() (*File, error) { // If the names in the header do not match because the patch is a rename, // return an empty default name.
func parseGitHeaderName(header string) (string, error) { - firstName, n, err := parseName(header, -1, 1) - if err != nil { - return "", err - } - - if n < len(header) && (header[n] == ' ' || header[n] == '\t') { - n++ - } - - secondName, _, err := parseName(header[n:], -1, 1) - if err != nil { - return "", err - } - - if firstName != secondName { + header = strings.TrimSuffix(header, "\n") + if len(header) == 0 { return "", nil } - return firstName, nil + + var err error + var first, second string + + // there are 4 cases to account for: + // + // 1) unquoted unquoted + // 2) unquoted "quoted" + // 3) "quoted" unquoted + // 4) "quoted" "quoted" + // + quote := strings.IndexByte(header, '"') + switch { + case quote < 0: + // case 1 + first = header + + case quote > 0: + // case 2 + first = header[:quote-1] + if !isSpace(header[quote-1]) { + return "", fmt.Errorf("missing separator") + } + + second, _, err = parseQuotedName(header[quote:]) + if err != nil { + return "", err + } + + case quote == 0: + // case 3 or case 4 + var n int + first, n, err = parseQuotedName(header) + if err != nil { + return "", err + } + + // git accepts multiple spaces after a quoted name, but not after an + // unquoted name, since the name might end with one or more spaces + for n < len(header) && isSpace(header[n]) { + n++ + } + if n == len(header) { + return "", nil + } + + if header[n] == '"' { + second, _, err = parseQuotedName(header[n:]) + if err != nil { + return "", err + } + } else { + second = header[n:] + } + } + + first = trimTreePrefix(first, 1) + if second != "" { + if first == trimTreePrefix(second, 1) { + return first, nil + } + return "", nil + } + + // at this point, both names are unquoted (case 1) + // since names may contain spaces, we can't use a known separator + // instead, look for a split that produces two equal names + + for i := 0; i < len(first)-1; i++ { + if !isSpace(first[i]) { + continue + } + second = trimTreePrefix(first[i+1:], 1) + if name := first[:i]; name == second { + return name, nil + } + } + return "", nil } // parseGitHeaderData parses a single line of metadata from a Git file header. 
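The rewritten parseGitHeaderName above handles four quoting combinations plus the rename case. A hypothetical package-internal, table-driven sketch (parseGitHeaderName is unexported, so this only compiles inside the gitdiff package; the expected values assume the function behaves as written):

package gitdiff

import "testing"

func TestParseGitHeaderNameQuoting(t *testing.T) {
	cases := []struct {
		header, want string
	}{
		{`a/file.txt b/file.txt`, "file.txt"},     // 1) unquoted unquoted
		{`a/file.txt "b/file.txt"`, "file.txt"},   // 2) unquoted "quoted"
		{`"a/file.txt" b/file.txt`, "file.txt"},   // 3) "quoted" unquoted
		{`"a/file.txt" "b/file.txt"`, "file.txt"}, // 4) "quoted" "quoted"
		{`a/old.txt b/new.txt`, ""},               // rename: names differ, empty default
	}
	for _, c := range cases {
		got, err := parseGitHeaderName(c.header)
		if err != nil || got != c.want {
			t.Errorf("parseGitHeaderName(%q) = %q, %v; want %q", c.header, got, err, c.want)
		}
	}
}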
@@ -260,12 +324,12 @@ func parseGitHeaderNewName(f *File, line, defaultName string) error { } func parseGitHeaderOldMode(f *File, line, defaultName string) (err error) { - f.OldMode, err = parseMode(line) + f.OldMode, err = parseMode(strings.TrimSpace(line)) return } func parseGitHeaderNewMode(f *File, line, defaultName string) (err error) { - f.NewMode, err = parseMode(line) + f.NewMode, err = parseMode(strings.TrimSpace(line)) return } @@ -283,25 +347,25 @@ func parseGitHeaderCreatedMode(f *File, line, defaultName string) error { func parseGitHeaderCopyFrom(f *File, line, defaultName string) (err error) { f.IsCopy = true - f.OldName, _, err = parseName(line, -1, 0) + f.OldName, _, err = parseName(line, 0, 0) return } func parseGitHeaderCopyTo(f *File, line, defaultName string) (err error) { f.IsCopy = true - f.NewName, _, err = parseName(line, -1, 0) + f.NewName, _, err = parseName(line, 0, 0) return } func parseGitHeaderRenameFrom(f *File, line, defaultName string) (err error) { f.IsRename = true - f.OldName, _, err = parseName(line, -1, 0) + f.OldName, _, err = parseName(line, 0, 0) return } func parseGitHeaderRenameTo(f *File, line, defaultName string) (err error) { f.IsRename = true - f.NewName, _, err = parseName(line, -1, 0) + f.NewName, _, err = parseName(line, 0, 0) return } @@ -349,14 +413,14 @@ func parseMode(s string) (os.FileMode, error) { // parseName extracts a file name from the start of a string and returns the // name and the index of the first character after the name. If the name is -// unquoted and term is non-negative, parsing stops at the first occurrence of -// term. Otherwise parsing of unquoted names stops at the first space or tab. +// unquoted and term is non-zero, parsing stops at the first occurrence of +// term. // // If the name is exactly "/dev/null", no further processing occurs. Otherwise, // if dropPrefix is greater than zero, that number of prefix components // separated by forward slashes are dropped from the name and any duplicate // slashes are collapsed. -func parseName(s string, term rune, dropPrefix int) (name string, n int, err error) { +func parseName(s string, term byte, dropPrefix int) (name string, n int, err error) { if len(s) > 0 && s[0] == '"' { name, n, err = parseQuotedName(s) } else { @@ -387,15 +451,12 @@ func parseQuotedName(s string) (name string, n int, err error) { return name, n, err } -func parseUnquotedName(s string, term rune) (name string, n int, err error) { +func parseUnquotedName(s string, term byte) (name string, n int, err error) { for n = 0; n < len(s); n++ { if s[n] == '\n' { break } - if term >= 0 && rune(s[n]) == term { - break - } - if term < 0 && (s[n] == ' ' || s[n] == '\t') { + if term > 0 && s[n] == term { break } } @@ -440,6 +501,17 @@ func cleanName(name string, drop int) string { return b.String() } +// trimTreePrefix removes up to n leading directory components from name. +func trimTreePrefix(name string, n int) string { + i := 0 + for ; i < len(name) && n > 0; i++ { + if name[i] == '/' { + n-- + } + } + return name[i:] +} + // hasEpochTimestamp returns true if the string ends with a POSIX-formatted // timestamp for the UNIX epoch after a tab character. According to git, this // is used by GNU diff to mark creations and deletions. 
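Among the helpers in the hunk above, trimTreePrefix is what strips the a/ and b/ tree prefixes from header names. A standalone sketch, with the function body copied verbatim from the hunk:

package main

import "fmt"

// trimTreePrefix removes up to n leading directory components from name
// (copied from the hunk above for illustration).
func trimTreePrefix(name string, n int) string {
	i := 0
	for ; i < len(name) && n > 0; i++ {
		if name[i] == '/' {
			n--
		}
	}
	return name[i:]
}

func main() {
	fmt.Println(trimTreePrefix("a/dir/file.txt", 1)) // dir/file.txt
	fmt.Println(trimTreePrefix("a/dir/file.txt", 2)) // file.txt
	fmt.Println(trimTreePrefix("no-slash.txt", 1))   // "" (name has fewer than n components)
}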
@@ -455,7 +527,7 @@ func hasEpochTimestamp(s string) bool { // a valid timestamp can have optional ':' in zone specifier // remove that if it exists so we have a single format - if ts[len(ts)-3] == ':' { + if len(ts) >= 3 && ts[len(ts)-3] == ':' { ts = ts[:len(ts)-3] + ts[len(ts)-2:] } @@ -468,3 +540,7 @@ func hasEpochTimestamp(s string) bool { } return true } + +func isSpace(c byte) bool { + return c == ' ' || c == '\t' || c == '\n' +} diff --git a/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/io.go b/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/io.go index af5c8473..81432380 100644 --- a/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/io.go +++ b/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/io.go @@ -5,21 +5,23 @@ import ( "io" ) +const ( + byteBufferSize = 32 * 1024 // from io.Copy + lineBufferSize = 32 + indexBufferSize = 1024 +) + // LineReaderAt is the interface that wraps the ReadLinesAt method. // -// ReadLinesAt reads len(lines) into lines starting at line offset in the -// input source. It returns number of full lines read (0 <= n <= len(lines)) -// and any error encountered. Line numbers are zero-indexed. +// ReadLinesAt reads len(lines) into lines starting at line offset. It returns +// the number of lines read (0 <= n <= len(lines)) and any error encountered. +// Line numbers are zero-indexed. // // If n < len(lines), ReadLinesAt returns a non-nil error explaining why more // lines were not returned. // -// Each full line includes the line ending character(s). If the last line of -// the input does not have a line ending character, ReadLinesAt returns the -// content of the line and io.EOF. -// -// If the content of the input source changes after the first call to -// ReadLinesAt, the behavior of future calls is undefined. +// Lines read by ReadLinesAt include the newline character. The last line does +// not have a final newline character if the input ends without one. type LineReaderAt interface { ReadLinesAt(lines [][]byte, offset int64) (n int, err error) } @@ -65,7 +67,7 @@ func (r *lineReaderAt) ReadLinesAt(lines [][]byte, offset int64) (n int, err err lines[n] = buf[start:end] } - if n < count || buf[len(buf)-1] != '\n' { + if n < count { return n, io.EOF } return n, nil @@ -75,13 +77,9 @@ func (r *lineReaderAt) ReadLinesAt(lines [][]byte, offset int64) (n int, err err // for line or a read returns io.EOF. It returns an error if and only if there // is an error reading data. func (r *lineReaderAt) indexTo(line int64) error { - var buf [1024]byte - - var offset int64 - if len(r.index) > 0 { - offset = r.index[len(r.index)-1] - } + var buf [indexBufferSize]byte + offset := r.lastOffset() for int64(len(r.index)) < line { n, err := r.r.ReadAt(buf[:], offset) if err != nil && err != io.EOF { @@ -94,7 +92,7 @@ func (r *lineReaderAt) indexTo(line int64) error { } } if err == io.EOF { - if n > 0 && buf[n-1] != '\n' { + if offset > r.lastOffset() { r.index = append(r.index, offset) } r.eof = true @@ -104,6 +102,13 @@ func (r *lineReaderAt) indexTo(line int64) error { return nil } +func (r *lineReaderAt) lastOffset() int64 { + if n := len(r.index); n > 0 { + return r.index[n-1] + } + return 0 +} + // readBytes reads the bytes of the n lines starting at line and returns the // bytes and the offset of the first byte in the underlying source. 
func (r *lineReaderAt) readBytes(line, n int64) (b []byte, offset int64, err error) { @@ -147,11 +152,6 @@ func isLen(r io.ReaderAt, n int64) (bool, error) { return false, err } -const ( - byteBufferSize = 32 * 1024 // from io.Copy - lineBufferSize = 32 -) - // copyFrom writes bytes starting from offset off in src to dst stopping at the // end of src or at the first error. copyFrom returns the number of bytes // written and any error. diff --git a/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/parser.go b/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/parser.go index d44465a9..d2b29cb1 100644 --- a/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/parser.go +++ b/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/parser.go @@ -12,6 +12,10 @@ import ( // Parse parses a patch with changes to one or more files. Any content before // the first file is returned as the second value. If an error occurs while // parsing, it returns all files parsed before the error. +// +// Parse expects to receive a single patch. If the input may contain multiple +// patches (for example, if it is an mbox file), callers should split it into +// individual patches and call Parse on each one. func Parse(r io.Reader) ([]*File, string, error) { p := newParser(r) diff --git a/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/patch_header.go b/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/patch_header.go index ae508701..f047059b 100644 --- a/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/patch_header.go +++ b/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/patch_header.go @@ -5,6 +5,8 @@ import ( "errors" "fmt" "io" + "io/ioutil" + "mime/quotedprintable" "net/mail" "strconv" "strings" @@ -13,8 +15,9 @@ import ( ) const ( - mailHeaderPrefix = "From " - prettyHeaderPrefix = "commit " + mailHeaderPrefix = "From " + prettyHeaderPrefix = "commit " + mailMinimumHeaderPrefix = "From:" ) // PatchHeader is a parsed version of the preamble content that appears before @@ -65,55 +68,6 @@ func (h *PatchHeader) Message() string { return msg.String() } -// PatchIdentity identifies a person who authored or committed a patch. -type PatchIdentity struct { - Name string - Email string -} - -func (i PatchIdentity) String() string { - name := i.Name - if name == "" { - name = `""` - } - return fmt.Sprintf("%s <%s>", name, i.Email) -} - -// ParsePatchIdentity parses a patch identity string. A valid string contains a -// non-empty name followed by an email address in angle brackets. Like Git, -// ParsePatchIdentity does not require that the email address is valid or -// properly formatted, only that it is non-empty. The name must not contain a -// left angle bracket, '<', and the email address must not contain a right -// angle bracket, '>'. -func ParsePatchIdentity(s string) (PatchIdentity, error) { - var emailStart, emailEnd int - for i, c := range s { - if c == '<' && emailStart == 0 { - emailStart = i + 1 - } - if c == '>' && emailStart > 0 { - emailEnd = i - break - } - } - if emailStart > 0 && emailEnd == 0 { - return PatchIdentity{}, fmt.Errorf("invalid identity string: unclosed email section: %s", s) - } - - var name, email string - if emailStart > 0 { - name = strings.TrimSpace(s[:emailStart-1]) - } - if emailStart > 0 && emailEnd > 0 { - email = strings.TrimSpace(s[emailStart:emailEnd]) - } - if name == "" || email == "" { - return PatchIdentity{}, fmt.Errorf("invalid identity string: %s", s) - } - - return PatchIdentity{Name: name, Email: email}, nil -} - // ParsePatchDate parses a patch date string. 
It returns the parsed time or an // error if s has an unknown format. ParsePatchDate supports the iso, rfc, // short, raw, unix, and default formats (with local variants) used by the @@ -162,57 +116,97 @@ func ParsePatchDate(s string) (time.Time, error) { return time.Time{}, fmt.Errorf("unknown date format: %s", s) } -// ParsePatchHeader parses a preamble string as returned by Parse into a +// A PatchHeaderOption modifies the behavior of ParsePatchHeader. +type PatchHeaderOption func(*patchHeaderOptions) + +// SubjectCleanMode controls how ParsePatchHeader cleans subject lines when +// parsing mail-formatted patches. +type SubjectCleanMode int + +const ( + // SubjectCleanWhitespace removes leading and trailing whitespace. + SubjectCleanWhitespace SubjectCleanMode = iota + + // SubjectCleanAll removes leading and trailing whitespace, leading "Re:", + // "re:", and ":" strings, and leading strings enclosed by '[' and ']'. + // This is the default behavior of git (see `git mailinfo`) and this + // package. + SubjectCleanAll + + // SubjectCleanPatchOnly is the same as SubjectCleanAll, but only removes + // leading strings enclosed by '[' and ']' if they start with "PATCH". + SubjectCleanPatchOnly +) + +// WithSubjectCleanMode sets the SubjectCleanMode for header parsing. By +// default, uses SubjectCleanAll. +func WithSubjectCleanMode(m SubjectCleanMode) PatchHeaderOption { + return func(opts *patchHeaderOptions) { + opts.subjectCleanMode = m + } +} + +type patchHeaderOptions struct { + subjectCleanMode SubjectCleanMode +} + +// ParsePatchHeader parses the preamble string returned by [Parse] into a // PatchHeader. Due to the variety of header formats, some fields of the parsed // PatchHeader may be unset after parsing. // // Supported formats are the short, medium, full, fuller, and email pretty -// formats used by git diff, git log, and git show and the UNIX mailbox format -// used by git format-patch. +// formats used by `git diff`, `git log`, and `git show` and the UNIX mailbox +// format used by `git format-patch`. // -// If ParsePatchHeader detects that it is handling an email, it will -// remove extra content at the beginning of the title line, such as -// `[PATCH]` or `Re:` in the same way that `git mailinfo` does. -// SubjectPrefix will be set to the value of this removed string. -// (`git mailinfo` is the core part of `git am` that pulls information -// out of an individual mail.) +// When parsing mail-formatted headers, ParsePatchHeader tries to remove +// email-specific content from the title and body: // -// Additionally, if ParsePatchHeader detects that it's handling an -// email, it will remove a `---` line and put anything after it into -// BodyAppendix. +// - Based on the SubjectCleanMode, remove prefixes like reply markers and +// "[PATCH]" strings from the subject, saving any removed content in the +// SubjectPrefix field. Parsing always discards leading and trailing +// whitespace from the subject line. The default mode is SubjectCleanAll. // -// Those wishing the effect of a plain `git am` should use -// `PatchHeader.Title + "\n" + PatchHeader.Body` (or -// `PatchHeader.Message()`). Those wishing to retain the subject -// prefix and appendix material should use `PatchHeader.SubjectPrefix -// + PatchHeader.Title + "\n" + PatchHeader.Body + "\n" + -// PatchHeader.BodyAppendix`. 
-func ParsePatchHeader(s string) (*PatchHeader, error) { - r := bufio.NewReader(strings.NewReader(s)) +// - If the body contains a "---" line (3 hyphens), remove that line and any +// content after it from the body and save it in the BodyAppendix field. +// +// ParsePatchHeader tries to process content it does not understand without +// returning errors, but will return errors if well-identified content like +// dates or identities uses unknown or invalid formats. +func ParsePatchHeader(header string, options ...PatchHeaderOption) (*PatchHeader, error) { + opts := patchHeaderOptions{ + subjectCleanMode: SubjectCleanAll, // match git defaults + } + for _, optFn := range options { + optFn(&opts) + } - var line string - for { - var err error - line, err = r.ReadString('\n') - if err == io.EOF { - break - } - if err != nil { - return nil, err - } + header = strings.TrimSpace(header) + if header == "" { + return &PatchHeader{}, nil + } - line = strings.TrimSpace(line) - if len(line) > 0 { - break - } + var firstLine, rest string + if idx := strings.IndexByte(header, '\n'); idx >= 0 { + firstLine = header[:idx] + rest = header[idx+1:] + } else { + firstLine = header + rest = "" } switch { - case strings.HasPrefix(line, mailHeaderPrefix): - return parseHeaderMail(line, r) - case strings.HasPrefix(line, prettyHeaderPrefix): - return parseHeaderPretty(line, r) + case strings.HasPrefix(firstLine, mailHeaderPrefix): + return parseHeaderMail(firstLine, strings.NewReader(rest), opts) + + case strings.HasPrefix(firstLine, mailMinimumHeaderPrefix): + // With a minimum header, the first line is part of the actual mail + // content and needs to be parsed as part of the "rest" + return parseHeaderMail("", strings.NewReader(header), opts) + + case strings.HasPrefix(firstLine, prettyHeaderPrefix): + return parseHeaderPretty(firstLine, strings.NewReader(rest)) } + return nil, errors.New("unrecognized patch header format") } @@ -227,7 +221,7 @@ func parseHeaderPretty(prettyLine string, r io.Reader) (*PatchHeader, error) { h := &PatchHeader{} - prettyLine = prettyLine[len(prettyHeaderPrefix):] + prettyLine = strings.TrimPrefix(prettyLine, prettyHeaderPrefix) if i := strings.IndexByte(prettyLine, ' '); i > 0 { h.SHA = prettyLine[:i] } else { @@ -291,7 +285,7 @@ func parseHeaderPretty(prettyLine string, r io.Reader) (*PatchHeader, error) { h.Title = title if title != "" { - // Don't check for an appendix + // Don't check for an appendix, pretty headers do not contain them body, _ := scanMessageBody(s, indent, false) if s.Err() != nil { return nil, s.Err() @@ -360,7 +354,7 @@ func scanMessageBody(s *bufio.Scanner, indent string, separateAppendix bool) (st return body.String(), appendix.String() } -func parseHeaderMail(mailLine string, r io.Reader) (*PatchHeader, error) { +func parseHeaderMail(mailLine string, r io.Reader, opts patchHeaderOptions) (*PatchHeader, error) { msg, err := mail.ReadMessage(r) if err != nil { return nil, err @@ -368,21 +362,20 @@ func parseHeaderMail(mailLine string, r io.Reader) (*PatchHeader, error) { h := &PatchHeader{} - mailLine = mailLine[len(mailHeaderPrefix):] - if i := strings.IndexByte(mailLine, ' '); i > 0 { - h.SHA = mailLine[:i] + if strings.HasPrefix(mailLine, mailHeaderPrefix) { + mailLine = strings.TrimPrefix(mailLine, mailHeaderPrefix) + if i := strings.IndexByte(mailLine, ' '); i > 0 { + h.SHA = mailLine[:i] + } } - addrs, err := msg.Header.AddressList("From") - if err != nil && !errors.Is(err, mail.ErrHeaderNotPresent) { - return nil, err - } - if len(addrs) > 0 { - addr =
addrs[0] - if addr.Name == "" { - return nil, fmt.Errorf("invalid user string: %s", addr) + from := msg.Header.Get("From") + if from != "" { + u, err := ParsePatchIdentity(from) + if err != nil { + return nil, err } - h.Author = &PatchIdentity{Name: addr.Name, Email: addr.Address} + h.Author = &u } date := msg.Header.Get("Date") @@ -395,7 +388,7 @@ func parseHeaderMail(mailLine string, r io.Reader) (*PatchHeader, error) { } subject := msg.Header.Get("Subject") - h.SubjectPrefix, h.Title = parseSubject(subject) + h.SubjectPrefix, h.Title = cleanSubject(subject, opts.subjectCleanMode) s := bufio.NewScanner(msg.Body) h.Body, h.BodyAppendix = scanMessageBody(s, "", true) @@ -406,23 +399,24 @@ func parseHeaderMail(mailLine string, r io.Reader) (*PatchHeader, error) { return h, nil } -// Takes an email subject and returns the patch prefix and commit -// title. i.e., `[PATCH v3 3/5] Implement foo` would return `[PATCH -// v3 3/5] ` and `Implement foo` -func parseSubject(s string) (string, string) { - // This is meant to be compatible with - // https://github.com/git/git/blob/master/mailinfo.c:cleanup_subject(). - // If compatibility with `git am` drifts, go there to see if there - // are any updates. +func cleanSubject(s string, mode SubjectCleanMode) (prefix string, subject string) { + switch mode { + case SubjectCleanAll, SubjectCleanPatchOnly: + case SubjectCleanWhitespace: + return "", strings.TrimSpace(decodeSubject(s)) + default: + panic(fmt.Sprintf("unknown clean mode: %d", mode)) + } + + // Based on the algorithm from Git in mailinfo.c:cleanup_subject() + // If compatibility with `git am` drifts, go there to see if there are any updates. at := 0 for at < len(s) { switch s[at] { case 'r', 'R': // Detect re:, Re:, rE: and RE: - if at+2 < len(s) && - (s[at+1] == 'e' || s[at+1] == 'E') && - s[at+2] == ':' { + if at+2 < len(s) && (s[at+1] == 'e' || s[at+1] == 'E') && s[at+2] == ':' { at += 3 continue } @@ -433,23 +427,44 @@ func parseSubject(s string) (string, string) { continue case '[': - // Look for closing parenthesis - j := at + 1 - for ; j < len(s); j++ { - if s[j] == ']' { - break + if i := strings.IndexByte(s[at:], ']'); i > 0 { + if mode == SubjectCleanAll || strings.Contains(s[at:at+i+1], "PATCH") { + at += i + 1 + continue } } - - if j < len(s) { - at = j + 1 - continue - } } - // Only loop if we actually removed something + // Nothing was removed, end processing break } - return s[:at], s[at:] + prefix = strings.TrimLeftFunc(s[:at], unicode.IsSpace) + subject = strings.TrimRightFunc(decodeSubject(s[at:]), unicode.IsSpace) + return +} + +// Decodes a subject line. Currently only supports quoted-printable UTF-8. This format is the result +// of a `git format-patch` when the commit title has a non-ASCII character (i.e. an emoji). +// See for reference: https://stackoverflow.com/questions/27695749/gmail-api-not-respecting-utf-encoding-in-subject +func decodeSubject(encoded string) string { + if !strings.HasPrefix(encoded, "=?UTF-8?q?") { + // not UTF-8 encoded + return encoded + } + + // If the subject is too long, `git format-patch` may produce a subject line across + // multiple lines. 
When parsed, this can look like the following: // + payload := " " + encoded + payload = strings.ReplaceAll(payload, " =?UTF-8?q?", "") + payload = strings.ReplaceAll(payload, "?=", "") + + decoded, err := ioutil.ReadAll(quotedprintable.NewReader(strings.NewReader(payload))) + if err != nil { + // if err, abort decoding and return original subject + return encoded + } + + return string(decoded) } diff --git a/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/patch_identity.go b/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/patch_identity.go new file mode 100644 index 00000000..018f80c7 --- /dev/null +++ b/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/patch_identity.go @@ -0,0 +1,166 @@ +package gitdiff + +import ( + "fmt" + "strings" +) + +// PatchIdentity identifies a person who authored or committed a patch. +type PatchIdentity struct { + Name string + Email string +} + +func (i PatchIdentity) String() string { + name := i.Name + if name == "" { + name = `""` + } + return fmt.Sprintf("%s <%s>", name, i.Email) +} + +// ParsePatchIdentity parses a patch identity string. A patch identity contains +// an email address and an optional name in [RFC 5322] format. This is either a +// plain email address or a name followed by an address in angle brackets: +// +// author@example.com +// Author Name <author@example.com> +// +// If the input is not one of these formats, ParsePatchIdentity applies a +// heuristic to separate the name and email portions. If both the name and +// email are missing or empty, ParsePatchIdentity returns an error. It +// otherwise does not validate the result. +// +// [RFC 5322]: https://datatracker.ietf.org/doc/html/rfc5322 +func ParsePatchIdentity(s string) (PatchIdentity, error) { + s = normalizeSpace(s) + s = unquotePairs(s) + + var name, email string + if at := strings.IndexByte(s, '@'); at >= 0 { + start, end := at, at + for start >= 0 && !isRFC5332Space(s[start]) && s[start] != '<' { + start-- + } + for end < len(s) && !isRFC5332Space(s[end]) && s[end] != '>' { + end++ + } + email = s[start+1 : end] + + // Adjust the boundaries so that we drop angle brackets, but keep + // spaces when removing the email to form the name. + if start < 0 || s[start] != '<' { + start++ + } + if end >= len(s) || s[end] != '>' { + end-- + } + name = s[:start] + s[end+1:] + } else { + start, end := 0, 0 + for i := 0; i < len(s); i++ { + if s[i] == '<' && start == 0 { + start = i + 1 + } + if s[i] == '>' && start > 0 { + end = i + break + } + } + if start > 0 && end >= start { + email = strings.TrimSpace(s[start:end]) + name = s[:start-1] + } + } + + // After extracting the email, the name might contain extra whitespace + // again and may be surrounded by comment characters. The git source gives + // these examples of when this can happen: + // + // "Name <email@domain>" + // "email@domain (Name)" + // "Name <email@domain> (Comment)" + // + name = normalizeSpace(name) + if strings.HasPrefix(name, "(") && strings.HasSuffix(name, ")") { + name = name[1 : len(name)-1] + } + name = strings.TrimSpace(name) + + // If the name is empty or contains email-like characters, use the email + // instead (assuming one exists) + if name == "" || strings.ContainsAny(name, "@<>") { + name = email + } + + if name == "" && email == "" { + return PatchIdentity{}, fmt.Errorf("invalid identity string %q", s) + } + return PatchIdentity{Name: name, Email: email}, nil +} + +// unquotePairs processes the RFC5322 tokens "quoted-string" and "comment" to +// remove any "quoted-pairs" (backslash-escaped characters).
It also removes +// the quotes from any quoted strings, but leaves the comment delimiters. +func unquotePairs(s string) string { + quote := false + comments := 0 + escaped := false + + var out strings.Builder + for i := 0; i < len(s); i++ { + if escaped { + escaped = false + } else { + switch s[i] { + case '\\': + // quoted-pair is only allowed in quoted-string/comment + if quote || comments > 0 { + escaped = true + continue // drop '\' character + } + + case '"': + if comments == 0 { + quote = !quote + continue // drop '"' character + } + + case '(': + if !quote { + comments++ + } + case ')': + if comments > 0 { + comments-- + } + } + } + out.WriteByte(s[i]) + } + return out.String() +} + +// normalizeSpace trims leading and trailing whitespace from s and converts +// inner sequences of one or more whitespace characters to single spaces. +func normalizeSpace(s string) string { + var sb strings.Builder + for i := 0; i < len(s); i++ { + c := s[i] + if !isRFC5332Space(c) { + if sb.Len() > 0 && isRFC5332Space(s[i-1]) { + sb.WriteByte(' ') + } + sb.WriteByte(c) + } + } + return sb.String() +} + +func isRFC5332Space(c byte) bool { + switch c { + case '\t', '\n', '\r', ' ': + return true + } + return false +} diff --git a/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/text.go b/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/text.go index 04de8f4c..ee307923 100644 --- a/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/text.go +++ b/vendor/github.com/bluekeyes/go-gitdiff/gitdiff/text.go @@ -79,14 +79,8 @@ func (p *parser) ParseTextChunk(frag *TextFragment) error { return p.Errorf(0, "no content following fragment header") } - isNoNewlineLine := func(s string) bool { - // test for "\ No newline at end of file" by prefix because the text - // changes by locale (git claims all versions are at least 12 chars) - return len(s) >= 12 && s[:2] == "\\ " - } - oldLines, newLines := frag.OldLines, frag.NewLines - for { + for oldLines > 0 || newLines > 0 { line := p.Line(0) op, data := line[0], line[1:] @@ -113,13 +107,14 @@ func (p *parser) ParseTextChunk(frag *TextFragment) error { frag.LinesAdded++ frag.TrailingContext = 0 frag.Lines = append(frag.Lines, Line{OpAdd, data}) - default: + case '\\': // this may appear in middle of fragment if it's for a deleted line - if isNoNewlineLine(line) { - last := &frag.Lines[len(frag.Lines)-1] - last.Line = strings.TrimSuffix(last.Line, "\n") + if isNoNewlineMarker(line) { + removeLastNewline(frag) break } + fallthrough + default: // TODO(bkeyes): if this is because we hit the next header, it // would be helpful to return the miscounts line error. 
We could // either test for the common headers ("@@ -", "diff --git") or @@ -128,11 +123,6 @@ func (p *parser) ParseTextChunk(frag *TextFragment) error { return p.Errorf(0, "invalid line operation: %q", op) } - next := p.Line(1) - if oldLines <= 0 && newLines <= 0 && !isNoNewlineLine(next) { - break - } - if err := p.Next(); err != nil { if err == io.EOF { break @@ -145,13 +135,35 @@ func (p *parser) ParseTextChunk(frag *TextFragment) error { hdr := max(frag.OldLines-oldLines, frag.NewLines-newLines) + 1 return p.Errorf(-hdr, "fragment header miscounts lines: %+d old, %+d new", -oldLines, -newLines) } - - if err := p.Next(); err != nil && err != io.EOF { - return err + if frag.LinesAdded == 0 && frag.LinesDeleted == 0 { + return p.Errorf(0, "fragment contains no changes") } + + // check for a final "no newline" marker since it is not included in the + // counters used to stop the loop above + if isNoNewlineMarker(p.Line(0)) { + removeLastNewline(frag) + if err := p.Next(); err != nil && err != io.EOF { + return err + } + } + return nil } +func isNoNewlineMarker(s string) bool { + // test for "\ No newline at end of file" by prefix because the text + // changes by locale (git claims all versions are at least 12 chars) + return len(s) >= 12 && s[:2] == "\\ " +} + +func removeLastNewline(frag *TextFragment) { + if len(frag.Lines) > 0 { + last := &frag.Lines[len(frag.Lines)-1] + last.Line = strings.TrimSuffix(last.Line, "\n") + } +} + func parseRange(s string) (start int64, end int64, err error) { parts := strings.SplitN(s, ",", 2) diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md index 8bf0e5b7..33c88305 100644 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -70,3 +70,5 @@ benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') - [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) - [FreeCache](https://github.com/coocood/freecache) - [FastCache](https://github.com/VictoriaMetrics/fastcache) +- [Ristretto](https://github.com/dgraph-io/ristretto) +- [Badger](https://github.com/dgraph-io/badger) diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go index a9e0d45c..78bddf1c 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -19,10 +19,13 @@ const ( // Store the primes in an array as well. // // The consts are used when possible in Go code to avoid MOVs but we need a -// contiguous array of the assembly code. +// contiguous array for the assembly code. var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. +// +// Note that a zero-valued Digest is not ready to receive writes. +// Call Reset or create a Digest using New before calling other methods. type Digest struct { v1 uint64 v2 uint64 @@ -33,19 +36,31 @@ type Digest struct { n int // how much of mem is used } -// New creates a new Digest that computes the 64-bit xxHash algorithm. +// New creates a new Digest with a zero seed. func New() *Digest { + return NewWithSeed(0) +} + +// NewWithSeed creates a new Digest with the given seed. +func NewWithSeed(seed uint64) *Digest { var d Digest - d.Reset() + d.ResetWithSeed(seed) return &d } // Reset clears the Digest's state so that it can be reused. +// It uses a seed value of zero. 
func (d *Digest) Reset() { - d.v1 = primes[0] + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -primes[0] + d.ResetWithSeed(0) +} + +// ResetWithSeed clears the Digest's state so that it can be reused. +// It uses the given seed to initialize the state. +func (d *Digest) ResetWithSeed(seed uint64) { + d.v1 = seed + prime1 + prime2 + d.v2 = seed + prime2 + d.v3 = seed + d.v4 = seed - prime1 d.total = 0 d.n = 0 } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go index 9216e0a4..78f95f25 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -6,7 +6,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. // //go:noescape func Sum64(b []byte) uint64 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 26df13bb..118e49e8 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -3,7 +3,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. func Sum64(b []byte) uint64 { // A simpler version would be // d := New() diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index e86f1b5f..05f5e7df 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -5,7 +5,7 @@ package xxhash -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. func Sum64String(s string) uint64 { return Sum64([]byte(s)) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 1c1638fd..cf9d42ae 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -33,7 +33,7 @@ import ( // // See https://github.com/golang/go/issues/42739 for discussion. -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. // It may be faster than Sum64([]byte(s)) by avoiding a copy. func Sum64String(s string) uint64 { b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) diff --git a/vendor/github.com/cloudflare/circl/LICENSE b/vendor/github.com/cloudflare/circl/LICENSE new file mode 100644 index 00000000..67edaa90 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/LICENSE @@ -0,0 +1,57 @@ +Copyright (c) 2019 Cloudflare. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Cloudflare nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+========================================================================
+
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
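As context for the ParseTextChunk hunk above: the vendored patch parser now detects git's "\ No newline at end of file" marker by prefix rather than exact text, since git localizes the message. A minimal standalone sketch of that predicate (the sample inputs below are illustrative, not taken from the patch):

    package main

    import "fmt"

    // isNoNewlineMarker, copied from the hunk above: git localizes the
    // "\ No newline at end of file" text, but claims every variant is at
    // least 12 characters, and it always starts with a backslash and a space.
    func isNoNewlineMarker(s string) bool {
        return len(s) >= 12 && s[:2] == "\\ "
    }

    func main() {
        fmt.Println(isNoNewlineMarker(`\ No newline at end of file`)) // true
        fmt.Println(isNoNewlineMarker(`+added line in a hunk body`))  // false
        fmt.Println(isNoNewlineMarker(`\ short`))                     // false: under 12 chars
    }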
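The xxhash hunks above add seeded construction (NewWithSeed, ResetWithSeed) alongside the existing zero-seed New/Reset, and document that a zero-valued Digest is not ready for writes. A minimal sketch using only the functions those hunks show (the seed values are arbitrary):

    package main

    import (
        "fmt"

        "github.com/cespare/xxhash/v2"
    )

    func main() {
        // One-shot hash with a zero seed.
        fmt.Printf("%#016x\n", xxhash.Sum64([]byte("hello")))

        // Streaming digest with an explicit seed; per the new doc comment,
        // construct via New or NewWithSeed rather than using a zero value.
        d := xxhash.NewWithSeed(42)
        _, _ = d.Write([]byte("hello"))
        fmt.Printf("%#016x\n", d.Sum64())

        // Reuse the digest under a different seed.
        d.ResetWithSeed(7)
    }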
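The circl/dh/x25519 files vendored below expose a small API (Key, Size, KeyGen, Shared; see key.go further down). A usage sketch consistent with those signatures, assuming crypto/rand as the source of secret-key bytes:

    package main

    import (
        "crypto/rand"
        "fmt"

        "github.com/cloudflare/circl/dh/x25519"
    )

    func main() {
        var aliceSecret, alicePublic, bobSecret, bobPublic x25519.Key

        // Secret keys are 32 random bytes; KeyGen clamps them internally.
        if _, err := rand.Read(aliceSecret[:]); err != nil {
            panic(err)
        }
        if _, err := rand.Read(bobSecret[:]); err != nil {
            panic(err)
        }

        x25519.KeyGen(&alicePublic, &aliceSecret)
        x25519.KeyGen(&bobPublic, &bobSecret)

        var k1, k2 x25519.Key
        // Shared reports false when the peer's public key is a low-order point.
        ok1 := x25519.Shared(&k1, &aliceSecret, &bobPublic)
        ok2 := x25519.Shared(&k2, &bobSecret, &alicePublic)
        fmt.Println(ok1 && ok2, k1 == k2) // expect: true true
    }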
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/BUILD.bazel b/vendor/github.com/cloudflare/circl/dh/x25519/BUILD.bazel
new file mode 100644
index 00000000..d1669e59
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/dh/x25519/BUILD.bazel
@@ -0,0 +1,27 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "x25519",
+    srcs = [
+        "curve.go",
+        "curve_amd64.go",
+        "curve_amd64.h",
+        "curve_amd64.s",
+        "curve_generic.go",
+        "curve_noasm.go",
+        "doc.go",
+        "key.go",
+        "table.go",
+    ],
+    importmap = "peridot.resf.org/vendor/github.com/cloudflare/circl/dh/x25519",
+    importpath = "github.com/cloudflare/circl/dh/x25519",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//vendor/github.com/cloudflare/circl/math/fp25519",
+    ] + select({
+        "@io_bazel_rules_go//go/platform:amd64": [
+            "//vendor/golang.org/x/sys/cpu",
+        ],
+        "//conditions:default": [],
+    }),
+)
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve.go
new file mode 100644
index 00000000..f9057c2b
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve.go
@@ -0,0 +1,96 @@
+package x25519
+
+import (
+    fp "github.com/cloudflare/circl/math/fp25519"
+)
+
+// ladderJoye calculates a fixed-point multiplication with the generator point.
+// The algorithm is the right-to-left Joye's ladder as described
+// in "How to precompute a ladder" in SAC'2017.
+func ladderJoye(k *Key) {
+    w := [5]fp.Elt{} // [mu,x1,z1,x2,z2] order must be preserved.
+    fp.SetOne(&w[1]) // x1 = 1
+    fp.SetOne(&w[2]) // z1 = 1
+    w[3] = fp.Elt{   // x2 = G-S
+        0xbd, 0xaa, 0x2f, 0xc8, 0xfe, 0xe1, 0x94, 0x7e,
+        0xf8, 0xed, 0xb2, 0x14, 0xae, 0x95, 0xf0, 0xbb,
+        0xe2, 0x48, 0x5d, 0x23, 0xb9, 0xa0, 0xc7, 0xad,
+        0x34, 0xab, 0x7c, 0xe2, 0xee, 0xcd, 0xae, 0x1e,
+    }
+    fp.SetOne(&w[4]) // z2 = 1
+
+    const n = 255
+    const h = 3
+    swap := uint(1)
+    for s := 0; s < n-h; s++ {
+        i := (s + h) / 8
+        j := (s + h) % 8
+        bit := uint((k[i] >> uint(j)) & 1)
+        copy(w[0][:], tableGenerator[s*Size:(s+1)*Size])
+        diffAdd(&w, swap^bit)
+        swap = bit
+    }
+    for s := 0; s < h; s++ {
+        double(&w[1], &w[2])
+    }
+    toAffine((*[fp.Size]byte)(k), &w[1], &w[2])
+}
+
+// ladderMontgomery calculates a generic scalar point multiplication
+// The algorithm implemented is the left-to-right Montgomery's ladder.
+func ladderMontgomery(k, xP *Key) {
+    w := [5]fp.Elt{} // [x1, x2, z2, x3, z3] order must be preserved.
+    w[0] = *(*fp.Elt)(xP) // x1 = xP
+    fp.SetOne(&w[1])      // x2 = 1
+    w[3] = *(*fp.Elt)(xP) // x3 = xP
+    fp.SetOne(&w[4])      // z3 = 1
+
+    move := uint(0)
+    for s := 255 - 1; s >= 0; s-- {
+        i := s / 8
+        j := s % 8
+        bit := uint((k[i] >> uint(j)) & 1)
+        ladderStep(&w, move^bit)
+        move = bit
+    }
+    toAffine((*[fp.Size]byte)(k), &w[1], &w[2])
+}
+
+func toAffine(k *[fp.Size]byte, x, z *fp.Elt) {
+    fp.Inv(z, z)
+    fp.Mul(x, x, z)
+    _ = fp.ToBytes(k[:], x)
+}
+
+var lowOrderPoints = [5]fp.Elt{
+    { /* (0,_,1) point of order 2 on Curve25519 */
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    },
+    { /* (1,_,1) point of order 4 on Curve25519 */
+        0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    },
+    { /* (x,_,1) first point of order 8 on Curve25519 */
+        0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae,
+        0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a,
+        0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd,
+        0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8, 0x00,
+    },
+    { /* (x,_,1) second point of order 8 on Curve25519 */
+        0x5f, 0x9c, 0x95, 0xbc, 0xa3, 0x50, 0x8c, 0x24,
+        0xb1, 0xd0, 0xb1, 0x55, 0x9c, 0x83, 0xef, 0x5b,
+        0x04, 0x44, 0x5c, 0xc4, 0x58, 0x1c, 0x8e, 0x86,
+        0xd8, 0x22, 0x4e, 0xdd, 0xd0, 0x9f, 0x11, 0x57,
+    },
+    { /* (-1,_,1) a point of order 4 on the twist of Curve25519 */
+        0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
+    },
+}
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.go
new file mode 100644
index 00000000..8a3d54c5
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.go
@@ -0,0 +1,30 @@
+//go:build amd64 && !purego
+// +build amd64,!purego
+
+package x25519
+
+import (
+    fp "github.com/cloudflare/circl/math/fp25519"
+    "golang.org/x/sys/cpu"
+)
+
+var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX
+
+var _ = hasBmi2Adx
+
+func double(x, z *fp.Elt)             { doubleAmd64(x, z) }
+func diffAdd(w *[5]fp.Elt, b uint)    { diffAddAmd64(w, b) }
+func ladderStep(w *[5]fp.Elt, b uint) { ladderStepAmd64(w, b) }
+func mulA24(z, x *fp.Elt)             { mulA24Amd64(z, x) }
+
+//go:noescape
+func ladderStepAmd64(w *[5]fp.Elt, b uint)
+
+//go:noescape
+func diffAddAmd64(w *[5]fp.Elt, b uint)
+
+//go:noescape
+func doubleAmd64(x, z *fp.Elt)
+
+//go:noescape
+func mulA24Amd64(z, x *fp.Elt)
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.h b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.h
new file mode 100644
index 00000000..8c1ae4d0
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.h
@@ -0,0 +1,111 @@
+#define ladderStepLeg \
+    addSub(x2,z2) \
+    addSub(x3,z3) \
+    integerMulLeg(b0,x2,z3) \
+    integerMulLeg(b1,x3,z2) \
+    reduceFromDoubleLeg(t0,b0) \
+    reduceFromDoubleLeg(t1,b1) \
+    addSub(t0,t1) \
+    cselect(x2,x3,regMove) \
+    cselect(z2,z3,regMove) \
+    integerSqrLeg(b0,t0) \
+    integerSqrLeg(b1,t1) \
+    reduceFromDoubleLeg(x3,b0) \
+    reduceFromDoubleLeg(z3,b1) \
+    integerMulLeg(b0,x1,z3) \
+    reduceFromDoubleLeg(z3,b0) \
+    integerSqrLeg(b0,x2) \
+    integerSqrLeg(b1,z2) \
+    reduceFromDoubleLeg(x2,b0) \
+    reduceFromDoubleLeg(z2,b1) \
+    subtraction(t0,x2,z2) \
+    multiplyA24Leg(t1,t0) \
+    additionLeg(t1,t1,z2) \
+    integerMulLeg(b0,x2,z2) \
+    integerMulLeg(b1,t0,t1) \
+    reduceFromDoubleLeg(x2,b0) \
+    reduceFromDoubleLeg(z2,b1)
+
+#define ladderStepBmi2Adx \
+    addSub(x2,z2) \
+    addSub(x3,z3) \
+    integerMulAdx(b0,x2,z3) \
+    integerMulAdx(b1,x3,z2) \
+    reduceFromDoubleAdx(t0,b0) \
+    reduceFromDoubleAdx(t1,b1) \
+    addSub(t0,t1) \
+    cselect(x2,x3,regMove) \
+    cselect(z2,z3,regMove) \
+    integerSqrAdx(b0,t0) \
+    integerSqrAdx(b1,t1) \
+    reduceFromDoubleAdx(x3,b0) \
+    reduceFromDoubleAdx(z3,b1) \
+    integerMulAdx(b0,x1,z3) \
+    reduceFromDoubleAdx(z3,b0) \
+    integerSqrAdx(b0,x2) \
+    integerSqrAdx(b1,z2) \
+    reduceFromDoubleAdx(x2,b0) \
+    reduceFromDoubleAdx(z2,b1) \
+    subtraction(t0,x2,z2) \
+    multiplyA24Adx(t1,t0) \
+    additionAdx(t1,t1,z2) \
+    integerMulAdx(b0,x2,z2) \
+    integerMulAdx(b1,t0,t1) \
+    reduceFromDoubleAdx(x2,b0) \
+    reduceFromDoubleAdx(z2,b1)
+
+#define difAddLeg \
+    addSub(x1,z1) \
+    integerMulLeg(b0,z1,ui) \
+    reduceFromDoubleLeg(z1,b0) \
+    addSub(x1,z1) \
+    integerSqrLeg(b0,x1) \
+    integerSqrLeg(b1,z1) \
+    reduceFromDoubleLeg(x1,b0) \
+    reduceFromDoubleLeg(z1,b1) \
+    integerMulLeg(b0,x1,z2) \
+    integerMulLeg(b1,z1,x2) \
+    reduceFromDoubleLeg(x1,b0) \
+    reduceFromDoubleLeg(z1,b1)
+
+#define difAddBmi2Adx \
+    addSub(x1,z1) \
+    integerMulAdx(b0,z1,ui) \
+    reduceFromDoubleAdx(z1,b0) \
+    addSub(x1,z1) \
+    integerSqrAdx(b0,x1) \
+    integerSqrAdx(b1,z1) \
+    reduceFromDoubleAdx(x1,b0) \
+    reduceFromDoubleAdx(z1,b1) \
+    integerMulAdx(b0,x1,z2) \
+    integerMulAdx(b1,z1,x2) \
+    reduceFromDoubleAdx(x1,b0) \
+    reduceFromDoubleAdx(z1,b1)
+
+#define doubleLeg \
+    addSub(x1,z1) \
+    integerSqrLeg(b0,x1) \
+    integerSqrLeg(b1,z1) \
+    reduceFromDoubleLeg(x1,b0) \
+    reduceFromDoubleLeg(z1,b1) \
+    subtraction(t0,x1,z1) \
+    multiplyA24Leg(t1,t0) \
+    additionLeg(t1,t1,z1) \
+    integerMulLeg(b0,x1,z1) \
+    integerMulLeg(b1,t0,t1) \
+    reduceFromDoubleLeg(x1,b0) \
+    reduceFromDoubleLeg(z1,b1)
+
+#define doubleBmi2Adx \
+    addSub(x1,z1) \
+    integerSqrAdx(b0,x1) \
+    integerSqrAdx(b1,z1) \
+    reduceFromDoubleAdx(x1,b0) \
+    reduceFromDoubleAdx(z1,b1) \
+    subtraction(t0,x1,z1) \
+    multiplyA24Adx(t1,t0) \
+    additionAdx(t1,t1,z1) \
+    integerMulAdx(b0,x1,z1) \
+    integerMulAdx(b1,t0,t1) \
+    reduceFromDoubleAdx(x1,b0) \
+    reduceFromDoubleAdx(z1,b1)
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s
new file mode 100644
index 00000000..ce9f0628
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s
@@ -0,0 +1,157 @@
+//go:build amd64 && !purego
+// +build amd64,!purego
+
+#include "textflag.h"
+
+// Depends on circl/math/fp25519 package
+#include "../../math/fp25519/fp_amd64.h"
+#include "curve_amd64.h"
+
+// CTE_A24 is (A+2)/4 from Curve25519
+#define CTE_A24 121666
+
+#define Size 32
+
+// multiplyA24Leg multiplies x times CTE_A24 and stores in z
+// Uses: AX, DX, R8-R13, FLAGS
+// Instr: x86_64, cmov
+#define multiplyA24Leg(z,x) \
+    MOVL $CTE_A24, AX; MULQ 0+x; MOVQ AX, R8; MOVQ DX, R9; \
+    MOVL $CTE_A24, AX; MULQ 8+x; MOVQ AX, R12; MOVQ DX, R10; \
+    MOVL $CTE_A24, AX; MULQ 16+x; MOVQ AX, R13; MOVQ DX, R11; \
+    MOVL $CTE_A24, AX; MULQ 24+x; \
+    ADDQ R12, R9; \
+    ADCQ R13, R10; \
+    ADCQ AX, R11; \
+    ADCQ $0, DX; \
+    MOVL $38, AX; /* 2*C = 38 = 2^256 MOD 2^255-19*/ \
+    IMULQ AX, DX; \
+    ADDQ DX, R8; \
+    ADCQ $0, R9; MOVQ R9, 8+z; \
+    ADCQ $0, R10; MOVQ R10, 16+z; \
+    ADCQ $0, R11; MOVQ R11, 24+z; \
+    MOVQ $0, DX; \
+    CMOVQCS AX, DX; \
+    ADDQ DX, R8; MOVQ R8, 0+z;
+
+// multiplyA24Adx multiplies x times CTE_A24 and stores in z
+// Uses: AX, DX, R8-R12, FLAGS
+// Instr: x86_64, cmov, bmi2
+#define multiplyA24Adx(z,x) \
+    MOVQ $CTE_A24, DX; \
+    MULXQ 0+x, R8, R10; \
+    MULXQ 8+x, R9, R11; ADDQ R10, R9; \
+    MULXQ 16+x, R10, AX; ADCQ R11, R10; \
+    MULXQ 24+x, R11, R12; ADCQ AX, R11; \
+    ;;;;;;;;;;;;;;;;;;;;; ADCQ $0, R12; \
+    MOVL $38, DX; /* 2*C = 38 = 2^256 MOD 2^255-19*/ \
+    IMULQ DX, R12; \
+    ADDQ R12, R8; \
+    ADCQ $0, R9; MOVQ R9, 8+z; \
+    ADCQ $0, R10; MOVQ R10, 16+z; \
+    ADCQ $0, R11; MOVQ R11, 24+z; \
+    MOVQ $0, R12; \
+    CMOVQCS DX, R12; \
+    ADDQ R12, R8; MOVQ R8, 0+z;
+
+#define mulA24Legacy \
+    multiplyA24Leg(0(DI),0(SI))
+#define mulA24Bmi2Adx \
+    multiplyA24Adx(0(DI),0(SI))
+
+// func mulA24Amd64(z, x *fp255.Elt)
+TEXT ·mulA24Amd64(SB),NOSPLIT,$0-16
+    MOVQ z+0(FP), DI
+    MOVQ x+8(FP), SI
+    CHECK_BMI2ADX(LMA24, mulA24Legacy, mulA24Bmi2Adx)
+
+
+// func ladderStepAmd64(w *[5]fp255.Elt, b uint)
+// ladderStepAmd64 calculates a point addition and doubling as follows:
+// (x2,z2) = 2*(x2,z2) and (x3,z3) = (x2,z2)+(x3,z3) using as a difference (x1,-).
+// work  = (x1,x2,z2,x3,z3) are five fp255.Elt of 32 bytes.
+// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and
+//         (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes.
+TEXT ·ladderStepAmd64(SB),NOSPLIT,$192-16
+    // Parameters
+    #define regWork DI
+    #define regMove SI
+    #define x1 0*Size(regWork)
+    #define x2 1*Size(regWork)
+    #define z2 2*Size(regWork)
+    #define x3 3*Size(regWork)
+    #define z3 4*Size(regWork)
+    // Local variables
+    #define t0 0*Size(SP)
+    #define t1 1*Size(SP)
+    #define b0 2*Size(SP)
+    #define b1 4*Size(SP)
+    MOVQ w+0(FP), regWork
+    MOVQ b+8(FP), regMove
+    CHECK_BMI2ADX(LLADSTEP, ladderStepLeg, ladderStepBmi2Adx)
+    #undef regWork
+    #undef regMove
+    #undef x1
+    #undef x2
+    #undef z2
+    #undef x3
+    #undef z3
+    #undef t0
+    #undef t1
+    #undef b0
+    #undef b1
+
+// func diffAddAmd64(w *[5]fp255.Elt, b uint)
+// diffAddAmd64 calculates a differential point addition using a precomputed point.
+// (x1,z1) = (x1,z1)+(mu) using a difference point (x2,z2)
+// w = (mu,x1,z1,x2,z2) are five fp.Elt, and
+// stack = (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes.
+TEXT ·diffAddAmd64(SB),NOSPLIT,$128-16
+    // Parameters
+    #define regWork DI
+    #define regSwap SI
+    #define ui 0*Size(regWork)
+    #define x1 1*Size(regWork)
+    #define z1 2*Size(regWork)
+    #define x2 3*Size(regWork)
+    #define z2 4*Size(regWork)
+    // Local variables
+    #define b0 0*Size(SP)
+    #define b1 2*Size(SP)
+    MOVQ w+0(FP), regWork
+    MOVQ b+8(FP), regSwap
+    cswap(x1,x2,regSwap)
+    cswap(z1,z2,regSwap)
+    CHECK_BMI2ADX(LDIFADD, difAddLeg, difAddBmi2Adx)
+    #undef regWork
+    #undef regSwap
+    #undef ui
+    #undef x1
+    #undef z1
+    #undef x2
+    #undef z2
+    #undef b0
+    #undef b1
+
+// func doubleAmd64(x, z *fp255.Elt)
+// doubleAmd64 calculates a point doubling (x1,z1) = 2*(x1,z1).
+// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and
+//         (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes.
+TEXT ·doubleAmd64(SB),NOSPLIT,$192-16
+    // Parameters
+    #define x1 0(DI)
+    #define z1 0(SI)
+    // Local variables
+    #define t0 0*Size(SP)
+    #define t1 1*Size(SP)
+    #define b0 2*Size(SP)
+    #define b1 4*Size(SP)
+    MOVQ x+0(FP), DI
+    MOVQ z+8(FP), SI
+    CHECK_BMI2ADX(LDOUB,doubleLeg,doubleBmi2Adx)
+    #undef x1
+    #undef z1
+    #undef t0
+    #undef t1
+    #undef b0
+    #undef b1
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_generic.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve_generic.go
new file mode 100644
index 00000000..dae67ea3
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve_generic.go
@@ -0,0 +1,85 @@
+package x25519
+
+import (
+    "encoding/binary"
+    "math/bits"
+
+    fp "github.com/cloudflare/circl/math/fp25519"
+)
+
+func doubleGeneric(x, z *fp.Elt) {
+    t0, t1 := &fp.Elt{}, &fp.Elt{}
+    fp.AddSub(x, z)
+    fp.Sqr(x, x)
+    fp.Sqr(z, z)
+    fp.Sub(t0, x, z)
+    mulA24Generic(t1, t0)
+    fp.Add(t1, t1, z)
+    fp.Mul(x, x, z)
+    fp.Mul(z, t0, t1)
+}
+
+func diffAddGeneric(w *[5]fp.Elt, b uint) {
+    mu, x1, z1, x2, z2 := &w[0], &w[1], &w[2], &w[3], &w[4]
+    fp.Cswap(x1, x2, b)
+    fp.Cswap(z1, z2, b)
+    fp.AddSub(x1, z1)
+    fp.Mul(z1, z1, mu)
+    fp.AddSub(x1, z1)
+    fp.Sqr(x1, x1)
+    fp.Sqr(z1, z1)
+    fp.Mul(x1, x1, z2)
+    fp.Mul(z1, z1, x2)
+}
+
+func ladderStepGeneric(w *[5]fp.Elt, b uint) {
+    x1, x2, z2, x3, z3 := &w[0], &w[1], &w[2], &w[3], &w[4]
+    t0 := &fp.Elt{}
+    t1 := &fp.Elt{}
+    fp.AddSub(x2, z2)
+    fp.AddSub(x3, z3)
+    fp.Mul(t0, x2, z3)
+    fp.Mul(t1, x3, z2)
+    fp.AddSub(t0, t1)
+    fp.Cmov(x2, x3, b)
+    fp.Cmov(z2, z3, b)
+    fp.Sqr(x3, t0)
+    fp.Sqr(z3, t1)
+    fp.Mul(z3, x1, z3)
+    fp.Sqr(x2, x2)
+    fp.Sqr(z2, z2)
+    fp.Sub(t0, x2, z2)
+    mulA24Generic(t1, t0)
+    fp.Add(t1, t1, z2)
+    fp.Mul(x2, x2, z2)
+    fp.Mul(z2, t0, t1)
+}
+
+func mulA24Generic(z, x *fp.Elt) {
+    const A24 = 121666
+    const n = 8
+    var xx [4]uint64
+    for i := range xx {
+        xx[i] = binary.LittleEndian.Uint64(x[i*n : (i+1)*n])
+    }
+
+    h0, l0 := bits.Mul64(xx[0], A24)
+    h1, l1 := bits.Mul64(xx[1], A24)
+    h2, l2 := bits.Mul64(xx[2], A24)
+    h3, l3 := bits.Mul64(xx[3], A24)
+
+    var c3 uint64
+    l1, c0 := bits.Add64(h0, l1, 0)
+    l2, c1 := bits.Add64(h1, l2, c0)
+    l3, c2 := bits.Add64(h2, l3, c1)
+    l4, _ := bits.Add64(h3, 0, c2)
+    _, l4 = bits.Mul64(l4, 38)
+    l0, c0 = bits.Add64(l0, l4, 0)
+    xx[1], c1 = bits.Add64(l1, 0, c0)
+    xx[2], c2 = bits.Add64(l2, 0, c1)
+    xx[3], c3 = bits.Add64(l3, 0, c2)
+    xx[0], _ = bits.Add64(l0, (-c3)&38, 0)
+    for i := range xx {
+        binary.LittleEndian.PutUint64(z[i*n:(i+1)*n], xx[i])
+    }
+}
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_noasm.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve_noasm.go
new file mode 100644
index 00000000..07fab97d
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve_noasm.go
@@ -0,0 +1,11 @@
+//go:build !amd64 || purego
+// +build !amd64 purego
+
+package x25519
+
+import fp "github.com/cloudflare/circl/math/fp25519"
+
+func double(x, z *fp.Elt)             { doubleGeneric(x, z) }
+func diffAdd(w *[5]fp.Elt, b uint)    { diffAddGeneric(w, b) }
+func ladderStep(w *[5]fp.Elt, b uint) { ladderStepGeneric(w, b) }
+func mulA24(z, x *fp.Elt)             { mulA24Generic(z, x) }
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/doc.go b/vendor/github.com/cloudflare/circl/dh/x25519/doc.go
new file mode 100644
index 00000000..3ce102d1
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/dh/x25519/doc.go
@@ -0,0 +1,19 @@
+/*
+Package x25519 provides Diffie-Hellman functions as specified in RFC-7748.
+
+Validation of public keys.
+
+The Diffie-Hellman function, as described in RFC-7748 [1], works for any
+public key. However, if a different protocol requires contributory
+behaviour [2,3], then the public keys must be validated against low-order
+points [3,4]. To do that, the Shared function performs this validation
+internally and returns false when the public key is invalid (i.e., it
+is a low-order point).
+
+References:
+  - [1] RFC7748 by Langley, Hamburg, Turner (https://rfc-editor.org/rfc/rfc7748.txt)
+  - [2] Curve25519 by Bernstein (https://cr.yp.to/ecdh.html)
+  - [3] Bernstein (https://cr.yp.to/ecdh.html#validate)
+  - [4] Cremers&Jackson (https://eprint.iacr.org/2019/526)
+*/
+package x25519
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/key.go b/vendor/github.com/cloudflare/circl/dh/x25519/key.go
new file mode 100644
index 00000000..c76f72ac
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/dh/x25519/key.go
@@ -0,0 +1,47 @@
+package x25519
+
+import (
+    "crypto/subtle"
+
+    fp "github.com/cloudflare/circl/math/fp25519"
+)
+
+// Size is the length in bytes of a X25519 key.
+const Size = 32
+
+// Key represents a X25519 key.
+type Key [Size]byte
+
+func (k *Key) clamp(in *Key) *Key {
+    *k = *in
+    k[0] &= 248
+    k[31] = (k[31] & 127) | 64
+    return k
+}
+
+// isValidPubKey verifies if the public key is not a low-order point.
+func (k *Key) isValidPubKey() bool {
+    fp.Modp((*fp.Elt)(k))
+    var isLowOrder int
+    for _, P := range lowOrderPoints {
+        isLowOrder |= subtle.ConstantTimeCompare(P[:], k[:])
+    }
+    return isLowOrder == 0
+}
+
+// KeyGen obtains a public key given a secret key.
+func KeyGen(public, secret *Key) {
+    ladderJoye(public.clamp(secret))
+}
+
+// Shared calculates Alice's shared key from Alice's secret key and Bob's
+// public key returning true on success. A failure case happens when the public
+// key is a low-order point, thus the shared key is all-zeros and the function
+// returns false.
+func Shared(shared, secret, public *Key) bool {
+    validPk := *public
+    validPk[31] &= (1 << (255 % 8)) - 1
+    ok := validPk.isValidPubKey()
+    ladderMontgomery(shared.clamp(secret), &validPk)
+    return ok
+}
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/table.go b/vendor/github.com/cloudflare/circl/dh/x25519/table.go
new file mode 100644
index 00000000..28c8c4ac
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/dh/x25519/table.go
@@ -0,0 +1,268 @@
+package x25519
+
+import "github.com/cloudflare/circl/math/fp25519"
+
+// tableGenerator contains the set of points:
+//
+//	t[i] = (xi+1)/(xi-1),
+//
+// where (xi,yi) = 2^iG and G is the generator point
+// Size = (256)*(256/8) = 8192 bytes.
+var tableGenerator = [256 * fp25519.Size]byte{ + /* (2^ 0)P */ 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5f, + /* (2^ 1)P */ 0x96, 0xfe, 0xaa, 0x16, 0xf4, 0x20, 0x82, 0x6b, 0x34, 0x6a, 0x56, 0x4f, 0x2b, 0xeb, 0xeb, 0x82, 0x0f, 0x95, 0xa5, 0x75, 0xb0, 0xa5, 0xa9, 0xd5, 0xf4, 0x88, 0x24, 0x4b, 0xcf, 0xb2, 0x42, 0x51, + /* (2^ 2)P */ 0x0c, 0x68, 0x69, 0x00, 0x75, 0xbc, 0xae, 0x6a, 0x41, 0x9c, 0xf9, 0xa0, 0x20, 0x78, 0xcf, 0x89, 0xf4, 0xd0, 0x56, 0x3b, 0x18, 0xd9, 0x58, 0x2a, 0xa4, 0x11, 0x60, 0xe3, 0x80, 0xca, 0x5a, 0x4b, + /* (2^ 3)P */ 0x5d, 0x74, 0x29, 0x8c, 0x34, 0x32, 0x91, 0x32, 0xd7, 0x2f, 0x64, 0xe1, 0x16, 0xe6, 0xa2, 0xf4, 0x34, 0xbc, 0x67, 0xff, 0x03, 0xbb, 0x45, 0x1e, 0x4a, 0x9b, 0x2a, 0xf4, 0xd0, 0x12, 0x69, 0x30, + /* (2^ 4)P */ 0x54, 0x71, 0xaf, 0xe6, 0x07, 0x65, 0x88, 0xff, 0x2f, 0xc8, 0xee, 0xdf, 0x13, 0x0e, 0xf5, 0x04, 0xce, 0xb5, 0xba, 0x2a, 0xe8, 0x2f, 0x51, 0xaa, 0x22, 0xf2, 0xd5, 0x68, 0x1a, 0x25, 0x4e, 0x17, + /* (2^ 5)P */ 0x98, 0x88, 0x02, 0x82, 0x0d, 0x70, 0x96, 0xcf, 0xc5, 0x02, 0x2c, 0x0a, 0x37, 0xe3, 0x43, 0x17, 0xaa, 0x6e, 0xe8, 0xb4, 0x98, 0xec, 0x9e, 0x37, 0x2e, 0x48, 0xe0, 0x51, 0x8a, 0x88, 0x59, 0x0c, + /* (2^ 6)P */ 0x89, 0xd1, 0xb5, 0x99, 0xd6, 0xf1, 0xcb, 0xfb, 0x84, 0xdc, 0x9f, 0x8e, 0xd5, 0xf0, 0xae, 0xac, 0x14, 0x76, 0x1f, 0x23, 0x06, 0x0d, 0xc2, 0xc1, 0x72, 0xf9, 0x74, 0xa2, 0x8d, 0x21, 0x38, 0x29, + /* (2^ 7)P */ 0x18, 0x7f, 0x1d, 0xff, 0xbe, 0x49, 0xaf, 0xf6, 0xc2, 0xc9, 0x7a, 0x38, 0x22, 0x1c, 0x54, 0xcc, 0x6b, 0xc5, 0x15, 0x40, 0xef, 0xc9, 0xfc, 0x96, 0xa9, 0x13, 0x09, 0x69, 0x7c, 0x62, 0xc1, 0x69, + /* (2^ 8)P */ 0x0e, 0xdb, 0x33, 0x47, 0x2f, 0xfd, 0x86, 0x7a, 0xe9, 0x7d, 0x08, 0x9e, 0xf2, 0xc4, 0xb8, 0xfd, 0x29, 0xa2, 0xa2, 0x8e, 0x1a, 0x4b, 0x5e, 0x09, 0x79, 0x7a, 0xb3, 0x29, 0xc8, 0xa7, 0xd7, 0x1a, + /* (2^ 9)P */ 0xc0, 0xa0, 0x7e, 0xd1, 0xca, 0x89, 0x2d, 0x34, 0x51, 0x20, 0xed, 0xcc, 0xa6, 0xdd, 0xbe, 0x67, 0x74, 0x2f, 0xb4, 0x2b, 0xbf, 0x31, 0xca, 0x19, 0xbb, 0xac, 0x80, 0x49, 0xc8, 0xb4, 0xf7, 0x3d, + /* (2^ 10)P */ 0x83, 0xd8, 0x0a, 0xc8, 0x4d, 0x44, 0xc6, 0xa8, 0x85, 0xab, 0xe3, 0x66, 0x03, 0x44, 0x1e, 0xb9, 0xd8, 0xf6, 0x64, 0x01, 0xa0, 0xcd, 0x15, 0xc2, 0x68, 0xe6, 0x47, 0xf2, 0x6e, 0x7c, 0x86, 0x3d, + /* (2^ 11)P */ 0x8c, 0x65, 0x3e, 0xcc, 0x2b, 0x58, 0xdd, 0xc7, 0x28, 0x55, 0x0e, 0xee, 0x48, 0x47, 0x2c, 0xfd, 0x71, 0x4f, 0x9f, 0xcc, 0x95, 0x9b, 0xfd, 0xa0, 0xdf, 0x5d, 0x67, 0xb0, 0x71, 0xd8, 0x29, 0x75, + /* (2^ 12)P */ 0x78, 0xbd, 0x3c, 0x2d, 0xb4, 0x68, 0xf5, 0xb8, 0x82, 0xda, 0xf3, 0x91, 0x1b, 0x01, 0x33, 0x12, 0x62, 0x3b, 0x7c, 0x4a, 0xcd, 0x6c, 0xce, 0x2d, 0x03, 0x86, 0x49, 0x9e, 0x8e, 0xfc, 0xe7, 0x75, + /* (2^ 13)P */ 0xec, 0xb6, 0xd0, 0xfc, 0xf1, 0x13, 0x4f, 0x2f, 0x45, 0x7a, 0xff, 0x29, 0x1f, 0xca, 0xa8, 0xf1, 0x9b, 0xe2, 0x81, 0x29, 0xa7, 0xc1, 0x49, 0xc2, 0x6a, 0xb5, 0x83, 0x8c, 0xbb, 0x0d, 0xbe, 0x6e, + /* (2^ 14)P */ 0x22, 0xb2, 0x0b, 0x17, 0x8d, 0xfa, 0x14, 0x71, 0x5f, 0x93, 0x93, 0xbf, 0xd5, 0xdc, 0xa2, 0x65, 0x9a, 0x97, 0x9c, 0xb5, 0x68, 0x1f, 0xc4, 0xbd, 0x89, 0x92, 0xce, 0xa2, 0x79, 0xef, 0x0e, 0x2f, + /* (2^ 15)P */ 0xce, 0x37, 0x3c, 0x08, 0x0c, 0xbf, 0xec, 0x42, 0x22, 0x63, 0x49, 0xec, 0x09, 0xbc, 0x30, 0x29, 0x0d, 0xac, 0xfe, 0x9c, 0xc1, 0xb0, 0x94, 0xf2, 0x80, 0xbb, 0xfa, 0xed, 0x4b, 0xaa, 0x80, 0x37, + /* (2^ 16)P */ 0x29, 0xd9, 0xea, 0x7c, 0x3e, 0x7d, 0xc1, 0x56, 0xc5, 0x22, 0x57, 0x2e, 0xeb, 0x4b, 0xcb, 0xe7, 0x5a, 0xe1, 0xbf, 0x2d, 0x73, 0x31, 0xe9, 0x0c, 0xf8, 0x52, 
0x10, 0x62, 0xc7, 0x83, 0xb8, 0x41, + /* (2^ 17)P */ 0x50, 0x53, 0xd2, 0xc3, 0xa0, 0x5c, 0xf7, 0xdb, 0x51, 0xe3, 0xb1, 0x6e, 0x08, 0xbe, 0x36, 0x29, 0x12, 0xb2, 0xa9, 0xb4, 0x3c, 0xe0, 0x36, 0xc9, 0xaa, 0x25, 0x22, 0x32, 0x82, 0xbf, 0x45, 0x1d, + /* (2^ 18)P */ 0xc5, 0x4c, 0x02, 0x6a, 0x03, 0xb1, 0x1a, 0xe8, 0x72, 0x9a, 0x4c, 0x30, 0x1c, 0x20, 0x12, 0xe2, 0xfc, 0xb1, 0x32, 0x68, 0xba, 0x3f, 0xd7, 0xc5, 0x81, 0x95, 0x83, 0x4d, 0x5a, 0xdb, 0xff, 0x20, + /* (2^ 19)P */ 0xad, 0x0f, 0x5d, 0xbe, 0x67, 0xd3, 0x83, 0xa2, 0x75, 0x44, 0x16, 0x8b, 0xca, 0x25, 0x2b, 0x6c, 0x2e, 0xf2, 0xaa, 0x7c, 0x46, 0x35, 0x49, 0x9d, 0x49, 0xff, 0x85, 0xee, 0x8e, 0x40, 0x66, 0x51, + /* (2^ 20)P */ 0x61, 0xe3, 0xb4, 0xfa, 0xa2, 0xba, 0x67, 0x3c, 0xef, 0x5c, 0xf3, 0x7e, 0xc6, 0x33, 0xe4, 0xb3, 0x1c, 0x9b, 0x15, 0x41, 0x92, 0x72, 0x59, 0x52, 0x33, 0xab, 0xb0, 0xd5, 0x92, 0x18, 0x62, 0x6a, + /* (2^ 21)P */ 0xcb, 0xcd, 0x55, 0x75, 0x38, 0x4a, 0xb7, 0x20, 0x3f, 0x92, 0x08, 0x12, 0x0e, 0xa1, 0x2a, 0x53, 0xd1, 0x1d, 0x28, 0x62, 0x77, 0x7b, 0xa1, 0xea, 0xbf, 0x44, 0x5c, 0xf0, 0x43, 0x34, 0xab, 0x61, + /* (2^ 22)P */ 0xf8, 0xde, 0x24, 0x23, 0x42, 0x6c, 0x7a, 0x25, 0x7f, 0xcf, 0xe3, 0x17, 0x10, 0x6c, 0x1c, 0x13, 0x57, 0xa2, 0x30, 0xf6, 0x39, 0x87, 0x75, 0x23, 0x80, 0x85, 0xa7, 0x01, 0x7a, 0x40, 0x5a, 0x29, + /* (2^ 23)P */ 0xd9, 0xa8, 0x5d, 0x6d, 0x24, 0x43, 0xc4, 0xf8, 0x5d, 0xfa, 0x52, 0x0c, 0x45, 0x75, 0xd7, 0x19, 0x3d, 0xf8, 0x1b, 0x73, 0x92, 0xfc, 0xfc, 0x2a, 0x00, 0x47, 0x2b, 0x1b, 0xe8, 0xc8, 0x10, 0x7d, + /* (2^ 24)P */ 0x0b, 0xa2, 0xba, 0x70, 0x1f, 0x27, 0xe0, 0xc8, 0x57, 0x39, 0xa6, 0x7c, 0x86, 0x48, 0x37, 0x99, 0xbb, 0xd4, 0x7e, 0xcb, 0xb3, 0xef, 0x12, 0x54, 0x75, 0x29, 0xe6, 0x73, 0x61, 0xd3, 0x96, 0x31, + /* (2^ 25)P */ 0xfc, 0xdf, 0xc7, 0x41, 0xd1, 0xca, 0x5b, 0xde, 0x48, 0xc8, 0x95, 0xb3, 0xd2, 0x8c, 0xcc, 0x47, 0xcb, 0xf3, 0x1a, 0xe1, 0x42, 0xd9, 0x4c, 0xa3, 0xc2, 0xce, 0x4e, 0xd0, 0xf2, 0xdb, 0x56, 0x02, + /* (2^ 26)P */ 0x7f, 0x66, 0x0e, 0x4b, 0xe9, 0xb7, 0x5a, 0x87, 0x10, 0x0d, 0x85, 0xc0, 0x83, 0xdd, 0xd4, 0xca, 0x9f, 0xc7, 0x72, 0x4e, 0x8f, 0x2e, 0xf1, 0x47, 0x9b, 0xb1, 0x85, 0x8c, 0xbb, 0x87, 0x1a, 0x5f, + /* (2^ 27)P */ 0xb8, 0x51, 0x7f, 0x43, 0xb6, 0xd0, 0xe9, 0x7a, 0x65, 0x90, 0x87, 0x18, 0x55, 0xce, 0xc7, 0x12, 0xee, 0x7a, 0xf7, 0x5c, 0xfe, 0x09, 0xde, 0x2a, 0x27, 0x56, 0x2c, 0x7d, 0x2f, 0x5a, 0xa0, 0x23, + /* (2^ 28)P */ 0x9a, 0x16, 0x7c, 0xf1, 0x28, 0xe1, 0x08, 0x59, 0x2d, 0x85, 0xd0, 0x8a, 0xdd, 0x98, 0x74, 0xf7, 0x64, 0x2f, 0x10, 0xab, 0xce, 0xc4, 0xb4, 0x74, 0x45, 0x98, 0x13, 0x10, 0xdd, 0xba, 0x3a, 0x18, + /* (2^ 29)P */ 0xac, 0xaa, 0x92, 0xaa, 0x8d, 0xba, 0x65, 0xb1, 0x05, 0x67, 0x38, 0x99, 0x95, 0xef, 0xc5, 0xd5, 0xd1, 0x40, 0xfc, 0xf8, 0x0c, 0x8f, 0x2f, 0xbe, 0x14, 0x45, 0x20, 0xee, 0x35, 0xe6, 0x01, 0x27, + /* (2^ 30)P */ 0x14, 0x65, 0x15, 0x20, 0x00, 0xa8, 0x9f, 0x62, 0xce, 0xc1, 0xa8, 0x64, 0x87, 0x86, 0x23, 0xf2, 0x0e, 0x06, 0x3f, 0x0b, 0xff, 0x4f, 0x89, 0x5b, 0xfa, 0xa3, 0x08, 0xf7, 0x4c, 0x94, 0xd9, 0x60, + /* (2^ 31)P */ 0x1f, 0x20, 0x7a, 0x1c, 0x1a, 0x00, 0xea, 0xae, 0x63, 0xce, 0xe2, 0x3e, 0x63, 0x6a, 0xf1, 0xeb, 0xe1, 0x07, 0x7a, 0x4c, 0x59, 0x09, 0x77, 0x6f, 0xcb, 0x08, 0x02, 0x0d, 0x15, 0x58, 0xb9, 0x79, + /* (2^ 32)P */ 0xe7, 0x10, 0xd4, 0x01, 0x53, 0x5e, 0xb5, 0x24, 0x4d, 0xc8, 0xfd, 0xf3, 0xdf, 0x4e, 0xa3, 0xe3, 0xd8, 0x32, 0x40, 0x90, 0xe4, 0x68, 0x87, 0xd8, 0xec, 0xae, 0x3a, 0x7b, 0x42, 0x84, 0x13, 0x13, + /* (2^ 33)P */ 0x14, 0x4f, 0x23, 0x86, 0x12, 0xe5, 0x05, 0x84, 0x29, 0xc5, 0xb4, 0xad, 0x39, 0x47, 0xdc, 0x14, 0xfd, 0x4f, 0x63, 0x50, 0xb2, 0xb5, 0xa2, 0xb8, 0x93, 0xff, 
0xa7, 0xd8, 0x4a, 0xa9, 0xe2, 0x2f, + /* (2^ 34)P */ 0xdd, 0xfa, 0x43, 0xe8, 0xef, 0x57, 0x5c, 0xec, 0x18, 0x99, 0xbb, 0xf0, 0x40, 0xce, 0x43, 0x28, 0x05, 0x63, 0x3d, 0xcf, 0xd6, 0x61, 0xb5, 0xa4, 0x7e, 0x77, 0xfb, 0xe8, 0xbd, 0x29, 0x36, 0x74, + /* (2^ 35)P */ 0x8f, 0x73, 0xaf, 0xbb, 0x46, 0xdd, 0x3e, 0x34, 0x51, 0xa6, 0x01, 0xb1, 0x28, 0x18, 0x98, 0xed, 0x7a, 0x79, 0x2c, 0x88, 0x0b, 0x76, 0x01, 0xa4, 0x30, 0x87, 0xc8, 0x8d, 0xe2, 0x23, 0xc2, 0x1f, + /* (2^ 36)P */ 0x0e, 0xba, 0x0f, 0xfc, 0x91, 0x4e, 0x60, 0x48, 0xa4, 0x6f, 0x2c, 0x05, 0x8f, 0xf7, 0x37, 0xb6, 0x9c, 0x23, 0xe9, 0x09, 0x3d, 0xac, 0xcc, 0x91, 0x7c, 0x68, 0x7a, 0x43, 0xd4, 0xee, 0xf7, 0x23, + /* (2^ 37)P */ 0x00, 0xd8, 0x9b, 0x8d, 0x11, 0xb1, 0x73, 0x51, 0xa7, 0xd4, 0x89, 0x31, 0xb6, 0x41, 0xd6, 0x29, 0x86, 0xc5, 0xbb, 0x88, 0x79, 0x17, 0xbf, 0xfd, 0xf5, 0x1d, 0xd8, 0xca, 0x4f, 0x89, 0x59, 0x29, + /* (2^ 38)P */ 0x99, 0xc8, 0xbb, 0xb4, 0xf3, 0x8e, 0xbc, 0xae, 0xb9, 0x92, 0x69, 0xb2, 0x5a, 0x99, 0x48, 0x41, 0xfb, 0x2c, 0xf9, 0x34, 0x01, 0x0b, 0xe2, 0x24, 0xe8, 0xde, 0x05, 0x4a, 0x89, 0x58, 0xd1, 0x40, + /* (2^ 39)P */ 0xf6, 0x76, 0xaf, 0x85, 0x11, 0x0b, 0xb0, 0x46, 0x79, 0x7a, 0x18, 0x73, 0x78, 0xc7, 0xba, 0x26, 0x5f, 0xff, 0x8f, 0xab, 0x95, 0xbf, 0xc0, 0x3d, 0xd7, 0x24, 0x55, 0x94, 0xd8, 0x8b, 0x60, 0x2a, + /* (2^ 40)P */ 0x02, 0x63, 0x44, 0xbd, 0x88, 0x95, 0x44, 0x26, 0x9c, 0x43, 0x88, 0x03, 0x1c, 0xc2, 0x4b, 0x7c, 0xb2, 0x11, 0xbd, 0x83, 0xf3, 0xa4, 0x98, 0x8e, 0xb9, 0x76, 0xd8, 0xc9, 0x7b, 0x8d, 0x21, 0x26, + /* (2^ 41)P */ 0x8a, 0x17, 0x7c, 0x99, 0x42, 0x15, 0x08, 0xe3, 0x6f, 0x60, 0xb6, 0x6f, 0xa8, 0x29, 0x2d, 0x3c, 0x74, 0x93, 0x27, 0xfa, 0x36, 0x77, 0x21, 0x5c, 0xfa, 0xb1, 0xfe, 0x4a, 0x73, 0x05, 0xde, 0x7d, + /* (2^ 42)P */ 0xab, 0x2b, 0xd4, 0x06, 0x39, 0x0e, 0xf1, 0x3b, 0x9c, 0x64, 0x80, 0x19, 0x3e, 0x80, 0xf7, 0xe4, 0x7a, 0xbf, 0x95, 0x95, 0xf8, 0x3b, 0x05, 0xe6, 0x30, 0x55, 0x24, 0xda, 0x38, 0xaf, 0x4f, 0x39, + /* (2^ 43)P */ 0xf4, 0x28, 0x69, 0x89, 0x58, 0xfb, 0x8e, 0x7a, 0x3c, 0x11, 0x6a, 0xcc, 0xe9, 0x78, 0xc7, 0xfb, 0x6f, 0x59, 0xaf, 0x30, 0xe3, 0x0c, 0x67, 0x72, 0xf7, 0x6c, 0x3d, 0x1d, 0xa8, 0x22, 0xf2, 0x48, + /* (2^ 44)P */ 0xa7, 0xca, 0x72, 0x0d, 0x41, 0xce, 0x1f, 0xf0, 0x95, 0x55, 0x3b, 0x21, 0xc7, 0xec, 0x20, 0x5a, 0x83, 0x14, 0xfa, 0xc1, 0x65, 0x11, 0xc2, 0x7b, 0x41, 0xa7, 0xa8, 0x1d, 0xe3, 0x9a, 0xf8, 0x07, + /* (2^ 45)P */ 0xf9, 0x0f, 0x83, 0xc6, 0xb4, 0xc2, 0xd2, 0x05, 0x93, 0x62, 0x31, 0xc6, 0x0f, 0x33, 0x3e, 0xd4, 0x04, 0xa9, 0xd3, 0x96, 0x0a, 0x59, 0xa5, 0xa5, 0xb6, 0x33, 0x53, 0xa6, 0x91, 0xdb, 0x5e, 0x70, + /* (2^ 46)P */ 0xf7, 0xa5, 0xb9, 0x0b, 0x5e, 0xe1, 0x8e, 0x04, 0x5d, 0xaf, 0x0a, 0x9e, 0xca, 0xcf, 0x40, 0x32, 0x0b, 0xa4, 0xc4, 0xed, 0xce, 0x71, 0x4b, 0x8f, 0x6d, 0x4a, 0x54, 0xde, 0xa3, 0x0d, 0x1c, 0x62, + /* (2^ 47)P */ 0x91, 0x40, 0x8c, 0xa0, 0x36, 0x28, 0x87, 0x92, 0x45, 0x14, 0xc9, 0x10, 0xb0, 0x75, 0x83, 0xce, 0x94, 0x63, 0x27, 0x4f, 0x52, 0xeb, 0x72, 0x8a, 0x35, 0x36, 0xc8, 0x7e, 0xfa, 0xfc, 0x67, 0x26, + /* (2^ 48)P */ 0x2a, 0x75, 0xe8, 0x45, 0x33, 0x17, 0x4c, 0x7f, 0xa5, 0x79, 0x70, 0xee, 0xfe, 0x47, 0x1b, 0x06, 0x34, 0xff, 0x86, 0x9f, 0xfa, 0x9a, 0xdd, 0x25, 0x9c, 0xc8, 0x5d, 0x42, 0xf5, 0xce, 0x80, 0x37, + /* (2^ 49)P */ 0xe9, 0xb4, 0x3b, 0x51, 0x5a, 0x03, 0x46, 0x1a, 0xda, 0x5a, 0x57, 0xac, 0x79, 0xf3, 0x1e, 0x3e, 0x50, 0x4b, 0xa2, 0x5f, 0x1c, 0x5f, 0x8c, 0xc7, 0x22, 0x9f, 0xfd, 0x34, 0x76, 0x96, 0x1a, 0x32, + /* (2^ 50)P */ 0xfa, 0x27, 0x6e, 0x82, 0xb8, 0x07, 0x67, 0x94, 0xd0, 0x6f, 0x50, 0x4c, 0xd6, 0x84, 0xca, 0x3d, 0x36, 0x14, 0xe9, 0x75, 0x80, 0x21, 0x89, 0xc1, 0x84, 0x84, 
0x3b, 0x9b, 0x16, 0x84, 0x92, 0x6d, + /* (2^ 51)P */ 0xdf, 0x2d, 0x3f, 0x38, 0x40, 0xe8, 0x67, 0x3a, 0x75, 0x9b, 0x4f, 0x0c, 0xa3, 0xc9, 0xee, 0x33, 0x47, 0xef, 0x83, 0xa7, 0x6f, 0xc8, 0xc7, 0x3e, 0xc4, 0xfb, 0xc9, 0xba, 0x9f, 0x44, 0xec, 0x26, + /* (2^ 52)P */ 0x7d, 0x9e, 0x9b, 0xa0, 0xcb, 0x38, 0x0f, 0x5c, 0x8c, 0x47, 0xa3, 0x62, 0xc7, 0x8c, 0x16, 0x81, 0x1c, 0x12, 0xfc, 0x06, 0xd3, 0xb0, 0x23, 0x3e, 0xdd, 0xdc, 0xef, 0xa5, 0xa0, 0x8a, 0x23, 0x5a, + /* (2^ 53)P */ 0xff, 0x43, 0xea, 0xc4, 0x21, 0x61, 0xa2, 0x1b, 0xb5, 0x32, 0x88, 0x7c, 0x7f, 0xc7, 0xf8, 0x36, 0x9a, 0xf9, 0xdc, 0x0a, 0x0b, 0xea, 0xfb, 0x88, 0xf9, 0xeb, 0x5b, 0xc2, 0x8e, 0x93, 0xa9, 0x5c, + /* (2^ 54)P */ 0xa0, 0xcd, 0xfc, 0x51, 0x5e, 0x6a, 0x43, 0xd5, 0x3b, 0x89, 0xcd, 0xc2, 0x97, 0x47, 0xbc, 0x1d, 0x08, 0x4a, 0x22, 0xd3, 0x65, 0x6a, 0x34, 0x19, 0x66, 0xf4, 0x9a, 0x9b, 0xe4, 0x34, 0x50, 0x0f, + /* (2^ 55)P */ 0x6e, 0xb9, 0xe0, 0xa1, 0x67, 0x39, 0x3c, 0xf2, 0x88, 0x4d, 0x7a, 0x86, 0xfa, 0x08, 0x8b, 0xe5, 0x79, 0x16, 0x34, 0xa7, 0xc6, 0xab, 0x2f, 0xfb, 0x46, 0x69, 0x02, 0xb6, 0x1e, 0x38, 0x75, 0x2a, + /* (2^ 56)P */ 0xac, 0x20, 0x94, 0xc1, 0xe4, 0x3b, 0x0a, 0xc8, 0xdc, 0xb6, 0xf2, 0x81, 0xc6, 0xf6, 0xb1, 0x66, 0x88, 0x33, 0xe9, 0x61, 0x67, 0x03, 0xf7, 0x7c, 0xc4, 0xa4, 0x60, 0xa6, 0xd8, 0xbb, 0xab, 0x25, + /* (2^ 57)P */ 0x98, 0x51, 0xfd, 0x14, 0xba, 0x12, 0xea, 0x91, 0xa9, 0xff, 0x3c, 0x4a, 0xfc, 0x50, 0x49, 0x68, 0x28, 0xad, 0xf5, 0x30, 0x21, 0x84, 0x26, 0xf8, 0x41, 0xa4, 0x01, 0x53, 0xf7, 0x88, 0xa9, 0x3e, + /* (2^ 58)P */ 0x6f, 0x8c, 0x5f, 0x69, 0x9a, 0x10, 0x78, 0xc9, 0xf3, 0xc3, 0x30, 0x05, 0x4a, 0xeb, 0x46, 0x17, 0x95, 0x99, 0x45, 0xb4, 0x77, 0x6d, 0x4d, 0x44, 0xc7, 0x5c, 0x4e, 0x05, 0x8c, 0x2b, 0x95, 0x75, + /* (2^ 59)P */ 0xaa, 0xd6, 0xf4, 0x15, 0x79, 0x3f, 0x70, 0xa3, 0xd8, 0x47, 0x26, 0x2f, 0x20, 0x46, 0xc3, 0x66, 0x4b, 0x64, 0x1d, 0x81, 0xdf, 0x69, 0x14, 0xd0, 0x1f, 0xd7, 0xa5, 0x81, 0x7d, 0xa4, 0xfe, 0x77, + /* (2^ 60)P */ 0x81, 0xa3, 0x7c, 0xf5, 0x9e, 0x52, 0xe9, 0xc5, 0x1a, 0x88, 0x2f, 0xce, 0xb9, 0xb4, 0xee, 0x6e, 0xd6, 0x9b, 0x00, 0xe8, 0x28, 0x1a, 0xe9, 0xb6, 0xec, 0x3f, 0xfc, 0x9a, 0x3e, 0xbe, 0x80, 0x4b, + /* (2^ 61)P */ 0xc5, 0xd2, 0xae, 0x26, 0xc5, 0x73, 0x37, 0x7e, 0x9d, 0xa4, 0xc9, 0x53, 0xb4, 0xfc, 0x4a, 0x1b, 0x4d, 0xb2, 0xff, 0xba, 0xd7, 0xbd, 0x20, 0xa9, 0x0e, 0x40, 0x2d, 0x12, 0x9f, 0x69, 0x54, 0x7c, + /* (2^ 62)P */ 0xc8, 0x4b, 0xa9, 0x4f, 0xe1, 0xc8, 0x46, 0xef, 0x5e, 0xed, 0x52, 0x29, 0xce, 0x74, 0xb0, 0xe0, 0xd5, 0x85, 0xd8, 0xdb, 0xe1, 0x50, 0xa4, 0xbe, 0x2c, 0x71, 0x0f, 0x32, 0x49, 0x86, 0xb6, 0x61, + /* (2^ 63)P */ 0xd1, 0xbd, 0xcc, 0x09, 0x73, 0x5f, 0x48, 0x8a, 0x2d, 0x1a, 0x4d, 0x7d, 0x0d, 0x32, 0x06, 0xbd, 0xf4, 0xbe, 0x2d, 0x32, 0x73, 0x29, 0x23, 0x25, 0x70, 0xf7, 0x17, 0x8c, 0x75, 0xc4, 0x5d, 0x44, + /* (2^ 64)P */ 0x3c, 0x93, 0xc8, 0x7c, 0x17, 0x34, 0x04, 0xdb, 0x9f, 0x05, 0xea, 0x75, 0x21, 0xe8, 0x6f, 0xed, 0x34, 0xdb, 0x53, 0xc0, 0xfd, 0xbe, 0xfe, 0x1e, 0x99, 0xaf, 0x5d, 0xc6, 0x67, 0xe8, 0xdb, 0x4a, + /* (2^ 65)P */ 0xdf, 0x09, 0x06, 0xa9, 0xa2, 0x71, 0xcd, 0x3a, 0x50, 0x40, 0xd0, 0x6d, 0x85, 0x91, 0xe9, 0xe5, 0x3c, 0xc2, 0x57, 0x81, 0x68, 0x9b, 0xc6, 0x1e, 0x4d, 0xfe, 0x5c, 0x88, 0xf6, 0x27, 0x74, 0x69, + /* (2^ 66)P */ 0x51, 0xa8, 0xe1, 0x65, 0x9b, 0x7b, 0xbe, 0xd7, 0xdd, 0x36, 0xc5, 0x22, 0xd5, 0x28, 0x3d, 0xa0, 0x45, 0xb6, 0xd2, 0x8f, 0x65, 0x9d, 0x39, 0x28, 0xe1, 0x41, 0x26, 0x7c, 0xe1, 0xb7, 0xe5, 0x49, + /* (2^ 67)P */ 0xa4, 0x57, 0x04, 0x70, 0x98, 0x3a, 0x8c, 0x6f, 0x78, 0x67, 0xbb, 0x5e, 0xa2, 0xf0, 0x78, 0x50, 0x0f, 0x96, 0x82, 0xc3, 0xcb, 0x3c, 0x3c, 0xd1, 0xb1, 0x84, 
0xdf, 0xa7, 0x58, 0x32, 0x00, 0x2e, + /* (2^ 68)P */ 0x1c, 0x6a, 0x29, 0xe6, 0x9b, 0xf3, 0xd1, 0x8a, 0xb2, 0xbf, 0x5f, 0x2a, 0x65, 0xaa, 0xee, 0xc1, 0xcb, 0xf3, 0x26, 0xfd, 0x73, 0x06, 0xee, 0x33, 0xcc, 0x2c, 0x9d, 0xa6, 0x73, 0x61, 0x25, 0x59, + /* (2^ 69)P */ 0x41, 0xfc, 0x18, 0x4e, 0xaa, 0x07, 0xea, 0x41, 0x1e, 0xa5, 0x87, 0x7c, 0x52, 0x19, 0xfc, 0xd9, 0x6f, 0xca, 0x31, 0x58, 0x80, 0xcb, 0xaa, 0xbd, 0x4f, 0x69, 0x16, 0xc9, 0x2d, 0x65, 0x5b, 0x44, + /* (2^ 70)P */ 0x15, 0x23, 0x17, 0xf2, 0xa7, 0xa3, 0x92, 0xce, 0x64, 0x99, 0x1b, 0xe1, 0x2d, 0x28, 0xdc, 0x1e, 0x4a, 0x31, 0x4c, 0xe0, 0xaf, 0x3a, 0x82, 0xa1, 0x86, 0xf5, 0x7c, 0x43, 0x94, 0x2d, 0x0a, 0x79, + /* (2^ 71)P */ 0x09, 0xe0, 0xf6, 0x93, 0xfb, 0x47, 0xc4, 0x71, 0x76, 0x52, 0x84, 0x22, 0x67, 0xa5, 0x22, 0x89, 0x69, 0x51, 0x4f, 0x20, 0x3b, 0x90, 0x70, 0xbf, 0xfe, 0x19, 0xa3, 0x1b, 0x89, 0x89, 0x7a, 0x2f, + /* (2^ 72)P */ 0x0c, 0x14, 0xe2, 0x77, 0xb5, 0x8e, 0xa0, 0x02, 0xf4, 0xdc, 0x7b, 0x42, 0xd4, 0x4e, 0x9a, 0xed, 0xd1, 0x3c, 0x32, 0xe4, 0x44, 0xec, 0x53, 0x52, 0x5b, 0x35, 0xe9, 0x14, 0x3c, 0x36, 0x88, 0x3e, + /* (2^ 73)P */ 0x8c, 0x0b, 0x11, 0x77, 0x42, 0xc1, 0x66, 0xaa, 0x90, 0x33, 0xa2, 0x10, 0x16, 0x39, 0xe0, 0x1a, 0xa2, 0xc2, 0x3f, 0xc9, 0x12, 0xbd, 0x30, 0x20, 0xab, 0xc7, 0x55, 0x95, 0x57, 0x41, 0xe1, 0x3e, + /* (2^ 74)P */ 0x41, 0x7d, 0x6e, 0x6d, 0x3a, 0xde, 0x14, 0x92, 0xfe, 0x7e, 0xf1, 0x07, 0x86, 0xd8, 0xcd, 0x3c, 0x17, 0x12, 0xe1, 0xf8, 0x88, 0x12, 0x4f, 0x67, 0xd0, 0x93, 0x9f, 0x32, 0x0f, 0x25, 0x82, 0x56, + /* (2^ 75)P */ 0x6e, 0x39, 0x2e, 0x6d, 0x13, 0x0b, 0xf0, 0x6c, 0xbf, 0xde, 0x14, 0x10, 0x6f, 0xf8, 0x4c, 0x6e, 0x83, 0x4e, 0xcc, 0xbf, 0xb5, 0xb1, 0x30, 0x59, 0xb6, 0x16, 0xba, 0x8a, 0xb4, 0x69, 0x70, 0x04, + /* (2^ 76)P */ 0x93, 0x07, 0xb2, 0x69, 0xab, 0xe4, 0x4c, 0x0d, 0x9e, 0xfb, 0xd0, 0x97, 0x1a, 0xb9, 0x4d, 0xb2, 0x1d, 0xd0, 0x00, 0x4e, 0xf5, 0x50, 0xfa, 0xcd, 0xb5, 0xdd, 0x8b, 0x36, 0x85, 0x10, 0x1b, 0x22, + /* (2^ 77)P */ 0xd2, 0xd8, 0xe3, 0xb1, 0x68, 0x94, 0xe5, 0xe7, 0x93, 0x2f, 0x12, 0xbd, 0x63, 0x65, 0xc5, 0x53, 0x09, 0x3f, 0x66, 0xe0, 0x03, 0xa9, 0xe8, 0xee, 0x42, 0x3d, 0xbe, 0xcb, 0x62, 0xa6, 0xef, 0x61, + /* (2^ 78)P */ 0x2a, 0xab, 0x6e, 0xde, 0xdd, 0xdd, 0xf8, 0x2c, 0x31, 0xf2, 0x35, 0x14, 0xd5, 0x0a, 0xf8, 0x9b, 0x73, 0x49, 0xf0, 0xc9, 0xce, 0xda, 0xea, 0x5d, 0x27, 0x9b, 0xd2, 0x41, 0x5d, 0x5b, 0x27, 0x29, + /* (2^ 79)P */ 0x4f, 0xf1, 0xeb, 0x95, 0x08, 0x0f, 0xde, 0xcf, 0xa7, 0x05, 0x49, 0x05, 0x6b, 0xb9, 0xaa, 0xb9, 0xfd, 0x20, 0xc4, 0xa1, 0xd9, 0x0d, 0xe8, 0xca, 0xc7, 0xbb, 0x73, 0x16, 0x2f, 0xbf, 0x63, 0x0a, + /* (2^ 80)P */ 0x8c, 0xbc, 0x8f, 0x95, 0x11, 0x6e, 0x2f, 0x09, 0xad, 0x2f, 0x82, 0x04, 0xe8, 0x81, 0x2a, 0x67, 0x17, 0x25, 0xd5, 0x60, 0x15, 0x35, 0xc8, 0xca, 0xf8, 0x92, 0xf1, 0xc8, 0x22, 0x77, 0x3f, 0x6f, + /* (2^ 81)P */ 0xb7, 0x94, 0xe8, 0xc2, 0xcc, 0x90, 0xba, 0xf8, 0x0d, 0x9f, 0xff, 0x38, 0xa4, 0x57, 0x75, 0x2c, 0x59, 0x23, 0xe5, 0x5a, 0x85, 0x1d, 0x4d, 0x89, 0x69, 0x3d, 0x74, 0x7b, 0x15, 0x22, 0xe1, 0x68, + /* (2^ 82)P */ 0xf3, 0x19, 0xb9, 0xcf, 0x70, 0x55, 0x7e, 0xd8, 0xb9, 0x8d, 0x79, 0x95, 0xcd, 0xde, 0x2c, 0x3f, 0xce, 0xa2, 0xc0, 0x10, 0x47, 0x15, 0x21, 0x21, 0xb2, 0xc5, 0x6d, 0x24, 0x15, 0xa1, 0x66, 0x3c, + /* (2^ 83)P */ 0x72, 0xcb, 0x4e, 0x29, 0x62, 0xc5, 0xed, 0xcb, 0x16, 0x0b, 0x28, 0x6a, 0xc3, 0x43, 0x71, 0xba, 0x67, 0x8b, 0x07, 0xd4, 0xef, 0xc2, 0x10, 0x96, 0x1e, 0x4b, 0x6a, 0x94, 0x5d, 0x73, 0x44, 0x61, + /* (2^ 84)P */ 0x50, 0x33, 0x5b, 0xd7, 0x1e, 0x11, 0x6f, 0x53, 0x1b, 0xd8, 0x41, 0x20, 0x8c, 0xdb, 0x11, 0x02, 0x3c, 0x41, 0x10, 0x0e, 0x00, 0xb1, 0x3c, 0xf9, 0x76, 0x88, 
0x9e, 0x03, 0x3c, 0xfd, 0x9d, 0x14, + /* (2^ 85)P */ 0x5b, 0x15, 0x63, 0x6b, 0xe4, 0xdd, 0x79, 0xd4, 0x76, 0x79, 0x83, 0x3c, 0xe9, 0x15, 0x6e, 0xb6, 0x38, 0xe0, 0x13, 0x1f, 0x3b, 0xe4, 0xfd, 0xda, 0x35, 0x0b, 0x4b, 0x2e, 0x1a, 0xda, 0xaf, 0x5f, + /* (2^ 86)P */ 0x81, 0x75, 0x19, 0x17, 0xdf, 0xbb, 0x00, 0x36, 0xc2, 0xd2, 0x3c, 0xbe, 0x0b, 0x05, 0x72, 0x39, 0x86, 0xbe, 0xd5, 0xbd, 0x6d, 0x90, 0x38, 0x59, 0x0f, 0x86, 0x9b, 0x3f, 0xe4, 0xe5, 0xfc, 0x34, + /* (2^ 87)P */ 0x02, 0x4d, 0xd1, 0x42, 0xcd, 0xa4, 0xa8, 0x75, 0x65, 0xdf, 0x41, 0x34, 0xc5, 0xab, 0x8d, 0x82, 0xd3, 0x31, 0xe1, 0xd2, 0xed, 0xab, 0xdc, 0x33, 0x5f, 0xd2, 0x14, 0xb8, 0x6f, 0xd7, 0xba, 0x3e, + /* (2^ 88)P */ 0x0f, 0xe1, 0x70, 0x6f, 0x56, 0x6f, 0x90, 0xd4, 0x5a, 0x0f, 0x69, 0x51, 0xaa, 0xf7, 0x12, 0x5d, 0xf2, 0xfc, 0xce, 0x76, 0x6e, 0xb1, 0xad, 0x45, 0x99, 0x29, 0x23, 0xad, 0xae, 0x68, 0xf7, 0x01, + /* (2^ 89)P */ 0xbd, 0xfe, 0x48, 0x62, 0x7b, 0xc7, 0x6c, 0x2b, 0xfd, 0xaf, 0x3a, 0xec, 0x28, 0x06, 0xd3, 0x3c, 0x6a, 0x48, 0xef, 0xd4, 0x80, 0x0b, 0x1c, 0xce, 0x23, 0x6c, 0xf6, 0xa6, 0x2e, 0xff, 0x3b, 0x4c, + /* (2^ 90)P */ 0x5f, 0xeb, 0xea, 0x4a, 0x09, 0xc4, 0x2e, 0x3f, 0xa7, 0x2c, 0x37, 0x6e, 0x28, 0x9b, 0xb1, 0x61, 0x1d, 0x70, 0x2a, 0xde, 0x66, 0xa9, 0xef, 0x5e, 0xef, 0xe3, 0x55, 0xde, 0x65, 0x05, 0xb2, 0x23, + /* (2^ 91)P */ 0x57, 0x85, 0xd5, 0x79, 0x52, 0xca, 0x01, 0xe3, 0x4f, 0x87, 0xc2, 0x27, 0xce, 0xd4, 0xb2, 0x07, 0x67, 0x1d, 0xcf, 0x9d, 0x8a, 0xcd, 0x32, 0xa5, 0x56, 0xff, 0x2b, 0x3f, 0xe2, 0xfe, 0x52, 0x2a, + /* (2^ 92)P */ 0x3d, 0x66, 0xd8, 0x7c, 0xb3, 0xef, 0x24, 0x86, 0x94, 0x75, 0xbd, 0xff, 0x20, 0xac, 0xc7, 0xbb, 0x45, 0x74, 0xd3, 0x82, 0x9c, 0x5e, 0xb8, 0x57, 0x66, 0xec, 0xa6, 0x86, 0xcb, 0x52, 0x30, 0x7b, + /* (2^ 93)P */ 0x1e, 0xe9, 0x25, 0x25, 0xad, 0xf0, 0x82, 0x34, 0xa0, 0xdc, 0x8e, 0xd2, 0x43, 0x80, 0xb6, 0x2c, 0x3a, 0x00, 0x1b, 0x2e, 0x05, 0x6d, 0x4f, 0xaf, 0x0a, 0x1b, 0x78, 0x29, 0x25, 0x8c, 0x5f, 0x18, + /* (2^ 94)P */ 0xd6, 0xe0, 0x0c, 0xd8, 0x5b, 0xde, 0x41, 0xaa, 0xd6, 0xe9, 0x53, 0x68, 0x41, 0xb2, 0x07, 0x94, 0x3a, 0x4c, 0x7f, 0x35, 0x6e, 0xc3, 0x3e, 0x56, 0xce, 0x7b, 0x29, 0x0e, 0xdd, 0xb8, 0xc4, 0x4c, + /* (2^ 95)P */ 0x0e, 0x73, 0xb8, 0xff, 0x52, 0x1a, 0xfc, 0xa2, 0x37, 0x8e, 0x05, 0x67, 0x6e, 0xf1, 0x11, 0x18, 0xe1, 0x4e, 0xdf, 0xcd, 0x66, 0xa3, 0xf9, 0x10, 0x99, 0xf0, 0xb9, 0xa0, 0xc4, 0xa0, 0xf4, 0x72, + /* (2^ 96)P */ 0xa7, 0x4e, 0x3f, 0x66, 0x6f, 0xc0, 0x16, 0x8c, 0xba, 0x0f, 0x97, 0x4e, 0xf7, 0x3a, 0x3b, 0x69, 0x45, 0xc3, 0x9e, 0xd6, 0xf1, 0xe7, 0x02, 0x21, 0x89, 0x80, 0x8a, 0x96, 0xbc, 0x3c, 0xa5, 0x0b, + /* (2^ 97)P */ 0x37, 0x55, 0xa1, 0xfe, 0xc7, 0x9d, 0x3d, 0xca, 0x93, 0x64, 0x53, 0x51, 0xbb, 0x24, 0x68, 0x4c, 0xb1, 0x06, 0x40, 0x84, 0x14, 0x63, 0x88, 0xb9, 0x60, 0xcc, 0x54, 0xb4, 0x2a, 0xa7, 0xd2, 0x40, + /* (2^ 98)P */ 0x75, 0x09, 0x57, 0x12, 0xb7, 0xa1, 0x36, 0x59, 0x57, 0xa6, 0xbd, 0xde, 0x48, 0xd6, 0xb9, 0x91, 0xea, 0x30, 0x43, 0xb6, 0x4b, 0x09, 0x44, 0x33, 0xd0, 0x51, 0xee, 0x12, 0x0d, 0xa1, 0x6b, 0x00, + /* (2^ 99)P */ 0x58, 0x5d, 0xde, 0xf5, 0x68, 0x84, 0x22, 0x19, 0xb0, 0x05, 0xcc, 0x38, 0x4c, 0x2f, 0xb1, 0x0e, 0x90, 0x19, 0x60, 0xd5, 0x9d, 0x9f, 0x03, 0xa1, 0x0b, 0x0e, 0xff, 0x4f, 0xce, 0xd4, 0x02, 0x45, + /* (2^100)P */ 0x89, 0xc1, 0x37, 0x68, 0x10, 0x54, 0x20, 0xeb, 0x3c, 0xb9, 0xd3, 0x6d, 0x4c, 0x54, 0xf6, 0xd0, 0x4f, 0xd7, 0x16, 0xc4, 0x64, 0x70, 0x72, 0x40, 0xf0, 0x2e, 0x50, 0x4b, 0x11, 0xc6, 0x15, 0x6e, + /* (2^101)P */ 0x6b, 0xa7, 0xb1, 0xcf, 0x98, 0xa3, 0xf2, 0x4d, 0xb1, 0xf6, 0xf2, 0x19, 0x74, 0x6c, 0x25, 0x11, 0x43, 0x60, 0x6e, 0x06, 0x62, 0x79, 0x49, 0x4a, 0x44, 0x5b, 
0x35, 0x41, 0xab, 0x3a, 0x5b, 0x70, + /* (2^102)P */ 0xd8, 0xb1, 0x97, 0xd7, 0x36, 0xf5, 0x5e, 0x36, 0xdb, 0xf0, 0xdd, 0x22, 0xd6, 0x6b, 0x07, 0x00, 0x88, 0x5a, 0x57, 0xe0, 0xb0, 0x33, 0xbf, 0x3b, 0x4d, 0xca, 0xe4, 0xc8, 0x05, 0xaa, 0x77, 0x37, + /* (2^103)P */ 0x5f, 0xdb, 0x78, 0x55, 0xc8, 0x45, 0x27, 0x39, 0xe2, 0x5a, 0xae, 0xdb, 0x49, 0x41, 0xda, 0x6f, 0x67, 0x98, 0xdc, 0x8a, 0x0b, 0xb0, 0xf0, 0xb1, 0xa3, 0x1d, 0x6f, 0xd3, 0x37, 0x34, 0x96, 0x09, + /* (2^104)P */ 0x53, 0x38, 0xdc, 0xa5, 0x90, 0x4e, 0x82, 0x7e, 0xbd, 0x5c, 0x13, 0x1f, 0x64, 0xf6, 0xb5, 0xcc, 0xcc, 0x8f, 0xce, 0x87, 0x6c, 0xd8, 0x36, 0x67, 0x9f, 0x24, 0x04, 0x66, 0xe2, 0x3c, 0x5f, 0x62, + /* (2^105)P */ 0x3f, 0xf6, 0x02, 0x95, 0x05, 0xc8, 0x8a, 0xaf, 0x69, 0x14, 0x35, 0x2e, 0x0a, 0xe7, 0x05, 0x0c, 0x05, 0x63, 0x4b, 0x76, 0x9c, 0x2e, 0x29, 0x35, 0xc3, 0x3a, 0xe2, 0xc7, 0x60, 0x43, 0x39, 0x1a, + /* (2^106)P */ 0x64, 0x32, 0x18, 0x51, 0x32, 0xd5, 0xc6, 0xd5, 0x4f, 0xb7, 0xc2, 0x43, 0xbd, 0x5a, 0x06, 0x62, 0x9b, 0x3f, 0x97, 0x3b, 0xd0, 0xf5, 0xfb, 0xb5, 0x5e, 0x6e, 0x20, 0x61, 0x36, 0xda, 0xa3, 0x13, + /* (2^107)P */ 0xe5, 0x94, 0x5d, 0x72, 0x37, 0x58, 0xbd, 0xc6, 0xc5, 0x16, 0x50, 0x20, 0x12, 0x09, 0xe3, 0x18, 0x68, 0x3c, 0x03, 0x70, 0x15, 0xce, 0x88, 0x20, 0x87, 0x79, 0x83, 0x5c, 0x49, 0x1f, 0xba, 0x7f, + /* (2^108)P */ 0x9d, 0x07, 0xf9, 0xf2, 0x23, 0x74, 0x8c, 0x5a, 0xc5, 0x3f, 0x02, 0x34, 0x7b, 0x15, 0x35, 0x17, 0x51, 0xb3, 0xfa, 0xd2, 0x9a, 0xb4, 0xf9, 0xe4, 0x3c, 0xe3, 0x78, 0xc8, 0x72, 0xff, 0x91, 0x66, + /* (2^109)P */ 0x3e, 0xff, 0x5e, 0xdc, 0xde, 0x2a, 0x2c, 0x12, 0xf4, 0x6c, 0x95, 0xd8, 0xf1, 0x4b, 0xdd, 0xf8, 0xda, 0x5b, 0x9e, 0x9e, 0x5d, 0x20, 0x86, 0xeb, 0x43, 0xc7, 0x75, 0xd9, 0xb9, 0x92, 0x9b, 0x04, + /* (2^110)P */ 0x5a, 0xc0, 0xf6, 0xb0, 0x30, 0x97, 0x37, 0xa5, 0x53, 0xa5, 0xf3, 0xc6, 0xac, 0xff, 0xa0, 0x72, 0x6d, 0xcd, 0x0d, 0xb2, 0x34, 0x2c, 0x03, 0xb0, 0x4a, 0x16, 0xd5, 0x88, 0xbc, 0x9d, 0x0e, 0x47, + /* (2^111)P */ 0x47, 0xc0, 0x37, 0xa2, 0x0c, 0xf1, 0x9c, 0xb1, 0xa2, 0x81, 0x6c, 0x1f, 0x71, 0x66, 0x54, 0xb6, 0x43, 0x0b, 0xd8, 0x6d, 0xd1, 0x1b, 0x32, 0xb3, 0x8e, 0xbe, 0x5f, 0x0c, 0x60, 0x4f, 0xc1, 0x48, + /* (2^112)P */ 0x03, 0xc8, 0xa6, 0x4a, 0x26, 0x1c, 0x45, 0x66, 0xa6, 0x7d, 0xfa, 0xa4, 0x04, 0x39, 0x6e, 0xb6, 0x95, 0x83, 0x12, 0xb3, 0xb0, 0x19, 0x5f, 0xd4, 0x10, 0xbc, 0xc9, 0xc3, 0x27, 0x26, 0x60, 0x31, + /* (2^113)P */ 0x0d, 0xe1, 0xe4, 0x32, 0x48, 0xdc, 0x20, 0x31, 0xf7, 0x17, 0xc7, 0x56, 0x67, 0xc4, 0x20, 0xeb, 0x94, 0x02, 0x28, 0x67, 0x3f, 0x2e, 0xf5, 0x00, 0x09, 0xc5, 0x30, 0x47, 0xc1, 0x4f, 0x6d, 0x56, + /* (2^114)P */ 0x06, 0x72, 0x83, 0xfd, 0x40, 0x5d, 0x3a, 0x7e, 0x7a, 0x54, 0x59, 0x71, 0xdc, 0x26, 0xe9, 0xc1, 0x95, 0x60, 0x8d, 0xa6, 0xfb, 0x30, 0x67, 0x21, 0xa7, 0xce, 0x69, 0x3f, 0x84, 0xc3, 0xe8, 0x22, + /* (2^115)P */ 0x2b, 0x4b, 0x0e, 0x93, 0xe8, 0x74, 0xd0, 0x33, 0x16, 0x58, 0xd1, 0x84, 0x0e, 0x35, 0xe4, 0xb6, 0x65, 0x23, 0xba, 0xd6, 0x6a, 0xc2, 0x34, 0x55, 0xf3, 0xf3, 0xf1, 0x89, 0x2f, 0xc1, 0x73, 0x77, + /* (2^116)P */ 0xaa, 0x62, 0x79, 0xa5, 0x4d, 0x40, 0xba, 0x8c, 0x56, 0xce, 0x99, 0x19, 0xa8, 0x97, 0x98, 0x5b, 0xfc, 0x92, 0x16, 0x12, 0x2f, 0x86, 0x8e, 0x50, 0x91, 0xc2, 0x93, 0xa0, 0x7f, 0x90, 0x81, 0x3a, + /* (2^117)P */ 0x10, 0xa5, 0x25, 0x47, 0xff, 0xd0, 0xde, 0x0d, 0x03, 0xc5, 0x3f, 0x67, 0x10, 0xcc, 0xd8, 0x10, 0x89, 0x4e, 0x1f, 0x9f, 0x1c, 0x15, 0x9d, 0x5b, 0x4c, 0xa4, 0x09, 0xcb, 0xd5, 0xc1, 0xa5, 0x32, + /* (2^118)P */ 0xfb, 0x41, 0x05, 0xb9, 0x42, 0xa4, 0x0a, 0x1e, 0xdb, 0x85, 0xb4, 0xc1, 0x7c, 0xeb, 0x85, 0x5f, 0xe5, 0xf2, 0x9d, 0x8a, 0xce, 0x95, 0xe5, 0xbe, 0x36, 0x22, 
0x42, 0x22, 0xc7, 0x96, 0xe4, 0x25, + /* (2^119)P */ 0xb9, 0xe5, 0x0f, 0xcd, 0x46, 0x3c, 0xdf, 0x5e, 0x88, 0x33, 0xa4, 0xd2, 0x7e, 0x5a, 0xe7, 0x34, 0x52, 0xe3, 0x61, 0xd7, 0x11, 0xde, 0x88, 0xe4, 0x5c, 0x54, 0x85, 0xa0, 0x01, 0x8a, 0x87, 0x0e, + /* (2^120)P */ 0x04, 0xbb, 0x21, 0xe0, 0x77, 0x3c, 0x49, 0xba, 0x9a, 0x89, 0xdf, 0xc7, 0x43, 0x18, 0x4d, 0x2b, 0x67, 0x0d, 0xe8, 0x7a, 0x48, 0x7a, 0xa3, 0x9e, 0x94, 0x17, 0xe4, 0x11, 0x80, 0x95, 0xa9, 0x67, + /* (2^121)P */ 0x65, 0xb0, 0x97, 0x66, 0x1a, 0x05, 0x58, 0x4b, 0xd4, 0xa6, 0x6b, 0x8d, 0x7d, 0x3f, 0xe3, 0x47, 0xc1, 0x46, 0xca, 0x83, 0xd4, 0xa8, 0x4d, 0xbb, 0x0d, 0xdb, 0xc2, 0x81, 0xa1, 0xca, 0xbe, 0x68, + /* (2^122)P */ 0xa5, 0x9a, 0x98, 0x0b, 0xe9, 0x80, 0x89, 0x8d, 0x9b, 0xc9, 0x93, 0x2c, 0x4a, 0xb1, 0x5e, 0xf9, 0xa2, 0x73, 0x6e, 0x79, 0xc4, 0xc7, 0xc6, 0x51, 0x69, 0xb5, 0xef, 0xb5, 0x63, 0x83, 0x22, 0x6e, + /* (2^123)P */ 0xc8, 0x24, 0xd6, 0x2d, 0xb0, 0xc0, 0xbb, 0xc6, 0xee, 0x70, 0x81, 0xec, 0x7d, 0xb4, 0x7e, 0x77, 0xa9, 0xaf, 0xcf, 0x04, 0xa0, 0x15, 0xde, 0x3c, 0x9b, 0xbf, 0x60, 0x71, 0x08, 0xbc, 0xc6, 0x1d, + /* (2^124)P */ 0x02, 0x40, 0xc3, 0xee, 0x43, 0xe0, 0x07, 0x2e, 0x7f, 0xdc, 0x68, 0x7a, 0x67, 0xfc, 0xe9, 0x18, 0x9a, 0x5b, 0xd1, 0x8b, 0x18, 0x03, 0xda, 0xd8, 0x53, 0x82, 0x56, 0x00, 0xbb, 0xc3, 0xfb, 0x48, + /* (2^125)P */ 0xe1, 0x4c, 0x65, 0xfb, 0x4c, 0x7d, 0x54, 0x57, 0xad, 0xe2, 0x58, 0xa0, 0x82, 0x5b, 0x56, 0xd3, 0x78, 0x44, 0x15, 0xbf, 0x0b, 0xaf, 0x3e, 0xf6, 0x18, 0xbb, 0xdf, 0x14, 0xf1, 0x1e, 0x53, 0x47, + /* (2^126)P */ 0x87, 0xc5, 0x78, 0x42, 0x0a, 0x63, 0xec, 0xe1, 0xf3, 0x83, 0x8e, 0xca, 0x46, 0xd5, 0x07, 0x55, 0x2b, 0x0c, 0xdc, 0x3a, 0xc6, 0x35, 0xe1, 0x85, 0x4e, 0x84, 0x82, 0x56, 0xa8, 0xef, 0xa7, 0x0a, + /* (2^127)P */ 0x15, 0xf6, 0xe1, 0xb3, 0xa8, 0x1b, 0x69, 0x72, 0xfa, 0x3f, 0xbe, 0x1f, 0x70, 0xe9, 0xb4, 0x32, 0x68, 0x78, 0xbb, 0x39, 0x2e, 0xd9, 0xb6, 0x97, 0xe8, 0x39, 0x2e, 0xa0, 0xde, 0x53, 0xfe, 0x2c, + /* (2^128)P */ 0xb0, 0x52, 0xcd, 0x85, 0xcd, 0x92, 0x73, 0x68, 0x31, 0x98, 0xe2, 0x10, 0xc9, 0x66, 0xff, 0x27, 0x06, 0x2d, 0x83, 0xa9, 0x56, 0x45, 0x13, 0x97, 0xa0, 0xf8, 0x84, 0x0a, 0x36, 0xb0, 0x9b, 0x26, + /* (2^129)P */ 0x5c, 0xf8, 0x43, 0x76, 0x45, 0x55, 0x6e, 0x70, 0x1b, 0x7d, 0x59, 0x9b, 0x8c, 0xa4, 0x34, 0x37, 0x72, 0xa4, 0xef, 0xc6, 0xe8, 0x91, 0xee, 0x7a, 0xe0, 0xd9, 0xa9, 0x98, 0xc1, 0xab, 0xd6, 0x5c, + /* (2^130)P */ 0x1a, 0xe4, 0x3c, 0xcb, 0x06, 0xde, 0x04, 0x0e, 0x38, 0xe1, 0x02, 0x34, 0x89, 0xeb, 0xc6, 0xd8, 0x72, 0x37, 0x6e, 0x68, 0xbb, 0x59, 0x46, 0x90, 0xc8, 0xa8, 0x6b, 0x74, 0x71, 0xc3, 0x15, 0x72, + /* (2^131)P */ 0xd9, 0xa2, 0xe4, 0xea, 0x7e, 0xa9, 0x12, 0xfd, 0xc5, 0xf2, 0x94, 0x63, 0x51, 0xb7, 0x14, 0x95, 0x94, 0xf2, 0x08, 0x92, 0x80, 0xd5, 0x6f, 0x26, 0xb9, 0x26, 0x9a, 0x61, 0x85, 0x70, 0x84, 0x5c, + /* (2^132)P */ 0xea, 0x94, 0xd6, 0xfe, 0x10, 0x54, 0x98, 0x52, 0x54, 0xd2, 0x2e, 0x4a, 0x93, 0x5b, 0x90, 0x3c, 0x67, 0xe4, 0x3b, 0x2d, 0x69, 0x47, 0xbb, 0x10, 0xe1, 0xe9, 0xe5, 0x69, 0x2d, 0x3d, 0x3b, 0x06, + /* (2^133)P */ 0xeb, 0x7d, 0xa5, 0xdd, 0xee, 0x26, 0x27, 0x47, 0x91, 0x18, 0xf4, 0x10, 0xae, 0xc4, 0xb6, 0xef, 0x14, 0x76, 0x30, 0x7b, 0x91, 0x41, 0x16, 0x2b, 0x7c, 0x5b, 0xf4, 0xc4, 0x4f, 0x55, 0x7c, 0x11, + /* (2^134)P */ 0x12, 0x88, 0x9d, 0x8f, 0x11, 0xf3, 0x7c, 0xc0, 0x39, 0x79, 0x01, 0x50, 0x20, 0xd8, 0xdb, 0x01, 0x27, 0x28, 0x1b, 0x17, 0xf4, 0x03, 0xe8, 0xd7, 0xea, 0x25, 0xd2, 0x87, 0x74, 0xe8, 0x15, 0x10, + /* (2^135)P */ 0x4d, 0xcc, 0x3a, 0xd2, 0xfe, 0xe3, 0x8d, 0xc5, 0x2d, 0xbe, 0xa7, 0x94, 0xc2, 0x91, 0xdb, 0x50, 0x57, 0xf4, 0x9c, 0x1c, 0x3d, 0xd4, 0x94, 0x0b, 0x4a, 0x52, 
0x37, 0x6e, 0xfa, 0x40, 0x16, 0x6b, + /* (2^136)P */ 0x09, 0x0d, 0xda, 0x5f, 0x6c, 0x34, 0x2f, 0x69, 0x51, 0x31, 0x4d, 0xfa, 0x59, 0x1c, 0x0b, 0x20, 0x96, 0xa2, 0x77, 0x07, 0x76, 0x6f, 0xc4, 0xb8, 0xcf, 0xfb, 0xfd, 0x3f, 0x5f, 0x39, 0x38, 0x4b, + /* (2^137)P */ 0x71, 0xd6, 0x54, 0xbe, 0x00, 0x5e, 0xd2, 0x18, 0xa6, 0xab, 0xc8, 0xbe, 0x82, 0x05, 0xd5, 0x60, 0x82, 0xb9, 0x78, 0x3b, 0x26, 0x8f, 0xad, 0x87, 0x32, 0x04, 0xda, 0x9c, 0x4e, 0xf6, 0xfd, 0x50, + /* (2^138)P */ 0xf0, 0xdc, 0x78, 0xc5, 0xaa, 0x67, 0xf5, 0x90, 0x3b, 0x13, 0xa3, 0xf2, 0x0e, 0x9b, 0x1e, 0xef, 0x71, 0xde, 0xd9, 0x42, 0x92, 0xba, 0xeb, 0x0e, 0xc7, 0x01, 0x31, 0xf0, 0x9b, 0x3c, 0x47, 0x15, + /* (2^139)P */ 0x95, 0x80, 0xb7, 0x56, 0xae, 0xe8, 0x77, 0x7c, 0x8e, 0x07, 0x6f, 0x6e, 0x66, 0xe7, 0x78, 0xb6, 0x1f, 0xba, 0x48, 0x53, 0x61, 0xb9, 0xa0, 0x2d, 0x0b, 0x3f, 0x73, 0xff, 0xc1, 0x31, 0xf9, 0x7c, + /* (2^140)P */ 0x6c, 0x36, 0x0a, 0x0a, 0xf5, 0x57, 0xb3, 0x26, 0x32, 0xd7, 0x87, 0x2b, 0xf4, 0x8c, 0x70, 0xe9, 0xc0, 0xb2, 0x1c, 0xf9, 0xa5, 0xee, 0x3a, 0xc1, 0x4c, 0xbb, 0x43, 0x11, 0x99, 0x0c, 0xd9, 0x35, + /* (2^141)P */ 0xdc, 0xd9, 0xa0, 0xa9, 0x04, 0xc4, 0xc1, 0x47, 0x51, 0xd2, 0x72, 0x19, 0x45, 0x58, 0x9e, 0x65, 0x31, 0x8c, 0xb3, 0x73, 0xc4, 0xa8, 0x75, 0x38, 0x24, 0x1f, 0x56, 0x79, 0xd3, 0x9e, 0xbd, 0x1f, + /* (2^142)P */ 0x8d, 0xc2, 0x1e, 0xd4, 0x6f, 0xbc, 0xfa, 0x11, 0xca, 0x2d, 0x2a, 0xcd, 0xe3, 0xdf, 0xf8, 0x7e, 0x95, 0x45, 0x40, 0x8c, 0x5d, 0x3b, 0xe7, 0x72, 0x27, 0x2f, 0xb7, 0x54, 0x49, 0xfa, 0x35, 0x61, + /* (2^143)P */ 0x9c, 0xb6, 0x24, 0xde, 0xa2, 0x32, 0xfc, 0xcc, 0x88, 0x5d, 0x09, 0x1f, 0x8c, 0x69, 0x55, 0x3f, 0x29, 0xf9, 0xc3, 0x5a, 0xed, 0x50, 0x33, 0xbe, 0xeb, 0x7e, 0x47, 0xca, 0x06, 0xf8, 0x9b, 0x5e, + /* (2^144)P */ 0x68, 0x9f, 0x30, 0x3c, 0xb6, 0x8f, 0xce, 0xe9, 0xf4, 0xf9, 0xe1, 0x65, 0x35, 0xf6, 0x76, 0x53, 0xf1, 0x93, 0x63, 0x5a, 0xb3, 0xcf, 0xaf, 0xd1, 0x06, 0x35, 0x62, 0xe5, 0xed, 0xa1, 0x32, 0x66, + /* (2^145)P */ 0x4c, 0xed, 0x2d, 0x0c, 0x39, 0x6c, 0x7d, 0x0b, 0x1f, 0xcb, 0x04, 0xdf, 0x81, 0x32, 0xcb, 0x56, 0xc7, 0xc3, 0xec, 0x49, 0x12, 0x5a, 0x30, 0x66, 0x2a, 0xa7, 0x8c, 0xa3, 0x60, 0x8b, 0x58, 0x5d, + /* (2^146)P */ 0x2d, 0xf4, 0xe5, 0xe8, 0x78, 0xbf, 0xec, 0xa6, 0xec, 0x3e, 0x8a, 0x3c, 0x4b, 0xb4, 0xee, 0x86, 0x04, 0x16, 0xd2, 0xfb, 0x48, 0x9c, 0x21, 0xec, 0x31, 0x67, 0xc3, 0x17, 0xf5, 0x1a, 0xaf, 0x1a, + /* (2^147)P */ 0xe7, 0xbd, 0x69, 0x67, 0x83, 0xa2, 0x06, 0xc3, 0xdb, 0x2a, 0x1e, 0x2b, 0x62, 0x80, 0x82, 0x20, 0xa6, 0x94, 0xff, 0xfb, 0x1f, 0xf5, 0x27, 0x80, 0x6b, 0xf2, 0x24, 0x11, 0xce, 0xa1, 0xcf, 0x76, + /* (2^148)P */ 0xb6, 0xab, 0x22, 0x24, 0x56, 0x00, 0xeb, 0x18, 0xc3, 0x29, 0x8c, 0x8f, 0xd5, 0xc4, 0x77, 0xf3, 0x1a, 0x56, 0x31, 0xf5, 0x07, 0xc2, 0xbb, 0x4d, 0x27, 0x8a, 0x12, 0x82, 0xf0, 0xb7, 0x53, 0x02, + /* (2^149)P */ 0xe0, 0x17, 0x2c, 0xb6, 0x1c, 0x09, 0x1f, 0x3d, 0xa9, 0x28, 0x46, 0xd6, 0xab, 0xe1, 0x60, 0x48, 0x53, 0x42, 0x9d, 0x30, 0x36, 0x74, 0xd1, 0x52, 0x76, 0xe5, 0xfa, 0x3e, 0xe1, 0x97, 0x6f, 0x35, + /* (2^150)P */ 0x5b, 0x53, 0x50, 0xa1, 0x1a, 0xe1, 0x51, 0xd3, 0xcc, 0x78, 0xd8, 0x1d, 0xbb, 0x45, 0x6b, 0x3e, 0x98, 0x2c, 0xd9, 0xbe, 0x28, 0x61, 0x77, 0x0c, 0xb8, 0x85, 0x28, 0x03, 0x93, 0xae, 0x34, 0x1d, + /* (2^151)P */ 0xc3, 0xa4, 0x5b, 0xa8, 0x8c, 0x48, 0xa0, 0x4b, 0xce, 0xe6, 0x9c, 0x3c, 0xc3, 0x48, 0x53, 0x98, 0x70, 0xa7, 0xbd, 0x97, 0x6f, 0x4c, 0x12, 0x66, 0x4a, 0x12, 0x54, 0x06, 0x29, 0xa0, 0x81, 0x0f, + /* (2^152)P */ 0xfd, 0x86, 0x9b, 0x56, 0xa6, 0x9c, 0xd0, 0x9e, 0x2d, 0x9a, 0xaf, 0x18, 0xfd, 0x09, 0x10, 0x81, 0x0a, 0xc2, 0xd8, 0x93, 0x3f, 0xd0, 0x08, 0xff, 0x6b, 0xf2, 
0xae, 0x9f, 0x19, 0x48, 0xa1, 0x52, + /* (2^153)P */ 0x73, 0x1b, 0x8d, 0x2d, 0xdc, 0xf9, 0x03, 0x3e, 0x70, 0x1a, 0x96, 0x73, 0x18, 0x80, 0x05, 0x42, 0x70, 0x59, 0xa3, 0x41, 0xf0, 0x87, 0xd9, 0xc0, 0x49, 0xd5, 0xc0, 0xa1, 0x15, 0x1f, 0xaa, 0x07, + /* (2^154)P */ 0x24, 0x72, 0xd2, 0x8c, 0xe0, 0x6c, 0xd4, 0xdf, 0x39, 0x42, 0x4e, 0x93, 0x4f, 0x02, 0x0a, 0x6d, 0x59, 0x7b, 0x89, 0x99, 0x63, 0x7a, 0x8a, 0x80, 0xa2, 0x95, 0x3d, 0xe1, 0xe9, 0x56, 0x45, 0x0a, + /* (2^155)P */ 0x45, 0x30, 0xc1, 0xe9, 0x1f, 0x99, 0x1a, 0xd2, 0xb8, 0x51, 0x77, 0xfe, 0x48, 0x85, 0x0e, 0x9b, 0x35, 0x00, 0xf3, 0x4b, 0xcb, 0x43, 0xa6, 0x5d, 0x21, 0xf7, 0x40, 0x39, 0xd6, 0x28, 0xdb, 0x77, + /* (2^156)P */ 0x11, 0x90, 0xdc, 0x4a, 0x61, 0xeb, 0x5e, 0xfc, 0xeb, 0x11, 0xc4, 0xe8, 0x9a, 0x41, 0x29, 0x52, 0x74, 0xcf, 0x1d, 0x7d, 0x78, 0xe7, 0xc3, 0x9e, 0xb5, 0x4c, 0x6e, 0x21, 0x3e, 0x05, 0x0d, 0x34, + /* (2^157)P */ 0xb4, 0xf2, 0x8d, 0xb4, 0x39, 0xaf, 0xc7, 0xca, 0x94, 0x0a, 0xa1, 0x71, 0x28, 0xec, 0xfa, 0xc0, 0xed, 0x75, 0xa5, 0x5c, 0x24, 0x69, 0x0a, 0x14, 0x4c, 0x3a, 0x27, 0x34, 0x71, 0xc3, 0xf1, 0x0c, + /* (2^158)P */ 0xa5, 0xb8, 0x24, 0xc2, 0x6a, 0x30, 0xee, 0xc8, 0xb0, 0x30, 0x49, 0xcb, 0x7c, 0xee, 0xea, 0x57, 0x4f, 0xe7, 0xcb, 0xaa, 0xbd, 0x06, 0xe8, 0xa1, 0x7d, 0x65, 0xeb, 0x2e, 0x74, 0x62, 0x9a, 0x7d, + /* (2^159)P */ 0x30, 0x48, 0x6c, 0x54, 0xef, 0xb6, 0xb6, 0x9e, 0x2e, 0x6e, 0xb3, 0xdd, 0x1f, 0xca, 0x5c, 0x88, 0x05, 0x71, 0x0d, 0xef, 0x83, 0xf3, 0xb9, 0xe6, 0x12, 0x04, 0x2e, 0x9d, 0xef, 0x4f, 0x65, 0x58, + /* (2^160)P */ 0x26, 0x8e, 0x0e, 0xbe, 0xff, 0xc4, 0x05, 0xa9, 0x6e, 0x81, 0x31, 0x9b, 0xdf, 0xe5, 0x2d, 0x94, 0xe1, 0x88, 0x2e, 0x80, 0x3f, 0x72, 0x7d, 0x49, 0x8d, 0x40, 0x2f, 0x60, 0xea, 0x4d, 0x68, 0x30, + /* (2^161)P */ 0x34, 0xcb, 0xe6, 0xa3, 0x78, 0xa2, 0xe5, 0x21, 0xc4, 0x1d, 0x15, 0x5b, 0x6f, 0x6e, 0xfb, 0xae, 0x15, 0xca, 0x77, 0x9d, 0x04, 0x8e, 0x0b, 0xb3, 0x81, 0x89, 0xb9, 0x53, 0xcf, 0xc9, 0xc3, 0x28, + /* (2^162)P */ 0x2a, 0xdd, 0x6c, 0x55, 0x21, 0xb7, 0x7f, 0x28, 0x74, 0x22, 0x02, 0x97, 0xa8, 0x7c, 0x31, 0x0d, 0x58, 0x32, 0x54, 0x3a, 0x42, 0xc7, 0x68, 0x74, 0x2f, 0x64, 0xb5, 0x4e, 0x46, 0x11, 0x7f, 0x4a, + /* (2^163)P */ 0xa6, 0x3a, 0x19, 0x4d, 0x77, 0xa4, 0x37, 0xa2, 0xa1, 0x29, 0x21, 0xa9, 0x6e, 0x98, 0x65, 0xd8, 0x88, 0x1a, 0x7c, 0xf8, 0xec, 0x15, 0xc5, 0x24, 0xeb, 0xf5, 0x39, 0x5f, 0x57, 0x03, 0x40, 0x60, + /* (2^164)P */ 0x27, 0x9b, 0x0a, 0x57, 0x89, 0xf1, 0xb9, 0x47, 0x78, 0x4b, 0x5e, 0x46, 0xde, 0xce, 0x98, 0x2b, 0x20, 0x5c, 0xb8, 0xdb, 0x51, 0xf5, 0x6d, 0x02, 0x01, 0x19, 0xe2, 0x47, 0x10, 0xd9, 0xfc, 0x74, + /* (2^165)P */ 0xa3, 0xbf, 0xc1, 0x23, 0x0a, 0xa9, 0xe2, 0x13, 0xf6, 0x19, 0x85, 0x47, 0x4e, 0x07, 0xb0, 0x0c, 0x44, 0xcf, 0xf6, 0x3a, 0xbe, 0xcb, 0xf1, 0x5f, 0xbe, 0x2d, 0x81, 0xbe, 0x38, 0x54, 0xfe, 0x67, + /* (2^166)P */ 0xb0, 0x05, 0x0f, 0xa4, 0x4f, 0xf6, 0x3c, 0xd1, 0x87, 0x37, 0x28, 0x32, 0x2f, 0xfb, 0x4d, 0x05, 0xea, 0x2a, 0x0d, 0x7f, 0x5b, 0x91, 0x73, 0x41, 0x4e, 0x0d, 0x61, 0x1f, 0x4f, 0x14, 0x2f, 0x48, + /* (2^167)P */ 0x34, 0x82, 0x7f, 0xb4, 0x01, 0x02, 0x21, 0xf6, 0x90, 0xb9, 0x70, 0x9e, 0x92, 0xe1, 0x0a, 0x5d, 0x7c, 0x56, 0x49, 0xb0, 0x55, 0xf4, 0xd7, 0xdc, 0x01, 0x6f, 0x91, 0xf0, 0xf1, 0xd0, 0x93, 0x7e, + /* (2^168)P */ 0xfa, 0xb4, 0x7d, 0x8a, 0xf1, 0xcb, 0x79, 0xdd, 0x2f, 0xc6, 0x74, 0x6f, 0xbf, 0x91, 0x83, 0xbe, 0xbd, 0x91, 0x82, 0x4b, 0xd1, 0x45, 0x71, 0x02, 0x05, 0x17, 0xbf, 0x2c, 0xea, 0x73, 0x5a, 0x58, + /* (2^169)P */ 0xb2, 0x0d, 0x8a, 0x92, 0x3e, 0xa0, 0x5c, 0x48, 0xe7, 0x57, 0x28, 0x74, 0xa5, 0x01, 0xfc, 0x10, 0xa7, 0x51, 0xd5, 0xd6, 0xdb, 0x2e, 0x48, 0x2f, 0x8a, 0xdb, 
0x8f, 0x04, 0xb5, 0x33, 0x04, 0x0f, + /* (2^170)P */ 0x47, 0x62, 0xdc, 0xd7, 0x8d, 0x2e, 0xda, 0x60, 0x9a, 0x81, 0xd4, 0x8c, 0xd3, 0xc9, 0xb4, 0x88, 0x97, 0x66, 0xf6, 0x01, 0xc0, 0x3a, 0x03, 0x13, 0x75, 0x7d, 0x36, 0x3b, 0xfe, 0x24, 0x3b, 0x27, + /* (2^171)P */ 0xd4, 0xb9, 0xb3, 0x31, 0x6a, 0xf6, 0xe8, 0xc6, 0xd5, 0x49, 0xdf, 0x94, 0xa4, 0x14, 0x15, 0x28, 0xa7, 0x3d, 0xb2, 0xc8, 0xdf, 0x6f, 0x72, 0xd1, 0x48, 0xe5, 0xde, 0x03, 0xd1, 0xe7, 0x3a, 0x4b, + /* (2^172)P */ 0x7e, 0x9d, 0x4b, 0xce, 0x19, 0x6e, 0x25, 0xc6, 0x1c, 0xc6, 0xe3, 0x86, 0xf1, 0x5c, 0x5c, 0xff, 0x45, 0xc1, 0x8e, 0x4b, 0xa3, 0x3c, 0xc6, 0xac, 0x74, 0x65, 0xe6, 0xfe, 0x88, 0x18, 0x62, 0x74, + /* (2^173)P */ 0x1e, 0x0a, 0x29, 0x45, 0x96, 0x40, 0x6f, 0x95, 0x2e, 0x96, 0x3a, 0x26, 0xe3, 0xf8, 0x0b, 0xef, 0x7b, 0x64, 0xc2, 0x5e, 0xeb, 0x50, 0x6a, 0xed, 0x02, 0x75, 0xca, 0x9d, 0x3a, 0x28, 0x94, 0x06, + /* (2^174)P */ 0xd1, 0xdc, 0xa2, 0x43, 0x36, 0x96, 0x9b, 0x76, 0x53, 0x53, 0xfc, 0x09, 0xea, 0xc8, 0xb7, 0x42, 0xab, 0x7e, 0x39, 0x13, 0xee, 0x2a, 0x00, 0x4f, 0x3a, 0xd6, 0xb7, 0x19, 0x2c, 0x5e, 0x00, 0x63, + /* (2^175)P */ 0xea, 0x3b, 0x02, 0x63, 0xda, 0x36, 0x67, 0xca, 0xb7, 0x99, 0x2a, 0xb1, 0x6d, 0x7f, 0x6c, 0x96, 0xe1, 0xc5, 0x37, 0xc5, 0x90, 0x93, 0xe0, 0xac, 0xee, 0x89, 0xaa, 0xa1, 0x63, 0x60, 0x69, 0x0b, + /* (2^176)P */ 0xe5, 0x56, 0x8c, 0x28, 0x97, 0x3e, 0xb0, 0xeb, 0xe8, 0x8b, 0x8c, 0x93, 0x9f, 0x9f, 0x2a, 0x43, 0x71, 0x7f, 0x71, 0x5b, 0x3d, 0xa9, 0xa5, 0xa6, 0x97, 0x9d, 0x8f, 0xe1, 0xc3, 0xb4, 0x5f, 0x1a, + /* (2^177)P */ 0xce, 0xcd, 0x60, 0x1c, 0xad, 0xe7, 0x94, 0x1c, 0xa0, 0xc4, 0x02, 0xfc, 0x43, 0x2a, 0x20, 0xee, 0x20, 0x6a, 0xc4, 0x67, 0xd8, 0xe4, 0xaf, 0x8d, 0x58, 0x7b, 0xc2, 0x8a, 0x3c, 0x26, 0x10, 0x0a, + /* (2^178)P */ 0x4a, 0x2a, 0x43, 0xe4, 0xdf, 0xa9, 0xde, 0xd0, 0xc5, 0x77, 0x92, 0xbe, 0x7b, 0xf8, 0x6a, 0x85, 0x1a, 0xc7, 0x12, 0xc2, 0xac, 0x72, 0x84, 0xce, 0x91, 0x1e, 0xbb, 0x9b, 0x6d, 0x1b, 0x15, 0x6f, + /* (2^179)P */ 0x6a, 0xd5, 0xee, 0x7c, 0x52, 0x6c, 0x77, 0x26, 0xec, 0xfa, 0xf8, 0xfb, 0xb7, 0x1c, 0x21, 0x7d, 0xcc, 0x09, 0x46, 0xfd, 0xa6, 0x66, 0xae, 0x37, 0x42, 0x0c, 0x77, 0xd2, 0x02, 0xb7, 0x81, 0x1f, + /* (2^180)P */ 0x92, 0x83, 0xc5, 0xea, 0x57, 0xb0, 0xb0, 0x2f, 0x9d, 0x4e, 0x74, 0x29, 0xfe, 0x89, 0xdd, 0xe1, 0xf8, 0xb4, 0xbe, 0x17, 0xeb, 0xf8, 0x64, 0xc9, 0x1e, 0xd4, 0xa2, 0xc9, 0x73, 0x10, 0x57, 0x29, + /* (2^181)P */ 0x54, 0xe2, 0xc0, 0x81, 0x89, 0xa1, 0x48, 0xa9, 0x30, 0x28, 0xb2, 0x65, 0x9b, 0x36, 0xf6, 0x2d, 0xc6, 0xd3, 0xcf, 0x5f, 0xd7, 0xb2, 0x3e, 0xa3, 0x1f, 0xa0, 0x99, 0x41, 0xec, 0xd6, 0x8c, 0x07, + /* (2^182)P */ 0x2f, 0x0d, 0x90, 0xad, 0x41, 0x4a, 0x58, 0x4a, 0x52, 0x4c, 0xc7, 0xe2, 0x78, 0x2b, 0x14, 0x32, 0x78, 0xc9, 0x31, 0x84, 0x33, 0xe8, 0xc4, 0x68, 0xc2, 0x9f, 0x68, 0x08, 0x90, 0xea, 0x69, 0x7f, + /* (2^183)P */ 0x65, 0x82, 0xa3, 0x46, 0x1e, 0xc8, 0xf2, 0x52, 0xfd, 0x32, 0xa8, 0x04, 0x2d, 0x07, 0x78, 0xfd, 0x94, 0x9e, 0x35, 0x25, 0xfa, 0xd5, 0xd7, 0x8c, 0xd2, 0x29, 0xcc, 0x54, 0x74, 0x1b, 0xe7, 0x4d, + /* (2^184)P */ 0xc9, 0x6a, 0xda, 0x1e, 0xad, 0x60, 0xeb, 0x42, 0x3a, 0x9c, 0xc0, 0xdb, 0xdf, 0x37, 0xad, 0x0a, 0x91, 0xc1, 0x3c, 0xe3, 0x71, 0x4b, 0x00, 0x81, 0x3c, 0x80, 0x22, 0x51, 0x34, 0xbe, 0xe6, 0x44, + /* (2^185)P */ 0xdb, 0x20, 0x19, 0xba, 0x88, 0x83, 0xfe, 0x03, 0x08, 0xb0, 0x0d, 0x15, 0x32, 0x7c, 0xd5, 0xf5, 0x29, 0x0c, 0xf6, 0x1a, 0x28, 0xc4, 0xc8, 0x49, 0xee, 0x1a, 0x70, 0xde, 0x18, 0xb5, 0xed, 0x21, + /* (2^186)P */ 0x99, 0xdc, 0x06, 0x8f, 0x41, 0x3e, 0xb6, 0x7f, 0xb8, 0xd7, 0x66, 0xc1, 0x99, 0x0d, 0x46, 0xa4, 0x83, 0x0a, 0x52, 0xce, 0x48, 0x52, 0xdd, 0x24, 0x58, 0x83, 
0x92, 0x2b, 0x71, 0xad, 0xc3, 0x5e, + /* (2^187)P */ 0x0f, 0x93, 0x17, 0xbd, 0x5f, 0x2a, 0x02, 0x15, 0xe3, 0x70, 0x25, 0xd8, 0x77, 0x4a, 0xf6, 0xa4, 0x12, 0x37, 0x78, 0x15, 0x69, 0x8d, 0xbc, 0x12, 0xbb, 0x0a, 0x62, 0xfc, 0xc0, 0x94, 0x81, 0x49, + /* (2^188)P */ 0x82, 0x6c, 0x68, 0x55, 0xd2, 0xd9, 0xa2, 0x38, 0xf0, 0x21, 0x3e, 0x19, 0xd9, 0x6b, 0x5c, 0x78, 0x84, 0x54, 0x4a, 0xb2, 0x1a, 0xc8, 0xd5, 0xe4, 0x89, 0x09, 0xe2, 0xb2, 0x60, 0x78, 0x30, 0x56, + /* (2^189)P */ 0xc4, 0x74, 0x4d, 0x8b, 0xf7, 0x55, 0x9d, 0x42, 0x31, 0x01, 0x35, 0x43, 0x46, 0x83, 0xf1, 0x22, 0xff, 0x1f, 0xc7, 0x98, 0x45, 0xc2, 0x60, 0x1e, 0xef, 0x83, 0x99, 0x97, 0x14, 0xf0, 0xf2, 0x59, + /* (2^190)P */ 0x44, 0x4a, 0x49, 0xeb, 0x56, 0x7d, 0xa4, 0x46, 0x8e, 0xa1, 0x36, 0xd6, 0x54, 0xa8, 0x22, 0x3e, 0x3b, 0x1c, 0x49, 0x74, 0x52, 0xe1, 0x46, 0xb3, 0xe7, 0xcd, 0x90, 0x53, 0x4e, 0xfd, 0xea, 0x2c, + /* (2^191)P */ 0x75, 0x66, 0x0d, 0xbe, 0x38, 0x85, 0x8a, 0xba, 0x23, 0x8e, 0x81, 0x50, 0xbb, 0x74, 0x90, 0x4b, 0xc3, 0x04, 0xd3, 0x85, 0x90, 0xb8, 0xda, 0xcb, 0xc4, 0x92, 0x61, 0xe5, 0xe0, 0x4f, 0xa2, 0x61, + /* (2^192)P */ 0xcb, 0x5b, 0x52, 0xdb, 0xe6, 0x15, 0x76, 0xcb, 0xca, 0xe4, 0x67, 0xa5, 0x35, 0x8c, 0x7d, 0xdd, 0x69, 0xdd, 0xfc, 0xca, 0x3a, 0x15, 0xb4, 0xe6, 0x66, 0x97, 0x3c, 0x7f, 0x09, 0x8e, 0x66, 0x2d, + /* (2^193)P */ 0xf0, 0x5e, 0xe5, 0x5c, 0x26, 0x7e, 0x7e, 0xa5, 0x67, 0xb9, 0xd4, 0x7c, 0x52, 0x4e, 0x9f, 0x5d, 0xe5, 0xd1, 0x2f, 0x49, 0x06, 0x36, 0xc8, 0xfb, 0xae, 0xf7, 0xc3, 0xb7, 0xbe, 0x52, 0x0d, 0x09, + /* (2^194)P */ 0x7c, 0x4d, 0x7b, 0x1e, 0x5a, 0x51, 0xb9, 0x09, 0xc0, 0x44, 0xda, 0x99, 0x25, 0x6a, 0x26, 0x1f, 0x04, 0x55, 0xc5, 0xe2, 0x48, 0x95, 0xc4, 0xa1, 0xcc, 0x15, 0x6f, 0x12, 0x87, 0x42, 0xf0, 0x7e, + /* (2^195)P */ 0x15, 0xef, 0x30, 0xbd, 0x9d, 0x65, 0xd1, 0xfe, 0x7b, 0x27, 0xe0, 0xc4, 0xee, 0xb9, 0x4a, 0x8b, 0x91, 0x32, 0xdf, 0xa5, 0x36, 0x62, 0x4d, 0x88, 0x88, 0xf7, 0x5c, 0xbf, 0xa6, 0x6e, 0xd9, 0x1f, + /* (2^196)P */ 0x9a, 0x0d, 0x19, 0x1f, 0x98, 0x61, 0xa1, 0x42, 0xc1, 0x52, 0x60, 0x7e, 0x50, 0x49, 0xd8, 0x61, 0xd5, 0x2c, 0x5a, 0x28, 0xbf, 0x13, 0xe1, 0x9f, 0xd8, 0x85, 0xad, 0xdb, 0x76, 0xd6, 0x22, 0x7c, + /* (2^197)P */ 0x7d, 0xd2, 0xfb, 0x2b, 0xed, 0x70, 0xe7, 0x82, 0xa5, 0xf5, 0x96, 0xe9, 0xec, 0xb2, 0x05, 0x4c, 0x50, 0x01, 0x90, 0xb0, 0xc2, 0xa9, 0x40, 0xcd, 0x64, 0xbf, 0xd9, 0x13, 0x92, 0x31, 0x95, 0x58, + /* (2^198)P */ 0x08, 0x2e, 0xea, 0x3f, 0x70, 0x5d, 0xcc, 0xe7, 0x8c, 0x18, 0xe2, 0x58, 0x12, 0x49, 0x0c, 0xb5, 0xf0, 0x5b, 0x20, 0x48, 0xaa, 0x0b, 0xe3, 0xcc, 0x62, 0x2d, 0xa3, 0xcf, 0x9c, 0x65, 0x7c, 0x53, + /* (2^199)P */ 0x88, 0xc0, 0xcf, 0x98, 0x3a, 0x62, 0xb6, 0x37, 0xa4, 0xac, 0xd6, 0xa4, 0x1f, 0xed, 0x9b, 0xfe, 0xb0, 0xd1, 0xa8, 0x56, 0x8e, 0x9b, 0xd2, 0x04, 0x75, 0x95, 0x51, 0x0b, 0xc4, 0x71, 0x5f, 0x72, + /* (2^200)P */ 0xe6, 0x9c, 0x33, 0xd0, 0x9c, 0xf8, 0xc7, 0x28, 0x8b, 0xc1, 0xdd, 0x69, 0x44, 0xb1, 0x67, 0x83, 0x2c, 0x65, 0xa1, 0xa6, 0x83, 0xda, 0x3a, 0x88, 0x17, 0x6c, 0x4d, 0x03, 0x74, 0x19, 0x5f, 0x58, + /* (2^201)P */ 0x88, 0x91, 0xb1, 0xf1, 0x66, 0xb2, 0xcf, 0x89, 0x17, 0x52, 0xc3, 0xe7, 0x63, 0x48, 0x3b, 0xe6, 0x6a, 0x52, 0xc0, 0xb4, 0xa6, 0x9d, 0x8c, 0xd8, 0x35, 0x46, 0x95, 0xf0, 0x9d, 0x5c, 0x03, 0x3e, + /* (2^202)P */ 0x9d, 0xde, 0x45, 0xfb, 0x12, 0x54, 0x9d, 0xdd, 0x0d, 0xf4, 0xcf, 0xe4, 0x32, 0x45, 0x68, 0xdd, 0x1c, 0x67, 0x1d, 0x15, 0x9b, 0x99, 0x5c, 0x4b, 0x90, 0xf6, 0xe7, 0x11, 0xc8, 0x2c, 0x8c, 0x2d, + /* (2^203)P */ 0x40, 0x5d, 0x05, 0x90, 0x1d, 0xbe, 0x54, 0x7f, 0x40, 0xaf, 0x4a, 0x46, 0xdf, 0xc5, 0x64, 0xa4, 0xbe, 0x17, 0xe9, 0xf0, 0x24, 0x96, 0x97, 0x33, 0x30, 0x6b, 
0x35, 0x27, 0xc5, 0x8d, 0x01, 0x2c, + /* (2^204)P */ 0xd4, 0xb3, 0x30, 0xe3, 0x24, 0x50, 0x41, 0xa5, 0xd3, 0x52, 0x16, 0x69, 0x96, 0x3d, 0xff, 0x73, 0xf1, 0x59, 0x9b, 0xef, 0xc4, 0x42, 0xec, 0x94, 0x5a, 0x8e, 0xd0, 0x18, 0x16, 0x20, 0x47, 0x07, + /* (2^205)P */ 0x53, 0x1c, 0x41, 0xca, 0x8a, 0xa4, 0x6c, 0x4d, 0x19, 0x61, 0xa6, 0xcf, 0x2f, 0x5f, 0x41, 0x66, 0xff, 0x27, 0xe2, 0x51, 0x00, 0xd4, 0x4d, 0x9c, 0xeb, 0xf7, 0x02, 0x9a, 0xc0, 0x0b, 0x81, 0x59, + /* (2^206)P */ 0x1d, 0x10, 0xdc, 0xb3, 0x71, 0xb1, 0x7e, 0x2a, 0x8e, 0xf6, 0xfe, 0x9f, 0xb9, 0x5a, 0x1c, 0x44, 0xea, 0x59, 0xb3, 0x93, 0x9b, 0x5c, 0x02, 0x32, 0x2f, 0x11, 0x9d, 0x1e, 0xa7, 0xe0, 0x8c, 0x5e, + /* (2^207)P */ 0xfd, 0x03, 0x95, 0x42, 0x92, 0xcb, 0xcc, 0xbf, 0x55, 0x5d, 0x09, 0x2f, 0x75, 0xba, 0x71, 0xd2, 0x1e, 0x09, 0x2d, 0x97, 0x5e, 0xad, 0x5e, 0x34, 0xba, 0x03, 0x31, 0xa8, 0x11, 0xdf, 0xc8, 0x18, + /* (2^208)P */ 0x4c, 0x0f, 0xed, 0x9a, 0x9a, 0x94, 0xcd, 0x90, 0x7e, 0xe3, 0x60, 0x66, 0xcb, 0xf4, 0xd1, 0xc5, 0x0b, 0x2e, 0xc5, 0x56, 0x2d, 0xc5, 0xca, 0xb8, 0x0d, 0x8e, 0x80, 0xc5, 0x00, 0xe4, 0x42, 0x6e, + /* (2^209)P */ 0x23, 0xfd, 0xae, 0xee, 0x66, 0x69, 0xb4, 0xa3, 0xca, 0xcd, 0x9e, 0xe3, 0x0b, 0x1f, 0x4f, 0x0c, 0x1d, 0xa5, 0x83, 0xd6, 0xc9, 0xc8, 0x9d, 0x18, 0x1b, 0x35, 0x09, 0x4c, 0x05, 0x7f, 0xf2, 0x51, + /* (2^210)P */ 0x82, 0x06, 0x32, 0x2a, 0xcd, 0x7c, 0x48, 0x4c, 0x96, 0x1c, 0xdf, 0xb3, 0x5b, 0xa9, 0x7e, 0x58, 0xe8, 0xb8, 0x5c, 0x55, 0x9e, 0xf7, 0xcc, 0xc8, 0x3d, 0xd7, 0x06, 0xa2, 0x29, 0xc8, 0x7d, 0x54, + /* (2^211)P */ 0x06, 0x9b, 0xc3, 0x80, 0xcd, 0xa6, 0x22, 0xb8, 0xc6, 0xd4, 0x00, 0x20, 0x73, 0x54, 0x6d, 0xe9, 0x4d, 0x3b, 0x46, 0x91, 0x6f, 0x5b, 0x53, 0x28, 0x1d, 0x6e, 0x48, 0xe2, 0x60, 0x46, 0x8f, 0x22, + /* (2^212)P */ 0xbf, 0x3a, 0x8d, 0xde, 0x38, 0x95, 0x79, 0x98, 0x6e, 0xca, 0xeb, 0x45, 0x00, 0x33, 0xd8, 0x8c, 0x38, 0xe7, 0x21, 0x82, 0x00, 0x2a, 0x95, 0x79, 0xbb, 0xd2, 0x5c, 0x53, 0xa7, 0xe1, 0x22, 0x43, + /* (2^213)P */ 0x1c, 0x80, 0xd1, 0x19, 0x18, 0xc1, 0x14, 0xb1, 0xc7, 0x5e, 0x3f, 0x4f, 0xd8, 0xe4, 0x16, 0x20, 0x4c, 0x0f, 0x26, 0x09, 0xf4, 0x2d, 0x0e, 0xdd, 0x66, 0x72, 0x5f, 0xae, 0xc0, 0x62, 0xc3, 0x5e, + /* (2^214)P */ 0xee, 0xb4, 0xb2, 0xb8, 0x18, 0x2b, 0x46, 0xc0, 0xfb, 0x1a, 0x4d, 0x27, 0x50, 0xd9, 0xc8, 0x7c, 0xd2, 0x02, 0x6b, 0x43, 0x05, 0x71, 0x5f, 0xf2, 0xd3, 0xcc, 0xf9, 0xbf, 0xdc, 0xf8, 0xbb, 0x43, + /* (2^215)P */ 0xdf, 0xe9, 0x39, 0xa0, 0x67, 0x17, 0xad, 0xb6, 0x83, 0x35, 0x9d, 0xf6, 0xa8, 0x4d, 0x71, 0xb0, 0xf5, 0x31, 0x29, 0xb4, 0x18, 0xfa, 0x55, 0x5e, 0x61, 0x09, 0xc6, 0x33, 0x8f, 0x55, 0xd5, 0x4e, + /* (2^216)P */ 0xdd, 0xa5, 0x47, 0xc6, 0x01, 0x79, 0xe3, 0x1f, 0x57, 0xd3, 0x81, 0x80, 0x1f, 0xdf, 0x3d, 0x59, 0xa6, 0xd7, 0x3f, 0x81, 0xfd, 0xa4, 0x49, 0x02, 0x61, 0xaf, 0x9c, 0x4e, 0x27, 0xca, 0xac, 0x69, + /* (2^217)P */ 0xc9, 0x21, 0x07, 0x33, 0xea, 0xa3, 0x7b, 0x04, 0xa0, 0x1e, 0x7e, 0x0e, 0xc2, 0x3f, 0x42, 0x83, 0x60, 0x4a, 0x31, 0x01, 0xaf, 0xc0, 0xf4, 0x1d, 0x27, 0x95, 0x28, 0x89, 0xab, 0x2d, 0xa6, 0x09, + /* (2^218)P */ 0x00, 0xcb, 0xc6, 0x9c, 0xa4, 0x25, 0xb3, 0xa5, 0xb6, 0x6c, 0xb5, 0x54, 0xc6, 0x5d, 0x4b, 0xe9, 0xa0, 0x94, 0xc9, 0xad, 0x79, 0x87, 0xe2, 0x3b, 0xad, 0x4a, 0x3a, 0xba, 0xf8, 0xe8, 0x96, 0x42, + /* (2^219)P */ 0xab, 0x1e, 0x45, 0x1e, 0x76, 0x89, 0x86, 0x32, 0x4a, 0x59, 0x59, 0xff, 0x8b, 0x59, 0x4d, 0x2e, 0x4a, 0x08, 0xa7, 0xd7, 0x53, 0x68, 0xb9, 0x49, 0xa8, 0x20, 0x14, 0x60, 0x19, 0xa3, 0x80, 0x49, + /* (2^220)P */ 0x42, 0x2c, 0x55, 0x2f, 0xe1, 0xb9, 0x65, 0x95, 0x96, 0xfe, 0x00, 0x71, 0xdb, 0x18, 0x53, 0x8a, 0xd7, 0xd0, 0xad, 0x43, 0x4d, 0x0b, 0xc9, 0x05, 0xda, 0x4e, 
0x5d, 0x6a, 0xd6, 0x4c, 0x8b, 0x53, + /* (2^221)P */ 0x9f, 0x03, 0x9f, 0xe8, 0xc3, 0x4f, 0xe9, 0xf4, 0x45, 0x80, 0x61, 0x6f, 0xf2, 0x9a, 0x2c, 0x59, 0x50, 0x95, 0x4b, 0xfd, 0xb5, 0x6e, 0xa3, 0x08, 0x19, 0x14, 0xed, 0xc2, 0xf6, 0xfa, 0xff, 0x25, + /* (2^222)P */ 0x54, 0xd3, 0x79, 0xcc, 0x59, 0x44, 0x43, 0x34, 0x6b, 0x47, 0xd5, 0xb1, 0xb4, 0xbf, 0xec, 0xee, 0x99, 0x5d, 0x61, 0x61, 0xa0, 0x34, 0xeb, 0xdd, 0x73, 0xb7, 0x64, 0xeb, 0xcc, 0xce, 0x29, 0x51, + /* (2^223)P */ 0x20, 0x35, 0x99, 0x94, 0x58, 0x21, 0x43, 0xee, 0x3b, 0x0b, 0x4c, 0xf1, 0x7c, 0x9c, 0x2f, 0x77, 0xd5, 0xda, 0xbe, 0x06, 0xe3, 0xfc, 0xe2, 0xd2, 0x97, 0x6a, 0xf0, 0x46, 0xb5, 0x42, 0x5f, 0x71, + /* (2^224)P */ 0x1a, 0x5f, 0x5b, 0xda, 0xce, 0xcd, 0x4e, 0x43, 0xa9, 0x41, 0x97, 0xa4, 0x15, 0x71, 0xa1, 0x0d, 0x2e, 0xad, 0xed, 0x73, 0x7c, 0xd7, 0x0b, 0x68, 0x41, 0x90, 0xdd, 0x4e, 0x35, 0x02, 0x7c, 0x48, + /* (2^225)P */ 0xc4, 0xd9, 0x0e, 0xa7, 0xf3, 0xef, 0xef, 0xb8, 0x02, 0xe3, 0x57, 0xe8, 0xa3, 0x2a, 0xa3, 0x56, 0xa0, 0xa5, 0xa2, 0x48, 0xbd, 0x68, 0x3a, 0xdf, 0x44, 0xc4, 0x76, 0x31, 0xb7, 0x50, 0xf6, 0x07, + /* (2^226)P */ 0xb1, 0xcc, 0xe0, 0x26, 0x16, 0x9b, 0x8b, 0xe3, 0x36, 0xfb, 0x09, 0x8b, 0xc1, 0x53, 0xe0, 0x79, 0x64, 0x49, 0xf9, 0xc9, 0x19, 0x03, 0xd9, 0x56, 0xc4, 0xf5, 0x9f, 0xac, 0xe7, 0x41, 0xa9, 0x1c, + /* (2^227)P */ 0xbb, 0xa0, 0x2f, 0x16, 0x29, 0xdf, 0xc4, 0x49, 0x05, 0x33, 0xb3, 0x82, 0x32, 0xcf, 0x88, 0x84, 0x7d, 0x43, 0xbb, 0xca, 0x14, 0xda, 0xdf, 0x95, 0x86, 0xad, 0xd5, 0x64, 0x82, 0xf7, 0x91, 0x33, + /* (2^228)P */ 0x5d, 0x09, 0xb5, 0xe2, 0x6a, 0xe0, 0x9a, 0x72, 0x46, 0xa9, 0x59, 0x32, 0xd7, 0x58, 0x8a, 0xd5, 0xed, 0x21, 0x39, 0xd1, 0x62, 0x42, 0x83, 0xe9, 0x92, 0xb5, 0x4b, 0xa5, 0xfa, 0xda, 0xfe, 0x27, + /* (2^229)P */ 0xbb, 0x48, 0xad, 0x29, 0xb8, 0xc5, 0x9d, 0xa9, 0x60, 0xe2, 0x9e, 0x49, 0x42, 0x57, 0x02, 0x5f, 0xfd, 0x13, 0x75, 0x5d, 0xcd, 0x8e, 0x2c, 0x80, 0x38, 0xd9, 0x6d, 0x3f, 0xef, 0xb3, 0xce, 0x78, + /* (2^230)P */ 0x94, 0x5d, 0x13, 0x8a, 0x4f, 0xf4, 0x42, 0xc3, 0xa3, 0xdd, 0x8c, 0x82, 0x44, 0xdb, 0x9e, 0x7b, 0xe7, 0xcf, 0x37, 0x05, 0x1a, 0xd1, 0x36, 0x94, 0xc8, 0xb4, 0x1a, 0xec, 0x64, 0xb1, 0x64, 0x50, + /* (2^231)P */ 0xfc, 0xb2, 0x7e, 0xd3, 0xcf, 0xec, 0x20, 0x70, 0xfc, 0x25, 0x0d, 0xd9, 0x3e, 0xea, 0x31, 0x1f, 0x34, 0xbb, 0xa1, 0xdf, 0x7b, 0x0d, 0x93, 0x1b, 0x44, 0x30, 0x11, 0x48, 0x7a, 0x46, 0x44, 0x53, + /* (2^232)P */ 0xfb, 0x6d, 0x5e, 0xf2, 0x70, 0x31, 0x07, 0x70, 0xc8, 0x4c, 0x11, 0x50, 0x1a, 0xdc, 0x85, 0xe3, 0x00, 0x4f, 0xfc, 0xc8, 0x8a, 0x69, 0x48, 0x23, 0xd8, 0x40, 0xdd, 0x84, 0x52, 0xa5, 0x77, 0x2a, + /* (2^233)P */ 0xe4, 0x6c, 0x8c, 0xc9, 0xe0, 0xaf, 0x06, 0xfe, 0xe4, 0xd6, 0xdf, 0xdd, 0x96, 0xdf, 0x35, 0xc2, 0xd3, 0x1e, 0xbf, 0x33, 0x1e, 0xd0, 0x28, 0x14, 0xaf, 0xbd, 0x00, 0x93, 0xec, 0x68, 0x57, 0x78, + /* (2^234)P */ 0x3b, 0xb6, 0xde, 0x91, 0x7a, 0xe5, 0x02, 0x97, 0x80, 0x8b, 0xce, 0xe5, 0xbf, 0xb8, 0xbd, 0x61, 0xac, 0x58, 0x1d, 0x3d, 0x6f, 0x42, 0x5b, 0x64, 0xbc, 0x57, 0xa5, 0x27, 0x22, 0xa8, 0x04, 0x48, + /* (2^235)P */ 0x01, 0x26, 0x4d, 0xb4, 0x8a, 0x04, 0x57, 0x8e, 0x35, 0x69, 0x3a, 0x4b, 0x1a, 0x50, 0xd6, 0x68, 0x93, 0xc2, 0xe1, 0xf9, 0xc3, 0x9e, 0x9c, 0xc3, 0xe2, 0x63, 0xde, 0xd4, 0x57, 0xf2, 0x72, 0x41, + /* (2^236)P */ 0x01, 0x64, 0x0c, 0x33, 0x50, 0xb4, 0x68, 0xd3, 0x91, 0x23, 0x8f, 0x41, 0x17, 0x30, 0x0d, 0x04, 0x0d, 0xd9, 0xb7, 0x90, 0x60, 0xbb, 0x34, 0x2c, 0x1f, 0xd5, 0xdf, 0x8f, 0x22, 0x49, 0xf6, 0x16, + /* (2^237)P */ 0xf5, 0x8e, 0x92, 0x2b, 0x8e, 0x81, 0xa6, 0xbe, 0x72, 0x1e, 0xc1, 0xcd, 0x91, 0xcf, 0x8c, 0xe2, 0xcd, 0x36, 0x7a, 0xe7, 0x68, 0xaa, 0x4a, 0x59, 0x0f, 0xfd, 
0x7f, 0x6c, 0x80, 0x34, 0x30, 0x31, + /* (2^238)P */ 0x65, 0xbd, 0x49, 0x22, 0xac, 0x27, 0x9d, 0x8a, 0x12, 0x95, 0x8e, 0x01, 0x64, 0xb4, 0xa3, 0x19, 0xc7, 0x7e, 0xb3, 0x52, 0xf3, 0xcf, 0x6c, 0xc2, 0x21, 0x7b, 0x79, 0x1d, 0x34, 0x68, 0x6f, 0x05, + /* (2^239)P */ 0x27, 0x23, 0xfd, 0x7e, 0x75, 0xd6, 0x79, 0x5e, 0x15, 0xfe, 0x3a, 0x55, 0xb6, 0xbc, 0xbd, 0xfa, 0x60, 0x5a, 0xaf, 0x6e, 0x2c, 0x22, 0xe7, 0xd3, 0x3b, 0x74, 0xae, 0x4d, 0x6d, 0xc7, 0x46, 0x70, + /* (2^240)P */ 0x55, 0x4a, 0x8d, 0xb1, 0x72, 0xe8, 0x0b, 0x66, 0x96, 0x14, 0x4e, 0x57, 0x18, 0x25, 0x99, 0x19, 0xbb, 0xdc, 0x2b, 0x30, 0x3a, 0x05, 0x03, 0xc1, 0x8e, 0x8e, 0x21, 0x0b, 0x80, 0xe9, 0xd8, 0x3e, + /* (2^241)P */ 0x3e, 0xe0, 0x75, 0xfa, 0x39, 0x92, 0x0b, 0x7b, 0x83, 0xc0, 0x33, 0x46, 0x68, 0xfb, 0xe9, 0xef, 0x93, 0x77, 0x1a, 0x39, 0xbe, 0x5f, 0xa3, 0x98, 0x34, 0xfe, 0xd0, 0xe2, 0x0f, 0x51, 0x65, 0x60, + /* (2^242)P */ 0x0c, 0xad, 0xab, 0x48, 0x85, 0x66, 0xcb, 0x55, 0x27, 0xe5, 0x87, 0xda, 0x48, 0x45, 0x58, 0xb4, 0xdd, 0xc1, 0x07, 0x01, 0xea, 0xec, 0x43, 0x2c, 0x35, 0xde, 0x72, 0x93, 0x80, 0x28, 0x60, 0x52, + /* (2^243)P */ 0x1f, 0x3b, 0x21, 0xf9, 0x6a, 0xc5, 0x15, 0x34, 0xdb, 0x98, 0x7e, 0x01, 0x4d, 0x1a, 0xee, 0x5b, 0x9b, 0x70, 0xcf, 0xb5, 0x05, 0xb1, 0xf6, 0x13, 0xb6, 0x9a, 0xb2, 0x82, 0x34, 0x0e, 0xf2, 0x5f, + /* (2^244)P */ 0x90, 0x6c, 0x2e, 0xcc, 0x75, 0x9c, 0xa2, 0x0a, 0x06, 0xe2, 0x70, 0x3a, 0xca, 0x73, 0x7d, 0xfc, 0x15, 0xc5, 0xb5, 0xc4, 0x8f, 0xc3, 0x9f, 0x89, 0x07, 0xc2, 0xff, 0x24, 0xb1, 0x86, 0x03, 0x25, + /* (2^245)P */ 0x56, 0x2b, 0x3d, 0xae, 0xd5, 0x28, 0xea, 0x54, 0xce, 0x60, 0xde, 0xd6, 0x9d, 0x14, 0x13, 0x99, 0xc1, 0xd6, 0x06, 0x8f, 0xc5, 0x4f, 0x69, 0x16, 0xc7, 0x8f, 0x01, 0xeb, 0x75, 0x39, 0xb2, 0x46, + /* (2^246)P */ 0xe2, 0xb4, 0xb7, 0xb4, 0x0f, 0x6a, 0x0a, 0x47, 0xde, 0x53, 0x72, 0x8f, 0x5a, 0x47, 0x92, 0x5d, 0xdb, 0x3a, 0xbd, 0x2f, 0xb5, 0xe5, 0xee, 0xab, 0x68, 0x69, 0x80, 0xa0, 0x01, 0x08, 0xa2, 0x7f, + /* (2^247)P */ 0xd2, 0x14, 0x77, 0x9f, 0xf1, 0xfa, 0xf3, 0x76, 0xc3, 0x60, 0x46, 0x2f, 0xc1, 0x40, 0xe8, 0xb3, 0x4e, 0x74, 0x12, 0xf2, 0x8d, 0xcd, 0xb4, 0x0f, 0xd2, 0x2d, 0x3a, 0x1d, 0x25, 0x5a, 0x06, 0x4b, + /* (2^248)P */ 0x4a, 0xcd, 0x77, 0x3d, 0x38, 0xde, 0xeb, 0x5c, 0xb1, 0x9c, 0x2c, 0x88, 0xdf, 0x39, 0xdf, 0x6a, 0x59, 0xf7, 0x9a, 0xb0, 0x2e, 0x24, 0xdd, 0xa2, 0x22, 0x64, 0x5f, 0x0e, 0xe5, 0xc0, 0x47, 0x31, + /* (2^249)P */ 0xdb, 0x50, 0x13, 0x1d, 0x10, 0xa5, 0x4c, 0x16, 0x62, 0xc9, 0x3f, 0xc3, 0x79, 0x34, 0xd1, 0xf8, 0x08, 0xda, 0xe5, 0x13, 0x4d, 0xce, 0x40, 0xe6, 0xba, 0xf8, 0x61, 0x50, 0xc4, 0xe0, 0xde, 0x4b, + /* (2^250)P */ 0xc9, 0xb1, 0xed, 0xa4, 0xc1, 0x6d, 0xc4, 0xd7, 0x8a, 0xd9, 0x7f, 0x43, 0xb6, 0xd7, 0x14, 0x55, 0x0b, 0xc0, 0xa1, 0xb2, 0x6b, 0x2f, 0x94, 0x58, 0x0e, 0x71, 0x70, 0x1d, 0xab, 0xb2, 0xff, 0x2d, + /* (2^251)P */ 0x68, 0x6d, 0x8b, 0xc1, 0x2f, 0xcf, 0xdf, 0xcc, 0x67, 0x61, 0x80, 0xb7, 0xa8, 0xcb, 0xeb, 0xa8, 0xe3, 0x37, 0x29, 0x5e, 0xf9, 0x97, 0x06, 0x98, 0x8c, 0x6e, 0x12, 0xd0, 0x1c, 0xba, 0xfb, 0x02, + /* (2^252)P */ 0x65, 0x45, 0xff, 0xad, 0x60, 0xc3, 0x98, 0xcb, 0x19, 0x15, 0xdb, 0x4b, 0xd2, 0x01, 0x71, 0x44, 0xd5, 0x15, 0xfb, 0x75, 0x74, 0xc8, 0xc4, 0x98, 0x7d, 0xa2, 0x22, 0x6e, 0x6d, 0xc7, 0xf8, 0x05, + /* (2^253)P */ 0x94, 0xf4, 0xb9, 0xfe, 0xdf, 0xe5, 0x69, 0xab, 0x75, 0x6b, 0x40, 0x18, 0x9d, 0xc7, 0x09, 0xae, 0x1d, 0x2d, 0xa4, 0x94, 0xfb, 0x45, 0x9b, 0x19, 0x84, 0xfa, 0x2a, 0xae, 0xeb, 0x0a, 0x71, 0x79, + /* (2^254)P */ 0xdf, 0xd2, 0x34, 0xf3, 0xa7, 0xed, 0xad, 0xa6, 0xb4, 0x57, 0x2a, 0xaf, 0x51, 0x9c, 0xde, 0x7b, 0xa8, 0xea, 0xdc, 0x86, 0x4f, 0xc6, 0x8f, 0xa9, 0x7b, 0xd0, 
0x0e, 0xc2, 0x35, 0x03, 0xbe, 0x6b, + /* (2^255)P */ 0x44, 0x43, 0x98, 0x53, 0xbe, 0xdc, 0x7f, 0x66, 0xa8, 0x49, 0x59, 0x00, 0x1c, 0xbc, 0x72, 0x07, 0x8e, 0xd6, 0xbe, 0x4e, 0x9f, 0xa4, 0x07, 0xba, 0xbf, 0x30, 0xdf, 0xba, 0x85, 0xb0, 0xa7, 0x1f, +} diff --git a/vendor/github.com/cloudflare/circl/dh/x448/BUILD.bazel b/vendor/github.com/cloudflare/circl/dh/x448/BUILD.bazel new file mode 100644 index 00000000..0e275f7a --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/BUILD.bazel @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "x448", + srcs = [ + "curve.go", + "curve_amd64.go", + "curve_amd64.h", + "curve_amd64.s", + "curve_generic.go", + "curve_noasm.go", + "doc.go", + "key.go", + "table.go", + ], + importmap = "peridot.resf.org/vendor/github.com/cloudflare/circl/dh/x448", + importpath = "github.com/cloudflare/circl/dh/x448", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/cloudflare/circl/math/fp448", + ] + select({ + "@io_bazel_rules_go//go/platform:amd64": [ + "//vendor/golang.org/x/sys/cpu", + ], + "//conditions:default": [], + }), +) diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve.go b/vendor/github.com/cloudflare/circl/dh/x448/curve.go new file mode 100644 index 00000000..d59564e4 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/curve.go @@ -0,0 +1,104 @@ +package x448 + +import ( + fp "github.com/cloudflare/circl/math/fp448" +) + +// ladderJoye calculates a fixed-point multiplication with the generator point. +// The algorithm is the right-to-left Joye's ladder as described +// in "How to precompute a ladder" in SAC'2017. +func ladderJoye(k *Key) { + w := [5]fp.Elt{} // [mu,x1,z1,x2,z2] order must be preserved. + w[1] = fp.Elt{ // x1 = S + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + } + fp.SetOne(&w[2]) // z1 = 1 + w[3] = fp.Elt{ // x2 = G-S + 0x20, 0x27, 0x9d, 0xc9, 0x7d, 0x19, 0xb1, 0xac, + 0xf8, 0xba, 0x69, 0x1c, 0xff, 0x33, 0xac, 0x23, + 0x51, 0x1b, 0xce, 0x3a, 0x64, 0x65, 0xbd, 0xf1, + 0x23, 0xf8, 0xc1, 0x84, 0x9d, 0x45, 0x54, 0x29, + 0x67, 0xb9, 0x81, 0x1c, 0x03, 0xd1, 0xcd, 0xda, + 0x7b, 0xeb, 0xff, 0x1a, 0x88, 0x03, 0xcf, 0x3a, + 0x42, 0x44, 0x32, 0x01, 0x25, 0xb7, 0xfa, 0xf0, + } + fp.SetOne(&w[4]) // z2 = 1 + + const n = 448 + const h = 2 + swap := uint(1) + for s := 0; s < n-h; s++ { + i := (s + h) / 8 + j := (s + h) % 8 + bit := uint((k[i] >> uint(j)) & 1) + copy(w[0][:], tableGenerator[s*Size:(s+1)*Size]) + diffAdd(&w, swap^bit) + swap = bit + } + for s := 0; s < h; s++ { + double(&w[1], &w[2]) + } + toAffine((*[fp.Size]byte)(k), &w[1], &w[2]) +} + +// ladderMontgomery calculates a generic scalar point multiplication +// The algorithm implemented is the left-to-right Montgomery's ladder. +func ladderMontgomery(k, xP *Key) { + w := [5]fp.Elt{} // [x1, x2, z2, x3, z3] order must be preserved. 
+ w[0] = *(*fp.Elt)(xP) // x1 = xP + fp.SetOne(&w[1]) // x2 = 1 + w[3] = *(*fp.Elt)(xP) // x3 = xP + fp.SetOne(&w[4]) // z3 = 1 + + move := uint(0) + for s := 448 - 1; s >= 0; s-- { + i := s / 8 + j := s % 8 + bit := uint((k[i] >> uint(j)) & 1) + ladderStep(&w, move^bit) + move = bit + } + toAffine((*[fp.Size]byte)(k), &w[1], &w[2]) +} + +func toAffine(k *[fp.Size]byte, x, z *fp.Elt) { + fp.Inv(z, z) + fp.Mul(x, x, z) + _ = fp.ToBytes(k[:], x) +} + +var lowOrderPoints = [3]fp.Elt{ + { /* (0,_,1) point of order 2 on Curve448 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + { /* (1,_,1) a point of order 4 on the twist of Curve448 */ + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + { /* (-1,_,1) point of order 4 on Curve448 */ + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + }, +} diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.go b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.go new file mode 100644 index 00000000..a0622666 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.go @@ -0,0 +1,30 @@ +//go:build amd64 && !purego +// +build amd64,!purego + +package x448 + +import ( + fp "github.com/cloudflare/circl/math/fp448" + "golang.org/x/sys/cpu" +) + +var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX + +var _ = hasBmi2Adx + +func double(x, z *fp.Elt) { doubleAmd64(x, z) } +func diffAdd(w *[5]fp.Elt, b uint) { diffAddAmd64(w, b) } +func ladderStep(w *[5]fp.Elt, b uint) { ladderStepAmd64(w, b) } +func mulA24(z, x *fp.Elt) { mulA24Amd64(z, x) } + +//go:noescape +func doubleAmd64(x, z *fp.Elt) + +//go:noescape +func diffAddAmd64(w *[5]fp.Elt, b uint) + +//go:noescape +func ladderStepAmd64(w *[5]fp.Elt, b uint) + +//go:noescape +func mulA24Amd64(z, x *fp.Elt) diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.h b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.h new file mode 100644 index 00000000..8c1ae4d0 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.h @@ -0,0 +1,111 @@ +#define ladderStepLeg \ + addSub(x2,z2) \ + addSub(x3,z3) \ + integerMulLeg(b0,x2,z3) \ + integerMulLeg(b1,x3,z2) \ + reduceFromDoubleLeg(t0,b0) \ + reduceFromDoubleLeg(t1,b1) \ + addSub(t0,t1) \ + cselect(x2,x3,regMove) \ + cselect(z2,z3,regMove) \ + integerSqrLeg(b0,t0) \ + integerSqrLeg(b1,t1) \ + reduceFromDoubleLeg(x3,b0) \ + reduceFromDoubleLeg(z3,b1) \ + integerMulLeg(b0,x1,z3) \ + reduceFromDoubleLeg(z3,b0) \ + integerSqrLeg(b0,x2) \ + integerSqrLeg(b1,z2) \ + reduceFromDoubleLeg(x2,b0) \ + reduceFromDoubleLeg(z2,b1) \ + subtraction(t0,x2,z2) \ + multiplyA24Leg(t1,t0) \ + additionLeg(t1,t1,z2) \ + 
integerMulLeg(b0,x2,z2) \ + integerMulLeg(b1,t0,t1) \ + reduceFromDoubleLeg(x2,b0) \ + reduceFromDoubleLeg(z2,b1) + +#define ladderStepBmi2Adx \ + addSub(x2,z2) \ + addSub(x3,z3) \ + integerMulAdx(b0,x2,z3) \ + integerMulAdx(b1,x3,z2) \ + reduceFromDoubleAdx(t0,b0) \ + reduceFromDoubleAdx(t1,b1) \ + addSub(t0,t1) \ + cselect(x2,x3,regMove) \ + cselect(z2,z3,regMove) \ + integerSqrAdx(b0,t0) \ + integerSqrAdx(b1,t1) \ + reduceFromDoubleAdx(x3,b0) \ + reduceFromDoubleAdx(z3,b1) \ + integerMulAdx(b0,x1,z3) \ + reduceFromDoubleAdx(z3,b0) \ + integerSqrAdx(b0,x2) \ + integerSqrAdx(b1,z2) \ + reduceFromDoubleAdx(x2,b0) \ + reduceFromDoubleAdx(z2,b1) \ + subtraction(t0,x2,z2) \ + multiplyA24Adx(t1,t0) \ + additionAdx(t1,t1,z2) \ + integerMulAdx(b0,x2,z2) \ + integerMulAdx(b1,t0,t1) \ + reduceFromDoubleAdx(x2,b0) \ + reduceFromDoubleAdx(z2,b1) + +#define difAddLeg \ + addSub(x1,z1) \ + integerMulLeg(b0,z1,ui) \ + reduceFromDoubleLeg(z1,b0) \ + addSub(x1,z1) \ + integerSqrLeg(b0,x1) \ + integerSqrLeg(b1,z1) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) \ + integerMulLeg(b0,x1,z2) \ + integerMulLeg(b1,z1,x2) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) + +#define difAddBmi2Adx \ + addSub(x1,z1) \ + integerMulAdx(b0,z1,ui) \ + reduceFromDoubleAdx(z1,b0) \ + addSub(x1,z1) \ + integerSqrAdx(b0,x1) \ + integerSqrAdx(b1,z1) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) \ + integerMulAdx(b0,x1,z2) \ + integerMulAdx(b1,z1,x2) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) + +#define doubleLeg \ + addSub(x1,z1) \ + integerSqrLeg(b0,x1) \ + integerSqrLeg(b1,z1) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) \ + subtraction(t0,x1,z1) \ + multiplyA24Leg(t1,t0) \ + additionLeg(t1,t1,z1) \ + integerMulLeg(b0,x1,z1) \ + integerMulLeg(b1,t0,t1) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) + +#define doubleBmi2Adx \ + addSub(x1,z1) \ + integerSqrAdx(b0,x1) \ + integerSqrAdx(b1,z1) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) \ + subtraction(t0,x1,z1) \ + multiplyA24Adx(t1,t0) \ + additionAdx(t1,t1,z1) \ + integerMulAdx(b0,x1,z1) \ + integerMulAdx(b1,t0,t1) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s new file mode 100644 index 00000000..ed33ba3d --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s @@ -0,0 +1,194 @@ +//go:build amd64 && !purego +// +build amd64,!purego + +#include "textflag.h" + +// Depends on circl/math/fp448 package +#include "../../math/fp448/fp_amd64.h" +#include "curve_amd64.h" + +// CTE_A24 is (A+2)/4 from Curve448 +#define CTE_A24 39082 + +#define Size 56 + +// multiplyA24Leg multiplies x times CTE_A24 and stores in z +// Uses: AX, DX, R8-R15, FLAGS +// Instr: x86_64, cmov, adx +#define multiplyA24Leg(z,x) \ + MOVQ $CTE_A24, R15; \ + MOVQ 0+x, AX; MULQ R15; MOVQ AX, R8; ;;;;;;;;;;;; MOVQ DX, R9; \ + MOVQ 8+x, AX; MULQ R15; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; \ + MOVQ 16+x, AX; MULQ R15; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \ + MOVQ 24+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \ + MOVQ 32+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \ + MOVQ 40+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \ + MOVQ 48+x, AX; MULQ R15; ADDQ AX, R14; ADCQ $0, DX; \ + MOVQ DX, AX; \ + SHLQ $32, AX; \ + ADDQ DX, R8; MOVQ $0, DX; \ + ADCQ $0, R9; \ + ADCQ $0, R10; \ + ADCQ AX, R11; \ + 
ADCQ $0, R12; \ + ADCQ $0, R13; \ + ADCQ $0, R14; \ + ADCQ $0, DX; \ + MOVQ DX, AX; \ + SHLQ $32, AX; \ + ADDQ DX, R8; \ + ADCQ $0, R9; \ + ADCQ $0, R10; \ + ADCQ AX, R11; \ + ADCQ $0, R12; \ + ADCQ $0, R13; \ + ADCQ $0, R14; \ + MOVQ R8, 0+z; \ + MOVQ R9, 8+z; \ + MOVQ R10, 16+z; \ + MOVQ R11, 24+z; \ + MOVQ R12, 32+z; \ + MOVQ R13, 40+z; \ + MOVQ R14, 48+z; + +// multiplyA24Adx multiplies x times CTE_A24 and stores in z +// Uses: AX, DX, R8-R14, FLAGS +// Instr: x86_64, bmi2 +#define multiplyA24Adx(z,x) \ + MOVQ $CTE_A24, DX; \ + MULXQ 0+x, R8, R9; \ + MULXQ 8+x, AX, R10; ADDQ AX, R9; \ + MULXQ 16+x, AX, R11; ADCQ AX, R10; \ + MULXQ 24+x, AX, R12; ADCQ AX, R11; \ + MULXQ 32+x, AX, R13; ADCQ AX, R12; \ + MULXQ 40+x, AX, R14; ADCQ AX, R13; \ + MULXQ 48+x, AX, DX; ADCQ AX, R14; \ + ;;;;;;;;;;;;;;;;;;;; ADCQ $0, DX; \ + MOVQ DX, AX; \ + SHLQ $32, AX; \ + ADDQ DX, R8; MOVQ $0, DX; \ + ADCQ $0, R9; \ + ADCQ $0, R10; \ + ADCQ AX, R11; \ + ADCQ $0, R12; \ + ADCQ $0, R13; \ + ADCQ $0, R14; \ + ADCQ $0, DX; \ + MOVQ DX, AX; \ + SHLQ $32, AX; \ + ADDQ DX, R8; \ + ADCQ $0, R9; \ + ADCQ $0, R10; \ + ADCQ AX, R11; \ + ADCQ $0, R12; \ + ADCQ $0, R13; \ + ADCQ $0, R14; \ + MOVQ R8, 0+z; \ + MOVQ R9, 8+z; \ + MOVQ R10, 16+z; \ + MOVQ R11, 24+z; \ + MOVQ R12, 32+z; \ + MOVQ R13, 40+z; \ + MOVQ R14, 48+z; + +#define mulA24Legacy \ + multiplyA24Leg(0(DI),0(SI)) +#define mulA24Bmi2Adx \ + multiplyA24Adx(0(DI),0(SI)) + +// func mulA24Amd64(z, x *fp448.Elt) +TEXT ·mulA24Amd64(SB),NOSPLIT,$0-16 + MOVQ z+0(FP), DI + MOVQ x+8(FP), SI + CHECK_BMI2ADX(LMA24, mulA24Legacy, mulA24Bmi2Adx) + +// func ladderStepAmd64(w *[5]fp448.Elt, b uint) +// ladderStepAmd64 calculates a point addition and doubling as follows: +// (x2,z2) = 2*(x2,z2) and (x3,z3) = (x2,z2)+(x3,z3) using as a difference (x1,-). +// w = {x1,x2,z2,x3,z3} are five fp448.Elt of 56 bytes. +// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and +// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes. +TEXT ·ladderStepAmd64(SB),NOSPLIT,$336-16 + // Parameters + #define regWork DI + #define regMove SI + #define x1 0*Size(regWork) + #define x2 1*Size(regWork) + #define z2 2*Size(regWork) + #define x3 3*Size(regWork) + #define z3 4*Size(regWork) + // Local variables + #define t0 0*Size(SP) + #define t1 1*Size(SP) + #define b0 2*Size(SP) + #define b1 4*Size(SP) + MOVQ w+0(FP), regWork + MOVQ b+8(FP), regMove + CHECK_BMI2ADX(LLADSTEP, ladderStepLeg, ladderStepBmi2Adx) + #undef regWork + #undef regMove + #undef x1 + #undef x2 + #undef z2 + #undef x3 + #undef z3 + #undef t0 + #undef t1 + #undef b0 + #undef b1 + +// func diffAddAmd64(work *[5]fp.Elt, swap uint) +// diffAddAmd64 calculates a differential point addition using a precomputed point. +// (x1,z1) = (x1,z1)+(mu) using a difference point (x2,z2) +// work = {mu,x1,z1,x2,z2} are five fp448.Elt of 56 bytes, and +// stack = (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes. +// This is Equation 7 at https://eprint.iacr.org/2017/264.
+TEXT ·diffAddAmd64(SB),NOSPLIT,$224-16 + // Parameters + #define regWork DI + #define regSwap SI + #define ui 0*Size(regWork) + #define x1 1*Size(regWork) + #define z1 2*Size(regWork) + #define x2 3*Size(regWork) + #define z2 4*Size(regWork) + // Local variables + #define b0 0*Size(SP) + #define b1 2*Size(SP) + MOVQ w+0(FP), regWork + MOVQ b+8(FP), regSwap + cswap(x1,x2,regSwap) + cswap(z1,z2,regSwap) + CHECK_BMI2ADX(LDIFADD, difAddLeg, difAddBmi2Adx) + #undef regWork + #undef regSwap + #undef ui + #undef x1 + #undef z1 + #undef x2 + #undef z2 + #undef b0 + #undef b1 + +// func doubleAmd64(x, z *fp448.Elt) +// doubleAmd64 calculates a point doubling (x1,z1) = 2*(x1,z1). +// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and +// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes. +TEXT ·doubleAmd64(SB),NOSPLIT,$336-16 + // Parameters + #define x1 0(DI) + #define z1 0(SI) + // Local variables + #define t0 0*Size(SP) + #define t1 1*Size(SP) + #define b0 2*Size(SP) + #define b1 4*Size(SP) + MOVQ x+0(FP), DI + MOVQ z+8(FP), SI + CHECK_BMI2ADX(LDOUB,doubleLeg,doubleBmi2Adx) + #undef x1 + #undef z1 + #undef t0 + #undef t1 + #undef b0 + #undef b1 diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_generic.go b/vendor/github.com/cloudflare/circl/dh/x448/curve_generic.go new file mode 100644 index 00000000..b0b65ccf --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/curve_generic.go @@ -0,0 +1,100 @@ +package x448 + +import ( + "encoding/binary" + "math/bits" + + "github.com/cloudflare/circl/math/fp448" +) + +func doubleGeneric(x, z *fp448.Elt) { + t0, t1 := &fp448.Elt{}, &fp448.Elt{} + fp448.AddSub(x, z) + fp448.Sqr(x, x) + fp448.Sqr(z, z) + fp448.Sub(t0, x, z) + mulA24Generic(t1, t0) + fp448.Add(t1, t1, z) + fp448.Mul(x, x, z) + fp448.Mul(z, t0, t1) +} + +func diffAddGeneric(w *[5]fp448.Elt, b uint) { + mu, x1, z1, x2, z2 := &w[0], &w[1], &w[2], &w[3], &w[4] + fp448.Cswap(x1, x2, b) + fp448.Cswap(z1, z2, b) + fp448.AddSub(x1, z1) + fp448.Mul(z1, z1, mu) + fp448.AddSub(x1, z1) + fp448.Sqr(x1, x1) + fp448.Sqr(z1, z1) + fp448.Mul(x1, x1, z2) + fp448.Mul(z1, z1, x2) +} + +func ladderStepGeneric(w *[5]fp448.Elt, b uint) { + x1, x2, z2, x3, z3 := &w[0], &w[1], &w[2], &w[3], &w[4] + t0 := &fp448.Elt{} + t1 := &fp448.Elt{} + fp448.AddSub(x2, z2) + fp448.AddSub(x3, z3) + fp448.Mul(t0, x2, z3) + fp448.Mul(t1, x3, z2) + fp448.AddSub(t0, t1) + fp448.Cmov(x2, x3, b) + fp448.Cmov(z2, z3, b) + fp448.Sqr(x3, t0) + fp448.Sqr(z3, t1) + fp448.Mul(z3, x1, z3) + fp448.Sqr(x2, x2) + fp448.Sqr(z2, z2) + fp448.Sub(t0, x2, z2) + mulA24Generic(t1, t0) + fp448.Add(t1, t1, z2) + fp448.Mul(x2, x2, z2) + fp448.Mul(z2, t0, t1) +} + +func mulA24Generic(z, x *fp448.Elt) { + const A24 = 39082 + const n = 8 + var xx [7]uint64 + for i := range xx { + xx[i] = binary.LittleEndian.Uint64(x[i*n : (i+1)*n]) + } + h0, l0 := bits.Mul64(xx[0], A24) + h1, l1 := bits.Mul64(xx[1], A24) + h2, l2 := bits.Mul64(xx[2], A24) + h3, l3 := bits.Mul64(xx[3], A24) + h4, l4 := bits.Mul64(xx[4], A24) + h5, l5 := bits.Mul64(xx[5], A24) + h6, l6 := bits.Mul64(xx[6], A24) + + l1, c0 := bits.Add64(h0, l1, 0) + l2, c1 := bits.Add64(h1, l2, c0) + l3, c2 := bits.Add64(h2, l3, c1) + l4, c3 := bits.Add64(h3, l4, c2) + l5, c4 := bits.Add64(h4, l5, c3) + l6, c5 := bits.Add64(h5, l6, c4) + l7, _ := bits.Add64(h6, 0, c5) + + l0, c0 = bits.Add64(l0, l7, 0) + l1, c1 = bits.Add64(l1, 0, c0) + l2, c2 = bits.Add64(l2, 0, c1) + l3, c3 = bits.Add64(l3, l7<<32, c2) + l4, c4 = bits.Add64(l4, 0, c3) + l5, c5 = bits.Add64(l5, 0, c4) + l6, l7 = 
bits.Add64(l6, 0, c5) + + xx[0], c0 = bits.Add64(l0, l7, 0) + xx[1], c1 = bits.Add64(l1, 0, c0) + xx[2], c2 = bits.Add64(l2, 0, c1) + xx[3], c3 = bits.Add64(l3, l7<<32, c2) + xx[4], c4 = bits.Add64(l4, 0, c3) + xx[5], c5 = bits.Add64(l5, 0, c4) + xx[6], _ = bits.Add64(l6, 0, c5) + + for i := range xx { + binary.LittleEndian.PutUint64(z[i*n:(i+1)*n], xx[i]) + } +} diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_noasm.go b/vendor/github.com/cloudflare/circl/dh/x448/curve_noasm.go new file mode 100644 index 00000000..3755b7c8 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/curve_noasm.go @@ -0,0 +1,11 @@ +//go:build !amd64 || purego +// +build !amd64 purego + +package x448 + +import fp "github.com/cloudflare/circl/math/fp448" + +func double(x, z *fp.Elt) { doubleGeneric(x, z) } +func diffAdd(w *[5]fp.Elt, b uint) { diffAddGeneric(w, b) } +func ladderStep(w *[5]fp.Elt, b uint) { ladderStepGeneric(w, b) } +func mulA24(z, x *fp.Elt) { mulA24Generic(z, x) } diff --git a/vendor/github.com/cloudflare/circl/dh/x448/doc.go b/vendor/github.com/cloudflare/circl/dh/x448/doc.go new file mode 100644 index 00000000..c02904fe --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/doc.go @@ -0,0 +1,19 @@ +/* +Package x448 provides Diffie-Hellman functions as specified in RFC-7748. + +Validation of public keys. + +The Diffie-Hellman function, as described in RFC-7748 [1], works for any +public key. However, if a different protocol requires contributory +behaviour [2,3], then the public keys must be validated against low-order +points [3,4]. To do that, the Shared function performs this validation +internally and returns false when the public key is invalid (i.e., it +is a low-order point). + +References: + - [1] RFC7748 by Langley, Hamburg, Turner (https://rfc-editor.org/rfc/rfc7748.txt) + - [2] Curve25519 by Bernstein (https://cr.yp.to/ecdh.html) + - [3] Bernstein (https://cr.yp.to/ecdh.html#validate) + - [4] Cremers&Jackson (https://eprint.iacr.org/2019/526) +*/ +package x448 diff --git a/vendor/github.com/cloudflare/circl/dh/x448/key.go b/vendor/github.com/cloudflare/circl/dh/x448/key.go new file mode 100644 index 00000000..2fdde511 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/key.go @@ -0,0 +1,46 @@ +package x448 + +import ( + "crypto/subtle" + + fp "github.com/cloudflare/circl/math/fp448" +) + +// Size is the length in bytes of an X448 key. +const Size = 56 + +// Key represents an X448 key. +type Key [Size]byte + +func (k *Key) clamp(in *Key) *Key { + *k = *in + k[0] &= 252 + k[55] |= 128 + return k +} + +// isValidPubKey verifies if the public key is not a low-order point. +func (k *Key) isValidPubKey() bool { + fp.Modp((*fp.Elt)(k)) + var isLowOrder int + for _, P := range lowOrderPoints { + isLowOrder |= subtle.ConstantTimeCompare(P[:], k[:]) + } + return isLowOrder == 0 +} + +// KeyGen obtains a public key given a secret key. +func KeyGen(public, secret *Key) { + ladderJoye(public.clamp(secret)) +} + +// Shared calculates Alice's shared key from Alice's secret key and Bob's +// public key, returning true on success. A failure case happens when the public +// key is a low-order point, thus the shared key is all-zeros and the function +// returns false.
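A minimal round-trip sketch of the API this file exposes (KeyGen and Shared, whose signatures appear in this diff); the variable names and the use of crypto/rand for key material are illustrative assumptions, not part of the vendored code:

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/cloudflare/circl/dh/x448"
)

func main() {
	var skAlice, pkAlice, skBob, pkBob x448.Key

	// Secret keys are Size (56) uniformly random bytes; clamping is
	// applied internally by KeyGen/Shared via clamp.
	if _, err := rand.Read(skAlice[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(skBob[:]); err != nil {
		panic(err)
	}

	x448.KeyGen(&pkAlice, &skAlice) // public = clamp(secret) * G
	x448.KeyGen(&pkBob, &skBob)

	var sA, sB x448.Key
	okA := x448.Shared(&sA, &skAlice, &pkBob) // false if pkBob is low order
	okB := x448.Shared(&sB, &skBob, &pkAlice)
	fmt.Println(okA && okB, sA == sB) // expect: true true
}

Both sides derive the same 56-byte secret, and Shared reports false only when the peer's public key reduces to one of the lowOrderPoints listed in curve.go.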
+func Shared(shared, secret, public *Key) bool { + validPk := *public + ok := validPk.isValidPubKey() + ladderMontgomery(shared.clamp(secret), &validPk) + return ok +} diff --git a/vendor/github.com/cloudflare/circl/dh/x448/table.go b/vendor/github.com/cloudflare/circl/dh/x448/table.go new file mode 100644 index 00000000..eef53c30 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/table.go @@ -0,0 +1,460 @@ +package x448 + +import fp "github.com/cloudflare/circl/math/fp448" + +// tableGenerator contains the set of points: +// +// t[i] = (xi+1)/(xi-1), +// +// where (xi,yi) = 2^iG and G is the generator point +// Size = (448)*(448/8) = 25088 bytes. +var tableGenerator = [448 * fp.Size]byte{ + /* (2^ 0)P */ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, + /* (2^ 1)P */ 0x37, 0xfa, 0xaa, 0x0d, 0x86, 0xa6, 0x24, 0xe9, 0x6c, 0x95, 0x08, 0x34, 0xba, 0x1a, 0x81, 0x3a, 0xae, 0x01, 0xa5, 0xa7, 0x05, 0x85, 0x96, 0x00, 0x06, 0x5a, 0xd7, 0xff, 0xee, 0x8e, 0x8f, 0x94, 0xd2, 0xdc, 0xd7, 0xfc, 0xe7, 0xe5, 0x99, 0x1d, 0x05, 0x46, 0x43, 0xe8, 0xbc, 0x12, 0xb7, 0xeb, 0x30, 0x5e, 0x7a, 0x85, 0x68, 0xed, 0x9d, 0x28, + /* (2^ 2)P */ 0xf1, 0x7d, 0x08, 0x2b, 0x32, 0x4a, 0x62, 0x80, 0x36, 0xe7, 0xa4, 0x76, 0x5a, 0x2a, 0x1e, 0xf7, 0x9e, 0x3c, 0x40, 0x46, 0x9a, 0x1b, 0x61, 0xc1, 0xbf, 0x1a, 0x1b, 0xae, 0x91, 0x80, 0xa3, 0x76, 0x6c, 0xd4, 0x8f, 0xa4, 0xee, 0x26, 0x39, 0x23, 0xa4, 0x80, 0xf4, 0x66, 0x92, 0xe4, 0xe1, 0x18, 0x76, 0xc5, 0xe2, 0x19, 0x87, 0xd5, 0xc3, 0xe8, + /* (2^ 3)P */ 0xfb, 0xc9, 0xf0, 0x07, 0xf2, 0x93, 0xd8, 0x50, 0x36, 0xed, 0xfb, 0xbd, 0xb2, 0xd3, 0xfc, 0xdf, 0xd5, 0x2a, 0x6e, 0x26, 0x09, 0xce, 0xd4, 0x07, 0x64, 0x9f, 0x40, 0x74, 0xad, 0x98, 0x2f, 0x1c, 0xb6, 0xdc, 0x2d, 0x42, 0xff, 0xbf, 0x97, 0xd8, 0xdb, 0xef, 0x99, 0xca, 0x73, 0x99, 0x1a, 0x04, 0x3b, 0x56, 0x2c, 0x1f, 0x87, 0x9d, 0x9f, 0x03, + /* (2^ 4)P */ 0x4c, 0x35, 0x97, 0xf7, 0x81, 0x2c, 0x84, 0xa6, 0xe0, 0xcb, 0xce, 0x37, 0x4c, 0x21, 0x1c, 0x67, 0xfa, 0xab, 0x18, 0x4d, 0xef, 0xd0, 0xf0, 0x44, 0xa9, 0xfb, 0xc0, 0x8e, 0xda, 0x57, 0xa1, 0xd8, 0xeb, 0x87, 0xf4, 0x17, 0xea, 0x66, 0x0f, 0x16, 0xea, 0xcd, 0x5f, 0x3e, 0x88, 0xea, 0x09, 0x68, 0x40, 0xdf, 0x43, 0xcc, 0x54, 0x61, 0x58, 0xaa, + /* (2^ 5)P */ 0x8d, 0xe7, 0x59, 0xd7, 0x5e, 0x63, 0x37, 0xa7, 0x3f, 0xd1, 0x49, 0x85, 0x01, 0xdd, 0x5e, 0xb3, 0xe6, 0x29, 0xcb, 0x25, 0x93, 0xdd, 0x08, 0x96, 0x83, 0x52, 0x76, 0x85, 0xf5, 0x5d, 0x02, 0xbf, 0xe9, 0x6d, 0x15, 0x27, 0xc1, 0x09, 0xd1, 0x14, 0x4d, 0x6e, 0xe8, 0xaf, 0x59, 0x58, 0x34, 0x9d, 0x2a, 0x99, 0x85, 0x26, 0xbe, 0x4b, 0x1e, 0xb9, + /* (2^ 6)P */ 0x8d, 0xce, 0x94, 0xe2, 0x18, 0x56, 0x0d, 0x82, 0x8e, 0xdf, 0x85, 0x01, 0x8f, 0x93, 0x3c, 0xc6, 0xbd, 0x61, 0xfb, 0xf4, 0x22, 0xc5, 0x16, 0x87, 0xd1, 0xb1, 0x9e, 0x09, 0xc5, 0x83, 0x2e, 0x4a, 0x07, 0x88, 0xee, 0xe0, 0x29, 0x8d, 0x2e, 0x1f, 0x88, 0xad, 0xfd, 0x18, 0x93, 0xb7, 0xed, 0x42, 0x86, 0x78, 0xf0, 0xb8, 0x70, 0xbe, 0x01, 0x67, + /* (2^ 7)P */ 0xdf, 0x62, 0x2d, 0x94, 0xc7, 0x35, 0x23, 0xda, 0x27, 0xbb, 0x2b, 0xdb, 0x30, 0x80, 0x68, 0x16, 0xa3, 0xae, 0xd7, 0xd2, 0xa7, 0x7c, 0xbf, 0x6a, 0x1d, 0x83, 0xde, 0x96, 0x0a, 0x43, 0xb6, 0x30, 0x37, 0xd6, 0xee, 0x63, 0x59, 0x9a, 0xbf, 0xa3, 0x30, 0x6c, 0xaf, 0x0c, 0xee, 0x3d, 0xcb, 0x35, 0x4b, 0x55, 0x5f, 0x84, 0x85, 0xcb, 0x4f, 0x1e, + /* (2^ 8)P */ 0x9d, 0x04, 
0x68, 0x89, 0xa4, 0xa9, 0x0d, 0x87, 0xc1, 0x70, 0xf1, 0xeb, 0xfb, 0x47, 0x0a, 0xf0, 0xde, 0x67, 0xb7, 0x94, 0xcd, 0x36, 0x43, 0xa5, 0x49, 0x43, 0x67, 0xc3, 0xee, 0x3c, 0x6b, 0xec, 0xd0, 0x1a, 0xf4, 0xad, 0xef, 0x06, 0x4a, 0xe8, 0x46, 0x24, 0xd7, 0x93, 0xbf, 0xf0, 0xe3, 0x81, 0x61, 0xec, 0xea, 0x64, 0xfe, 0x67, 0xeb, 0xc7, + /* (2^ 9)P */ 0x95, 0x45, 0x79, 0xcf, 0x2c, 0xfd, 0x9b, 0xfe, 0x84, 0x46, 0x4b, 0x8f, 0xa1, 0xcf, 0xc3, 0x04, 0x94, 0x78, 0xdb, 0xc9, 0xa6, 0x01, 0x75, 0xa4, 0xb4, 0x93, 0x72, 0x43, 0xa7, 0x7d, 0xda, 0x31, 0x38, 0x54, 0xab, 0x4e, 0x3f, 0x89, 0xa6, 0xab, 0x57, 0xc0, 0x16, 0x65, 0xdb, 0x92, 0x96, 0xe4, 0xc8, 0xae, 0xe7, 0x4c, 0x7a, 0xeb, 0xbb, 0x5a, + /* (2^ 10)P */ 0xbe, 0xfe, 0x86, 0xc3, 0x97, 0xe0, 0x6a, 0x18, 0x20, 0x21, 0xca, 0x22, 0x55, 0xa1, 0xeb, 0xf5, 0x74, 0xe5, 0xc9, 0x59, 0xa7, 0x92, 0x65, 0x15, 0x08, 0x71, 0xd1, 0x09, 0x7e, 0x83, 0xfc, 0xbc, 0x5a, 0x93, 0x38, 0x0d, 0x43, 0x42, 0xfd, 0x76, 0x30, 0xe8, 0x63, 0x60, 0x09, 0x8d, 0x6c, 0xd3, 0xf8, 0x56, 0x3d, 0x68, 0x47, 0xab, 0xa0, 0x1d, + /* (2^ 11)P */ 0x38, 0x50, 0x1c, 0xb1, 0xac, 0x88, 0x8f, 0x38, 0xe3, 0x69, 0xe6, 0xfc, 0x4f, 0x8f, 0xe1, 0x9b, 0xb1, 0x1a, 0x09, 0x39, 0x19, 0xdf, 0xcd, 0x98, 0x7b, 0x64, 0x42, 0xf6, 0x11, 0xea, 0xc7, 0xe8, 0x92, 0x65, 0x00, 0x2c, 0x75, 0xb5, 0x94, 0x1e, 0x5b, 0xa6, 0x66, 0x81, 0x77, 0xf3, 0x39, 0x94, 0xac, 0xbd, 0xe4, 0x2a, 0x66, 0x84, 0x9c, 0x60, + /* (2^ 12)P */ 0xb5, 0xb6, 0xd9, 0x03, 0x67, 0xa4, 0xa8, 0x0a, 0x4a, 0x2b, 0x9d, 0xfa, 0x13, 0xe1, 0x99, 0x25, 0x4a, 0x5c, 0x67, 0xb9, 0xb2, 0xb7, 0xdd, 0x1e, 0xaf, 0xeb, 0x63, 0x41, 0xb6, 0xb9, 0xa0, 0x87, 0x0a, 0xe0, 0x06, 0x07, 0xaa, 0x97, 0xf8, 0xf9, 0x38, 0x4f, 0xdf, 0x0c, 0x40, 0x7c, 0xc3, 0x98, 0xa9, 0x74, 0xf1, 0x5d, 0xda, 0xd1, 0xc0, 0x0a, + /* (2^ 13)P */ 0xf2, 0x0a, 0xab, 0xab, 0x94, 0x50, 0xf0, 0xa3, 0x6f, 0xc6, 0x66, 0xba, 0xa6, 0xdc, 0x44, 0xdd, 0xd6, 0x08, 0xf4, 0xd3, 0xed, 0xb1, 0x40, 0x93, 0xee, 0xf6, 0xb8, 0x8e, 0xb4, 0x7c, 0xb9, 0x82, 0xc9, 0x9d, 0x45, 0x3b, 0x8e, 0x10, 0xcb, 0x70, 0x1e, 0xba, 0x3c, 0x62, 0x50, 0xda, 0xa9, 0x93, 0xb5, 0xd7, 0xd0, 0x6f, 0x29, 0x52, 0x95, 0xae, + /* (2^ 14)P */ 0x14, 0x68, 0x69, 0x23, 0xa8, 0x44, 0x87, 0x9e, 0x22, 0x91, 0xe8, 0x92, 0xdf, 0xf7, 0xae, 0xba, 0x1c, 0x96, 0xe1, 0xc3, 0x94, 0xed, 0x6c, 0x95, 0xae, 0x96, 0xa7, 0x15, 0x9f, 0xf1, 0x17, 0x11, 0x92, 0x42, 0xd5, 0xcd, 0x18, 0xe7, 0xa9, 0xb5, 0x2f, 0xcd, 0xde, 0x6c, 0xc9, 0x7d, 0xfc, 0x7e, 0xbd, 0x7f, 0x10, 0x3d, 0x01, 0x00, 0x8d, 0x95, + /* (2^ 15)P */ 0x3b, 0x76, 0x72, 0xae, 0xaf, 0x84, 0xf2, 0xf7, 0xd1, 0x6d, 0x13, 0x9c, 0x47, 0xe1, 0xb7, 0xa3, 0x19, 0x16, 0xee, 0x75, 0x45, 0xf6, 0x1a, 0x7b, 0x78, 0x49, 0x79, 0x05, 0x86, 0xf0, 0x7f, 0x9f, 0xfc, 0xc4, 0xbd, 0x86, 0xf3, 0x41, 0xa7, 0xfe, 0x01, 0xd5, 0x67, 0x16, 0x10, 0x5b, 0xa5, 0x16, 0xf3, 0x7f, 0x60, 0xce, 0xd2, 0x0c, 0x8e, 0x4b, + /* (2^ 16)P */ 0x4a, 0x07, 0x99, 0x4a, 0x0f, 0x74, 0x91, 0x14, 0x68, 0xb9, 0x48, 0xb7, 0x44, 0x77, 0x9b, 0x4a, 0xe0, 0x68, 0x0e, 0x43, 0x4d, 0x98, 0x98, 0xbf, 0xa8, 0x3a, 0xb7, 0x6d, 0x2a, 0x9a, 0x77, 0x5f, 0x62, 0xf5, 0x6b, 0x4a, 0xb7, 0x7d, 0xe5, 0x09, 0x6b, 0xc0, 0x8b, 0x9c, 0x88, 0x37, 0x33, 0xf2, 0x41, 0xac, 0x22, 0x1f, 0xcf, 0x3b, 0x82, 0x34, + /* (2^ 17)P */ 0x00, 0xc3, 0x78, 0x42, 0x32, 0x2e, 0xdc, 0xda, 0xb1, 0x96, 0x21, 0xa4, 0xe4, 0xbb, 0xe9, 0x9d, 0xbb, 0x0f, 0x93, 0xed, 0x26, 0x3d, 0xb5, 0xdb, 0x94, 0x31, 0x37, 0x07, 0xa2, 0xb2, 0xd5, 0x99, 0x0d, 0x93, 0xe1, 0xce, 0x3f, 0x0b, 0x96, 0x82, 0x47, 0xfe, 0x60, 0x6f, 0x8f, 0x61, 0x88, 0xd7, 0x05, 0x95, 0x0b, 0x46, 0x06, 0xb7, 0x32, 0x06, + /* (2^ 18)P */ 0x44, 0xf5, 0x34, 0xdf, 0x2f, 0x9c, 
0x5d, 0x9f, 0x53, 0x5c, 0x42, 0x8f, 0xc9, 0xdc, 0xd8, 0x40, 0xa2, 0xe7, 0x6a, 0x4a, 0x05, 0xf7, 0x86, 0x77, 0x2b, 0xae, 0x37, 0xed, 0x48, 0xfb, 0xf7, 0x62, 0x7c, 0x17, 0x59, 0x92, 0x41, 0x61, 0x93, 0x38, 0x30, 0xd1, 0xef, 0x54, 0x54, 0x03, 0x17, 0x57, 0x91, 0x15, 0x11, 0x33, 0xb5, 0xfa, 0xfb, 0x17, + /* (2^ 19)P */ 0x29, 0xbb, 0xd4, 0xb4, 0x9c, 0xf1, 0x72, 0x94, 0xce, 0x6a, 0x29, 0xa8, 0x89, 0x18, 0x19, 0xf7, 0xb7, 0xcc, 0xee, 0x9a, 0x02, 0xe3, 0xc0, 0xb1, 0xe0, 0xee, 0x83, 0x78, 0xb4, 0x9e, 0x07, 0x87, 0xdf, 0xb0, 0x82, 0x26, 0x4e, 0xa4, 0x0c, 0x33, 0xaf, 0x40, 0x59, 0xb6, 0xdd, 0x52, 0x45, 0xf0, 0xb4, 0xf6, 0xe8, 0x4e, 0x4e, 0x79, 0x1a, 0x5d, + /* (2^ 20)P */ 0x27, 0x33, 0x4d, 0x4c, 0x6b, 0x4f, 0x75, 0xb1, 0xbc, 0x1f, 0xab, 0x5b, 0x2b, 0xf0, 0x1c, 0x57, 0x86, 0xdd, 0xfd, 0x60, 0xb0, 0x8c, 0xe7, 0x9a, 0xe5, 0x5c, 0xeb, 0x11, 0x3a, 0xda, 0x22, 0x25, 0x99, 0x06, 0x8d, 0xf4, 0xaf, 0x29, 0x7a, 0xc9, 0xe5, 0xd2, 0x16, 0x9e, 0xd4, 0x63, 0x1d, 0x64, 0xa6, 0x47, 0x96, 0x37, 0x6f, 0x93, 0x2c, 0xcc, + /* (2^ 21)P */ 0xc1, 0x94, 0x74, 0x86, 0x75, 0xf2, 0x91, 0x58, 0x23, 0x85, 0x63, 0x76, 0x54, 0xc7, 0xb4, 0x8c, 0xbc, 0x4e, 0xc4, 0xa7, 0xba, 0xa0, 0x55, 0x26, 0x71, 0xd5, 0x33, 0x72, 0xc9, 0xad, 0x1e, 0xf9, 0x5d, 0x78, 0x70, 0x93, 0x4e, 0x85, 0xfc, 0x39, 0x06, 0x73, 0x76, 0xff, 0xe8, 0x64, 0x69, 0x42, 0x45, 0xb2, 0x69, 0xb5, 0x32, 0xe7, 0x2c, 0xde, + /* (2^ 22)P */ 0xde, 0x16, 0xd8, 0x33, 0x49, 0x32, 0xe9, 0x0e, 0x3a, 0x60, 0xee, 0x2e, 0x24, 0x75, 0xe3, 0x9c, 0x92, 0x07, 0xdb, 0xad, 0x92, 0xf5, 0x11, 0xdf, 0xdb, 0xb0, 0x17, 0x5c, 0xd6, 0x1a, 0x70, 0x00, 0xb7, 0xe2, 0x18, 0xec, 0xdc, 0xc2, 0x02, 0x93, 0xb3, 0xc8, 0x3f, 0x4f, 0x1b, 0x96, 0xe6, 0x33, 0x8c, 0xfb, 0xcc, 0xa5, 0x4e, 0xe8, 0xe7, 0x11, + /* (2^ 23)P */ 0x05, 0x7a, 0x74, 0x52, 0xf8, 0xdf, 0x0d, 0x7c, 0x6a, 0x1a, 0x4e, 0x9a, 0x02, 0x1d, 0xae, 0x77, 0xf8, 0x8e, 0xf9, 0xa2, 0x38, 0x54, 0x50, 0xb2, 0x2c, 0x08, 0x9d, 0x9b, 0x9f, 0xfb, 0x2b, 0x06, 0xde, 0x9d, 0xc2, 0x03, 0x0b, 0x22, 0x2b, 0x10, 0x5b, 0x3a, 0x73, 0x29, 0x8e, 0x3e, 0x37, 0x08, 0x2c, 0x3b, 0xf8, 0x80, 0xc1, 0x66, 0x1e, 0x98, + /* (2^ 24)P */ 0xd8, 0xd6, 0x3e, 0xcd, 0x63, 0x8c, 0x2b, 0x41, 0x81, 0xc0, 0x0c, 0x06, 0x87, 0xd6, 0xe7, 0x92, 0xfe, 0xf1, 0x0c, 0x4a, 0x84, 0x5b, 0xaf, 0x40, 0x53, 0x6f, 0x60, 0xd6, 0x6b, 0x76, 0x4b, 0xc2, 0xad, 0xc9, 0xb6, 0xb6, 0x6a, 0xa2, 0xb3, 0xf5, 0xf5, 0xc2, 0x55, 0x83, 0xb2, 0xd3, 0xe9, 0x41, 0x6c, 0x63, 0x51, 0xb8, 0x81, 0x74, 0xc8, 0x2c, + /* (2^ 25)P */ 0xb2, 0xaf, 0x1c, 0xee, 0x07, 0xb0, 0x58, 0xa8, 0x2c, 0x6a, 0xc9, 0x2d, 0x62, 0x28, 0x75, 0x0c, 0x40, 0xb6, 0x11, 0x33, 0x96, 0x80, 0x28, 0x6d, 0xd5, 0x9e, 0x87, 0x90, 0x01, 0x66, 0x1d, 0x1c, 0xf8, 0xb4, 0x92, 0xac, 0x38, 0x18, 0x05, 0xc2, 0x4c, 0x4b, 0x54, 0x7d, 0x80, 0x46, 0x87, 0x2d, 0x99, 0x8e, 0x70, 0x80, 0x69, 0x71, 0x8b, 0xed, + /* (2^ 26)P */ 0x37, 0xa7, 0x6b, 0x71, 0x36, 0x75, 0x8e, 0xff, 0x0f, 0x42, 0xda, 0x5a, 0x46, 0xa6, 0x97, 0x79, 0x7e, 0x30, 0xb3, 0x8f, 0xc7, 0x3a, 0xa0, 0xcb, 0x1d, 0x9c, 0x78, 0x77, 0x36, 0xc2, 0xe7, 0xf4, 0x2f, 0x29, 0x07, 0xb1, 0x07, 0xfd, 0xed, 0x1b, 0x39, 0x77, 0x06, 0x38, 0x77, 0x0f, 0x50, 0x31, 0x12, 0xbf, 0x92, 0xbf, 0x72, 0x79, 0x54, 0xa9, + /* (2^ 27)P */ 0xbd, 0x4d, 0x46, 0x6b, 0x1a, 0x80, 0x46, 0x2d, 0xed, 0xfd, 0x64, 0x6d, 0x94, 0xbc, 0x4a, 0x6e, 0x0c, 0x12, 0xf6, 0x12, 0xab, 0x54, 0x88, 0xd3, 0x85, 0xac, 0x51, 0xae, 0x6f, 0xca, 0xc4, 0xb7, 0xec, 0x22, 0x54, 0x6d, 0x80, 0xb2, 0x1c, 0x63, 0x33, 0x76, 0x6b, 0x8e, 0x6d, 0x59, 0xcd, 0x73, 0x92, 0x5f, 0xff, 0xad, 0x10, 0x35, 0x70, 0x5f, + /* (2^ 28)P */ 0xb3, 0x84, 0xde, 0xc8, 0x04, 0x43, 0x63, 0xfa, 0x29, 0xd9, 
0xf0, 0x69, 0x65, 0x5a, 0x0c, 0xe8, 0x2e, 0x0b, 0xfe, 0xb0, 0x7a, 0x42, 0xb3, 0xc3, 0xfc, 0xe6, 0xb8, 0x92, 0x29, 0xae, 0xed, 0xec, 0xd5, 0xe8, 0x4a, 0xa1, 0xbd, 0x3b, 0xd3, 0xc0, 0x07, 0xab, 0x65, 0x65, 0x35, 0x9a, 0xa6, 0x5e, 0x78, 0x18, 0x76, 0x1c, 0x15, 0x49, 0xe6, 0x75, + /* (2^ 29)P */ 0x45, 0xb3, 0x92, 0xa9, 0xc3, 0xb8, 0x11, 0x68, 0x64, 0x3a, 0x83, 0x5d, 0xa8, 0x94, 0x6a, 0x9d, 0xaa, 0x27, 0x9f, 0x98, 0x5d, 0xc0, 0x29, 0xf0, 0xc0, 0x4b, 0x14, 0x3c, 0x05, 0xe7, 0xf8, 0xbd, 0x38, 0x22, 0x96, 0x75, 0x65, 0x5e, 0x0d, 0x3f, 0xbb, 0x6f, 0xe8, 0x3f, 0x96, 0x76, 0x9f, 0xba, 0xd9, 0x44, 0x92, 0x96, 0x22, 0xe7, 0x52, 0xe7, + /* (2^ 30)P */ 0xf4, 0xa3, 0x95, 0x90, 0x47, 0xdf, 0x7d, 0xdc, 0xf4, 0x13, 0x87, 0x67, 0x7d, 0x4f, 0x9d, 0xa0, 0x00, 0x46, 0x72, 0x08, 0xc3, 0xa2, 0x7a, 0x3e, 0xe7, 0x6d, 0x52, 0x7c, 0x11, 0x36, 0x50, 0x83, 0x89, 0x64, 0xcb, 0x1f, 0x08, 0x83, 0x46, 0xcb, 0xac, 0xa6, 0xd8, 0x9c, 0x1b, 0xe8, 0x05, 0x47, 0xc7, 0x26, 0x06, 0x83, 0x39, 0xe9, 0xb1, 0x1c, + /* (2^ 31)P */ 0x11, 0xe8, 0xc8, 0x42, 0xbf, 0x30, 0x9c, 0xa3, 0xf1, 0x85, 0x96, 0x95, 0x4f, 0x4f, 0x52, 0xa2, 0xf5, 0x8b, 0x68, 0x24, 0x16, 0xac, 0x9b, 0xa9, 0x27, 0x28, 0x0e, 0x84, 0x03, 0x46, 0x22, 0x5f, 0xf7, 0x0d, 0xa6, 0x85, 0x88, 0xc1, 0x45, 0x4b, 0x85, 0x1a, 0x10, 0x7f, 0xc9, 0x94, 0x20, 0xb0, 0x04, 0x28, 0x12, 0x30, 0xb9, 0xe6, 0x40, 0x6b, + /* (2^ 32)P */ 0xac, 0x1b, 0x57, 0xb6, 0x42, 0xdb, 0x81, 0x8d, 0x76, 0xfd, 0x9b, 0x1c, 0x29, 0x30, 0xd5, 0x3a, 0xcc, 0x53, 0xd9, 0x26, 0x7a, 0x0f, 0x9c, 0x2e, 0x79, 0xf5, 0x62, 0xeb, 0x61, 0x9d, 0x9b, 0x80, 0x39, 0xcd, 0x60, 0x2e, 0x1f, 0x08, 0x22, 0xbc, 0x19, 0xb3, 0x2a, 0x43, 0x44, 0xf2, 0x4e, 0x66, 0xf4, 0x36, 0xa6, 0xa7, 0xbc, 0xa4, 0x15, 0x7e, + /* (2^ 33)P */ 0xc1, 0x90, 0x8a, 0xde, 0xff, 0x78, 0xc3, 0x73, 0x16, 0xee, 0x76, 0xa0, 0x84, 0x60, 0x8d, 0xe6, 0x82, 0x0f, 0xde, 0x4e, 0xc5, 0x99, 0x34, 0x06, 0x90, 0x44, 0x55, 0xf8, 0x91, 0xd8, 0xe1, 0xe4, 0x2c, 0x8a, 0xde, 0x94, 0x1e, 0x78, 0x25, 0x3d, 0xfd, 0xd8, 0x59, 0x7d, 0xaf, 0x6e, 0xbe, 0x96, 0xbe, 0x3c, 0x16, 0x23, 0x0f, 0x4c, 0xa4, 0x28, + /* (2^ 34)P */ 0xba, 0x11, 0x35, 0x57, 0x03, 0xb6, 0xf4, 0x24, 0x89, 0xb8, 0x5a, 0x0d, 0x50, 0x9c, 0xaa, 0x51, 0x7f, 0xa4, 0x0e, 0xfc, 0x71, 0xb3, 0x3b, 0xf1, 0x96, 0x50, 0x23, 0x15, 0xf5, 0xf5, 0xd4, 0x23, 0xdc, 0x8b, 0x26, 0x9e, 0xae, 0xb7, 0x50, 0xcd, 0xc4, 0x25, 0xf6, 0x75, 0x40, 0x9c, 0x37, 0x79, 0x33, 0x60, 0xd4, 0x4b, 0x13, 0x32, 0xee, 0xe2, + /* (2^ 35)P */ 0x43, 0xb8, 0x56, 0x59, 0xf0, 0x68, 0x23, 0xb3, 0xea, 0x70, 0x58, 0x4c, 0x1e, 0x5a, 0x16, 0x54, 0x03, 0xb2, 0xf4, 0x73, 0xb6, 0xd9, 0x5c, 0x9c, 0x6f, 0xcf, 0x82, 0x2e, 0x54, 0x15, 0x46, 0x2c, 0xa3, 0xda, 0x4e, 0x87, 0xf5, 0x2b, 0xba, 0x91, 0xa3, 0xa0, 0x89, 0xba, 0x48, 0x2b, 0xfa, 0x64, 0x02, 0x7f, 0x78, 0x03, 0xd1, 0xe8, 0x3b, 0xe9, + /* (2^ 36)P */ 0x15, 0xa4, 0x71, 0xd4, 0x0c, 0x24, 0xe9, 0x07, 0xa1, 0x43, 0xf4, 0x7f, 0xbb, 0xa2, 0xa6, 0x6b, 0xfa, 0xb7, 0xea, 0x58, 0xd1, 0x96, 0xb0, 0x24, 0x5c, 0xc7, 0x37, 0x4e, 0x60, 0x0f, 0x40, 0xf2, 0x2f, 0x44, 0x70, 0xea, 0x80, 0x63, 0xfe, 0xfc, 0x46, 0x59, 0x12, 0x27, 0xb5, 0x27, 0xfd, 0xb7, 0x73, 0x0b, 0xca, 0x8b, 0xc2, 0xd3, 0x71, 0x08, + /* (2^ 37)P */ 0x26, 0x0e, 0xd7, 0x52, 0x6f, 0xf1, 0xf2, 0x9d, 0xb8, 0x3d, 0xbd, 0xd4, 0x75, 0x97, 0xd8, 0xbf, 0xa8, 0x86, 0x96, 0xa5, 0x80, 0xa0, 0x45, 0x75, 0xf6, 0x77, 0x71, 0xdb, 0x77, 0x96, 0x55, 0x99, 0x31, 0xd0, 0x4f, 0x34, 0xf4, 0x35, 0x39, 0x41, 0xd3, 0x7d, 0xf7, 0xe2, 0x74, 0xde, 0xbe, 0x5b, 0x1f, 0x39, 0x10, 0x21, 0xa3, 0x4d, 0x3b, 0xc8, + /* (2^ 38)P */ 0x04, 0x00, 0x2a, 0x45, 0xb2, 0xaf, 0x9b, 0x18, 0x6a, 0xeb, 0x96, 0x28, 0xa4, 0x77, 
0xd0, 0x13, 0xcf, 0x17, 0x65, 0xe8, 0xc5, 0x81, 0x28, 0xad, 0x39, 0x7a, 0x0b, 0xaa, 0x55, 0x2b, 0xf3, 0xfc, 0x86, 0x40, 0xad, 0x0d, 0x1e, 0x28, 0xa2, 0x2d, 0xc5, 0xd6, 0x04, 0x15, 0xa2, 0x30, 0x3d, 0x12, 0x8e, 0xd6, 0xb5, 0xf7, 0x69, 0xbb, 0x84, 0x20, + /* (2^ 39)P */ 0xd7, 0x7a, 0x77, 0x2c, 0xfb, 0x81, 0x80, 0xe9, 0x1e, 0xc6, 0x36, 0x31, 0x79, 0xc3, 0x7c, 0xa9, 0x57, 0x6b, 0xb5, 0x70, 0xfb, 0xe4, 0xa1, 0xff, 0xfd, 0x21, 0xa5, 0x7c, 0xfa, 0x44, 0xba, 0x0d, 0x96, 0x3d, 0xc4, 0x5c, 0x39, 0x52, 0x87, 0xd7, 0x22, 0x0f, 0x52, 0x88, 0x91, 0x87, 0x96, 0xac, 0xfa, 0x3b, 0xdf, 0xdc, 0x83, 0x8c, 0x99, 0x29, + /* (2^ 40)P */ 0x98, 0x6b, 0x3a, 0x8d, 0x83, 0x17, 0xe1, 0x62, 0xd8, 0x80, 0x4c, 0x97, 0xce, 0x6b, 0xaa, 0x10, 0xa7, 0xc4, 0xe9, 0xeb, 0xa5, 0xfb, 0xc9, 0xdd, 0x2d, 0xeb, 0xfc, 0x9a, 0x71, 0xcd, 0x68, 0x6e, 0xc0, 0x35, 0x64, 0x62, 0x1b, 0x95, 0x12, 0xe8, 0x53, 0xec, 0xf0, 0xf4, 0x86, 0x86, 0x78, 0x18, 0xc4, 0xc6, 0xbc, 0x5a, 0x59, 0x8f, 0x7c, 0x7e, + /* (2^ 41)P */ 0x7f, 0xd7, 0x1e, 0xc5, 0x83, 0xdc, 0x1f, 0xbe, 0x0b, 0xcf, 0x2e, 0x01, 0x01, 0xed, 0xac, 0x17, 0x3b, 0xed, 0xa4, 0x30, 0x96, 0x0e, 0x14, 0x7e, 0x19, 0x2b, 0xa5, 0x67, 0x1e, 0xb3, 0x34, 0x03, 0xa8, 0xbb, 0x0a, 0x7d, 0x08, 0x2d, 0xd5, 0x53, 0x19, 0x6f, 0x13, 0xd5, 0xc0, 0x90, 0x8a, 0xcc, 0xc9, 0x5c, 0xab, 0x24, 0xd7, 0x03, 0xf6, 0x57, + /* (2^ 42)P */ 0x49, 0xcb, 0xb4, 0x96, 0x5f, 0xa6, 0xf8, 0x71, 0x6f, 0x59, 0xad, 0x05, 0x24, 0x2d, 0xaf, 0x67, 0xa8, 0xbe, 0x95, 0xdf, 0x0d, 0x28, 0x5a, 0x7f, 0x6e, 0x87, 0x8c, 0x6e, 0x67, 0x0c, 0xf4, 0xe0, 0x1c, 0x30, 0xc2, 0x66, 0xae, 0x20, 0xa1, 0x34, 0xec, 0x9c, 0xbc, 0xae, 0x3d, 0xa1, 0x28, 0x28, 0x95, 0x1d, 0xc9, 0x3a, 0xa8, 0xfd, 0xfc, 0xa1, + /* (2^ 43)P */ 0xe2, 0x2b, 0x9d, 0xed, 0x02, 0x99, 0x67, 0xbb, 0x2e, 0x16, 0x62, 0x05, 0x70, 0xc7, 0x27, 0xb9, 0x1c, 0x3f, 0xf2, 0x11, 0x01, 0xd8, 0x51, 0xa4, 0x18, 0x92, 0xa9, 0x5d, 0xfb, 0xa9, 0xe4, 0x42, 0xba, 0x38, 0x34, 0x1a, 0x4a, 0xc5, 0x6a, 0x37, 0xde, 0xa7, 0x0c, 0xb4, 0x7e, 0x7f, 0xde, 0xa6, 0xee, 0xcd, 0x55, 0x57, 0x05, 0x06, 0xfd, 0x5d, + /* (2^ 44)P */ 0x2f, 0x32, 0xcf, 0x2e, 0x2c, 0x7b, 0xbe, 0x9a, 0x0c, 0x57, 0x35, 0xf8, 0x87, 0xda, 0x9c, 0xec, 0x48, 0xf2, 0xbb, 0xe2, 0xda, 0x10, 0x58, 0x20, 0xc6, 0xd3, 0x87, 0xe9, 0xc7, 0x26, 0xd1, 0x9a, 0x46, 0x87, 0x90, 0xda, 0xdc, 0xde, 0xc3, 0xb3, 0xf2, 0xe8, 0x6f, 0x4a, 0xe6, 0xe8, 0x9d, 0x98, 0x36, 0x20, 0x03, 0x47, 0x15, 0x3f, 0x64, 0x59, + /* (2^ 45)P */ 0xd4, 0x71, 0x49, 0x0a, 0x67, 0x97, 0xaa, 0x3f, 0xf4, 0x1b, 0x3a, 0x6e, 0x5e, 0x17, 0xcc, 0x0a, 0x8f, 0x81, 0x6a, 0x41, 0x38, 0x77, 0x40, 0x8a, 0x11, 0x42, 0x62, 0xd2, 0x50, 0x32, 0x79, 0x78, 0x28, 0xc2, 0x2e, 0x10, 0x01, 0x94, 0x30, 0x4f, 0x7f, 0x18, 0x17, 0x56, 0x85, 0x4e, 0xad, 0xf7, 0xcb, 0x87, 0x3c, 0x3f, 0x50, 0x2c, 0xc0, 0xba, + /* (2^ 46)P */ 0xbc, 0x30, 0x8e, 0x65, 0x8e, 0x57, 0x5b, 0x38, 0x7a, 0xd4, 0x95, 0x52, 0x7a, 0x32, 0x59, 0x69, 0xcd, 0x9d, 0x47, 0x34, 0x5b, 0x55, 0xa5, 0x24, 0x60, 0xdd, 0xc0, 0xc1, 0x62, 0x73, 0x44, 0xae, 0x4c, 0x9c, 0x65, 0x55, 0x1b, 0x9d, 0x8a, 0x29, 0xb0, 0x1a, 0x52, 0xa8, 0xf1, 0xe6, 0x9a, 0xb3, 0xf6, 0xa3, 0xc9, 0x0a, 0x70, 0x7d, 0x0f, 0xee, + /* (2^ 47)P */ 0x77, 0xd3, 0xe5, 0x8e, 0xfa, 0x00, 0xeb, 0x1b, 0x7f, 0xdc, 0x68, 0x3f, 0x92, 0xbd, 0xb7, 0x0b, 0xb7, 0xb5, 0x24, 0xdf, 0xc5, 0x67, 0x53, 0xd4, 0x36, 0x79, 0xc4, 0x7b, 0x57, 0xbc, 0x99, 0x97, 0x60, 0xef, 0xe4, 0x01, 0xa1, 0xa7, 0xaa, 0x12, 0x36, 0x29, 0xb1, 0x03, 0xc2, 0x83, 0x1c, 0x2b, 0x83, 0xef, 0x2e, 0x2c, 0x23, 0x92, 0xfd, 0xd1, + /* (2^ 48)P */ 0x94, 0xef, 0x03, 0x59, 0xfa, 0x8a, 0x18, 0x76, 0xee, 0x58, 0x08, 0x4d, 0x44, 0xce, 0xf1, 0x52, 0x33, 0x49, 
0xf6, 0x69, 0x71, 0xe3, 0xa9, 0xbc, 0x86, 0xe3, 0x43, 0xde, 0x33, 0x7b, 0x90, 0x8b, 0x3e, 0x7d, 0xd5, 0x4a, 0xf0, 0x23, 0x99, 0xa6, 0xea, 0x5f, 0x08, 0xe5, 0xb9, 0x49, 0x8b, 0x0d, 0x6a, 0x21, 0xab, 0x07, 0x62, 0xcd, 0xc4, 0xbe, + /* (2^ 49)P */ 0x61, 0xbf, 0x70, 0x14, 0xfa, 0x4e, 0x9e, 0x7c, 0x0c, 0xf8, 0xb2, 0x48, 0x71, 0x62, 0x83, 0xd6, 0xd1, 0xdc, 0x9c, 0x29, 0x66, 0xb1, 0x34, 0x9c, 0x8d, 0xe6, 0x88, 0xaf, 0xbe, 0xdc, 0x4d, 0xeb, 0xb0, 0xe7, 0x28, 0xae, 0xb2, 0x05, 0x56, 0xc6, 0x0e, 0x10, 0x26, 0xab, 0x2c, 0x59, 0x72, 0x03, 0x66, 0xfe, 0x8f, 0x2c, 0x51, 0x2d, 0xdc, 0xae, + /* (2^ 50)P */ 0xdc, 0x63, 0xf1, 0x8b, 0x5c, 0x65, 0x0b, 0xf1, 0xa6, 0x22, 0xe2, 0xd9, 0xdb, 0x49, 0xb1, 0x3c, 0x47, 0xc2, 0xfe, 0xac, 0x86, 0x07, 0x52, 0xec, 0xb0, 0x08, 0x69, 0xfb, 0xd1, 0x06, 0xdc, 0x48, 0x5c, 0x3d, 0xb2, 0x4d, 0xb8, 0x1a, 0x4e, 0xda, 0xb9, 0xc1, 0x2b, 0xab, 0x4b, 0x62, 0x81, 0x21, 0x9a, 0xfc, 0x3d, 0x39, 0x83, 0x11, 0x36, 0xeb, + /* (2^ 51)P */ 0x94, 0xf3, 0x17, 0xef, 0xf9, 0x60, 0x54, 0xc3, 0xd7, 0x27, 0x35, 0xc5, 0x98, 0x5e, 0xf6, 0x63, 0x6c, 0xa0, 0x4a, 0xd3, 0xa3, 0x98, 0xd9, 0x42, 0xe3, 0xf1, 0xf8, 0x81, 0x96, 0xa9, 0xea, 0x6d, 0x4b, 0x8e, 0x33, 0xca, 0x94, 0x0d, 0xa0, 0xf7, 0xbb, 0x64, 0xa3, 0x36, 0x6f, 0xdc, 0x5a, 0x94, 0x42, 0xca, 0x06, 0xb2, 0x2b, 0x9a, 0x9f, 0x71, + /* (2^ 52)P */ 0xec, 0xdb, 0xa6, 0x1f, 0xdf, 0x15, 0x36, 0xa3, 0xda, 0x8a, 0x7a, 0xb6, 0xa7, 0xe3, 0xaf, 0x52, 0xe0, 0x8d, 0xe8, 0xf2, 0x44, 0x20, 0xeb, 0xa1, 0x20, 0xc4, 0x65, 0x3c, 0x7c, 0x6c, 0x49, 0xed, 0x2f, 0x66, 0x23, 0x68, 0x61, 0x91, 0x40, 0x9f, 0x50, 0x19, 0xd1, 0x84, 0xa7, 0xe2, 0xed, 0x34, 0x37, 0xe3, 0xe4, 0x11, 0x7f, 0x87, 0x55, 0x0f, + /* (2^ 53)P */ 0xb3, 0xa1, 0x0f, 0xb0, 0x48, 0xc0, 0x4d, 0x96, 0xa7, 0xcf, 0x5a, 0x81, 0xb8, 0x4a, 0x46, 0xef, 0x0a, 0xd3, 0x40, 0x7e, 0x02, 0xe3, 0x63, 0xaa, 0x50, 0xd1, 0x2a, 0x37, 0x22, 0x4a, 0x7f, 0x4f, 0xb6, 0xf9, 0x01, 0x82, 0x78, 0x3d, 0x93, 0x14, 0x11, 0x8a, 0x90, 0x60, 0xcd, 0x45, 0x4e, 0x7b, 0x42, 0xb9, 0x3e, 0x6e, 0x68, 0x1f, 0x36, 0x41, + /* (2^ 54)P */ 0x13, 0x73, 0x0e, 0x4f, 0x79, 0x93, 0x9e, 0x29, 0x70, 0x7b, 0x4a, 0x59, 0x1a, 0x9a, 0xf4, 0x55, 0x08, 0xf0, 0xdb, 0x17, 0x58, 0xec, 0x64, 0xad, 0x7f, 0x29, 0xeb, 0x3f, 0x85, 0x4e, 0x60, 0x28, 0x98, 0x1f, 0x73, 0x4e, 0xe6, 0xa8, 0xab, 0xd5, 0xd6, 0xfc, 0xa1, 0x36, 0x6d, 0x15, 0xc6, 0x13, 0x83, 0xa0, 0xc2, 0x6e, 0xd9, 0xdb, 0xc9, 0xcc, + /* (2^ 55)P */ 0xff, 0xd8, 0x52, 0xa3, 0xdc, 0x99, 0xcf, 0x3e, 0x19, 0xb3, 0x68, 0xd0, 0xb5, 0x0d, 0xb8, 0xee, 0x3f, 0xef, 0x6e, 0xc0, 0x38, 0x28, 0x44, 0x92, 0x78, 0x91, 0x1a, 0x08, 0x78, 0x6c, 0x65, 0x24, 0xf3, 0xa2, 0x3d, 0xf2, 0xe5, 0x79, 0x62, 0x69, 0x29, 0xf4, 0x22, 0xc5, 0xdb, 0x6a, 0xae, 0xf4, 0x44, 0xa3, 0x6f, 0xc7, 0x86, 0xab, 0xef, 0xef, + /* (2^ 56)P */ 0xbf, 0x54, 0x9a, 0x09, 0x5d, 0x17, 0xd0, 0xde, 0xfb, 0xf5, 0xca, 0xff, 0x13, 0x20, 0x88, 0x82, 0x3a, 0xe2, 0xd0, 0x3b, 0xfb, 0x05, 0x76, 0xd1, 0xc0, 0x02, 0x71, 0x3b, 0x94, 0xe8, 0xc9, 0x84, 0xcf, 0xa4, 0xe9, 0x28, 0x7b, 0xf5, 0x09, 0xc3, 0x2b, 0x22, 0x40, 0xf1, 0x68, 0x24, 0x24, 0x7d, 0x9f, 0x6e, 0xcd, 0xfe, 0xb0, 0x19, 0x61, 0xf5, + /* (2^ 57)P */ 0xe8, 0x63, 0x51, 0xb3, 0x95, 0x6b, 0x7b, 0x74, 0x92, 0x52, 0x45, 0xa4, 0xed, 0xea, 0x0e, 0x0d, 0x2b, 0x01, 0x1e, 0x2c, 0xbc, 0x91, 0x06, 0x69, 0xdb, 0x1f, 0xb5, 0x77, 0x1d, 0x56, 0xf5, 0xb4, 0x02, 0x80, 0x49, 0x56, 0x12, 0xce, 0x86, 0x05, 0xc9, 0xd9, 0xae, 0xf3, 0x6d, 0xe6, 0x3f, 0x40, 0x52, 0xe9, 0x49, 0x2b, 0x31, 0x06, 0x86, 0x14, + /* (2^ 58)P */ 0xf5, 0x09, 0x3b, 0xd2, 0xff, 0xdf, 0x11, 0xa5, 0x1c, 0x99, 0xe8, 0x1b, 0xa4, 0x2c, 0x7d, 0x8e, 0xc8, 0xf7, 0x03, 0x46, 0xfa, 0xb6, 
0xde, 0x73, 0x91, 0x7e, 0x5a, 0x7a, 0xd7, 0x9a, 0x5b, 0x80, 0x24, 0x62, 0x5e, 0x92, 0xf1, 0xa3, 0x45, 0xa3, 0x43, 0x92, 0x8a, 0x2a, 0x5b, 0x0c, 0xb4, 0xc8, 0xad, 0x1c, 0xb6, 0x6c, 0x5e, 0x81, 0x18, 0x91, + /* (2^ 59)P */ 0x96, 0xb3, 0xca, 0x2b, 0xe3, 0x7a, 0x59, 0x72, 0x17, 0x74, 0x29, 0x21, 0xe7, 0x78, 0x07, 0xad, 0xda, 0xb6, 0xcd, 0xf9, 0x27, 0x4d, 0xc8, 0xf2, 0x98, 0x22, 0xca, 0xf2, 0x33, 0x74, 0x7a, 0xdd, 0x1e, 0x71, 0xec, 0xe3, 0x3f, 0xe2, 0xa2, 0xd2, 0x38, 0x75, 0xb0, 0xd0, 0x0a, 0xcf, 0x7d, 0x36, 0xdc, 0x49, 0x38, 0x25, 0x34, 0x4f, 0x20, 0x9a, + /* (2^ 60)P */ 0x2b, 0x6e, 0x04, 0x0d, 0x4f, 0x3d, 0x3b, 0x24, 0xf6, 0x4e, 0x5e, 0x0a, 0xbd, 0x48, 0x96, 0xba, 0x81, 0x8f, 0x39, 0x82, 0x13, 0xe6, 0x72, 0xf3, 0x0f, 0xb6, 0x94, 0xf4, 0xc5, 0x90, 0x74, 0x91, 0xa8, 0xf2, 0xc9, 0xca, 0x9a, 0x4d, 0x98, 0xf2, 0xdf, 0x52, 0x4e, 0x97, 0x2f, 0xeb, 0x84, 0xd3, 0xaf, 0xc2, 0xcc, 0xfb, 0x4c, 0x26, 0x4b, 0xe4, + /* (2^ 61)P */ 0x12, 0x9e, 0xfb, 0x9d, 0x78, 0x79, 0x99, 0xdd, 0xb3, 0x0b, 0x2e, 0x56, 0x41, 0x8e, 0x3f, 0x39, 0xb8, 0x97, 0x89, 0x53, 0x9b, 0x8a, 0x3c, 0x40, 0x9d, 0xa4, 0x6c, 0x2e, 0x31, 0x71, 0xc6, 0x0a, 0x41, 0xd4, 0x95, 0x06, 0x5e, 0xc1, 0xab, 0xc2, 0x14, 0xc4, 0xc7, 0x15, 0x08, 0x3a, 0xad, 0x7a, 0xb4, 0x62, 0xa3, 0x0c, 0x90, 0xf4, 0x47, 0x08, + /* (2^ 62)P */ 0x7f, 0xec, 0x09, 0x82, 0xf5, 0x94, 0x09, 0x93, 0x32, 0xd3, 0xdc, 0x56, 0x80, 0x7b, 0x5b, 0x22, 0x80, 0x6a, 0x96, 0x72, 0xb1, 0xc2, 0xd9, 0xa1, 0x8b, 0x66, 0x42, 0x16, 0xe2, 0x07, 0xb3, 0x2d, 0xf1, 0x75, 0x35, 0x72, 0xc7, 0x98, 0xbe, 0x63, 0x3b, 0x20, 0x75, 0x05, 0xc1, 0x3e, 0x31, 0x5a, 0xf7, 0xaa, 0xae, 0x4b, 0xdb, 0x1d, 0xd0, 0x74, + /* (2^ 63)P */ 0x36, 0x5c, 0x74, 0xe6, 0x5d, 0x59, 0x3f, 0x15, 0x4b, 0x4d, 0x4e, 0x67, 0x41, 0xfe, 0x98, 0x1f, 0x49, 0x76, 0x91, 0x0f, 0x9b, 0xf4, 0xaf, 0x86, 0xaf, 0x66, 0x19, 0xed, 0x46, 0xf1, 0x05, 0x9a, 0xcc, 0xd1, 0x14, 0x1f, 0x82, 0x12, 0x8e, 0xe6, 0xf4, 0xc3, 0x42, 0x5c, 0x4e, 0x33, 0x93, 0xbe, 0x30, 0xe7, 0x64, 0xa9, 0x35, 0x00, 0x4d, 0xf9, + /* (2^ 64)P */ 0x1f, 0xc1, 0x1e, 0xb7, 0xe3, 0x7c, 0xfa, 0xa3, 0x6b, 0x76, 0xaf, 0x9c, 0x05, 0x85, 0x4a, 0xa9, 0xfb, 0xe3, 0x7e, 0xf2, 0x49, 0x56, 0xdc, 0x2f, 0x57, 0x10, 0xba, 0x37, 0xb2, 0x62, 0xf5, 0x6b, 0xe5, 0x8f, 0x0a, 0x87, 0xd1, 0x6a, 0xcb, 0x9d, 0x07, 0xd0, 0xf6, 0x38, 0x99, 0x2c, 0x61, 0x4a, 0x4e, 0xd8, 0xd2, 0x88, 0x29, 0x99, 0x11, 0x95, + /* (2^ 65)P */ 0x6f, 0xdc, 0xd5, 0xd6, 0xd6, 0xa7, 0x4c, 0x46, 0x93, 0x65, 0x62, 0x23, 0x95, 0x32, 0x9c, 0xde, 0x40, 0x41, 0x68, 0x2c, 0x18, 0x4e, 0x5a, 0x8c, 0xc0, 0xc5, 0xc5, 0xea, 0x5c, 0x45, 0x0f, 0x60, 0x78, 0x39, 0xb6, 0x36, 0x23, 0x12, 0xbc, 0x21, 0x9a, 0xf8, 0x91, 0xac, 0xc4, 0x70, 0xdf, 0x85, 0x8e, 0x3c, 0xec, 0x22, 0x04, 0x98, 0xa8, 0xaa, + /* (2^ 66)P */ 0xcc, 0x52, 0x10, 0x5b, 0x4b, 0x6c, 0xc5, 0xfa, 0x3e, 0xd4, 0xf8, 0x1c, 0x04, 0x14, 0x48, 0x33, 0xd9, 0xfc, 0x5f, 0xb0, 0xa5, 0x48, 0x8c, 0x45, 0x8a, 0xee, 0x3e, 0xa7, 0xc1, 0x2e, 0x34, 0xca, 0xf6, 0xc9, 0xeb, 0x10, 0xbb, 0xe1, 0x59, 0x84, 0x25, 0xe8, 0x81, 0x70, 0xc0, 0x09, 0x42, 0xa7, 0x3b, 0x0d, 0x33, 0x00, 0xb5, 0x77, 0xbe, 0x25, + /* (2^ 67)P */ 0xcd, 0x1f, 0xbc, 0x7d, 0xef, 0xe5, 0xca, 0x91, 0xaf, 0xa9, 0x59, 0x6a, 0x09, 0xca, 0xd6, 0x1b, 0x3d, 0x55, 0xde, 0xa2, 0x6a, 0x80, 0xd6, 0x95, 0x47, 0xe4, 0x5f, 0x68, 0x54, 0x08, 0xdf, 0x29, 0xba, 0x2a, 0x02, 0x84, 0xe8, 0xe9, 0x00, 0x77, 0x99, 0x36, 0x03, 0xf6, 0x4a, 0x3e, 0x21, 0x81, 0x7d, 0xb8, 0xa4, 0x8a, 0xa2, 0x05, 0xef, 0xbc, + /* (2^ 68)P */ 0x7c, 0x59, 0x5f, 0x66, 0xd9, 0xb7, 0x83, 0x43, 0x8a, 0xa1, 0x8d, 0x51, 0x70, 0xba, 0xf2, 0x9b, 0x95, 0xc0, 0x4b, 0x4c, 0xa0, 0x14, 0xd3, 0xa4, 0x5d, 0x4a, 
0x37, 0x36, 0x97, 0x31, 0x1e, 0x12, 0xe7, 0xbb, 0x08, 0x67, 0xa5, 0x23, 0xd7, 0xfb, 0x97, 0xd8, 0x6a, 0x03, 0xb1, 0xf8, 0x7f, 0xda, 0x58, 0xd9, 0x3f, 0x73, 0x4a, 0x53, 0xe1, 0x7b, + /* (2^ 69)P */ 0x55, 0x83, 0x98, 0x78, 0x6c, 0x56, 0x5e, 0xed, 0xf7, 0x23, 0x3e, 0x4c, 0x7d, 0x09, 0x2d, 0x09, 0x9c, 0x58, 0x8b, 0x32, 0xca, 0xfe, 0xbf, 0x47, 0x03, 0xeb, 0x4d, 0xe7, 0xeb, 0x9c, 0x83, 0x05, 0x68, 0xaa, 0x80, 0x89, 0x44, 0xf9, 0xd4, 0xdc, 0xdb, 0xb1, 0xdb, 0x77, 0xac, 0xf9, 0x2a, 0xae, 0x35, 0xac, 0x74, 0xb5, 0x95, 0x62, 0x18, 0x85, + /* (2^ 70)P */ 0xab, 0x82, 0x7e, 0x10, 0xd7, 0xe6, 0x57, 0xd1, 0x66, 0x12, 0x31, 0x9c, 0x9c, 0xa6, 0x27, 0x59, 0x71, 0x2e, 0xeb, 0xa0, 0x68, 0xc5, 0x87, 0x51, 0xf4, 0xca, 0x3f, 0x98, 0x56, 0xb0, 0x89, 0xb1, 0xc7, 0x7b, 0x46, 0xb3, 0xae, 0x36, 0xf2, 0xee, 0x15, 0x1a, 0x60, 0xf4, 0x50, 0x76, 0x4f, 0xc4, 0x53, 0x0d, 0x36, 0x4d, 0x31, 0xb1, 0x20, 0x51, + /* (2^ 71)P */ 0xf7, 0x1d, 0x8c, 0x1b, 0x5e, 0xe5, 0x02, 0x6f, 0xc5, 0xa5, 0xe0, 0x5f, 0xc6, 0xb6, 0x63, 0x43, 0xaf, 0x3c, 0x19, 0x6c, 0xf4, 0xaf, 0xa4, 0x33, 0xb1, 0x0a, 0x37, 0x3d, 0xd9, 0x4d, 0xe2, 0x29, 0x24, 0x26, 0x94, 0x7c, 0x02, 0xe4, 0xe2, 0xf2, 0xbe, 0xbd, 0xac, 0x1b, 0x48, 0xb8, 0xdd, 0xe9, 0x0d, 0x9a, 0x50, 0x1a, 0x98, 0x71, 0x6e, 0xdc, + /* (2^ 72)P */ 0x9f, 0x40, 0xb1, 0xb3, 0x66, 0x28, 0x6c, 0xfe, 0xa6, 0x7d, 0xf8, 0x3e, 0xb8, 0xf3, 0xde, 0x52, 0x76, 0x52, 0xa3, 0x92, 0x98, 0x23, 0xab, 0x4f, 0x88, 0x97, 0xfc, 0x22, 0xe1, 0x6b, 0x67, 0xcd, 0x13, 0x95, 0xda, 0x65, 0xdd, 0x3b, 0x67, 0x3f, 0x5f, 0x4c, 0xf2, 0x8a, 0xad, 0x98, 0xa7, 0x94, 0x24, 0x45, 0x87, 0x11, 0x7c, 0x75, 0x79, 0x85, + /* (2^ 73)P */ 0x70, 0xbf, 0xf9, 0x3b, 0xa9, 0x44, 0x57, 0x72, 0x96, 0xc9, 0xa4, 0x98, 0x65, 0xbf, 0x87, 0xb3, 0x3a, 0x39, 0x12, 0xde, 0xe5, 0x39, 0x01, 0x4f, 0xf7, 0xc0, 0x71, 0x52, 0x36, 0x85, 0xb3, 0x18, 0xf8, 0x14, 0xc0, 0x6d, 0xae, 0x9e, 0x4f, 0xb0, 0x72, 0x87, 0xac, 0x5c, 0xd1, 0x6c, 0x41, 0x6c, 0x90, 0x9d, 0x22, 0x81, 0xe4, 0x2b, 0xea, 0xe5, + /* (2^ 74)P */ 0xfc, 0xea, 0x1a, 0x65, 0xd9, 0x49, 0x6a, 0x39, 0xb5, 0x96, 0x72, 0x7b, 0x32, 0xf1, 0xd0, 0xe9, 0x45, 0xd9, 0x31, 0x55, 0xc7, 0x34, 0xe9, 0x5a, 0xec, 0x73, 0x0b, 0x03, 0xc4, 0xb3, 0xe6, 0xc9, 0x5e, 0x0a, 0x17, 0xfe, 0x53, 0x66, 0x7f, 0x21, 0x18, 0x74, 0x54, 0x1b, 0xc9, 0x49, 0x16, 0xd2, 0x48, 0xaf, 0x5b, 0x47, 0x7b, 0xeb, 0xaa, 0xc9, + /* (2^ 75)P */ 0x47, 0x04, 0xf5, 0x5a, 0x87, 0x77, 0x9e, 0x21, 0x34, 0x4e, 0x83, 0x88, 0xaf, 0x02, 0x1d, 0xb0, 0x5a, 0x1d, 0x1d, 0x7d, 0x8d, 0x2c, 0xd3, 0x8d, 0x63, 0xa9, 0x45, 0xfb, 0x15, 0x6d, 0x86, 0x45, 0xcd, 0x38, 0x0e, 0xf7, 0x37, 0x79, 0xed, 0x6d, 0x5a, 0xbc, 0x32, 0xcc, 0x66, 0xf1, 0x3a, 0xb2, 0x87, 0x6f, 0x70, 0x71, 0xd9, 0xf2, 0xfa, 0x7b, + /* (2^ 76)P */ 0x68, 0x07, 0xdc, 0x61, 0x40, 0xe4, 0xec, 0x32, 0xc8, 0xbe, 0x66, 0x30, 0x54, 0x80, 0xfd, 0x13, 0x7a, 0xef, 0xae, 0xed, 0x2e, 0x00, 0x6d, 0x3f, 0xbd, 0xfc, 0x91, 0x24, 0x53, 0x7f, 0x63, 0x9d, 0x2e, 0xe3, 0x76, 0xe0, 0xf3, 0xe1, 0x8f, 0x7a, 0xc4, 0x77, 0x0c, 0x91, 0xc0, 0xc2, 0x18, 0x6b, 0x04, 0xad, 0xb6, 0x70, 0x9a, 0x64, 0xc5, 0x82, + /* (2^ 77)P */ 0x7f, 0xea, 0x13, 0xd8, 0x9e, 0xfc, 0x5b, 0x06, 0xb5, 0x4f, 0xda, 0x38, 0xe0, 0x9c, 0xd2, 0x3a, 0xc1, 0x1c, 0x62, 0x70, 0x7f, 0xc6, 0x24, 0x0a, 0x47, 0x04, 0x01, 0xc4, 0x55, 0x09, 0xd1, 0x7a, 0x07, 0xba, 0xa3, 0x80, 0x4f, 0xc1, 0x65, 0x36, 0x6d, 0xc0, 0x10, 0xcf, 0x94, 0xa9, 0xa2, 0x01, 0x44, 0xd1, 0xf9, 0x1c, 0x4c, 0xfb, 0xf8, 0x99, + /* (2^ 78)P */ 0x6c, 0xb9, 0x6b, 0xee, 0x43, 0x5b, 0xb9, 0xbb, 0xee, 0x2e, 0x52, 0xc1, 0xc6, 0xb9, 0x61, 0xd2, 0x93, 0xa5, 0xaf, 0x52, 0xf4, 0xa4, 0x1a, 0x51, 0x61, 0xa7, 0xcb, 0x9e, 0xbb, 0x56, 
0x65, 0xe2, 0xbf, 0x75, 0xb9, 0x9c, 0x50, 0x96, 0x60, 0x81, 0x74, 0x47, 0xc0, 0x04, 0x88, 0x71, 0x76, 0x39, 0x9a, 0xa7, 0xb1, 0x4e, 0x43, 0x15, 0xe0, 0xbb, + /* (2^ 79)P */ 0xbb, 0xce, 0xe2, 0xbb, 0xf9, 0x17, 0x0f, 0x82, 0x40, 0xad, 0x73, 0xe3, 0xeb, 0x3b, 0x06, 0x1a, 0xcf, 0x8e, 0x6e, 0x28, 0xb8, 0x26, 0xd9, 0x5b, 0xb7, 0xb3, 0xcf, 0xb4, 0x6a, 0x1c, 0xbf, 0x7f, 0xb8, 0xb5, 0x79, 0xcf, 0x45, 0x68, 0x7d, 0xc5, 0xeb, 0xf3, 0xbe, 0x39, 0x40, 0xfc, 0x07, 0x90, 0x7a, 0x62, 0xad, 0x86, 0x08, 0x71, 0x25, 0xe1, + /* (2^ 80)P */ 0x9b, 0x46, 0xac, 0xef, 0xc1, 0x4e, 0xa1, 0x97, 0x95, 0x76, 0xf9, 0x1b, 0xc2, 0xb2, 0x6a, 0x41, 0xea, 0x80, 0x3d, 0xe9, 0x08, 0x52, 0x5a, 0xe3, 0xf2, 0x08, 0xc5, 0xea, 0x39, 0x3f, 0x44, 0x71, 0x4d, 0xea, 0x0d, 0x05, 0x23, 0xe4, 0x2e, 0x3c, 0x89, 0xfe, 0x12, 0x8a, 0x95, 0x42, 0x0a, 0x68, 0xea, 0x5a, 0x28, 0x06, 0x9e, 0xe3, 0x5f, 0xe0, + /* (2^ 81)P */ 0x00, 0x61, 0x6c, 0x98, 0x9b, 0xe7, 0xb9, 0x06, 0x1c, 0xc5, 0x1b, 0xed, 0xbe, 0xc8, 0xb3, 0xea, 0x87, 0xf0, 0xc4, 0x24, 0x7d, 0xbb, 0x5d, 0xa4, 0x1d, 0x7a, 0x16, 0x00, 0x55, 0x94, 0x67, 0x78, 0xbd, 0x58, 0x02, 0x82, 0x90, 0x53, 0x76, 0xd4, 0x72, 0x99, 0x51, 0x6f, 0x7b, 0xcf, 0x80, 0x30, 0x31, 0x3b, 0x01, 0xc7, 0xc1, 0xef, 0xe6, 0x42, + /* (2^ 82)P */ 0xe2, 0x35, 0xaf, 0x4b, 0x79, 0xc6, 0x12, 0x24, 0x99, 0xc0, 0x68, 0xb0, 0x43, 0x3e, 0xe5, 0xef, 0xe2, 0x29, 0xea, 0xb8, 0xb3, 0xbc, 0x6a, 0x53, 0x2c, 0x69, 0x18, 0x5a, 0xf9, 0x15, 0xae, 0x66, 0x58, 0x18, 0xd3, 0x2d, 0x4b, 0x00, 0xfd, 0x84, 0xab, 0x4f, 0xae, 0x70, 0x6b, 0x9e, 0x9a, 0xdf, 0x83, 0xfd, 0x2e, 0x3c, 0xcf, 0xf8, 0x88, 0x5b, + /* (2^ 83)P */ 0xa4, 0x90, 0x31, 0x85, 0x13, 0xcd, 0xdf, 0x64, 0xc9, 0xa1, 0x0b, 0xe7, 0xb6, 0x73, 0x8a, 0x1b, 0x22, 0x78, 0x4c, 0xd4, 0xae, 0x48, 0x18, 0x00, 0x00, 0xa8, 0x9f, 0x06, 0xf9, 0xfb, 0x2d, 0xc3, 0xb1, 0x2a, 0xbc, 0x13, 0x99, 0x57, 0xaf, 0xf0, 0x8d, 0x61, 0x54, 0x29, 0xd5, 0xf2, 0x72, 0x00, 0x96, 0xd1, 0x85, 0x12, 0x8a, 0xf0, 0x23, 0xfb, + /* (2^ 84)P */ 0x69, 0xc7, 0xdb, 0xd9, 0x92, 0x75, 0x08, 0x9b, 0xeb, 0xa5, 0x93, 0xd1, 0x1a, 0xf4, 0xf5, 0xaf, 0xe6, 0xc4, 0x4a, 0x0d, 0x35, 0x26, 0x39, 0x9d, 0xd3, 0x17, 0x3e, 0xae, 0x2d, 0xbf, 0x73, 0x9f, 0xb7, 0x74, 0x91, 0xd1, 0xd8, 0x5c, 0x14, 0xf9, 0x75, 0xdf, 0xeb, 0xc2, 0x22, 0xd8, 0x14, 0x8d, 0x86, 0x23, 0x4d, 0xd1, 0x2d, 0xdb, 0x6b, 0x42, + /* (2^ 85)P */ 0x8c, 0xda, 0xc6, 0xf8, 0x71, 0xba, 0x2b, 0x06, 0x78, 0xae, 0xcc, 0x3a, 0xe3, 0xe3, 0xa1, 0x8b, 0xe2, 0x34, 0x6d, 0x28, 0x9e, 0x46, 0x13, 0x4d, 0x9e, 0xa6, 0x73, 0x49, 0x65, 0x79, 0x88, 0xb9, 0x3a, 0xd1, 0x6d, 0x2f, 0x48, 0x2b, 0x0a, 0x7f, 0x58, 0x20, 0x37, 0xf4, 0x0e, 0xbb, 0x4a, 0x95, 0x58, 0x0c, 0x88, 0x30, 0xc4, 0x74, 0xdd, 0xfd, + /* (2^ 86)P */ 0x6d, 0x13, 0x4e, 0x89, 0x2d, 0xa9, 0xa3, 0xed, 0x09, 0xe3, 0x0e, 0x71, 0x3e, 0x4a, 0xab, 0x90, 0xde, 0x03, 0xeb, 0x56, 0x46, 0x60, 0x06, 0xf5, 0x71, 0xe5, 0xee, 0x9b, 0xef, 0xff, 0xc4, 0x2c, 0x9f, 0x37, 0x48, 0x45, 0x94, 0x12, 0x41, 0x81, 0x15, 0x70, 0x91, 0x99, 0x5e, 0x56, 0x6b, 0xf4, 0xa6, 0xc9, 0xf5, 0x69, 0x9d, 0x78, 0x37, 0x57, + /* (2^ 87)P */ 0xf3, 0x51, 0x57, 0x7e, 0x43, 0x6f, 0xc6, 0x67, 0x59, 0x0c, 0xcf, 0x94, 0xe6, 0x3d, 0xb5, 0x07, 0xc9, 0x77, 0x48, 0xc9, 0x68, 0x0d, 0x98, 0x36, 0x62, 0x35, 0x38, 0x1c, 0xf5, 0xc5, 0xec, 0x66, 0x78, 0xfe, 0x47, 0xab, 0x26, 0xd6, 0x44, 0xb6, 0x06, 0x0f, 0x89, 0xe3, 0x19, 0x40, 0x1a, 0xe7, 0xd8, 0x65, 0x55, 0xf7, 0x1a, 0xfc, 0xa3, 0x0e, + /* (2^ 88)P */ 0x0e, 0x30, 0xa6, 0xb7, 0x58, 0x60, 0x62, 0x2a, 0x6c, 0x13, 0xa8, 0x14, 0x9b, 0xb8, 0xf2, 0x70, 0xd8, 0xb1, 0x71, 0x88, 0x8c, 0x18, 0x31, 0x25, 0x93, 0x90, 0xb4, 0xc7, 0x49, 0xd8, 0xd4, 0xdb, 0x1e, 0x1e, 
0x7f, 0xaa, 0xba, 0xc9, 0xf2, 0x5d, 0xa9, 0x3a, 0x43, 0xb4, 0x5c, 0xee, 0x7b, 0xc7, 0x97, 0xb7, 0x66, 0xd7, 0x23, 0xd9, 0x22, 0x59, + /* (2^ 89)P */ 0x28, 0x19, 0xa6, 0xf9, 0x89, 0x20, 0x78, 0xd4, 0x6d, 0xcb, 0x79, 0x8f, 0x61, 0x6f, 0xb2, 0x5c, 0x4f, 0xa6, 0x54, 0x84, 0x95, 0x24, 0x36, 0x64, 0xcb, 0x39, 0xe7, 0x8f, 0x97, 0x9c, 0x5c, 0x3c, 0xfb, 0x51, 0x11, 0x01, 0x17, 0xdb, 0xc9, 0x9b, 0x51, 0x03, 0x9a, 0xe9, 0xe5, 0x24, 0x1e, 0xf5, 0xda, 0xe0, 0x48, 0x02, 0x23, 0xd0, 0x2c, 0x81, + /* (2^ 90)P */ 0x42, 0x1b, 0xe4, 0x91, 0x85, 0x2a, 0x0c, 0xd2, 0x28, 0x66, 0x57, 0x9e, 0x33, 0x8d, 0x25, 0x71, 0x10, 0x65, 0x76, 0xa2, 0x8c, 0x21, 0x86, 0x81, 0x15, 0xc2, 0x27, 0xeb, 0x54, 0x2d, 0x4f, 0x6c, 0xe6, 0xd6, 0x24, 0x9c, 0x1a, 0x12, 0xb8, 0x81, 0xe2, 0x0a, 0xf3, 0xd3, 0xf0, 0xd3, 0xe1, 0x74, 0x1f, 0x9b, 0x11, 0x47, 0xd0, 0xcf, 0xb6, 0x54, + /* (2^ 91)P */ 0x26, 0x45, 0xa2, 0x10, 0xd4, 0x2d, 0xae, 0xc0, 0xb0, 0xe8, 0x86, 0xb3, 0xc7, 0xea, 0x70, 0x87, 0x61, 0xb5, 0xa5, 0x55, 0xbe, 0x88, 0x1d, 0x7a, 0xd9, 0x6f, 0xeb, 0x83, 0xe2, 0x44, 0x7f, 0x98, 0x04, 0xd6, 0x50, 0x9d, 0xa7, 0x86, 0x66, 0x09, 0x63, 0xe1, 0xed, 0x72, 0xb1, 0xe4, 0x1d, 0x3a, 0xfd, 0x47, 0xce, 0x1c, 0xaa, 0x3b, 0x8f, 0x1b, + /* (2^ 92)P */ 0xf4, 0x3c, 0x4a, 0xb6, 0xc2, 0x9c, 0xe0, 0x2e, 0xb7, 0x38, 0xea, 0x61, 0x35, 0x97, 0x10, 0x90, 0xae, 0x22, 0x48, 0xb3, 0xa9, 0xc6, 0x7a, 0xbb, 0x23, 0xf2, 0xf8, 0x1b, 0xa7, 0xa1, 0x79, 0xcc, 0xc4, 0xf8, 0x08, 0x76, 0x8a, 0x5a, 0x1c, 0x1b, 0xc5, 0x33, 0x91, 0xa9, 0xb8, 0xb9, 0xd3, 0xf8, 0x49, 0xcd, 0xe5, 0x82, 0x43, 0xf7, 0xca, 0x68, + /* (2^ 93)P */ 0x38, 0xba, 0xae, 0x44, 0xfe, 0x57, 0x64, 0x56, 0x7c, 0x0e, 0x9c, 0xca, 0xff, 0xa9, 0x82, 0xbb, 0x38, 0x4a, 0xa7, 0xf7, 0x47, 0xab, 0xbe, 0x6d, 0x23, 0x0b, 0x8a, 0xed, 0xc2, 0xb9, 0x8f, 0xf1, 0xec, 0x91, 0x44, 0x73, 0x64, 0xba, 0xd5, 0x8f, 0x37, 0x38, 0x0d, 0xd5, 0xf8, 0x73, 0x57, 0xb6, 0xc2, 0x45, 0xdc, 0x25, 0xb2, 0xb6, 0xea, 0xd9, + /* (2^ 94)P */ 0xbf, 0xe9, 0x1a, 0x40, 0x4d, 0xcc, 0xe6, 0x1d, 0x70, 0x1a, 0x65, 0xcc, 0x34, 0x2c, 0x37, 0x2c, 0x2d, 0x6b, 0x6d, 0xe5, 0x2f, 0x19, 0x9e, 0xe4, 0xe1, 0xaa, 0xd4, 0xab, 0x54, 0xf4, 0xa8, 0xe4, 0x69, 0x2d, 0x8e, 0x4d, 0xd7, 0xac, 0xb0, 0x5b, 0xfe, 0xe3, 0x26, 0x07, 0xc3, 0xf8, 0x1b, 0x43, 0xa8, 0x1d, 0x64, 0xa5, 0x25, 0x88, 0xbb, 0x77, + /* (2^ 95)P */ 0x92, 0xcd, 0x6e, 0xa0, 0x79, 0x04, 0x18, 0xf4, 0x11, 0x58, 0x48, 0xb5, 0x3c, 0x7b, 0xd1, 0xcc, 0xd3, 0x14, 0x2c, 0xa0, 0xdd, 0x04, 0x44, 0x11, 0xb3, 0x6d, 0x2f, 0x0d, 0xf5, 0x2a, 0x75, 0x5d, 0x1d, 0xda, 0x86, 0x8d, 0x7d, 0x6b, 0x32, 0x68, 0xb6, 0x6c, 0x64, 0x9e, 0xde, 0x80, 0x88, 0xce, 0x08, 0xbf, 0x0b, 0xe5, 0x8e, 0x4f, 0x1d, 0xfb, + /* (2^ 96)P */ 0xaf, 0xe8, 0x85, 0xbf, 0x7f, 0x37, 0x8d, 0x66, 0x7c, 0xd5, 0xd3, 0x96, 0xa5, 0x81, 0x67, 0x95, 0xff, 0x48, 0xde, 0xde, 0xd7, 0x7a, 0x46, 0x34, 0xb1, 0x13, 0x70, 0x29, 0xed, 0x87, 0x90, 0xb0, 0x40, 0x2c, 0xa6, 0x43, 0x6e, 0xb6, 0xbc, 0x48, 0x8a, 0xc1, 0xae, 0xb8, 0xd4, 0xe2, 0xc0, 0x32, 0xb2, 0xa6, 0x2a, 0x8f, 0xb5, 0x16, 0x9e, 0xc3, + /* (2^ 97)P */ 0xff, 0x4d, 0xd2, 0xd6, 0x74, 0xef, 0x2c, 0x96, 0xc1, 0x11, 0xa8, 0xb8, 0xfe, 0x94, 0x87, 0x3e, 0xa0, 0xfb, 0x57, 0xa3, 0xfc, 0x7a, 0x7e, 0x6a, 0x59, 0x6c, 0x54, 0xbb, 0xbb, 0xa2, 0x25, 0x38, 0x1b, 0xdf, 0x5d, 0x7b, 0x94, 0x14, 0xde, 0x07, 0x6e, 0xd3, 0xab, 0x02, 0x26, 0x74, 0x16, 0x12, 0xdf, 0x2e, 0x2a, 0xa7, 0xb0, 0xe8, 0x29, 0xc0, + /* (2^ 98)P */ 0x6a, 0x38, 0x0b, 0xd3, 0xba, 0x45, 0x23, 0xe0, 0x04, 0x3b, 0x83, 0x39, 0xc5, 0x11, 0xe6, 0xcf, 0x39, 0x0a, 0xb3, 0xb0, 0x3b, 0x27, 0x29, 0x63, 0x1c, 0xf3, 0x00, 0xe6, 0xd2, 0x55, 0x21, 0x1f, 0x84, 0x97, 0x9f, 0x01, 0x49, 0x43, 
0x30, 0x5f, 0xe0, 0x1d, 0x24, 0xc4, 0x4e, 0xa0, 0x2b, 0x0b, 0x12, 0x55, 0xc3, 0x27, 0xae, 0x08, 0x83, 0x7c, + /* (2^ 99)P */ 0x5d, 0x1a, 0xb7, 0xa9, 0xf5, 0xfd, 0xec, 0xad, 0xb7, 0x87, 0x02, 0x5f, 0x0d, 0x30, 0x4d, 0xe2, 0x65, 0x87, 0xa4, 0x41, 0x45, 0x1d, 0x67, 0xe0, 0x30, 0x5c, 0x13, 0x87, 0xf6, 0x2e, 0x08, 0xc1, 0xc7, 0x12, 0x45, 0xc8, 0x9b, 0xad, 0xb8, 0xd5, 0x57, 0xbb, 0x5c, 0x48, 0x3a, 0xe1, 0x91, 0x5e, 0xf6, 0x4d, 0x8a, 0x63, 0x75, 0x69, 0x0c, 0x01, + /* (2^100)P */ 0x8f, 0x53, 0x2d, 0xa0, 0x71, 0x3d, 0xfc, 0x45, 0x10, 0x96, 0xcf, 0x56, 0xf9, 0xbb, 0x40, 0x3c, 0x86, 0x52, 0x76, 0xbe, 0x84, 0xf9, 0xa6, 0x9d, 0x3d, 0x27, 0xbe, 0xb4, 0x00, 0x49, 0x94, 0xf5, 0x5d, 0xe1, 0x62, 0x85, 0x66, 0xe5, 0xb8, 0x20, 0x2c, 0x09, 0x7d, 0x9d, 0x3d, 0x6e, 0x74, 0x39, 0xab, 0xad, 0xa0, 0x90, 0x97, 0x5f, 0xbb, 0xa7, + /* (2^101)P */ 0xdb, 0x2d, 0x99, 0x08, 0x16, 0x46, 0x83, 0x7a, 0xa8, 0xea, 0x3d, 0x28, 0x5b, 0x49, 0xfc, 0xb9, 0x6d, 0x00, 0x9e, 0x54, 0x4f, 0x47, 0x64, 0x9b, 0x58, 0x4d, 0x07, 0x0c, 0x6f, 0x29, 0x56, 0x0b, 0x00, 0x14, 0x85, 0x96, 0x41, 0x04, 0xb9, 0x5c, 0xa4, 0xf6, 0x16, 0x73, 0x6a, 0xc7, 0x62, 0x0c, 0x65, 0x2f, 0x93, 0xbf, 0xf7, 0xb9, 0xb7, 0xf1, + /* (2^102)P */ 0xeb, 0x6d, 0xb3, 0x46, 0x32, 0xd2, 0xcb, 0x08, 0x94, 0x14, 0xbf, 0x3f, 0xc5, 0xcb, 0x5f, 0x9f, 0x8a, 0x89, 0x0c, 0x1b, 0x45, 0xad, 0x4c, 0x50, 0xb4, 0xe1, 0xa0, 0x6b, 0x11, 0x92, 0xaf, 0x1f, 0x00, 0xcc, 0xe5, 0x13, 0x7e, 0xe4, 0x2e, 0xa0, 0x57, 0xf3, 0xa7, 0x84, 0x79, 0x7a, 0xc2, 0xb7, 0xb7, 0xfc, 0x5d, 0xa5, 0xa9, 0x64, 0xcc, 0xd8, + /* (2^103)P */ 0xa9, 0xc4, 0x12, 0x8b, 0x34, 0x78, 0x3e, 0x38, 0xfd, 0x3f, 0x87, 0xfa, 0x88, 0x94, 0xd5, 0xd9, 0x7f, 0xeb, 0x58, 0xff, 0xb9, 0x45, 0xdb, 0xa1, 0xed, 0x22, 0x28, 0x1d, 0x00, 0x6d, 0x79, 0x85, 0x7a, 0x75, 0x5d, 0xf0, 0xb1, 0x9e, 0x47, 0x28, 0x8c, 0x62, 0xdf, 0xfb, 0x4c, 0x7b, 0xc5, 0x1a, 0x42, 0x95, 0xef, 0x9a, 0xb7, 0x27, 0x7e, 0xda, + /* (2^104)P */ 0xca, 0xd5, 0xc0, 0x17, 0xa1, 0x66, 0x79, 0x9c, 0x2a, 0xb7, 0x0a, 0xfe, 0x62, 0xe4, 0x26, 0x78, 0x90, 0xa7, 0xcb, 0xb0, 0x4f, 0x6d, 0xf9, 0x8f, 0xf7, 0x7d, 0xac, 0xb8, 0x78, 0x1f, 0x41, 0xea, 0x97, 0x1e, 0x62, 0x97, 0x43, 0x80, 0x58, 0x80, 0xb6, 0x69, 0x7d, 0xee, 0x16, 0xd2, 0xa1, 0x81, 0xd7, 0xb1, 0x27, 0x03, 0x48, 0xda, 0xab, 0xec, + /* (2^105)P */ 0x5b, 0xed, 0x40, 0x8e, 0x8c, 0xc1, 0x66, 0x90, 0x7f, 0x0c, 0xb2, 0xfc, 0xbd, 0x16, 0xac, 0x7d, 0x4c, 0x6a, 0xf9, 0xae, 0xe7, 0x4e, 0x11, 0x12, 0xe9, 0xbe, 0x17, 0x09, 0xc6, 0xc1, 0x5e, 0xb5, 0x7b, 0x50, 0x5c, 0x27, 0xfb, 0x80, 0xab, 0x01, 0xfa, 0x5b, 0x9b, 0x75, 0x16, 0x6e, 0xb2, 0x5c, 0x8c, 0x2f, 0xa5, 0x6a, 0x1a, 0x68, 0xa6, 0x90, + /* (2^106)P */ 0x75, 0xfe, 0xb6, 0x96, 0x96, 0x87, 0x4c, 0x12, 0xa9, 0xd1, 0xd8, 0x03, 0xa3, 0xc1, 0x15, 0x96, 0xe8, 0xa0, 0x75, 0x82, 0xa0, 0x6d, 0xea, 0x54, 0xdc, 0x5f, 0x0d, 0x7e, 0xf6, 0x70, 0xb5, 0xdc, 0x7a, 0xf6, 0xc4, 0xd4, 0x21, 0x49, 0xf5, 0xd4, 0x14, 0x6d, 0x48, 0x1d, 0x7c, 0x99, 0x42, 0xdf, 0x78, 0x6b, 0x9d, 0xb9, 0x30, 0x3c, 0xd0, 0x29, + /* (2^107)P */ 0x85, 0xd6, 0xd8, 0xf3, 0x91, 0x74, 0xdd, 0xbd, 0x72, 0x96, 0x10, 0xe4, 0x76, 0x02, 0x5a, 0x72, 0x67, 0xd3, 0x17, 0x72, 0x14, 0x9a, 0x20, 0x5b, 0x0f, 0x8d, 0xed, 0x6d, 0x4e, 0xe3, 0xd9, 0x82, 0xc2, 0x99, 0xee, 0x39, 0x61, 0x69, 0x8a, 0x24, 0x01, 0x92, 0x15, 0xe7, 0xfc, 0xf9, 0x4d, 0xac, 0xf1, 0x30, 0x49, 0x01, 0x0b, 0x6e, 0x0f, 0x20, + /* (2^108)P */ 0xd8, 0x25, 0x94, 0x5e, 0x43, 0x29, 0xf5, 0xcc, 0xe8, 0xe3, 0x55, 0x41, 0x3c, 0x9f, 0x58, 0x5b, 0x00, 0xeb, 0xc5, 0xdf, 0xcf, 0xfb, 0xfd, 0x6e, 0x92, 0xec, 0x99, 0x30, 0xd6, 0x05, 0xdd, 0x80, 0x7a, 0x5d, 0x6d, 0x16, 0x85, 0xd8, 0x9d, 0x43, 0x65, 0xd8, 
0x2c, 0x33, 0x2f, 0x5c, 0x41, 0xea, 0xb7, 0x95, 0x77, 0xf2, 0x9e, 0x59, 0x09, 0xe8, + /* (2^109)P */ 0x00, 0xa0, 0x03, 0x80, 0xcd, 0x60, 0xe5, 0x17, 0xd4, 0x15, 0x99, 0xdd, 0x4f, 0xbf, 0x66, 0xb8, 0xc0, 0xf5, 0xf9, 0xfc, 0x6d, 0x42, 0x18, 0x34, 0x1c, 0x7d, 0x5b, 0xb5, 0x09, 0xd0, 0x99, 0x57, 0x81, 0x0b, 0x62, 0xb3, 0xa2, 0xf9, 0x0b, 0xae, 0x95, 0xb8, 0xc2, 0x3b, 0x0d, 0x5b, 0x00, 0xf1, 0xed, 0xbc, 0x05, 0x9d, 0x61, 0xbc, 0x73, 0x9d, + /* (2^110)P */ 0xd4, 0xdb, 0x29, 0xe5, 0x85, 0xe9, 0xc6, 0x89, 0x2a, 0xa8, 0x54, 0xab, 0xb3, 0x7f, 0x88, 0xc0, 0x4d, 0xe0, 0xd1, 0x74, 0x6e, 0xa3, 0xa7, 0x39, 0xd5, 0xcc, 0xa1, 0x8a, 0xcb, 0x5b, 0x34, 0xad, 0x92, 0xb4, 0xd8, 0xd5, 0x17, 0xf6, 0x77, 0x18, 0x9e, 0xaf, 0x45, 0x3b, 0x03, 0xe2, 0xf8, 0x52, 0x60, 0xdc, 0x15, 0x20, 0x9e, 0xdf, 0xd8, 0x5d, + /* (2^111)P */ 0x02, 0xc1, 0xac, 0x1a, 0x15, 0x8e, 0x6c, 0xf5, 0x1e, 0x1e, 0xba, 0x7e, 0xc2, 0xda, 0x7d, 0x02, 0xda, 0x43, 0xae, 0x04, 0x70, 0x28, 0x54, 0x78, 0x94, 0xf5, 0x4f, 0x07, 0x84, 0x8f, 0xed, 0xaa, 0xc0, 0xb8, 0xcd, 0x7f, 0x7e, 0x33, 0xa3, 0xbe, 0x21, 0x29, 0xc8, 0x56, 0x34, 0xc0, 0x76, 0x87, 0x8f, 0xc7, 0x73, 0x58, 0x90, 0x16, 0xfc, 0xd6, + /* (2^112)P */ 0xb8, 0x3f, 0xe1, 0xdf, 0x3a, 0x91, 0x25, 0x0c, 0xf6, 0x47, 0xa8, 0x89, 0xc4, 0xc6, 0x61, 0xec, 0x86, 0x2c, 0xfd, 0xbe, 0xa4, 0x6f, 0xc2, 0xd4, 0x46, 0x19, 0x70, 0x5d, 0x09, 0x02, 0x86, 0xd3, 0x4b, 0xe9, 0x16, 0x7b, 0xf0, 0x0d, 0x6c, 0xff, 0x91, 0x05, 0xbf, 0x55, 0xb4, 0x00, 0x8d, 0xe5, 0x6d, 0x68, 0x20, 0x90, 0x12, 0xb5, 0x5c, 0x32, + /* (2^113)P */ 0x80, 0x45, 0xc8, 0x51, 0x87, 0xba, 0x1c, 0x5c, 0xcf, 0x5f, 0x4b, 0x3c, 0x9e, 0x3b, 0x36, 0xd2, 0x26, 0xa2, 0x7f, 0xab, 0xb7, 0xbf, 0xda, 0x68, 0x23, 0x8f, 0xc3, 0xa0, 0xfd, 0xad, 0xf1, 0x56, 0x3b, 0xd0, 0x75, 0x2b, 0x44, 0x61, 0xd8, 0xf4, 0xf1, 0x05, 0x49, 0x53, 0x07, 0xee, 0x47, 0xef, 0xc0, 0x7c, 0x9d, 0xe4, 0x15, 0x88, 0xc5, 0x47, + /* (2^114)P */ 0x2d, 0xb5, 0x09, 0x80, 0xb9, 0xd3, 0xd8, 0xfe, 0x4c, 0xd2, 0xa6, 0x6e, 0xd3, 0x75, 0xcf, 0xb0, 0x99, 0xcb, 0x50, 0x8d, 0xe9, 0x67, 0x9b, 0x20, 0xe8, 0x57, 0xd8, 0x14, 0x85, 0x73, 0x6a, 0x74, 0xe0, 0x99, 0xf0, 0x6b, 0x6e, 0x59, 0x30, 0x31, 0x33, 0x96, 0x5f, 0xa1, 0x0c, 0x1b, 0xf4, 0xca, 0x09, 0xe1, 0x9b, 0xb5, 0xcf, 0x6d, 0x0b, 0xeb, + /* (2^115)P */ 0x1a, 0xde, 0x50, 0xa9, 0xac, 0x3e, 0x10, 0x43, 0x4f, 0x82, 0x4f, 0xc0, 0xfe, 0x3f, 0x33, 0xd2, 0x64, 0x86, 0x50, 0xa9, 0x51, 0x76, 0x5e, 0x50, 0x97, 0x6c, 0x73, 0x8d, 0x77, 0xa3, 0x75, 0x03, 0xbc, 0xc9, 0xfb, 0x50, 0xd9, 0x6d, 0x16, 0xad, 0x5d, 0x32, 0x3d, 0xac, 0x44, 0xdf, 0x51, 0xf7, 0x19, 0xd4, 0x0b, 0x57, 0x78, 0x0b, 0x81, 0x4e, + /* (2^116)P */ 0x32, 0x24, 0xf1, 0x6c, 0x55, 0x62, 0x1d, 0xb3, 0x1f, 0xda, 0xfa, 0x6a, 0x8f, 0x98, 0x01, 0x16, 0xde, 0x44, 0x50, 0x0d, 0x2e, 0x6c, 0x0b, 0xa2, 0xd3, 0x74, 0x0e, 0xa9, 0xbf, 0x8d, 0xa9, 0xc8, 0xc8, 0x2f, 0x62, 0xc1, 0x35, 0x5e, 0xfd, 0x3a, 0xb3, 0x83, 0x2d, 0xee, 0x4e, 0xfd, 0x5c, 0x5e, 0xad, 0x85, 0xa5, 0x10, 0xb5, 0x4f, 0x34, 0xa7, + /* (2^117)P */ 0xd1, 0x58, 0x6f, 0xe6, 0x54, 0x2c, 0xc2, 0xcd, 0xcf, 0x83, 0xdc, 0x88, 0x0c, 0xb9, 0xb4, 0x62, 0x18, 0x89, 0x65, 0x28, 0xe9, 0x72, 0x4b, 0x65, 0xcf, 0xd6, 0x90, 0x88, 0xd7, 0x76, 0x17, 0x4f, 0x74, 0x64, 0x1e, 0xcb, 0xd3, 0xf5, 0x4b, 0xaa, 0x2e, 0x4d, 0x2d, 0x7c, 0x13, 0x1f, 0xfd, 0xd9, 0x60, 0x83, 0x7e, 0xda, 0x64, 0x1c, 0xdc, 0x9f, + /* (2^118)P */ 0xad, 0xef, 0xac, 0x1b, 0xc1, 0x30, 0x5a, 0x15, 0xc9, 0x1f, 0xac, 0xf1, 0xca, 0x44, 0x95, 0x95, 0xea, 0xf2, 0x22, 0xe7, 0x8d, 0x25, 0xf0, 0xff, 0xd8, 0x71, 0xf7, 0xf8, 0x8f, 0x8f, 0xcd, 0xf4, 0x1e, 0xfe, 0x6c, 0x68, 0x04, 0xb8, 0x78, 0xa1, 0x5f, 0xa6, 0x5d, 0x5e, 0xf9, 0x8d, 
0xea, 0x80, 0xcb, 0xf3, 0x17, 0xa6, 0x03, 0xc9, 0x38, 0xd5, + /* (2^119)P */ 0x79, 0x14, 0x31, 0xc3, 0x38, 0xe5, 0xaa, 0xbf, 0x17, 0xa3, 0x04, 0x4e, 0x80, 0x59, 0x9c, 0x9f, 0x19, 0x39, 0xe4, 0x2d, 0x23, 0x54, 0x4a, 0x7f, 0x3e, 0xf3, 0xd9, 0xc7, 0xba, 0x6c, 0x8f, 0x6b, 0xfa, 0x34, 0xb5, 0x23, 0x17, 0x1d, 0xff, 0x1d, 0xea, 0x1f, 0xd7, 0xba, 0x61, 0xb2, 0xe0, 0x38, 0x6a, 0xe9, 0xcf, 0x48, 0x5d, 0x6a, 0x10, 0x9c, + /* (2^120)P */ 0xc8, 0xbb, 0x13, 0x1c, 0x3f, 0x3c, 0x34, 0xfd, 0xac, 0x37, 0x52, 0x44, 0x25, 0xa8, 0xde, 0x1d, 0x63, 0xf4, 0x81, 0x9a, 0xbe, 0x0b, 0x74, 0x2e, 0xc8, 0x51, 0x16, 0xd3, 0xac, 0x4a, 0xaf, 0xe2, 0x5f, 0x3a, 0x89, 0x32, 0xd1, 0x9b, 0x7c, 0x90, 0x0d, 0xac, 0xdc, 0x8b, 0x73, 0x45, 0x45, 0x97, 0xb1, 0x90, 0x2c, 0x1b, 0x31, 0xca, 0xb1, 0x94, + /* (2^121)P */ 0x07, 0x28, 0xdd, 0x10, 0x14, 0xa5, 0x95, 0x7e, 0xf3, 0xe4, 0xd4, 0x14, 0xb4, 0x7e, 0x76, 0xdb, 0x42, 0xd6, 0x94, 0x3e, 0xeb, 0x44, 0x64, 0x88, 0x0d, 0xec, 0xc1, 0x21, 0xf0, 0x79, 0xe0, 0x83, 0x67, 0x55, 0x53, 0xc2, 0xf6, 0xc5, 0xc5, 0x89, 0x39, 0xe8, 0x42, 0xd0, 0x17, 0xbd, 0xff, 0x35, 0x59, 0x0e, 0xc3, 0x06, 0x86, 0xd4, 0x64, 0xcf, + /* (2^122)P */ 0x91, 0xa8, 0xdb, 0x57, 0x9b, 0xe2, 0x96, 0x31, 0x10, 0x6e, 0xd7, 0x9a, 0x97, 0xb3, 0xab, 0xb5, 0x15, 0x66, 0xbe, 0xcc, 0x6d, 0x9a, 0xac, 0x06, 0xb3, 0x0d, 0xaa, 0x4b, 0x9c, 0x96, 0x79, 0x6c, 0x34, 0xee, 0x9e, 0x53, 0x4d, 0x6e, 0xbd, 0x88, 0x02, 0xbf, 0x50, 0x54, 0x12, 0x5d, 0x01, 0x02, 0x46, 0xc6, 0x74, 0x02, 0x8c, 0x24, 0xae, 0xb1, + /* (2^123)P */ 0xf5, 0x22, 0xea, 0xac, 0x7d, 0x9c, 0x33, 0x8a, 0xa5, 0x36, 0x79, 0x6a, 0x4f, 0xa4, 0xdc, 0xa5, 0x73, 0x64, 0xc4, 0x6f, 0x43, 0x02, 0x3b, 0x94, 0x66, 0xd2, 0x4b, 0x4f, 0xf6, 0x45, 0x33, 0x5d, 0x10, 0x33, 0x18, 0x1e, 0xa3, 0xfc, 0xf7, 0xd2, 0xb8, 0xc8, 0xa7, 0xe0, 0x76, 0x8a, 0xcd, 0xff, 0x4f, 0x99, 0x34, 0x47, 0x84, 0x91, 0x96, 0x9f, + /* (2^124)P */ 0x8a, 0x48, 0x3b, 0x48, 0x4a, 0xbc, 0xac, 0xe2, 0x80, 0xd6, 0xd2, 0x35, 0xde, 0xd0, 0x56, 0x42, 0x33, 0xb3, 0x56, 0x5a, 0xcd, 0xb8, 0x3d, 0xb5, 0x25, 0xc1, 0xed, 0xff, 0x87, 0x0b, 0x79, 0xff, 0xf2, 0x62, 0xe1, 0x76, 0xc6, 0xa2, 0x0f, 0xa8, 0x9b, 0x0d, 0xcc, 0x3f, 0x3d, 0x35, 0x27, 0x8d, 0x0b, 0x74, 0xb0, 0xc3, 0x78, 0x8c, 0xcc, 0xc8, + /* (2^125)P */ 0xfc, 0x9a, 0x0c, 0xa8, 0x49, 0x42, 0xb8, 0xdf, 0xcf, 0xb3, 0x19, 0xa6, 0x64, 0x57, 0xfe, 0xe8, 0xf8, 0xa6, 0x4b, 0x86, 0xa1, 0xd5, 0x83, 0x7f, 0x14, 0x99, 0x18, 0x0c, 0x7d, 0x5b, 0xf7, 0x3d, 0xf9, 0x4b, 0x79, 0xb1, 0x86, 0x30, 0xb4, 0x5e, 0x6a, 0xe8, 0x9d, 0xfa, 0x8a, 0x41, 0xc4, 0x30, 0xfc, 0x56, 0x74, 0x14, 0x42, 0xc8, 0x96, 0x0e, + /* (2^126)P */ 0xdf, 0x66, 0xec, 0xbc, 0x44, 0xdb, 0x19, 0xce, 0xd4, 0xb5, 0x49, 0x40, 0x07, 0x49, 0xe0, 0x3a, 0x61, 0x10, 0xfb, 0x7d, 0xba, 0xb1, 0xe0, 0x28, 0x5b, 0x99, 0x59, 0x96, 0xa2, 0xee, 0xe0, 0x23, 0x37, 0x39, 0x1f, 0xe6, 0x57, 0x9f, 0xf8, 0xf8, 0xdc, 0x74, 0xf6, 0x8f, 0x4f, 0x5e, 0x51, 0xa4, 0x12, 0xac, 0xbe, 0xe4, 0xf3, 0xd1, 0xf0, 0x24, + /* (2^127)P */ 0x1e, 0x3e, 0x9a, 0x5f, 0xdf, 0x9f, 0xd6, 0x4e, 0x8a, 0x28, 0xc3, 0xcd, 0x96, 0x9d, 0x57, 0xc7, 0x61, 0x81, 0x90, 0xff, 0xae, 0xb1, 0x4f, 0xc2, 0x96, 0x8b, 0x1a, 0x18, 0xf4, 0x50, 0xcb, 0x31, 0xe1, 0x57, 0xf4, 0x90, 0xa8, 0xea, 0xac, 0xe7, 0x61, 0x98, 0xb6, 0x15, 0xc1, 0x7b, 0x29, 0xa4, 0xc3, 0x18, 0xef, 0xb9, 0xd8, 0xdf, 0xf6, 0xac, + /* (2^128)P */ 0xca, 0xa8, 0x6c, 0xf1, 0xb4, 0xca, 0xfe, 0x31, 0xee, 0x48, 0x38, 0x8b, 0x0e, 0xbb, 0x7a, 0x30, 0xaa, 0xf9, 0xee, 0x27, 0x53, 0x24, 0xdc, 0x2e, 0x15, 0xa6, 0x48, 0x8f, 0xa0, 0x7e, 0xf1, 0xdc, 0x93, 0x87, 0x39, 0xeb, 0x7f, 0x38, 0x92, 0x92, 0x4c, 0x29, 0xe9, 0x57, 0xd8, 0x59, 0xfc, 0xe9, 0x9c, 0x44, 
0xc0, 0x65, 0xcf, 0xac, 0x4b, 0xdc, + /* (2^129)P */ 0xa3, 0xd0, 0x37, 0x8f, 0x86, 0x2f, 0xc6, 0x47, 0x55, 0x46, 0x65, 0x26, 0x4b, 0x91, 0xe2, 0x18, 0x5c, 0x4f, 0x23, 0xc1, 0x37, 0x29, 0xb9, 0xc1, 0x27, 0xc5, 0x3c, 0xbf, 0x7e, 0x23, 0xdb, 0x73, 0x99, 0xbd, 0x1b, 0xb2, 0x31, 0x68, 0x3a, 0xad, 0xb7, 0xb0, 0x10, 0xc5, 0xe5, 0x11, 0x51, 0xba, 0xa7, 0x60, 0x66, 0x54, 0xf0, 0x08, 0xd7, 0x69, + /* (2^130)P */ 0x89, 0x41, 0x79, 0xcc, 0xeb, 0x0a, 0xf5, 0x4b, 0xa3, 0x4c, 0xce, 0x52, 0xb0, 0xa7, 0xe4, 0x41, 0x75, 0x7d, 0x04, 0xbb, 0x09, 0x4c, 0x50, 0x9f, 0xdf, 0xea, 0x74, 0x61, 0x02, 0xad, 0xb4, 0x9d, 0xb7, 0x05, 0xb9, 0xea, 0xeb, 0x91, 0x35, 0xe7, 0x49, 0xea, 0xd3, 0x4f, 0x3c, 0x60, 0x21, 0x7a, 0xde, 0xc7, 0xe2, 0x5a, 0xee, 0x8e, 0x93, 0xc7, + /* (2^131)P */ 0x00, 0xe8, 0xed, 0xd0, 0xb3, 0x0d, 0xaf, 0xb2, 0xde, 0x2c, 0xf6, 0x00, 0xe2, 0xea, 0x6d, 0xf8, 0x0e, 0xd9, 0x67, 0x59, 0xa9, 0x50, 0xbb, 0x17, 0x8f, 0xff, 0xb1, 0x9f, 0x17, 0xb6, 0xf2, 0xb5, 0xba, 0x80, 0xf7, 0x0f, 0xba, 0xd5, 0x09, 0x43, 0xaa, 0x4e, 0x3a, 0x67, 0x6a, 0x89, 0x9b, 0x18, 0x65, 0x35, 0xf8, 0x3a, 0x49, 0x91, 0x30, 0x51, + /* (2^132)P */ 0x8d, 0x25, 0xe9, 0x0e, 0x7d, 0x50, 0x76, 0xe4, 0x58, 0x7e, 0xb9, 0x33, 0xe6, 0x65, 0x90, 0xc2, 0x50, 0x9d, 0x50, 0x2e, 0x11, 0xad, 0xd5, 0x43, 0x52, 0x32, 0x41, 0x4f, 0x7b, 0xb6, 0xa0, 0xec, 0x81, 0x75, 0x36, 0x7c, 0x77, 0x85, 0x59, 0x70, 0xe4, 0xf9, 0xef, 0x66, 0x8d, 0x35, 0xc8, 0x2a, 0x6e, 0x5b, 0xc6, 0x0d, 0x0b, 0x29, 0x60, 0x68, + /* (2^133)P */ 0xf8, 0xce, 0xb0, 0x3a, 0x56, 0x7d, 0x51, 0x9a, 0x25, 0x73, 0xea, 0xdd, 0xe4, 0xe0, 0x0e, 0xf0, 0x07, 0xc0, 0x31, 0x00, 0x73, 0x35, 0xd0, 0x39, 0xc4, 0x9b, 0xb7, 0x95, 0xe0, 0x62, 0x70, 0x36, 0x0b, 0xcb, 0xa0, 0x42, 0xde, 0x51, 0xcf, 0x41, 0xe0, 0xb8, 0xb4, 0xc0, 0xe5, 0x46, 0x99, 0x9f, 0x02, 0x7f, 0x14, 0x8c, 0xc1, 0x4e, 0xef, 0xe8, + /* (2^134)P */ 0x10, 0x01, 0x57, 0x0a, 0xbe, 0x8b, 0x18, 0xc8, 0xca, 0x00, 0x28, 0x77, 0x4a, 0x9a, 0xc7, 0x55, 0x2a, 0xcc, 0x0c, 0x7b, 0xb9, 0xe9, 0xc8, 0x97, 0x7c, 0x02, 0xe3, 0x09, 0x2f, 0x62, 0x30, 0xb8, 0x40, 0x09, 0x65, 0xe9, 0x55, 0x63, 0xb5, 0x07, 0xca, 0x9f, 0x00, 0xdf, 0x9d, 0x5c, 0xc7, 0xee, 0x57, 0xa5, 0x90, 0x15, 0x1e, 0x22, 0xa0, 0x12, + /* (2^135)P */ 0x71, 0x2d, 0xc9, 0xef, 0x27, 0xb9, 0xd8, 0x12, 0x43, 0x6b, 0xa8, 0xce, 0x3b, 0x6d, 0x6e, 0x91, 0x43, 0x23, 0xbc, 0x32, 0xb3, 0xbf, 0xe1, 0xc7, 0x39, 0xcf, 0x7c, 0x42, 0x4c, 0xb1, 0x30, 0xe2, 0xdd, 0x69, 0x06, 0xe5, 0xea, 0xf0, 0x2a, 0x16, 0x50, 0x71, 0xca, 0x92, 0xdf, 0xc1, 0xcc, 0xec, 0xe6, 0x54, 0x07, 0xf3, 0x18, 0x8d, 0xd8, 0x29, + /* (2^136)P */ 0x98, 0x51, 0x48, 0x8f, 0xfa, 0x2e, 0x5e, 0x67, 0xb0, 0xc6, 0x17, 0x12, 0xb6, 0x7d, 0xc9, 0xad, 0x81, 0x11, 0xad, 0x0c, 0x1c, 0x2d, 0x45, 0xdf, 0xac, 0x66, 0xbd, 0x08, 0x6f, 0x7c, 0xc7, 0x06, 0x6e, 0x19, 0x08, 0x39, 0x64, 0xd7, 0xe4, 0xd1, 0x11, 0x5f, 0x1c, 0xf4, 0x67, 0xc3, 0x88, 0x6a, 0xe6, 0x07, 0xa3, 0x83, 0xd7, 0xfd, 0x2a, 0xf9, + /* (2^137)P */ 0x87, 0xed, 0xeb, 0xd9, 0xdf, 0xff, 0x43, 0x8b, 0xaa, 0x20, 0x58, 0xb0, 0xb4, 0x6b, 0x14, 0xb8, 0x02, 0xc5, 0x40, 0x20, 0x22, 0xbb, 0xf7, 0xb4, 0xf3, 0x05, 0x1e, 0x4d, 0x94, 0xff, 0xe3, 0xc5, 0x22, 0x82, 0xfe, 0xaf, 0x90, 0x42, 0x98, 0x6b, 0x76, 0x8b, 0x3e, 0x89, 0x3f, 0x42, 0x2a, 0xa7, 0x26, 0x00, 0xda, 0x5c, 0xa2, 0x2b, 0xec, 0xdd, + /* (2^138)P */ 0x5c, 0x21, 0x16, 0x0d, 0x46, 0xb8, 0xd0, 0xa7, 0x88, 0xe7, 0x25, 0xcb, 0x3e, 0x50, 0x73, 0x61, 0xe7, 0xaf, 0x5a, 0x3f, 0x47, 0x8b, 0x3d, 0x97, 0x79, 0x2c, 0xe6, 0x6d, 0x95, 0x74, 0x65, 0x70, 0x36, 0xfd, 0xd1, 0x9e, 0x13, 0x18, 0x63, 0xb1, 0x2d, 0x0b, 0xb5, 0x36, 0x3e, 0xe7, 0x35, 0x42, 0x3b, 0xe6, 0x1f, 0x4d, 0x9d, 0x59, 
0xa2, 0x43, + /* (2^139)P */ 0x8c, 0x0c, 0x7c, 0x24, 0x9e, 0xe0, 0xf8, 0x05, 0x1c, 0x9e, 0x1f, 0x31, 0xc0, 0x70, 0xb3, 0xfb, 0x4e, 0xf8, 0x0a, 0x57, 0xb7, 0x49, 0xb5, 0x73, 0xa1, 0x5f, 0x9b, 0x6a, 0x07, 0x6c, 0x87, 0x71, 0x87, 0xd4, 0xbe, 0x98, 0x1e, 0x98, 0xee, 0x52, 0xc1, 0x7b, 0x95, 0x0f, 0x28, 0x32, 0x36, 0x28, 0xd0, 0x3a, 0x0f, 0x7d, 0x2a, 0xa9, 0x62, 0xb9, + /* (2^140)P */ 0x97, 0xe6, 0x18, 0x77, 0xf9, 0x34, 0xac, 0xbc, 0xe0, 0x62, 0x9f, 0x42, 0xde, 0xbd, 0x2f, 0xf7, 0x1f, 0xb7, 0x14, 0x52, 0x8a, 0x79, 0xb2, 0x3f, 0xd2, 0x95, 0x71, 0x01, 0xe8, 0xaf, 0x8c, 0xa4, 0xa4, 0xa7, 0x27, 0xf3, 0x5c, 0xdf, 0x3e, 0x57, 0x7a, 0xf1, 0x76, 0x49, 0xe6, 0x42, 0x3f, 0x8f, 0x1e, 0x63, 0x4a, 0x65, 0xb5, 0x41, 0xf5, 0x02, + /* (2^141)P */ 0x72, 0x85, 0xc5, 0x0b, 0xe1, 0x47, 0x64, 0x02, 0xc5, 0x4d, 0x81, 0x69, 0xb2, 0xcf, 0x0f, 0x6c, 0xd4, 0x6d, 0xd0, 0xc7, 0xb4, 0x1c, 0xd0, 0x32, 0x59, 0x89, 0xe2, 0xe0, 0x96, 0x8b, 0x12, 0x98, 0xbf, 0x63, 0x7a, 0x4c, 0x76, 0x7e, 0x58, 0x17, 0x8f, 0x5b, 0x0a, 0x59, 0x65, 0x75, 0xbc, 0x61, 0x1f, 0xbe, 0xc5, 0x6e, 0x0a, 0x57, 0x52, 0x70, + /* (2^142)P */ 0x92, 0x1c, 0x77, 0xbb, 0x62, 0x02, 0x6c, 0x25, 0x9c, 0x66, 0x07, 0x83, 0xab, 0xcc, 0x80, 0x5d, 0xd2, 0x76, 0x0c, 0xa4, 0xc5, 0xb4, 0x8a, 0x68, 0x23, 0x31, 0x32, 0x29, 0x8a, 0x47, 0x92, 0x12, 0x80, 0xb3, 0xfa, 0x18, 0xe4, 0x8d, 0xc0, 0x4d, 0xfe, 0x97, 0x5f, 0x72, 0x41, 0xb5, 0x5c, 0x7a, 0xbd, 0xf0, 0xcf, 0x5e, 0x97, 0xaa, 0x64, 0x32, + /* (2^143)P */ 0x35, 0x3f, 0x75, 0xc1, 0x7a, 0x75, 0x7e, 0xa9, 0xc6, 0x0b, 0x4e, 0x32, 0x62, 0xec, 0xe3, 0x5c, 0xfb, 0x01, 0x43, 0xb6, 0xd4, 0x5b, 0x75, 0xd2, 0xee, 0x7f, 0x5d, 0x23, 0x2b, 0xb3, 0x54, 0x34, 0x4c, 0xd3, 0xb4, 0x32, 0x84, 0x81, 0xb5, 0x09, 0x76, 0x19, 0xda, 0x58, 0xda, 0x7c, 0xdb, 0x2e, 0xdd, 0x4c, 0x8e, 0xdd, 0x5d, 0x89, 0x10, 0x10, + /* (2^144)P */ 0x57, 0x25, 0x6a, 0x08, 0x37, 0x92, 0xa8, 0xdf, 0x24, 0xef, 0x8f, 0x33, 0x34, 0x52, 0xa4, 0x4c, 0xf0, 0x77, 0x9f, 0x69, 0x77, 0xd5, 0x8f, 0xd2, 0x9a, 0xb3, 0xb6, 0x1d, 0x2d, 0xa6, 0xf7, 0x1f, 0xda, 0xd7, 0xcb, 0x75, 0x11, 0xc3, 0x6b, 0xc0, 0x38, 0xb1, 0xd5, 0x2d, 0x96, 0x84, 0x16, 0xfa, 0x26, 0xb9, 0xcc, 0x3f, 0x16, 0x47, 0x23, 0x74, + /* (2^145)P */ 0x9b, 0x61, 0x2a, 0x1c, 0xdd, 0x39, 0xa5, 0xfa, 0x1c, 0x7d, 0x63, 0x50, 0xca, 0xe6, 0x9d, 0xfa, 0xb7, 0xc4, 0x4c, 0x6a, 0x97, 0x5f, 0x36, 0x4e, 0x47, 0xdd, 0x17, 0xf7, 0xf9, 0x19, 0xce, 0x75, 0x17, 0xad, 0xce, 0x2a, 0xf3, 0xfe, 0x27, 0x8f, 0x3e, 0x48, 0xc0, 0x60, 0x87, 0x24, 0x19, 0xae, 0x59, 0xe4, 0x5a, 0x00, 0x2a, 0xba, 0xa2, 0x1f, + /* (2^146)P */ 0x26, 0x88, 0x42, 0x60, 0x9f, 0x6e, 0x2c, 0x7c, 0x39, 0x0f, 0x47, 0x6a, 0x0e, 0x02, 0xbb, 0x4b, 0x34, 0x29, 0x55, 0x18, 0x36, 0xcf, 0x3b, 0x47, 0xf1, 0x2e, 0xfc, 0x6e, 0x94, 0xff, 0xe8, 0x6b, 0x06, 0xd2, 0xba, 0x77, 0x5e, 0x60, 0xd7, 0x19, 0xef, 0x02, 0x9d, 0x3a, 0xc2, 0xb7, 0xa9, 0xd8, 0x57, 0xee, 0x7e, 0x2b, 0xf2, 0x6d, 0x28, 0xda, + /* (2^147)P */ 0xdf, 0xd9, 0x92, 0x11, 0x98, 0x23, 0xe2, 0x45, 0x2f, 0x74, 0x70, 0xee, 0x0e, 0x55, 0x65, 0x79, 0x86, 0x38, 0x17, 0x92, 0x85, 0x87, 0x99, 0x50, 0xd9, 0x7c, 0xdb, 0xa1, 0x10, 0xec, 0x30, 0xb7, 0x40, 0xa3, 0x23, 0x9b, 0x0e, 0x27, 0x49, 0x29, 0x03, 0x94, 0xff, 0x53, 0xdc, 0xd7, 0xed, 0x49, 0xa9, 0x5a, 0x3b, 0xee, 0xd7, 0xc7, 0x65, 0xaf, + /* (2^148)P */ 0xa0, 0xbd, 0xbe, 0x03, 0xee, 0x0c, 0xbe, 0x32, 0x00, 0x7b, 0x52, 0xcb, 0x92, 0x29, 0xbf, 0xa0, 0xc6, 0xd9, 0xd2, 0xd6, 0x15, 0xe8, 0x3a, 0x75, 0x61, 0x65, 0x56, 0xae, 0xad, 0x3c, 0x2a, 0x64, 0x14, 0x3f, 0x8e, 0xc1, 0x2d, 0x0c, 0x8d, 0x20, 0xdb, 0x58, 0x4b, 0xe5, 0x40, 0x15, 0x4b, 0xdc, 0xa8, 0xbd, 0xef, 0x08, 0xa7, 0xd1, 0xf4, 0xb0, + /* 
(2^149)P */ 0xa9, 0x0f, 0x05, 0x94, 0x66, 0xac, 0x1f, 0x65, 0x3f, 0xe1, 0xb8, 0xe1, 0x34, 0x5e, 0x1d, 0x8f, 0xe3, 0x93, 0x03, 0x15, 0xff, 0xb6, 0x65, 0xb6, 0x6e, 0xc0, 0x2f, 0xd4, 0x2e, 0xb9, 0x2c, 0x13, 0x3c, 0x99, 0x1c, 0xb5, 0x87, 0xba, 0x79, 0xcb, 0xf0, 0x18, 0x06, 0x86, 0x04, 0x14, 0x25, 0x09, 0xcd, 0x1c, 0x14, 0xda, 0x35, 0xd0, 0x38, 0x3b, + /* (2^150)P */ 0x1b, 0x04, 0xa3, 0x27, 0xb4, 0xd3, 0x37, 0x48, 0x1e, 0x8f, 0x69, 0xd3, 0x5a, 0x2f, 0x20, 0x02, 0x36, 0xbe, 0x06, 0x7b, 0x6b, 0x6c, 0x12, 0x5b, 0x80, 0x74, 0x44, 0xe6, 0xf8, 0xf5, 0x95, 0x59, 0x29, 0xab, 0x51, 0x47, 0x83, 0x28, 0xe0, 0xad, 0xde, 0xaa, 0xd3, 0xb1, 0x1a, 0xcb, 0xa3, 0xcd, 0x8b, 0x6a, 0xb1, 0xa7, 0x0a, 0xd1, 0xf9, 0xbe, + /* (2^151)P */ 0xce, 0x2f, 0x85, 0xca, 0x74, 0x6d, 0x49, 0xb8, 0xce, 0x80, 0x44, 0xe0, 0xda, 0x5b, 0xcf, 0x2f, 0x79, 0x74, 0xfe, 0xb4, 0x2c, 0x99, 0x20, 0x6e, 0x09, 0x04, 0xfb, 0x6d, 0x57, 0x5b, 0x95, 0x0c, 0x45, 0xda, 0x4f, 0x7f, 0x63, 0xcc, 0x85, 0x5a, 0x67, 0x50, 0x68, 0x71, 0xb4, 0x67, 0xb1, 0x2e, 0xc1, 0x1c, 0xdc, 0xff, 0x2a, 0x7c, 0x10, 0x5e, + /* (2^152)P */ 0xa6, 0xde, 0xf3, 0xd4, 0x22, 0x30, 0x24, 0x9e, 0x0b, 0x30, 0x54, 0x59, 0x7e, 0xa2, 0xeb, 0x89, 0x54, 0x65, 0x3e, 0x40, 0xd1, 0xde, 0xe6, 0xee, 0x4d, 0xbf, 0x5e, 0x40, 0x1d, 0xee, 0x4f, 0x68, 0xd9, 0xa7, 0x2f, 0xb3, 0x64, 0xb3, 0xf5, 0xc8, 0xd3, 0xaa, 0x70, 0x70, 0x3d, 0xef, 0xd3, 0x95, 0x54, 0xdb, 0x3e, 0x94, 0x95, 0x92, 0x1f, 0x45, + /* (2^153)P */ 0x22, 0x80, 0x1d, 0x9d, 0x96, 0xa5, 0x78, 0x6f, 0xe0, 0x1e, 0x1b, 0x66, 0x42, 0xc8, 0xae, 0x9e, 0x46, 0x45, 0x08, 0x41, 0xdf, 0x80, 0xae, 0x6f, 0xdb, 0x15, 0x5a, 0x21, 0x31, 0x7a, 0xd0, 0xf2, 0x54, 0x15, 0x88, 0xd3, 0x0f, 0x7f, 0x14, 0x5a, 0x14, 0x97, 0xab, 0xf4, 0x58, 0x6a, 0x9f, 0xea, 0x74, 0xe5, 0x6b, 0x90, 0x59, 0x2b, 0x48, 0xd9, + /* (2^154)P */ 0x12, 0x24, 0x04, 0xf5, 0x50, 0xc2, 0x8c, 0xb0, 0x7c, 0x46, 0x98, 0xd5, 0x24, 0xad, 0xf6, 0x72, 0xdc, 0x82, 0x1a, 0x60, 0xc1, 0xeb, 0x48, 0xef, 0x7f, 0x6e, 0xe6, 0xcc, 0xdb, 0x7b, 0xae, 0xbe, 0x5e, 0x1e, 0x5c, 0xe6, 0x0a, 0x70, 0xdf, 0xa4, 0xa3, 0x85, 0x1b, 0x1b, 0x7f, 0x72, 0xb9, 0x96, 0x6f, 0xdc, 0x03, 0x76, 0x66, 0xfb, 0xa0, 0x33, + /* (2^155)P */ 0x37, 0x40, 0xbb, 0xbc, 0x68, 0x58, 0x86, 0xca, 0xbb, 0xa5, 0x24, 0x76, 0x3d, 0x48, 0xd1, 0xad, 0xb4, 0xa8, 0xcf, 0xc3, 0xb6, 0xa8, 0xba, 0x1a, 0x3a, 0xbe, 0x33, 0x75, 0x04, 0x5c, 0x13, 0x8c, 0x0d, 0x70, 0x8d, 0xa6, 0x4e, 0x2a, 0xeb, 0x17, 0x3c, 0x22, 0xdd, 0x3e, 0x96, 0x40, 0x11, 0x9e, 0x4e, 0xae, 0x3d, 0xf8, 0x91, 0xd7, 0x50, 0xc8, + /* (2^156)P */ 0xd8, 0xca, 0xde, 0x19, 0xcf, 0x00, 0xe4, 0x73, 0x18, 0x7f, 0x9b, 0x9f, 0xf4, 0x5b, 0x49, 0x49, 0x99, 0xdc, 0xa4, 0x46, 0x21, 0xb5, 0xd7, 0x3e, 0xb7, 0x47, 0x1b, 0xa9, 0x9f, 0x4c, 0x69, 0x7d, 0xec, 0x33, 0xd6, 0x1c, 0x51, 0x7f, 0x47, 0x74, 0x7a, 0x6c, 0xf3, 0xd2, 0x2e, 0xbf, 0xdf, 0x6c, 0x9e, 0x77, 0x3b, 0x34, 0xf6, 0x73, 0x80, 0xed, + /* (2^157)P */ 0x16, 0xfb, 0x16, 0xc3, 0xc2, 0x83, 0xe4, 0xf4, 0x03, 0x7f, 0x52, 0xb0, 0x67, 0x51, 0x7b, 0x24, 0x5a, 0x51, 0xd3, 0xb6, 0x4e, 0x59, 0x76, 0xcd, 0x08, 0x7b, 0x1d, 0x7a, 0x9c, 0x65, 0xae, 0xce, 0xaa, 0xd2, 0x1c, 0x85, 0x66, 0x68, 0x06, 0x15, 0xa8, 0x06, 0xe6, 0x16, 0x37, 0xf4, 0x49, 0x9e, 0x0f, 0x50, 0x37, 0xb1, 0xb2, 0x93, 0x70, 0x43, + /* (2^158)P */ 0x18, 0x3a, 0x16, 0xe5, 0x8d, 0xc8, 0x35, 0xd6, 0x7b, 0x09, 0xec, 0x61, 0x5f, 0x5c, 0x2a, 0x19, 0x96, 0x2e, 0xc3, 0xfd, 0xab, 0xe6, 0x23, 0xae, 0xab, 0xc5, 0xcb, 0xb9, 0x7b, 0x2d, 0x34, 0x51, 0xb9, 0x41, 0x9e, 0x7d, 0xca, 0xda, 0x25, 0x45, 0x14, 0xb0, 0xc7, 0x4d, 0x26, 0x2b, 0xfe, 0x43, 0xb0, 0x21, 0x5e, 0xfa, 0xdc, 0x7c, 0xf9, 0x5a, + /* (2^159)P */ 0x94, 0xad, 
0x42, 0x17, 0xf5, 0xcd, 0x1c, 0x0d, 0xf6, 0x41, 0xd2, 0x55, 0xbb, 0x50, 0xf1, 0xc6, 0xbc, 0xa6, 0xc5, 0x3a, 0xfd, 0x9b, 0x75, 0x3e, 0xf6, 0x1a, 0xa7, 0xb2, 0x6e, 0x64, 0x12, 0xdc, 0x3c, 0xe5, 0xf6, 0xfc, 0x3b, 0xfa, 0x43, 0x81, 0xd4, 0xa5, 0xee, 0xf5, 0x9c, 0x47, 0x2f, 0xd0, 0x9c, 0xde, 0xa1, 0x48, 0x91, 0x9a, 0x34, 0xc1, + /* (2^160)P */ 0x37, 0x1b, 0xb3, 0x88, 0xc9, 0x98, 0x4e, 0xfb, 0x84, 0x4f, 0x2b, 0x0a, 0xb6, 0x8f, 0x35, 0x15, 0xcd, 0x61, 0x7a, 0x5f, 0x5c, 0xa0, 0xca, 0x23, 0xa0, 0x93, 0x1f, 0xcc, 0x3c, 0x39, 0x3a, 0x24, 0xa7, 0x49, 0xad, 0x8d, 0x59, 0xcc, 0x94, 0x5a, 0x16, 0xf5, 0x70, 0xe8, 0x52, 0x1e, 0xee, 0x20, 0x30, 0x17, 0x7e, 0xf0, 0x4c, 0x93, 0x06, 0x5a, + /* (2^161)P */ 0x81, 0xba, 0x3b, 0xd7, 0x3e, 0xb4, 0x32, 0x3a, 0x22, 0x39, 0x2a, 0xfc, 0x19, 0xd9, 0xd2, 0xf6, 0xc5, 0x79, 0x6c, 0x0e, 0xde, 0xda, 0x01, 0xff, 0x52, 0xfb, 0xb6, 0x95, 0x4e, 0x7a, 0x10, 0xb8, 0x06, 0x86, 0x3c, 0xcd, 0x56, 0xd6, 0x15, 0xbf, 0x6e, 0x3e, 0x4f, 0x35, 0x5e, 0xca, 0xbc, 0xa5, 0x95, 0xa2, 0xdf, 0x2d, 0x1d, 0xaf, 0x59, 0xf9, + /* (2^162)P */ 0x69, 0xe5, 0xe2, 0xfa, 0xc9, 0x7f, 0xdd, 0x09, 0xf5, 0x6b, 0x4e, 0x2e, 0xbe, 0xb4, 0xbf, 0x3e, 0xb2, 0xf2, 0x81, 0x30, 0xe1, 0x07, 0xa8, 0x0d, 0x2b, 0xd2, 0x5a, 0x55, 0xbe, 0x4b, 0x86, 0x5d, 0xb0, 0x5e, 0x7c, 0x8f, 0xc1, 0x3c, 0x81, 0x4c, 0xf7, 0x6d, 0x7d, 0xe6, 0x4f, 0x8a, 0x85, 0xc2, 0x2f, 0x28, 0xef, 0x8c, 0x69, 0xc2, 0xc2, 0x1a, + /* (2^163)P */ 0xd9, 0xe4, 0x0e, 0x1e, 0xc2, 0xf7, 0x2f, 0x9f, 0xa1, 0x40, 0xfe, 0x46, 0x16, 0xaf, 0x2e, 0xd1, 0xec, 0x15, 0x9b, 0x61, 0x92, 0xce, 0xfc, 0x10, 0x43, 0x1d, 0x00, 0xf6, 0xbe, 0x20, 0x80, 0x80, 0x6f, 0x3c, 0x16, 0x94, 0x59, 0xba, 0x03, 0x53, 0x6e, 0xb6, 0xdd, 0x25, 0x7b, 0x86, 0xbf, 0x96, 0xf4, 0x2f, 0xa1, 0x96, 0x8d, 0xf9, 0xb3, 0x29, + /* (2^164)P */ 0x3b, 0x04, 0x60, 0x6e, 0xce, 0xab, 0xd2, 0x63, 0x18, 0x53, 0x88, 0x16, 0x4a, 0x6a, 0xab, 0x72, 0x03, 0x68, 0xa5, 0xd4, 0x0d, 0xb2, 0x82, 0x81, 0x1f, 0x2b, 0x5c, 0x75, 0xe8, 0xd2, 0x1d, 0x7f, 0xe7, 0x1b, 0x35, 0x02, 0xde, 0xec, 0xbd, 0xcb, 0xc7, 0x01, 0xd3, 0x95, 0x61, 0xfe, 0xb2, 0x7a, 0x66, 0x09, 0x4c, 0x6d, 0xfd, 0x39, 0xf7, 0x52, + /* (2^165)P */ 0x42, 0xc1, 0x5f, 0xf8, 0x35, 0x52, 0xc1, 0xfe, 0xc5, 0x11, 0x80, 0x1c, 0x11, 0x46, 0x31, 0x11, 0xbe, 0xd0, 0xc4, 0xb6, 0x07, 0x13, 0x38, 0xa0, 0x8d, 0x65, 0xf0, 0x56, 0x9e, 0x16, 0xbf, 0x9d, 0xcd, 0x51, 0x34, 0xf9, 0x08, 0x48, 0x7b, 0x76, 0x0c, 0x7b, 0x30, 0x07, 0xa8, 0x76, 0xaf, 0xa3, 0x29, 0x38, 0xb0, 0x58, 0xde, 0x72, 0x4b, 0x45, + /* (2^166)P */ 0xd4, 0x16, 0xa7, 0xc0, 0xb4, 0x9f, 0xdf, 0x1a, 0x37, 0xc8, 0x35, 0xed, 0xc5, 0x85, 0x74, 0x64, 0x09, 0x22, 0xef, 0xe9, 0x0c, 0xaf, 0x12, 0x4c, 0x9e, 0xf8, 0x47, 0x56, 0xe0, 0x7f, 0x4e, 0x24, 0x6b, 0x0c, 0xe7, 0xad, 0xc6, 0x47, 0x1d, 0xa4, 0x0d, 0x86, 0x89, 0x65, 0xe8, 0x5f, 0x71, 0xc7, 0xe9, 0xcd, 0xec, 0x6c, 0x62, 0xc7, 0xe3, 0xb3, + /* (2^167)P */ 0xb5, 0xea, 0x86, 0xe3, 0x15, 0x18, 0x3f, 0x6d, 0x7b, 0x05, 0x95, 0x15, 0x53, 0x26, 0x1c, 0xeb, 0xbe, 0x7e, 0x16, 0x42, 0x4b, 0xa2, 0x3d, 0xdd, 0x0e, 0xff, 0xba, 0x67, 0xb5, 0xae, 0x7a, 0x17, 0xde, 0x23, 0xad, 0x14, 0xcc, 0xd7, 0xaf, 0x57, 0x01, 0xe0, 0xdd, 0x48, 0xdd, 0xd7, 0xe3, 0xdf, 0xe9, 0x2d, 0xda, 0x67, 0xa4, 0x9f, 0x29, 0x04, + /* (2^168)P */ 0x16, 0x53, 0xe6, 0x9c, 0x4e, 0xe5, 0x1e, 0x70, 0x81, 0x25, 0x02, 0x9b, 0x47, 0x6d, 0xd2, 0x08, 0x73, 0xbe, 0x0a, 0xf1, 0x7b, 0xeb, 0x24, 0xeb, 0x38, 0x23, 0x5c, 0xb6, 0x3e, 0xce, 0x1e, 0xe3, 0xbc, 0x82, 0x35, 0x1f, 0xaf, 0x3a, 0x3a, 0xe5, 0x4e, 0xc1, 0xca, 0xbf, 0x47, 0xb4, 0xbb, 0xbc, 0x5f, 0xea, 0xc6, 0xca, 0xf3, 0xa0, 0xa2, 0x73, + /* (2^169)P */ 0xef, 0xa4, 0x7a, 0x4e, 0xe4, 0xc7, 
0xb6, 0x43, 0x2e, 0xa5, 0xe4, 0xa5, 0xba, 0x1e, 0xa5, 0xfe, 0x9e, 0xce, 0xa9, 0x80, 0x04, 0xcb, 0x4f, 0xd8, 0x74, 0x05, 0x48, 0xfa, 0x99, 0x11, 0x5d, 0x97, 0x3b, 0x07, 0x0d, 0xdd, 0xe6, 0xb1, 0x74, 0x87, 0x1a, 0xd3, 0x26, 0xb7, 0x8f, 0xe1, 0x63, 0x3d, 0xec, 0x53, 0x93, 0xb0, 0x81, 0x78, 0x34, 0xa4, + /* (2^170)P */ 0xe1, 0xe7, 0xd4, 0x58, 0x9d, 0x0e, 0x8b, 0x65, 0x66, 0x37, 0x16, 0x48, 0x6f, 0xaa, 0x42, 0x37, 0x77, 0xad, 0xb1, 0x56, 0x48, 0xdf, 0x65, 0x36, 0x30, 0xb8, 0x00, 0x12, 0xd8, 0x32, 0x28, 0x7f, 0xc1, 0x71, 0xeb, 0x93, 0x0f, 0x48, 0x04, 0xe1, 0x5a, 0x6a, 0x96, 0xc1, 0xca, 0x89, 0x6d, 0x1b, 0x82, 0x4c, 0x18, 0x6d, 0x55, 0x4b, 0xea, 0xfd, + /* (2^171)P */ 0x62, 0x1a, 0x53, 0xb4, 0xb1, 0xbe, 0x6f, 0x15, 0x18, 0x88, 0xd4, 0x66, 0x61, 0xc7, 0x12, 0x69, 0x02, 0xbd, 0x03, 0x23, 0x2b, 0xef, 0xf9, 0x54, 0xa4, 0x85, 0xa8, 0xe3, 0xb7, 0xbd, 0xa9, 0xa3, 0xf3, 0x2a, 0xdd, 0xf1, 0xd4, 0x03, 0x0f, 0xa9, 0xa1, 0xd8, 0xa3, 0xcd, 0xb2, 0x71, 0x90, 0x4b, 0x35, 0x62, 0xf2, 0x2f, 0xce, 0x67, 0x1f, 0xaa, + /* (2^172)P */ 0x9e, 0x1e, 0xcd, 0x43, 0x7e, 0x87, 0x37, 0x94, 0x3a, 0x97, 0x4c, 0x7e, 0xee, 0xc9, 0x37, 0x85, 0xf1, 0xd9, 0x4f, 0xbf, 0xf9, 0x6f, 0x39, 0x9a, 0x39, 0x87, 0x2e, 0x25, 0x84, 0x42, 0xc3, 0x80, 0xcb, 0x07, 0x22, 0xae, 0x30, 0xd5, 0x50, 0xa1, 0x23, 0xcc, 0x31, 0x81, 0x9d, 0xf1, 0x30, 0xd9, 0x2b, 0x73, 0x41, 0x16, 0x50, 0xab, 0x2d, 0xa2, + /* (2^173)P */ 0xa4, 0x69, 0x4f, 0xa1, 0x4e, 0xb9, 0xbf, 0x14, 0xe8, 0x2b, 0x04, 0x93, 0xb7, 0x6e, 0x9f, 0x7d, 0x73, 0x0a, 0xc5, 0x14, 0xb8, 0xde, 0x8c, 0xc1, 0xfe, 0xc0, 0xa7, 0xa4, 0xcc, 0x42, 0x42, 0x81, 0x15, 0x65, 0x8a, 0x80, 0xb9, 0xde, 0x1f, 0x60, 0x33, 0x0e, 0xcb, 0xfc, 0xe0, 0xdb, 0x83, 0xa1, 0xe5, 0xd0, 0x16, 0x86, 0x2c, 0xe2, 0x87, 0xed, + /* (2^174)P */ 0x7a, 0xc0, 0xeb, 0x6b, 0xf6, 0x0d, 0x4c, 0x6d, 0x1e, 0xdb, 0xab, 0xe7, 0x19, 0x45, 0xc6, 0xe3, 0xb2, 0x06, 0xbb, 0xbc, 0x70, 0x99, 0x83, 0x33, 0xeb, 0x28, 0xc8, 0x77, 0xf6, 0x4d, 0x01, 0xb7, 0x59, 0xa0, 0xd2, 0xb3, 0x2a, 0x72, 0x30, 0xe7, 0x11, 0x39, 0xb6, 0x41, 0x29, 0x65, 0x5a, 0x14, 0xb9, 0x86, 0x08, 0xe0, 0x7d, 0x32, 0x8c, 0xf0, + /* (2^175)P */ 0x5c, 0x11, 0x30, 0x9e, 0x05, 0x27, 0xf5, 0x45, 0x0f, 0xb3, 0xc9, 0x75, 0xc3, 0xd7, 0xe1, 0x82, 0x3b, 0x8e, 0x87, 0x23, 0x00, 0x15, 0x19, 0x07, 0xd9, 0x21, 0x53, 0xc7, 0xf1, 0xa3, 0xbf, 0x70, 0x64, 0x15, 0x18, 0xca, 0x23, 0x9e, 0xd3, 0x08, 0xc3, 0x2a, 0x8b, 0xe5, 0x83, 0x04, 0x89, 0x14, 0xfd, 0x28, 0x25, 0x1c, 0xe3, 0x26, 0xa7, 0x22, + /* (2^176)P */ 0xdc, 0xd4, 0x75, 0x60, 0x99, 0x94, 0xea, 0x09, 0x8e, 0x8a, 0x3c, 0x1b, 0xf9, 0xbd, 0x33, 0x0d, 0x51, 0x3d, 0x12, 0x6f, 0x4e, 0x72, 0xe0, 0x17, 0x20, 0xe9, 0x75, 0xe6, 0x3a, 0xb2, 0x13, 0x83, 0x4e, 0x7a, 0x08, 0x9e, 0xd1, 0x04, 0x5f, 0x6b, 0x42, 0x0b, 0x76, 0x2a, 0x2d, 0x77, 0x53, 0x6c, 0x65, 0x6d, 0x8e, 0x25, 0x3c, 0xb6, 0x8b, 0x69, + /* (2^177)P */ 0xb9, 0x49, 0x28, 0xd0, 0xdc, 0x6c, 0x8f, 0x4c, 0xc9, 0x14, 0x8a, 0x38, 0xa3, 0xcb, 0xc4, 0x9d, 0x53, 0xcf, 0xe9, 0xe3, 0xcf, 0xe0, 0xb1, 0xf2, 0x1b, 0x4c, 0x7f, 0x83, 0x2a, 0x7a, 0xe9, 0x8b, 0x3b, 0x86, 0x61, 0x30, 0xe9, 0x99, 0xbd, 0xba, 0x19, 0x6e, 0x65, 0x2a, 0x12, 0x3e, 0x9c, 0xa8, 0xaf, 0xc3, 0xcf, 0xf8, 0x1f, 0x77, 0x86, 0xea, + /* (2^178)P */ 0x30, 0xde, 0xe7, 0xff, 0x54, 0xf7, 0xa2, 0x59, 0xf6, 0x0b, 0xfb, 0x7a, 0xf2, 0x39, 0xf0, 0xdb, 0x39, 0xbc, 0xf0, 0xfa, 0x60, 0xeb, 0x6b, 0x4f, 0x47, 0x17, 0xc8, 0x00, 0x65, 0x6d, 0x25, 0x1c, 0xd0, 0x48, 0x56, 0x53, 0x45, 0x11, 0x30, 0x02, 0x49, 0x20, 0x27, 0xac, 0xf2, 0x4c, 0xac, 0x64, 0x3d, 0x52, 0xb8, 0x89, 0xe0, 0x93, 0x16, 0x0f, + /* (2^179)P */ 0x84, 0x09, 0xba, 0x40, 0xb2, 0x2f, 0xa3, 0xa8, 0xc2, 0xba, 
0x46, 0x33, 0x05, 0x9d, 0x62, 0xad, 0xa1, 0x3c, 0x33, 0xef, 0x0d, 0xeb, 0xf0, 0x77, 0x11, 0x5a, 0xb0, 0x21, 0x9c, 0xdf, 0x55, 0x24, 0x25, 0x35, 0x51, 0x61, 0x92, 0xf0, 0xb1, 0xce, 0xf5, 0xd4, 0x7b, 0x6c, 0x21, 0x9d, 0x56, 0x52, 0xf8, 0xa1, 0x4c, 0xe9, 0x27, 0x55, 0xac, 0x91, + /* (2^180)P */ 0x03, 0x3e, 0x30, 0xd2, 0x0a, 0xfa, 0x7d, 0x82, 0x3d, 0x1f, 0x8b, 0xcb, 0xb6, 0x04, 0x5c, 0xcc, 0x8b, 0xda, 0xe2, 0x68, 0x74, 0x08, 0x8c, 0x44, 0x83, 0x57, 0x6d, 0x6f, 0x80, 0xb0, 0x7e, 0xa9, 0x82, 0x91, 0x7b, 0x4c, 0x37, 0x97, 0xd1, 0x63, 0xd1, 0xbd, 0x45, 0xe6, 0x8a, 0x86, 0xd6, 0x89, 0x54, 0xfd, 0xd2, 0xb1, 0xd7, 0x54, 0xad, 0xaf, + /* (2^181)P */ 0x8b, 0x33, 0x62, 0x49, 0x9f, 0x63, 0xf9, 0x87, 0x42, 0x58, 0xbf, 0xb3, 0xe6, 0x68, 0x02, 0x60, 0x5c, 0x76, 0x62, 0xf7, 0x61, 0xd7, 0x36, 0x31, 0xf7, 0x9c, 0xb5, 0xe5, 0x13, 0x6c, 0xea, 0x78, 0xae, 0xcf, 0xde, 0xbf, 0xb6, 0xeb, 0x4f, 0xc8, 0x2a, 0xb4, 0x9a, 0x9f, 0xf3, 0xd1, 0x6a, 0xec, 0x0c, 0xbd, 0x85, 0x98, 0x40, 0x06, 0x1c, 0x2a, + /* (2^182)P */ 0x74, 0x3b, 0xe7, 0x81, 0xd5, 0xae, 0x54, 0x56, 0x03, 0xe8, 0x97, 0x16, 0x76, 0xcf, 0x24, 0x96, 0x96, 0x5b, 0xcc, 0x09, 0xab, 0x23, 0x6f, 0x54, 0xae, 0x8f, 0xe4, 0x12, 0xcb, 0xfd, 0xbc, 0xac, 0x93, 0x45, 0x3d, 0x68, 0x08, 0x22, 0x59, 0xc6, 0xf0, 0x47, 0x19, 0x8c, 0x79, 0x93, 0x1e, 0x0e, 0x30, 0xb0, 0x94, 0xfb, 0x17, 0x1d, 0x5a, 0x12, + /* (2^183)P */ 0x85, 0xff, 0x40, 0x18, 0x85, 0xff, 0x44, 0x37, 0x69, 0x23, 0x4d, 0x34, 0xe1, 0xeb, 0xa3, 0x1b, 0x55, 0x40, 0xc1, 0x64, 0xf4, 0xd4, 0x13, 0x0a, 0x9f, 0xb9, 0x19, 0xfc, 0x88, 0x7d, 0xc0, 0x72, 0xcf, 0x69, 0x2f, 0xd2, 0x0c, 0x82, 0x0f, 0xda, 0x08, 0xba, 0x0f, 0xaa, 0x3b, 0xe9, 0xe5, 0x83, 0x7a, 0x06, 0xe8, 0x1b, 0x38, 0x43, 0xc3, 0x54, + /* (2^184)P */ 0x14, 0xaa, 0xb3, 0x6e, 0xe6, 0x28, 0xee, 0xc5, 0x22, 0x6c, 0x7c, 0xf9, 0xa8, 0x71, 0xcc, 0xfe, 0x68, 0x7e, 0xd3, 0xb8, 0x37, 0x96, 0xca, 0x0b, 0xd9, 0xb6, 0x06, 0xa9, 0xf6, 0x71, 0xe8, 0x31, 0xf7, 0xd8, 0xf1, 0x5d, 0xab, 0xb9, 0xf0, 0x5c, 0x98, 0xcf, 0x22, 0xa2, 0x2a, 0xf6, 0xd0, 0x59, 0xf0, 0x9d, 0xd9, 0x6a, 0x4f, 0x59, 0x57, 0xad, + /* (2^185)P */ 0xd7, 0x2b, 0x3d, 0x38, 0x4c, 0x2e, 0x23, 0x4d, 0x49, 0xa2, 0x62, 0x62, 0xf9, 0x0f, 0xde, 0x08, 0xf3, 0x86, 0x71, 0xb6, 0xc7, 0xf9, 0x85, 0x9c, 0x33, 0xa1, 0xcf, 0x16, 0xaa, 0x60, 0xb9, 0xb7, 0xea, 0xed, 0x01, 0x1c, 0x59, 0xdb, 0x3f, 0x3f, 0x97, 0x2e, 0xf0, 0x09, 0x9f, 0x10, 0x85, 0x5f, 0x53, 0x39, 0xf3, 0x13, 0x40, 0x56, 0x95, 0xf9, + /* (2^186)P */ 0xb4, 0xe3, 0xda, 0xc6, 0x1f, 0x78, 0x8e, 0xac, 0xd4, 0x20, 0x1d, 0xa0, 0xbf, 0x4c, 0x09, 0x16, 0xa7, 0x30, 0xb5, 0x8d, 0x9e, 0xa1, 0x5f, 0x6d, 0x52, 0xf4, 0x71, 0xb6, 0x32, 0x2d, 0x21, 0x51, 0xc6, 0xfc, 0x2f, 0x08, 0xf4, 0x13, 0x6c, 0x55, 0xba, 0x72, 0x81, 0x24, 0x49, 0x0e, 0x4f, 0x06, 0x36, 0x39, 0x6a, 0xc5, 0x81, 0xfc, 0xeb, 0xb2, + /* (2^187)P */ 0x7d, 0x8d, 0xc8, 0x6c, 0xea, 0xb4, 0xb9, 0xe8, 0x40, 0xc9, 0x69, 0xc9, 0x30, 0x05, 0xfd, 0x34, 0x46, 0xfd, 0x94, 0x05, 0x16, 0xf5, 0x4b, 0x13, 0x3d, 0x24, 0x1a, 0xd6, 0x64, 0x2b, 0x9c, 0xe2, 0xa5, 0xd9, 0x98, 0xe0, 0xe8, 0xf4, 0xbc, 0x2c, 0xbd, 0xa2, 0x56, 0xe3, 0x9e, 0x14, 0xdb, 0xbf, 0x05, 0xbf, 0x9a, 0x13, 0x5d, 0xf7, 0x91, 0xa3, + /* (2^188)P */ 0x8b, 0xcb, 0x27, 0xf3, 0x15, 0x26, 0x05, 0x40, 0x0f, 0xa6, 0x15, 0x13, 0x71, 0x95, 0xa2, 0xc6, 0x38, 0x04, 0x67, 0xf8, 0x9a, 0x83, 0x06, 0xaa, 0x25, 0x36, 0x72, 0x01, 0x6f, 0x74, 0x5f, 0xe5, 0x6e, 0x44, 0x99, 0xce, 0x13, 0xbc, 0x82, 0xc2, 0x0d, 0xa4, 0x98, 0x50, 0x38, 0xf3, 0xa2, 0xc5, 0xe5, 0x24, 0x1f, 0x6f, 0x56, 0x3e, 0x07, 0xb2, + /* (2^189)P */ 0xbd, 0x0f, 0x32, 0x60, 0x07, 0xb1, 0xd7, 0x0b, 0x11, 0x07, 0x57, 0x02, 0x89, 0xe8, 
0x8b, 0xe8, 0x5a, 0x1f, 0xee, 0x54, 0x6b, 0xff, 0xb3, 0x04, 0x07, 0x57, 0x13, 0x0b, 0x94, 0xa8, 0x4d, 0x81, 0xe2, 0x17, 0x16, 0x45, 0xd4, 0x4b, 0xf7, 0x7e, 0x64, 0x66, 0x20, 0xe8, 0x0b, 0x26, 0xfd, 0xa9, 0x8a, 0x47, 0x52, 0x89, 0x14, 0xd0, 0xd1, 0xa1, + /* (2^190)P */ 0xdc, 0x03, 0xe6, 0x20, 0x44, 0x47, 0x8f, 0x04, 0x16, 0x24, 0x22, 0xc1, 0x55, 0x5c, 0xbe, 0x43, 0xc3, 0x92, 0xc5, 0x54, 0x3d, 0x5d, 0xd1, 0x05, 0x9c, 0xc6, 0x7c, 0xbf, 0x23, 0x84, 0x1a, 0xba, 0x4f, 0x1f, 0xfc, 0xa1, 0xae, 0x1a, 0x64, 0x02, 0x51, 0xf1, 0xcb, 0x7a, 0x20, 0xce, 0xb2, 0x34, 0x3c, 0xca, 0xe0, 0xe4, 0xba, 0x22, 0xd4, 0x7b, + /* (2^191)P */ 0xca, 0xfd, 0xca, 0xd7, 0xde, 0x61, 0xae, 0xf0, 0x79, 0x0c, 0x20, 0xab, 0xbc, 0x6f, 0x4d, 0x61, 0xf0, 0xc7, 0x9c, 0x8d, 0x4b, 0x52, 0xf3, 0xb9, 0x48, 0x63, 0x0b, 0xb6, 0xd2, 0x25, 0x9a, 0x96, 0x72, 0xc1, 0x6b, 0x0c, 0xb5, 0xfb, 0x71, 0xaa, 0xad, 0x47, 0x5b, 0xe7, 0xc0, 0x0a, 0x55, 0xb2, 0xd4, 0x16, 0x2f, 0xb1, 0x01, 0xfd, 0xce, 0x27, + /* (2^192)P */ 0x64, 0x11, 0x4b, 0xab, 0x57, 0x09, 0xc6, 0x49, 0x4a, 0x37, 0xc3, 0x36, 0xc4, 0x7b, 0x81, 0x1f, 0x42, 0xed, 0xbb, 0xe0, 0xa0, 0x8d, 0x51, 0xe6, 0xca, 0x8b, 0xb9, 0xcd, 0x99, 0x2d, 0x91, 0x53, 0xa9, 0x47, 0xcb, 0x32, 0xc7, 0xa4, 0x92, 0xec, 0x46, 0x74, 0x44, 0x6d, 0x71, 0x9f, 0x6d, 0x0c, 0x69, 0xa4, 0xf8, 0xbe, 0x9f, 0x7f, 0xa0, 0xd7, + /* (2^193)P */ 0x5f, 0x33, 0xb6, 0x91, 0xc8, 0xa5, 0x3f, 0x5d, 0x7f, 0x38, 0x6e, 0x74, 0x20, 0x4a, 0xd6, 0x2b, 0x98, 0x2a, 0x41, 0x4b, 0x83, 0x64, 0x0b, 0x92, 0x7a, 0x06, 0x1e, 0xc6, 0x2c, 0xf6, 0xe4, 0x91, 0xe5, 0xb1, 0x2e, 0x6e, 0x4e, 0xa8, 0xc8, 0x14, 0x32, 0x57, 0x44, 0x1c, 0xe4, 0xb9, 0x7f, 0x54, 0x51, 0x08, 0x81, 0xaa, 0x4e, 0xce, 0xa1, 0x5d, + /* (2^194)P */ 0x5c, 0xd5, 0x9b, 0x5e, 0x7c, 0xb5, 0xb1, 0x52, 0x73, 0x00, 0x41, 0x56, 0x79, 0x08, 0x7e, 0x07, 0x28, 0x06, 0xa6, 0xfb, 0x7f, 0x69, 0xbd, 0x7a, 0x3c, 0xae, 0x9f, 0x39, 0xbb, 0x54, 0xa2, 0x79, 0xb9, 0x0e, 0x7f, 0xbb, 0xe0, 0xe6, 0xb7, 0x27, 0x64, 0x38, 0x45, 0xdb, 0x84, 0xe4, 0x61, 0x72, 0x3f, 0xe2, 0x24, 0xfe, 0x7a, 0x31, 0x9a, 0xc9, + /* (2^195)P */ 0xa1, 0xd2, 0xa4, 0xee, 0x24, 0x96, 0xe5, 0x5b, 0x79, 0x78, 0x3c, 0x7b, 0x82, 0x3b, 0x8b, 0x58, 0x0b, 0xa3, 0x63, 0x2d, 0xbc, 0x75, 0x46, 0xe8, 0x83, 0x1a, 0xc0, 0x2a, 0x92, 0x61, 0xa8, 0x75, 0x37, 0x3c, 0xbf, 0x0f, 0xef, 0x8f, 0x6c, 0x97, 0x75, 0x10, 0x05, 0x7a, 0xde, 0x23, 0xe8, 0x2a, 0x35, 0xeb, 0x41, 0x64, 0x7d, 0xcf, 0xe0, 0x52, + /* (2^196)P */ 0x4a, 0xd0, 0x49, 0x93, 0xae, 0xf3, 0x24, 0x8c, 0xe1, 0x09, 0x98, 0x45, 0xd8, 0xb9, 0xfe, 0x8e, 0x8c, 0xa8, 0x2c, 0xc9, 0x9f, 0xce, 0x01, 0xdc, 0x38, 0x11, 0xab, 0x85, 0xb9, 0xe8, 0x00, 0x51, 0xfd, 0x82, 0xe1, 0x9b, 0x4e, 0xfc, 0xb5, 0x2a, 0x0f, 0x8b, 0xda, 0x4e, 0x02, 0xca, 0xcc, 0xe3, 0x91, 0xc4, 0xe0, 0xcf, 0x7b, 0xd6, 0xe6, 0x6a, + /* (2^197)P */ 0xfe, 0x11, 0xd7, 0xaa, 0xe3, 0x0c, 0x52, 0x2e, 0x04, 0xe0, 0xe0, 0x61, 0xc8, 0x05, 0xd7, 0x31, 0x4c, 0xc3, 0x9b, 0x2d, 0xce, 0x59, 0xbe, 0x12, 0xb7, 0x30, 0x21, 0xfc, 0x81, 0xb8, 0x5e, 0x57, 0x73, 0xd0, 0xad, 0x8e, 0x9e, 0xe4, 0xeb, 0xcd, 0xcf, 0xd2, 0x0f, 0x01, 0x35, 0x16, 0xed, 0x7a, 0x43, 0x8e, 0x42, 0xdc, 0xea, 0x4c, 0xa8, 0x7c, + /* (2^198)P */ 0x37, 0x26, 0xcc, 0x76, 0x0b, 0xe5, 0x76, 0xdd, 0x3e, 0x19, 0x3c, 0xc4, 0x6c, 0x7f, 0xd0, 0x03, 0xc1, 0xb8, 0x59, 0x82, 0xca, 0x36, 0xc1, 0xe4, 0xc8, 0xb2, 0x83, 0x69, 0x9c, 0xc5, 0x9d, 0x12, 0x82, 0x1c, 0xea, 0xb2, 0x84, 0x9f, 0xf3, 0x52, 0x6b, 0xbb, 0xd8, 0x81, 0x56, 0x83, 0x04, 0x66, 0x05, 0x22, 0x49, 0x37, 0x93, 0xb1, 0xfd, 0xd5, + /* (2^199)P */ 0xaf, 0x96, 0xbf, 0x03, 0xbe, 0xe6, 0x5d, 0x78, 0x19, 0xba, 0x37, 0x46, 0x0a, 0x2b, 0x52, 0x7c, 0xd8, 0x51, 
0x9e, 0x3d, 0x29, 0x42, 0xdb, 0x0e, 0x31, 0x20, 0x94, 0xf8, 0x43, 0x9a, 0x2d, 0x22, 0xd3, 0xe3, 0xa1, 0x79, 0x68, 0xfb, 0x2d, 0x7e, 0xd6, 0x79, 0xda, 0x0b, 0xc6, 0x5b, 0x76, 0x68, 0xf0, 0xfe, 0x72, 0x59, 0xbb, 0xa1, 0x9c, 0x74, + /* (2^200)P */ 0x0a, 0xd9, 0xec, 0xc5, 0xbd, 0xf0, 0xda, 0xcf, 0x82, 0xab, 0x46, 0xc5, 0x32, 0x13, 0xdc, 0x5b, 0xac, 0xc3, 0x53, 0x9a, 0x7f, 0xef, 0xa5, 0x40, 0x5a, 0x1f, 0xc1, 0x12, 0x91, 0x54, 0x83, 0x6a, 0xb0, 0x9a, 0x85, 0x4d, 0xbf, 0x36, 0x8e, 0xd3, 0xa2, 0x2b, 0xe5, 0xd6, 0xc6, 0xe1, 0x58, 0x5b, 0x82, 0x9b, 0xc8, 0xf2, 0x03, 0xba, 0xf5, 0x92, + /* (2^201)P */ 0xfb, 0x21, 0x7e, 0xde, 0xe7, 0xb4, 0xc0, 0x56, 0x86, 0x3a, 0x5b, 0x78, 0xf8, 0xf0, 0xf4, 0xe7, 0x5c, 0x00, 0xd2, 0xd7, 0xd6, 0xf8, 0x75, 0x5e, 0x0f, 0x3e, 0xd1, 0x4b, 0x77, 0xd8, 0xad, 0xb0, 0xc9, 0x8b, 0x59, 0x7d, 0x30, 0x76, 0x64, 0x7a, 0x76, 0xd9, 0x51, 0x69, 0xfc, 0xbd, 0x8e, 0xb5, 0x55, 0xe0, 0xd2, 0x07, 0x15, 0xa9, 0xf7, 0xa4, + /* (2^202)P */ 0xaa, 0x2d, 0x2f, 0x2b, 0x3c, 0x15, 0xdd, 0xcd, 0xe9, 0x28, 0x82, 0x4f, 0xa2, 0xaa, 0x31, 0x48, 0xcc, 0xfa, 0x07, 0x73, 0x8a, 0x34, 0x74, 0x0d, 0xab, 0x1a, 0xca, 0xd2, 0xbf, 0x3a, 0xdb, 0x1a, 0x5f, 0x50, 0x62, 0xf4, 0x6b, 0x83, 0x38, 0x43, 0x96, 0xee, 0x6b, 0x39, 0x1e, 0xf0, 0x17, 0x80, 0x1e, 0x9b, 0xed, 0x2b, 0x2f, 0xcc, 0x65, 0xf7, + /* (2^203)P */ 0x03, 0xb3, 0x23, 0x9c, 0x0d, 0xd1, 0xeb, 0x7e, 0x34, 0x17, 0x8a, 0x4c, 0xde, 0x54, 0x39, 0xc4, 0x11, 0x82, 0xd3, 0xa4, 0x00, 0x32, 0x95, 0x9c, 0xa6, 0x64, 0x76, 0x6e, 0xd6, 0x53, 0x27, 0xb4, 0x6a, 0x14, 0x8c, 0x54, 0xf6, 0x58, 0x9e, 0x22, 0x4a, 0x55, 0x18, 0x77, 0xd0, 0x08, 0x6b, 0x19, 0x8a, 0xb5, 0xe7, 0x19, 0xb8, 0x60, 0x92, 0xb1, + /* (2^204)P */ 0x66, 0xec, 0xf3, 0x12, 0xde, 0x67, 0x7f, 0xd4, 0x5b, 0xf6, 0x70, 0x64, 0x0a, 0xb5, 0xc2, 0xf9, 0xb3, 0x64, 0xab, 0x56, 0x46, 0xc7, 0x93, 0xc2, 0x8b, 0x2d, 0xd0, 0xd6, 0x39, 0x3b, 0x1f, 0xcd, 0xb3, 0xac, 0xcc, 0x2c, 0x27, 0x6a, 0xbc, 0xb3, 0x4b, 0xa8, 0x3c, 0x69, 0x20, 0xe2, 0x18, 0x35, 0x17, 0xe1, 0x8a, 0xd3, 0x11, 0x74, 0xaa, 0x4d, + /* (2^205)P */ 0x96, 0xc4, 0x16, 0x7e, 0xfd, 0xf5, 0xd0, 0x7d, 0x1f, 0x32, 0x1b, 0xdb, 0xa6, 0xfd, 0x51, 0x75, 0x4d, 0xd7, 0x00, 0xe5, 0x7f, 0x58, 0x5b, 0xeb, 0x4b, 0x6a, 0x78, 0xfe, 0xe5, 0xd6, 0x8f, 0x99, 0x17, 0xca, 0x96, 0x45, 0xf7, 0x52, 0xdf, 0x84, 0x06, 0x77, 0xb9, 0x05, 0x63, 0x5d, 0xe9, 0x91, 0xb1, 0x4b, 0x82, 0x5a, 0xdb, 0xd7, 0xca, 0x69, + /* (2^206)P */ 0x02, 0xd3, 0x38, 0x38, 0x87, 0xea, 0xbd, 0x9f, 0x11, 0xca, 0xf3, 0x21, 0xf1, 0x9b, 0x35, 0x97, 0x98, 0xff, 0x8e, 0x6d, 0x3d, 0xd6, 0xb2, 0xfa, 0x68, 0xcb, 0x7e, 0x62, 0x85, 0xbb, 0xc7, 0x5d, 0xee, 0x32, 0x30, 0x2e, 0x71, 0x96, 0x63, 0x43, 0x98, 0xc4, 0xa7, 0xde, 0x60, 0xb2, 0xd9, 0x43, 0x4a, 0xfa, 0x97, 0x2d, 0x5f, 0x21, 0xd4, 0xfe, + /* (2^207)P */ 0x3b, 0x20, 0x29, 0x07, 0x07, 0xb5, 0x78, 0xc3, 0xc7, 0xab, 0x56, 0xba, 0x40, 0xde, 0x1d, 0xcf, 0xc3, 0x00, 0x56, 0x21, 0x0c, 0xc8, 0x42, 0xd9, 0x0e, 0xcd, 0x02, 0x7c, 0x07, 0xb9, 0x11, 0xd7, 0x96, 0xaf, 0xff, 0xad, 0xc5, 0xba, 0x30, 0x6d, 0x82, 0x3a, 0xbf, 0xef, 0x7b, 0xf7, 0x0a, 0x74, 0xbd, 0x31, 0x0c, 0xe4, 0xec, 0x1a, 0xe5, 0xc5, + /* (2^208)P */ 0xcc, 0xf2, 0x28, 0x16, 0x12, 0xbf, 0xef, 0x85, 0xbc, 0xf7, 0xcb, 0x9f, 0xdb, 0xa8, 0xb2, 0x49, 0x53, 0x48, 0xa8, 0x24, 0xa8, 0x68, 0x8d, 0xbb, 0x21, 0x0a, 0x5a, 0xbd, 0xb2, 0x91, 0x61, 0x47, 0xc4, 0x43, 0x08, 0xa6, 0x19, 0xef, 0x8e, 0x88, 0x39, 0xc6, 0x33, 0x30, 0xf3, 0x0e, 0xc5, 0x92, 0x66, 0xd6, 0xfe, 0xc5, 0x12, 0xd9, 0x4c, 0x2d, + /* (2^209)P */ 0x30, 0x34, 0x07, 0xbf, 0x9c, 0x5a, 0x4e, 0x65, 0xf1, 0x39, 0x35, 0x38, 0xae, 0x7b, 0x55, 0xac, 0x6a, 0x92, 0x24, 0x7e, 0x50, 0xd3, 
0xba, 0x78, 0x51, 0xfe, 0x4d, 0x32, 0x05, 0x11, 0xf5, 0x52, 0xf1, 0x31, 0x45, 0x39, 0x98, 0x7b, 0x28, 0x56, 0xc3, 0x5d, 0x4f, 0x07, 0x6f, 0x84, 0xb8, 0x1a, 0x58, 0x0b, 0xc4, 0x7c, 0xc4, 0x8d, 0x32, 0x8e, + /* (2^210)P */ 0x7e, 0xaf, 0x98, 0xce, 0xc5, 0x2b, 0x9d, 0xf6, 0xfa, 0x2c, 0xb6, 0x2a, 0x5a, 0x1d, 0xc0, 0x24, 0x8d, 0xa4, 0xce, 0xb1, 0x12, 0x01, 0xf9, 0x79, 0xc6, 0x79, 0x38, 0x0c, 0xd4, 0x07, 0xc9, 0xf7, 0x37, 0xa1, 0x0b, 0xfe, 0x72, 0xec, 0x5d, 0xd6, 0xb0, 0x1c, 0x70, 0xbe, 0x70, 0x01, 0x13, 0xe0, 0x86, 0x95, 0xc7, 0x2e, 0x12, 0x3b, 0xe6, 0xa6, + /* (2^211)P */ 0x24, 0x82, 0x67, 0xe0, 0x14, 0x7b, 0x56, 0x08, 0x38, 0x44, 0xdb, 0xa0, 0x3a, 0x05, 0x47, 0xb2, 0xc0, 0xac, 0xd1, 0xcc, 0x3f, 0x82, 0xb8, 0x8a, 0x88, 0xbc, 0xf5, 0x33, 0xa1, 0x35, 0x0f, 0xf6, 0xe2, 0xef, 0x6c, 0xf7, 0x37, 0x9e, 0xe8, 0x10, 0xca, 0xb0, 0x8e, 0x80, 0x86, 0x00, 0x23, 0xd0, 0x4a, 0x76, 0x9f, 0xf7, 0x2c, 0x52, 0x15, 0x0e, + /* (2^212)P */ 0x5e, 0x49, 0xe1, 0x2c, 0x9a, 0x01, 0x76, 0xa6, 0xb3, 0x07, 0x5b, 0xa4, 0x07, 0xef, 0x1d, 0xc3, 0x6a, 0xbb, 0x64, 0xbe, 0x71, 0x15, 0x6e, 0x32, 0x31, 0x46, 0x9a, 0x9e, 0x8f, 0x45, 0x73, 0xce, 0x0b, 0x94, 0x1a, 0x52, 0x07, 0xf4, 0x50, 0x30, 0x49, 0x53, 0x50, 0xfb, 0x71, 0x1f, 0x5a, 0x03, 0xa9, 0x76, 0xf2, 0x8f, 0x42, 0xff, 0xed, 0xed, + /* (2^213)P */ 0xed, 0x08, 0xdb, 0x91, 0x1c, 0xee, 0xa2, 0xb4, 0x47, 0xa2, 0xfa, 0xcb, 0x03, 0xd1, 0xff, 0x8c, 0xad, 0x64, 0x50, 0x61, 0xcd, 0xfc, 0x88, 0xa0, 0x31, 0x95, 0x30, 0xb9, 0x58, 0xdd, 0xd7, 0x43, 0xe4, 0x46, 0xc2, 0x16, 0xd9, 0x72, 0x4a, 0x56, 0x51, 0x70, 0x85, 0xf1, 0xa1, 0x80, 0x40, 0xd5, 0xba, 0x67, 0x81, 0xda, 0xcd, 0x03, 0xea, 0x51, + /* (2^214)P */ 0x42, 0x50, 0xf0, 0xef, 0x37, 0x61, 0x72, 0x85, 0xe1, 0xf1, 0xff, 0x6f, 0x3d, 0xe8, 0x7b, 0x21, 0x5c, 0xe5, 0x50, 0x03, 0xde, 0x00, 0xc1, 0xf7, 0x3a, 0x55, 0x12, 0x1c, 0x9e, 0x1e, 0xce, 0xd1, 0x2f, 0xaf, 0x05, 0x70, 0x5b, 0x47, 0xf2, 0x04, 0x7a, 0x89, 0xbc, 0x78, 0xa6, 0x65, 0x6c, 0xaa, 0x3c, 0xa2, 0x3c, 0x8b, 0x5c, 0xa9, 0x22, 0x48, + /* (2^215)P */ 0x7e, 0x8c, 0x8f, 0x2f, 0x60, 0xe3, 0x5a, 0x94, 0xd4, 0xce, 0xdd, 0x9d, 0x83, 0x3b, 0x77, 0x78, 0x43, 0x1d, 0xfd, 0x8f, 0xc8, 0xe8, 0x02, 0x90, 0xab, 0xf6, 0xc9, 0xfc, 0xf1, 0x63, 0xaa, 0x5f, 0x42, 0xf1, 0x78, 0x34, 0x64, 0x16, 0x75, 0x9c, 0x7d, 0xd0, 0xe4, 0x74, 0x5a, 0xa8, 0xfb, 0xcb, 0xac, 0x20, 0xa3, 0xc2, 0xa6, 0x20, 0xf8, 0x1b, + /* (2^216)P */ 0x00, 0x4f, 0x1e, 0x56, 0xb5, 0x34, 0xb2, 0x87, 0x31, 0xe5, 0xee, 0x8d, 0xf1, 0x41, 0x67, 0xb7, 0x67, 0x3a, 0x54, 0x86, 0x5c, 0xf0, 0x0b, 0x37, 0x2f, 0x1b, 0x92, 0x5d, 0x58, 0x93, 0xdc, 0xd8, 0x58, 0xcc, 0x9e, 0x67, 0xd0, 0x97, 0x3a, 0xaf, 0x49, 0x39, 0x2d, 0x3b, 0xd8, 0x98, 0xfb, 0x76, 0x6b, 0xe7, 0xaf, 0xc3, 0x45, 0x44, 0x53, 0x94, + /* (2^217)P */ 0x30, 0xbd, 0x90, 0x75, 0xd3, 0xbd, 0x3b, 0x58, 0x27, 0x14, 0x9f, 0x6b, 0xd4, 0x31, 0x99, 0xcd, 0xde, 0x3a, 0x21, 0x1e, 0xb4, 0x02, 0xe4, 0x33, 0x04, 0x02, 0xb0, 0x50, 0x66, 0x68, 0x90, 0xdd, 0x7b, 0x69, 0x31, 0xd9, 0xcf, 0x68, 0x73, 0xf1, 0x60, 0xdd, 0xc8, 0x1d, 0x5d, 0xe3, 0xd6, 0x5b, 0x2a, 0xa4, 0xea, 0xc4, 0x3f, 0x08, 0xcd, 0x9c, + /* (2^218)P */ 0x6b, 0x1a, 0xbf, 0x55, 0xc1, 0x1b, 0x0c, 0x05, 0x09, 0xdf, 0xf5, 0x5e, 0xa3, 0x77, 0x95, 0xe9, 0xdf, 0x19, 0xdd, 0xc7, 0x94, 0xcb, 0x06, 0x73, 0xd0, 0x88, 0x02, 0x33, 0x94, 0xca, 0x7a, 0x2f, 0x8e, 0x3d, 0x72, 0x61, 0x2d, 0x4d, 0xa6, 0x61, 0x1f, 0x32, 0x5e, 0x87, 0x53, 0x36, 0x11, 0x15, 0x20, 0xb3, 0x5a, 0x57, 0x51, 0x93, 0x20, 0xd8, + /* (2^219)P */ 0xb7, 0x56, 0xf4, 0xab, 0x7d, 0x0c, 0xfb, 0x99, 0x1a, 0x30, 0x29, 0xb0, 0x75, 0x2a, 0xf8, 0x53, 0x71, 0x23, 0xbd, 0xa7, 0xd8, 0x0a, 0xe2, 0x27, 0x65, 0xe9, 
0x74, 0x26, 0x98, 0x4a, 0x69, 0x19, 0xb2, 0x4d, 0x0a, 0x17, 0x98, 0xb2, 0xa9, 0x57, 0x4e, 0xf6, 0x86, 0xc8, 0x01, 0xa4, 0xc6, 0x98, 0xad, 0x5a, 0x90, 0x2c, 0x05, 0x46, 0x64, 0xb7, + /* (2^220)P */ 0x7b, 0x91, 0xdf, 0xfc, 0xf8, 0x1c, 0x8c, 0x15, 0x9e, 0xf7, 0xd5, 0xa8, 0xe8, 0xe7, 0xe3, 0xa3, 0xb0, 0x04, 0x74, 0xfa, 0x78, 0xfb, 0x26, 0xbf, 0x67, 0x42, 0xf9, 0x8c, 0x9b, 0xb4, 0x69, 0x5b, 0x02, 0x13, 0x6d, 0x09, 0x6c, 0xd6, 0x99, 0x61, 0x7b, 0x89, 0x4a, 0x67, 0x75, 0xa3, 0x98, 0x13, 0x23, 0x1d, 0x18, 0x24, 0x0e, 0xef, 0x41, 0x79, + /* (2^221)P */ 0x86, 0x33, 0xab, 0x08, 0xcb, 0xbf, 0x1e, 0x76, 0x3c, 0x0b, 0xbd, 0x30, 0xdb, 0xe9, 0xa3, 0x35, 0x87, 0x1b, 0xe9, 0x07, 0x00, 0x66, 0x7f, 0x3b, 0x35, 0x0c, 0x8a, 0x3f, 0x61, 0xbc, 0xe0, 0xae, 0xf6, 0xcc, 0x54, 0xe1, 0x72, 0x36, 0x2d, 0xee, 0x93, 0x24, 0xf8, 0xd7, 0xc5, 0xf9, 0xcb, 0xb0, 0xe5, 0x88, 0x0d, 0x23, 0x4b, 0x76, 0x15, 0xa2, + /* (2^222)P */ 0x37, 0xdb, 0x83, 0xd5, 0x6d, 0x06, 0x24, 0x37, 0x1b, 0x15, 0x85, 0x15, 0xe2, 0xc0, 0x4e, 0x02, 0xa9, 0x6d, 0x0a, 0x3a, 0x94, 0x4a, 0x6f, 0x49, 0x00, 0x01, 0x72, 0xbb, 0x60, 0x14, 0x35, 0xae, 0xb4, 0xc6, 0x01, 0x0a, 0x00, 0x9e, 0xc3, 0x58, 0xc5, 0xd1, 0x5e, 0x30, 0x73, 0x96, 0x24, 0x85, 0x9d, 0xf0, 0xf9, 0xec, 0x09, 0xd3, 0xe7, 0x70, + /* (2^223)P */ 0xf3, 0xbd, 0x96, 0x87, 0xe9, 0x71, 0xbd, 0xd6, 0xa2, 0x45, 0xeb, 0x0a, 0xcd, 0x2c, 0xf1, 0x72, 0xa6, 0x31, 0xa9, 0x6f, 0x09, 0xa1, 0x5e, 0xdd, 0xc8, 0x8d, 0x0d, 0xbc, 0x5a, 0x8d, 0xb1, 0x2c, 0x9a, 0xcc, 0x37, 0x74, 0xc2, 0xa9, 0x4e, 0xd6, 0xc0, 0x3c, 0xa0, 0x23, 0xb0, 0xa0, 0x77, 0x14, 0x80, 0x45, 0x71, 0x6a, 0x2d, 0x41, 0xc3, 0x82, + /* (2^224)P */ 0x37, 0x44, 0xec, 0x8a, 0x3e, 0xc1, 0x0c, 0xa9, 0x12, 0x9c, 0x08, 0x88, 0xcb, 0xd9, 0xf8, 0xba, 0x00, 0xd6, 0xc3, 0xdf, 0xef, 0x7a, 0x44, 0x7e, 0x25, 0x69, 0xc9, 0xc1, 0x46, 0xe5, 0x20, 0x9e, 0xcc, 0x0b, 0x05, 0x3e, 0xf4, 0x78, 0x43, 0x0c, 0xa6, 0x2f, 0xc1, 0xfa, 0x70, 0xb2, 0x3c, 0x31, 0x7a, 0x63, 0x58, 0xab, 0x17, 0xcf, 0x4c, 0x4f, + /* (2^225)P */ 0x2b, 0x08, 0x31, 0x59, 0x75, 0x8b, 0xec, 0x0a, 0xa9, 0x79, 0x70, 0xdd, 0xf1, 0x11, 0xc3, 0x11, 0x1f, 0xab, 0x37, 0xaa, 0x26, 0xea, 0x53, 0xc4, 0x79, 0xa7, 0x91, 0x00, 0xaa, 0x08, 0x42, 0xeb, 0x8b, 0x8b, 0xe8, 0xc3, 0x2f, 0xb8, 0x78, 0x90, 0x38, 0x0e, 0x8a, 0x42, 0x0c, 0x0f, 0xbf, 0x3e, 0xf8, 0xd8, 0x07, 0xcf, 0x6a, 0x34, 0xc9, 0xfa, + /* (2^226)P */ 0x11, 0xe0, 0x76, 0x4d, 0x23, 0xc5, 0xa6, 0xcc, 0x9f, 0x9a, 0x2a, 0xde, 0x3a, 0xb5, 0x92, 0x39, 0x19, 0x8a, 0xf1, 0x8d, 0xf9, 0x4d, 0xc9, 0xb4, 0x39, 0x9f, 0x57, 0xd8, 0x72, 0xab, 0x1d, 0x61, 0x6a, 0xb2, 0xff, 0x52, 0xba, 0x54, 0x0e, 0xfb, 0x83, 0x30, 0x8a, 0xf7, 0x3b, 0xf4, 0xd8, 0xae, 0x1a, 0x94, 0x3a, 0xec, 0x63, 0xfe, 0x6e, 0x7c, + /* (2^227)P */ 0xdc, 0x70, 0x8e, 0x55, 0x44, 0xbf, 0xd2, 0x6a, 0xa0, 0x14, 0x61, 0x89, 0xd5, 0x55, 0x45, 0x3c, 0xf6, 0x40, 0x0d, 0x83, 0x85, 0x44, 0xb4, 0x62, 0x56, 0xfe, 0x60, 0xd7, 0x07, 0x1d, 0x47, 0x30, 0x3b, 0x73, 0xa4, 0xb5, 0xb7, 0xea, 0xac, 0xda, 0xf1, 0x17, 0xaa, 0x60, 0xdf, 0xe9, 0x84, 0xda, 0x31, 0x32, 0x61, 0xbf, 0xd0, 0x7e, 0x8a, 0x02, + /* (2^228)P */ 0xb9, 0x51, 0xb3, 0x89, 0x21, 0x5d, 0xa2, 0xfe, 0x79, 0x2a, 0xb3, 0x2a, 0x3b, 0xe6, 0x6f, 0x2b, 0x22, 0x03, 0xea, 0x7b, 0x1f, 0xaf, 0x85, 0xc3, 0x38, 0x55, 0x5b, 0x8e, 0xb4, 0xaa, 0x77, 0xfe, 0x03, 0x6e, 0xda, 0x91, 0x24, 0x0c, 0x48, 0x39, 0x27, 0x43, 0x16, 0xd2, 0x0a, 0x0d, 0x43, 0xa3, 0x0e, 0xca, 0x45, 0xd1, 0x7f, 0xf5, 0xd3, 0x16, + /* (2^229)P */ 0x3d, 0x32, 0x9b, 0x38, 0xf8, 0x06, 0x93, 0x78, 0x5b, 0x50, 0x2b, 0x06, 0xd8, 0x66, 0xfe, 0xab, 0x9b, 0x58, 0xc7, 0xd1, 0x4d, 0xd5, 0xf8, 0x3b, 0x10, 0x7e, 0x85, 0xde, 0x58, 0x4e, 
0xdf, 0x53, 0xd9, 0x58, 0xe0, 0x15, 0x81, 0x9f, 0x1a, 0x78, 0xfc, 0x9f, 0x10, 0xc2, 0x23, 0xd6, 0x78, 0xd1, 0x9d, 0xd2, 0xd5, 0x1c, 0x53, 0xe2, 0xc9, 0x76, + /* (2^230)P */ 0x98, 0x1e, 0x38, 0x7b, 0x71, 0x18, 0x4b, 0x15, 0xaf, 0xa1, 0xa6, 0x98, 0xcb, 0x26, 0xa3, 0xc8, 0x07, 0x46, 0xda, 0x3b, 0x70, 0x65, 0xec, 0x7a, 0x2b, 0x34, 0x94, 0xa8, 0xb6, 0x14, 0xf8, 0x1a, 0xce, 0xf7, 0xc8, 0x60, 0xf3, 0x88, 0xf4, 0x33, 0x60, 0x7b, 0xd1, 0x02, 0xe7, 0xda, 0x00, 0x4a, 0xea, 0xd2, 0xfd, 0x88, 0xd2, 0x99, 0x28, 0xf3, + /* (2^231)P */ 0x28, 0x24, 0x1d, 0x26, 0xc2, 0xeb, 0x8b, 0x3b, 0xb4, 0x6b, 0xbe, 0x6b, 0x77, 0xff, 0xf3, 0x21, 0x3b, 0x26, 0x6a, 0x8c, 0x8e, 0x2a, 0x44, 0xa8, 0x01, 0x2b, 0x71, 0xea, 0x64, 0x30, 0xfd, 0xfd, 0x95, 0xcb, 0x39, 0x38, 0x48, 0xfa, 0x96, 0x97, 0x8c, 0x2f, 0x33, 0xca, 0x03, 0xe6, 0xd7, 0x94, 0x55, 0x6c, 0xc3, 0xb3, 0xa8, 0xf7, 0xae, 0x8c, + /* (2^232)P */ 0xea, 0x62, 0x8a, 0xb4, 0xeb, 0x74, 0xf7, 0xb8, 0xae, 0xc5, 0x20, 0x71, 0x06, 0xd6, 0x7c, 0x62, 0x9b, 0x69, 0x74, 0xef, 0xa7, 0x6d, 0xd6, 0x8c, 0x37, 0xb9, 0xbf, 0xcf, 0xeb, 0xe4, 0x2f, 0x04, 0x02, 0x21, 0x7d, 0x75, 0x6b, 0x92, 0x48, 0xf8, 0x70, 0xad, 0x69, 0xe2, 0xea, 0x0e, 0x88, 0x67, 0x72, 0xcc, 0x2d, 0x10, 0xce, 0x2d, 0xcf, 0x65, + /* (2^233)P */ 0x49, 0xf3, 0x57, 0x64, 0xe5, 0x5c, 0xc5, 0x65, 0x49, 0x97, 0xc4, 0x8a, 0xcc, 0xa9, 0xca, 0x94, 0x7b, 0x86, 0x88, 0xb6, 0x51, 0x27, 0x69, 0xa5, 0x0f, 0x8b, 0x06, 0x59, 0xa0, 0x94, 0xef, 0x63, 0x1a, 0x01, 0x9e, 0x4f, 0xd2, 0x5a, 0x93, 0xc0, 0x7c, 0xe6, 0x61, 0x77, 0xb6, 0xf5, 0x40, 0xd9, 0x98, 0x43, 0x5b, 0x56, 0x68, 0xe9, 0x37, 0x8f, + /* (2^234)P */ 0xee, 0x87, 0xd2, 0x05, 0x1b, 0x39, 0x89, 0x10, 0x07, 0x6d, 0xe8, 0xfd, 0x8b, 0x4d, 0xb2, 0xa7, 0x7b, 0x1e, 0xa0, 0x6c, 0x0d, 0x3d, 0x3d, 0x49, 0xba, 0x61, 0x36, 0x1f, 0xc2, 0x84, 0x4a, 0xcc, 0x87, 0xa9, 0x1b, 0x23, 0x04, 0xe2, 0x3e, 0x97, 0xe1, 0xdb, 0xd5, 0x5a, 0xe8, 0x41, 0x6b, 0xe5, 0x5a, 0xa1, 0x99, 0xe5, 0x7b, 0xa7, 0xe0, 0x3b, + /* (2^235)P */ 0xea, 0xa3, 0x6a, 0xdd, 0x77, 0x7f, 0x77, 0x41, 0xc5, 0x6a, 0xe4, 0xaf, 0x11, 0x5f, 0x88, 0xa5, 0x10, 0xee, 0xd0, 0x8c, 0x0c, 0xb4, 0xa5, 0x2a, 0xd0, 0xd8, 0x1d, 0x47, 0x06, 0xc0, 0xd5, 0xce, 0x51, 0x54, 0x9b, 0x2b, 0xe6, 0x2f, 0xe7, 0xe7, 0x31, 0x5f, 0x5c, 0x23, 0x81, 0x3e, 0x03, 0x93, 0xaa, 0x2d, 0x71, 0x84, 0xa0, 0x89, 0x32, 0xa6, + /* (2^236)P */ 0x55, 0xa3, 0x13, 0x92, 0x4e, 0x93, 0x7d, 0xec, 0xca, 0x57, 0xfb, 0x37, 0xae, 0xd2, 0x18, 0x2e, 0x54, 0x05, 0x6c, 0xd1, 0x28, 0xca, 0x90, 0x40, 0x82, 0x2e, 0x79, 0xc6, 0x5a, 0xc7, 0xdd, 0x84, 0x93, 0xdf, 0x15, 0xb8, 0x1f, 0xb1, 0xf9, 0xaf, 0x2c, 0xe5, 0x32, 0xcd, 0xc2, 0x99, 0x6d, 0xac, 0x85, 0x5c, 0x63, 0xd3, 0xe2, 0xff, 0x24, 0xda, + /* (2^237)P */ 0x2d, 0x8d, 0xfd, 0x65, 0xcc, 0xe5, 0x02, 0xa0, 0xe5, 0xb9, 0xec, 0x59, 0x09, 0x50, 0x27, 0xb7, 0x3d, 0x2a, 0x79, 0xb2, 0x76, 0x5d, 0x64, 0x95, 0xf8, 0xc5, 0xaf, 0x8a, 0x62, 0x11, 0x5c, 0x56, 0x1c, 0x05, 0x64, 0x9e, 0x5e, 0xbd, 0x54, 0x04, 0xe6, 0x9e, 0xab, 0xe6, 0x22, 0x7e, 0x42, 0x54, 0xb5, 0xa5, 0xd0, 0x8d, 0x28, 0x6b, 0x0f, 0x0b, + /* (2^238)P */ 0x2d, 0xb2, 0x8c, 0x59, 0x10, 0x37, 0x84, 0x3b, 0x9b, 0x65, 0x1b, 0x0f, 0x10, 0xf9, 0xea, 0x60, 0x1b, 0x02, 0xf5, 0xee, 0x8b, 0xe6, 0x32, 0x7d, 0x10, 0x7f, 0x5f, 0x8c, 0x72, 0x09, 0x4e, 0x1f, 0x29, 0xff, 0x65, 0xcb, 0x3e, 0x3a, 0xd2, 0x96, 0x50, 0x1e, 0xea, 0x64, 0x99, 0xb5, 0x4c, 0x7a, 0x69, 0xb8, 0x95, 0xae, 0x48, 0xc0, 0x7c, 0xb1, + /* (2^239)P */ 0xcd, 0x7c, 0x4f, 0x3e, 0xea, 0xf3, 0x90, 0xcb, 0x12, 0x76, 0xd1, 0x17, 0xdc, 0x0d, 0x13, 0x0f, 0xfd, 0x4d, 0xb5, 0x1f, 0xe4, 0xdd, 0xf2, 0x4d, 0x58, 0xea, 0xa5, 0x66, 0x92, 0xcf, 0xe5, 0x54, 0xea, 0x9b, 
0x35, 0x83, 0x1a, 0x44, 0x8e, 0x62, 0x73, 0x45, 0x98, 0xa3, 0x89, 0x95, 0x52, 0x93, 0x1a, 0x8d, 0x63, 0x0f, 0xc2, 0x57, 0x3c, 0xb1, + /* (2^240)P */ 0x72, 0xb4, 0xdf, 0x51, 0xb7, 0xf6, 0x52, 0xa2, 0x14, 0x56, 0xe5, 0x0a, 0x2e, 0x75, 0x81, 0x02, 0xee, 0x93, 0x48, 0x0a, 0x92, 0x4e, 0x0c, 0x0f, 0xdf, 0x09, 0x89, 0x99, 0xf6, 0xf9, 0x22, 0xa2, 0x32, 0xf8, 0xb0, 0x76, 0x0c, 0xb2, 0x4d, 0x6e, 0xbe, 0x83, 0x35, 0x61, 0x44, 0xd2, 0x58, 0xc7, 0xdd, 0x14, 0xcf, 0xc3, 0x4b, 0x7c, 0x07, 0xee, + /* (2^241)P */ 0x8b, 0x03, 0xee, 0xcb, 0xa7, 0x2e, 0x28, 0xbd, 0x97, 0xd1, 0x4c, 0x2b, 0xd1, 0x92, 0x67, 0x5b, 0x5a, 0x12, 0xbf, 0x29, 0x17, 0xfc, 0x50, 0x09, 0x74, 0x76, 0xa2, 0xd4, 0x82, 0xfd, 0x2c, 0x0c, 0x90, 0xf7, 0xe7, 0xe5, 0x9a, 0x2c, 0x16, 0x40, 0xb9, 0x6c, 0xd9, 0xe0, 0x22, 0x9e, 0xf8, 0xdd, 0x73, 0xe4, 0x7b, 0x9e, 0xbe, 0x4f, 0x66, 0x22, + /* (2^242)P */ 0xa4, 0x10, 0xbe, 0xb8, 0x83, 0x3a, 0x77, 0x8e, 0xea, 0x0a, 0xc4, 0x97, 0x3e, 0xb6, 0x6c, 0x81, 0xd7, 0x65, 0xd9, 0xf7, 0xae, 0xe6, 0xbe, 0xab, 0x59, 0x81, 0x29, 0x4b, 0xff, 0xe1, 0x0f, 0xc3, 0x2b, 0xad, 0x4b, 0xef, 0xc4, 0x50, 0x9f, 0x88, 0x31, 0xf2, 0xde, 0x80, 0xd6, 0xf4, 0x20, 0x9c, 0x77, 0x9b, 0xbe, 0xbe, 0x08, 0xf5, 0xf0, 0x95, + /* (2^243)P */ 0x0e, 0x7c, 0x7b, 0x7c, 0xb3, 0xd8, 0x83, 0xfc, 0x8c, 0x75, 0x51, 0x74, 0x1b, 0xe1, 0x6d, 0x11, 0x05, 0x46, 0x24, 0x0d, 0xa4, 0x2b, 0x32, 0xfd, 0x2c, 0x4e, 0x21, 0xdf, 0x39, 0x6b, 0x96, 0xfc, 0xff, 0x92, 0xfc, 0x35, 0x0d, 0x9a, 0x4b, 0xc0, 0x70, 0x46, 0x32, 0x7d, 0xc0, 0xc4, 0x04, 0xe0, 0x2d, 0x83, 0xa7, 0x00, 0xc7, 0xcb, 0xb4, 0x8f, + /* (2^244)P */ 0xa9, 0x5a, 0x7f, 0x0e, 0xdd, 0x2c, 0x85, 0xaa, 0x4d, 0xac, 0xde, 0xb3, 0xb6, 0xaf, 0xe6, 0xd1, 0x06, 0x7b, 0x2c, 0xa4, 0x01, 0x19, 0x22, 0x7d, 0x78, 0xf0, 0x3a, 0xea, 0x89, 0xfe, 0x21, 0x61, 0x6d, 0xb8, 0xfe, 0xa5, 0x2a, 0xab, 0x0d, 0x7b, 0x51, 0x39, 0xb6, 0xde, 0xbc, 0xf0, 0xc5, 0x48, 0xd7, 0x09, 0x82, 0x6e, 0x66, 0x75, 0xc5, 0xcd, + /* (2^245)P */ 0xee, 0xdf, 0x2b, 0x6c, 0xa8, 0xde, 0x61, 0xe1, 0x27, 0xfa, 0x2a, 0x0f, 0x68, 0xe7, 0x7a, 0x9b, 0x13, 0xe9, 0x56, 0xd2, 0x1c, 0x3d, 0x2f, 0x3c, 0x7a, 0xf6, 0x6f, 0x45, 0xee, 0xe8, 0xf4, 0xa0, 0xa6, 0xe8, 0xa5, 0x27, 0xee, 0xf2, 0x85, 0xa9, 0xd5, 0x0e, 0xa9, 0x26, 0x60, 0xfe, 0xee, 0xc7, 0x59, 0x99, 0x5e, 0xa3, 0xdf, 0x23, 0x36, 0xd5, + /* (2^246)P */ 0x15, 0x66, 0x6f, 0xd5, 0x78, 0xa4, 0x0a, 0xf7, 0xb1, 0xe8, 0x75, 0x6b, 0x48, 0x7d, 0xa6, 0x4d, 0x3d, 0x36, 0x9b, 0xc7, 0xcc, 0x68, 0x9a, 0xfe, 0x2f, 0x39, 0x2a, 0x51, 0x31, 0x39, 0x7d, 0x73, 0x6f, 0xc8, 0x74, 0x72, 0x6f, 0x6e, 0xda, 0x5f, 0xad, 0x48, 0xc8, 0x40, 0xe1, 0x06, 0x01, 0x36, 0xa1, 0x88, 0xc8, 0x99, 0x9c, 0xd1, 0x11, 0x8f, + /* (2^247)P */ 0xab, 0xc5, 0xcb, 0xcf, 0xbd, 0x73, 0x21, 0xd0, 0x82, 0xb1, 0x2e, 0x2d, 0xd4, 0x36, 0x1b, 0xed, 0xa9, 0x8a, 0x26, 0x79, 0xc4, 0x17, 0xae, 0xe5, 0x09, 0x0a, 0x0c, 0xa4, 0x21, 0xa0, 0x6e, 0xdd, 0x62, 0x8e, 0x44, 0x62, 0xcc, 0x50, 0xff, 0x93, 0xb3, 0x9a, 0x72, 0x8c, 0x3f, 0xa1, 0xa6, 0x4d, 0x87, 0xd5, 0x1c, 0x5a, 0xc0, 0x0b, 0x1a, 0xd6, + /* (2^248)P */ 0x67, 0x36, 0x6a, 0x1f, 0x96, 0xe5, 0x80, 0x20, 0xa9, 0xe8, 0x0b, 0x0e, 0x21, 0x29, 0x3f, 0xc8, 0x0a, 0x6d, 0x27, 0x47, 0xca, 0xd9, 0x05, 0x55, 0xbf, 0x11, 0xcf, 0x31, 0x7a, 0x37, 0xc7, 0x90, 0xa9, 0xf4, 0x07, 0x5e, 0xd5, 0xc3, 0x92, 0xaa, 0x95, 0xc8, 0x23, 0x2a, 0x53, 0x45, 0xe3, 0x3a, 0x24, 0xe9, 0x67, 0x97, 0x3a, 0x82, 0xf9, 0xa6, + /* (2^249)P */ 0x92, 0x9e, 0x6d, 0x82, 0x67, 0xe9, 0xf9, 0x17, 0x96, 0x2c, 0xa7, 0xd3, 0x89, 0xf9, 0xdb, 0xd8, 0x20, 0xc6, 0x2e, 0xec, 0x4a, 0x76, 0x64, 0xbf, 0x27, 0x40, 0xe2, 0xb4, 0xdf, 0x1f, 0xa0, 0xef, 0x07, 0x80, 0xfb, 0x8e, 0x12, 0xf8, 
0xb8, 0xe1, 0xc6, 0xdf, 0x7c, 0x69, 0x35, 0x5a, 0xe1, 0x8e, 0x5d, 0x69, 0x84, 0x56, 0xb6, 0x31, 0x1c, 0x0b, + /* (2^250)P */ 0xd6, 0x94, 0x5c, 0xef, 0xbb, 0x46, 0x45, 0x44, 0x5b, 0xa1, 0xae, 0x03, 0x65, 0xdd, 0xb5, 0x66, 0x88, 0x35, 0x29, 0x95, 0x16, 0x54, 0xa6, 0xf5, 0xc9, 0x78, 0x34, 0xe6, 0x0f, 0xc4, 0x2b, 0x5b, 0x79, 0x51, 0x68, 0x48, 0x3a, 0x26, 0x87, 0x05, 0x70, 0xaf, 0x8b, 0xa6, 0xc7, 0x2e, 0xb3, 0xa9, 0x10, 0x01, 0xb0, 0xb9, 0x31, 0xfd, 0xdc, 0x80, + /* (2^251)P */ 0x25, 0xf2, 0xad, 0xd6, 0x75, 0xa3, 0x04, 0x05, 0x64, 0x8a, 0x97, 0x60, 0x27, 0x2a, 0xe5, 0x6d, 0xb0, 0x73, 0xf4, 0x07, 0x2a, 0x9d, 0xe9, 0x46, 0xb4, 0x1c, 0x51, 0xf8, 0x63, 0x98, 0x7e, 0xe5, 0x13, 0x51, 0xed, 0x98, 0x65, 0x98, 0x4f, 0x8f, 0xe7, 0x7e, 0x72, 0xd7, 0x64, 0x11, 0x2f, 0xcd, 0x12, 0xf8, 0xc4, 0x63, 0x52, 0x0f, 0x7f, 0xc4, + /* (2^252)P */ 0x5c, 0xd9, 0x85, 0x63, 0xc7, 0x8a, 0x65, 0x9a, 0x25, 0x83, 0x31, 0x73, 0x49, 0xf0, 0x93, 0x96, 0x70, 0x67, 0x6d, 0xb1, 0xff, 0x95, 0x54, 0xe4, 0xf8, 0x15, 0x6c, 0x5f, 0xbd, 0xf6, 0x0f, 0x38, 0x7b, 0x68, 0x7d, 0xd9, 0x3d, 0xf0, 0xa9, 0xa0, 0xe4, 0xd1, 0xb6, 0x34, 0x6d, 0x14, 0x16, 0xc2, 0x4c, 0x30, 0x0e, 0x67, 0xd3, 0xbe, 0x2e, 0xc0, + /* (2^253)P */ 0x06, 0x6b, 0x52, 0xc8, 0x14, 0xcd, 0xae, 0x03, 0x93, 0xea, 0xc1, 0xf2, 0xf6, 0x8b, 0xc5, 0xb6, 0xdc, 0x82, 0x42, 0x29, 0x94, 0xe0, 0x25, 0x6c, 0x3f, 0x9f, 0x5d, 0xe4, 0x96, 0xf6, 0x8e, 0x3f, 0xf9, 0x72, 0xc4, 0x77, 0x60, 0x8b, 0xa4, 0xf9, 0xa8, 0xc3, 0x0a, 0x81, 0xb1, 0x97, 0x70, 0x18, 0xab, 0xea, 0x37, 0x8a, 0x08, 0xc7, 0xe2, 0x95, + /* (2^254)P */ 0x94, 0x49, 0xd9, 0x5f, 0x76, 0x72, 0x82, 0xad, 0x2d, 0x50, 0x1a, 0x7a, 0x5b, 0xe6, 0x95, 0x1e, 0x95, 0x65, 0x87, 0x1c, 0x52, 0xd7, 0x44, 0xe6, 0x9b, 0x56, 0xcd, 0x6f, 0x05, 0xff, 0x67, 0xc5, 0xdb, 0xa2, 0xac, 0xe4, 0xa2, 0x28, 0x63, 0x5f, 0xfb, 0x0c, 0x3b, 0xf1, 0x87, 0xc3, 0x36, 0x78, 0x3f, 0x77, 0xfa, 0x50, 0x85, 0xf9, 0xd7, 0x82, + /* (2^255)P */ 0x64, 0xc0, 0xe0, 0xd8, 0x2d, 0xed, 0xcb, 0x6a, 0xfd, 0xcd, 0xbc, 0x7e, 0x9f, 0xc8, 0x85, 0xe9, 0xc1, 0x7c, 0x0f, 0xe5, 0x18, 0xea, 0xd4, 0x51, 0xad, 0x59, 0x13, 0x75, 0xd9, 0x3d, 0xd4, 0x8a, 0xb2, 0xbe, 0x78, 0x52, 0x2b, 0x52, 0x94, 0x37, 0x41, 0xd6, 0xb4, 0xb6, 0x45, 0x20, 0x76, 0xe0, 0x1f, 0x31, 0xdb, 0xb1, 0xa1, 0x43, 0xf0, 0x18, + /* (2^256)P */ 0x74, 0xa9, 0xa4, 0xa9, 0xdd, 0x6e, 0x3e, 0x68, 0xe5, 0xc3, 0x2e, 0x92, 0x17, 0xa4, 0xcb, 0x80, 0xb1, 0xf0, 0x06, 0x93, 0xef, 0xe6, 0x00, 0xe6, 0x3b, 0xb1, 0x32, 0x65, 0x7b, 0x83, 0xb6, 0x8a, 0x49, 0x1b, 0x14, 0x89, 0xee, 0xba, 0xf5, 0x6a, 0x8d, 0x36, 0xef, 0xb0, 0xd8, 0xb2, 0x16, 0x99, 0x17, 0x35, 0x02, 0x16, 0x55, 0x58, 0xdd, 0x82, + /* (2^257)P */ 0x36, 0x95, 0xe8, 0xf4, 0x36, 0x42, 0xbb, 0xc5, 0x3e, 0xfa, 0x30, 0x84, 0x9e, 0x59, 0xfd, 0xd2, 0x95, 0x42, 0xf8, 0x64, 0xd9, 0xb9, 0x0e, 0x9f, 0xfa, 0xd0, 0x7b, 0x20, 0x31, 0x77, 0x48, 0x29, 0x4d, 0xd0, 0x32, 0x57, 0x56, 0x30, 0xa6, 0x17, 0x53, 0x04, 0xbf, 0x08, 0x28, 0xec, 0xb8, 0x46, 0xc1, 0x03, 0x89, 0xdc, 0xed, 0xa0, 0x35, 0x53, + /* (2^258)P */ 0xc5, 0x7f, 0x9e, 0xd8, 0xc5, 0xba, 0x5f, 0x68, 0xc8, 0x23, 0x75, 0xea, 0x0d, 0xd9, 0x5a, 0xfd, 0x61, 0x1a, 0xa3, 0x2e, 0x45, 0x63, 0x14, 0x55, 0x86, 0x21, 0x29, 0xbe, 0xef, 0x5e, 0x50, 0xe5, 0x18, 0x59, 0xe7, 0xe3, 0xce, 0x4d, 0x8c, 0x15, 0x8f, 0x89, 0x66, 0x44, 0x52, 0x3d, 0xfa, 0xc7, 0x9a, 0x59, 0x90, 0x8e, 0xc0, 0x06, 0x3f, 0xc9, + /* (2^259)P */ 0x8e, 0x04, 0xd9, 0x16, 0x50, 0x1d, 0x8c, 0x9f, 0xd5, 0xe3, 0xce, 0xfd, 0x47, 0x04, 0x27, 0x4d, 0xc2, 0xfa, 0x71, 0xd9, 0x0b, 0xb8, 0x65, 0xf4, 0x11, 0xf3, 0x08, 0xee, 0x81, 0xc8, 0x67, 0x99, 0x0b, 0x8d, 0x77, 0xa3, 0x4f, 0xb5, 0x9b, 0xdb, 0x26, 0xf1, 
0x97, 0xeb, 0x04, 0x54, 0xeb, 0x80, 0x08, 0x1d, 0x1d, 0xf6, 0x3d, 0x1f, 0x5a, 0xb8, + /* (2^260)P */ 0xb7, 0x9c, 0x9d, 0xee, 0xb9, 0x5c, 0xad, 0x0d, 0x9e, 0xfd, 0x60, 0x3c, 0x27, 0x4e, 0xa2, 0x95, 0xfb, 0x64, 0x7e, 0x79, 0x64, 0x87, 0x10, 0xb4, 0x73, 0xe0, 0x9d, 0x46, 0x4d, 0x3d, 0xee, 0x83, 0xe4, 0x16, 0x88, 0x97, 0xe6, 0x4d, 0xba, 0x70, 0xb6, 0x96, 0x7b, 0xff, 0x4b, 0xc8, 0xcf, 0x72, 0x83, 0x3e, 0x5b, 0x24, 0x2e, 0x57, 0xf1, 0x82, + /* (2^261)P */ 0x30, 0x71, 0x40, 0x51, 0x4f, 0x44, 0xbb, 0xc7, 0xf0, 0x54, 0x6e, 0x9d, 0xeb, 0x15, 0xad, 0xf8, 0x61, 0x43, 0x5a, 0xef, 0xc0, 0xb1, 0x57, 0xae, 0x03, 0x40, 0xe8, 0x68, 0x6f, 0x03, 0x20, 0x4f, 0x8a, 0x51, 0x2a, 0x9e, 0xd2, 0x45, 0xaf, 0xb4, 0xf5, 0xd4, 0x95, 0x7f, 0x3d, 0x3d, 0xb7, 0xb6, 0x28, 0xc5, 0x08, 0x8b, 0x44, 0xd6, 0x3f, 0xe7, + /* (2^262)P */ 0xa9, 0x52, 0x04, 0x67, 0xcb, 0x20, 0x63, 0xf8, 0x18, 0x01, 0x44, 0x21, 0x6a, 0x8a, 0x83, 0x48, 0xd4, 0xaf, 0x23, 0x0f, 0x35, 0x8d, 0xe5, 0x5a, 0xc4, 0x7c, 0x55, 0x46, 0x19, 0x5f, 0x35, 0xe0, 0x5d, 0x97, 0x4c, 0x2d, 0x04, 0xed, 0x59, 0xd4, 0xb0, 0xb2, 0xc6, 0xe3, 0x51, 0xe1, 0x38, 0xc6, 0x30, 0x49, 0x8f, 0xae, 0x61, 0x64, 0xce, 0xa8, + /* (2^263)P */ 0x9b, 0x64, 0x83, 0x3c, 0xd3, 0xdf, 0xb9, 0x27, 0xe7, 0x5b, 0x7f, 0xeb, 0xf3, 0x26, 0xcf, 0xb1, 0x8f, 0xaf, 0x26, 0xc8, 0x48, 0xce, 0xa1, 0xac, 0x7d, 0x10, 0x34, 0x28, 0xe1, 0x1f, 0x69, 0x03, 0x64, 0x77, 0x61, 0xdd, 0x4a, 0x9b, 0x18, 0x47, 0xf8, 0xca, 0x63, 0xc9, 0x03, 0x2d, 0x20, 0x2a, 0x69, 0x6e, 0x42, 0xd0, 0xe7, 0xaa, 0xb5, 0xf3, + /* (2^264)P */ 0xea, 0x31, 0x0c, 0x57, 0x0f, 0x3e, 0xe3, 0x35, 0xd8, 0x30, 0xa5, 0x6f, 0xdd, 0x95, 0x43, 0xc6, 0x66, 0x07, 0x4f, 0x34, 0xc3, 0x7e, 0x04, 0x10, 0x2d, 0xc4, 0x1c, 0x94, 0x52, 0x2e, 0x5b, 0x9a, 0x65, 0x2f, 0x91, 0xaa, 0x4f, 0x3c, 0xdc, 0x23, 0x18, 0xe1, 0x4f, 0x85, 0xcd, 0xf4, 0x8c, 0x51, 0xf7, 0xab, 0x4f, 0xdc, 0x15, 0x5c, 0x9e, 0xc5, + /* (2^265)P */ 0x54, 0x57, 0x23, 0x17, 0xe7, 0x82, 0x2f, 0x04, 0x7d, 0xfe, 0xe7, 0x1f, 0xa2, 0x57, 0x79, 0xe9, 0x58, 0x9b, 0xbe, 0xc6, 0x16, 0x4a, 0x17, 0x50, 0x90, 0x4a, 0x34, 0x70, 0x87, 0x37, 0x01, 0x26, 0xd8, 0xa3, 0x5f, 0x07, 0x7c, 0xd0, 0x7d, 0x05, 0x8a, 0x93, 0x51, 0x2f, 0x99, 0xea, 0xcf, 0x00, 0xd8, 0xc7, 0xe6, 0x9b, 0x8c, 0x62, 0x45, 0x87, + /* (2^266)P */ 0xc3, 0xfd, 0x29, 0x66, 0xe7, 0x30, 0x29, 0x77, 0xe0, 0x0d, 0x63, 0x5b, 0xe6, 0x90, 0x1a, 0x1e, 0x99, 0xc2, 0xa7, 0xab, 0xff, 0xa7, 0xbd, 0x79, 0x01, 0x97, 0xfd, 0x27, 0x1b, 0x43, 0x2b, 0xe6, 0xfe, 0x5e, 0xf1, 0xb9, 0x35, 0x38, 0x08, 0x25, 0x55, 0x90, 0x68, 0x2e, 0xc3, 0x67, 0x39, 0x9f, 0x2b, 0x2c, 0x70, 0x48, 0x8c, 0x47, 0xee, 0x56, + /* (2^267)P */ 0xf7, 0x32, 0x70, 0xb5, 0xe6, 0x42, 0xfd, 0x0a, 0x39, 0x9b, 0x07, 0xfe, 0x0e, 0xf4, 0x47, 0xba, 0x6a, 0x3f, 0xf5, 0x2c, 0x15, 0xf3, 0x60, 0x3f, 0xb1, 0x83, 0x7b, 0x2e, 0x34, 0x58, 0x1a, 0x6e, 0x4a, 0x49, 0x05, 0x45, 0xca, 0xdb, 0x00, 0x01, 0x0c, 0x42, 0x5e, 0x60, 0x40, 0x5f, 0xd9, 0xc7, 0x3a, 0x9e, 0x1c, 0x8d, 0xab, 0x11, 0x55, 0x65, + /* (2^268)P */ 0x87, 0x40, 0xb7, 0x0d, 0xaa, 0x34, 0x89, 0x90, 0x75, 0x6d, 0xa2, 0xfe, 0x3b, 0x6d, 0x5c, 0x39, 0x98, 0x10, 0x9e, 0x15, 0xc5, 0x35, 0xa2, 0x27, 0x23, 0x0a, 0x2d, 0x60, 0xe2, 0xa8, 0x7f, 0x3e, 0x77, 0x8f, 0xcc, 0x44, 0xcc, 0x30, 0x28, 0xe2, 0xf0, 0x04, 0x8c, 0xee, 0xe4, 0x5f, 0x68, 0x8c, 0xdf, 0x70, 0xbf, 0x31, 0xee, 0x2a, 0xfc, 0xce, + /* (2^269)P */ 0x92, 0xf2, 0xa0, 0xd9, 0x58, 0x3b, 0x7c, 0x1a, 0x99, 0x46, 0x59, 0x54, 0x60, 0x06, 0x8d, 0x5e, 0xf0, 0x22, 0xa1, 0xed, 0x92, 0x8a, 0x4d, 0x76, 0x95, 0x05, 0x0b, 0xff, 0xfc, 0x9a, 0xd1, 0xcc, 0x05, 0xb9, 0x5e, 0x99, 0xe8, 0x2a, 0x76, 0x7b, 0xfd, 0xa6, 0xe2, 0xd1, 0x1a, 0xd6, 
0x76, 0x9f, 0x2f, 0x0e, 0xd1, 0xa8, 0x77, 0x5a, 0x40, 0x5a, + /* (2^270)P */ 0xff, 0xf9, 0x3f, 0xa9, 0xa6, 0x6c, 0x6d, 0x03, 0x8b, 0xa7, 0x10, 0x5d, 0x3f, 0xec, 0x3e, 0x1c, 0x0b, 0x6b, 0xa2, 0x6a, 0x22, 0xa9, 0x28, 0xd0, 0x66, 0xc9, 0xc2, 0x3d, 0x47, 0x20, 0x7d, 0xa6, 0x1d, 0xd8, 0x25, 0xb5, 0xf2, 0xf9, 0x70, 0x19, 0x6b, 0xf8, 0x43, 0x36, 0xc5, 0x1f, 0xe4, 0x5a, 0x4c, 0x13, 0xe4, 0x6d, 0x08, 0x0b, 0x1d, 0xb1, + /* (2^271)P */ 0x3f, 0x20, 0x9b, 0xfb, 0xec, 0x7d, 0x31, 0xc5, 0xfc, 0x88, 0x0b, 0x30, 0xed, 0x36, 0xc0, 0x63, 0xb1, 0x7d, 0x10, 0xda, 0xb6, 0x2e, 0xad, 0xf3, 0xec, 0x94, 0xe7, 0xec, 0xb5, 0x9c, 0xfe, 0xf5, 0x35, 0xf0, 0xa2, 0x2d, 0x7f, 0xca, 0x6b, 0x67, 0x1a, 0xf6, 0xb3, 0xda, 0x09, 0x2a, 0xaa, 0xdf, 0xb1, 0xca, 0x9b, 0xfb, 0xeb, 0xb3, 0xcd, 0xc0, + /* (2^272)P */ 0xcd, 0x4d, 0x89, 0x00, 0xa4, 0x3b, 0x48, 0xf0, 0x76, 0x91, 0x35, 0xa5, 0xf8, 0xc9, 0xb6, 0x46, 0xbc, 0xf6, 0x9a, 0x45, 0x47, 0x17, 0x96, 0x80, 0x5b, 0x3a, 0x28, 0x33, 0xf9, 0x5a, 0xef, 0x43, 0x07, 0xfe, 0x3b, 0xf4, 0x8e, 0x19, 0xce, 0xd2, 0x94, 0x4b, 0x6d, 0x8e, 0x67, 0x20, 0xc7, 0x4f, 0x2f, 0x59, 0x8e, 0xe1, 0xa1, 0xa9, 0xf9, 0x0e, + /* (2^273)P */ 0xdc, 0x7b, 0xb5, 0x50, 0x2e, 0xe9, 0x7e, 0x8b, 0x78, 0xa1, 0x38, 0x96, 0x22, 0xc3, 0x61, 0x67, 0x6d, 0xc8, 0x58, 0xed, 0x41, 0x1d, 0x5d, 0x86, 0x98, 0x7f, 0x2f, 0x1b, 0x8d, 0x3e, 0xaa, 0xc1, 0xd2, 0x0a, 0xf3, 0xbf, 0x95, 0x04, 0xf3, 0x10, 0x3c, 0x2b, 0x7f, 0x90, 0x46, 0x04, 0xaa, 0x6a, 0xa9, 0x35, 0x76, 0xac, 0x49, 0xb5, 0x00, 0x45, + /* (2^274)P */ 0xb1, 0x93, 0x79, 0x84, 0x4a, 0x2a, 0x30, 0x78, 0x16, 0xaa, 0xc5, 0x74, 0x06, 0xce, 0xa5, 0xa7, 0x32, 0x86, 0xe0, 0xf9, 0x10, 0xd2, 0x58, 0x76, 0xfb, 0x66, 0x49, 0x76, 0x3a, 0x90, 0xba, 0xb5, 0xcc, 0x99, 0xcd, 0x09, 0xc1, 0x9a, 0x74, 0x23, 0xdf, 0x0c, 0xfe, 0x99, 0x52, 0x80, 0xa3, 0x7c, 0x1c, 0x71, 0x5f, 0x2c, 0x49, 0x57, 0xf4, 0xf9, + /* (2^275)P */ 0x6d, 0xbf, 0x52, 0xe6, 0x25, 0x98, 0xed, 0xcf, 0xe3, 0xbc, 0x08, 0xa2, 0x1a, 0x90, 0xae, 0xa0, 0xbf, 0x07, 0x15, 0xad, 0x0a, 0x9f, 0x3e, 0x47, 0x44, 0xc2, 0x10, 0x46, 0xa6, 0x7a, 0x9e, 0x2f, 0x57, 0xbc, 0xe2, 0xf0, 0x1d, 0xd6, 0x9a, 0x06, 0xed, 0xfc, 0x54, 0x95, 0x92, 0x15, 0xa2, 0xf7, 0x8d, 0x6b, 0xef, 0xb2, 0x05, 0xed, 0x5c, 0x63, + /* (2^276)P */ 0xbc, 0x0b, 0x27, 0x3a, 0x3a, 0xf8, 0xe1, 0x48, 0x02, 0x7e, 0x27, 0xe6, 0x81, 0x62, 0x07, 0x73, 0x74, 0xe5, 0x52, 0xd7, 0xf8, 0x26, 0xca, 0x93, 0x4d, 0x3e, 0x9b, 0x55, 0x09, 0x8e, 0xe3, 0xd7, 0xa6, 0xe3, 0xb6, 0x2a, 0xa9, 0xb3, 0xb0, 0xa0, 0x8c, 0x01, 0xbb, 0x07, 0x90, 0x78, 0x6d, 0x6d, 0xe9, 0xf0, 0x7a, 0x90, 0xbd, 0xdc, 0x0c, 0x36, + /* (2^277)P */ 0x7f, 0x20, 0x12, 0x0f, 0x40, 0x00, 0x53, 0xd8, 0x0c, 0x27, 0x47, 0x47, 0x22, 0x80, 0xfb, 0x62, 0xe4, 0xa7, 0xf7, 0xbd, 0x42, 0xa5, 0xc3, 0x2b, 0xb2, 0x7f, 0x50, 0xcc, 0xe2, 0xfb, 0xd5, 0xc0, 0x63, 0xdd, 0x24, 0x5f, 0x7c, 0x08, 0x91, 0xbf, 0x6e, 0x47, 0x44, 0xd4, 0x6a, 0xc0, 0xc3, 0x09, 0x39, 0x27, 0xdd, 0xc7, 0xca, 0x06, 0x29, 0x55, + /* (2^278)P */ 0x76, 0x28, 0x58, 0xb0, 0xd2, 0xf3, 0x0f, 0x04, 0xe9, 0xc9, 0xab, 0x66, 0x5b, 0x75, 0x51, 0xdc, 0xe5, 0x8f, 0xe8, 0x1f, 0xdb, 0x03, 0x0f, 0xb0, 0x7d, 0xf9, 0x20, 0x64, 0x89, 0xe9, 0xdc, 0xe6, 0x24, 0xc3, 0xd5, 0xd2, 0x41, 0xa6, 0xe4, 0xe3, 0xc4, 0x79, 0x7c, 0x0f, 0xa1, 0x61, 0x2f, 0xda, 0xa4, 0xc9, 0xfd, 0xad, 0x5c, 0x65, 0x6a, 0xf3, + /* (2^279)P */ 0xd5, 0xab, 0x72, 0x7a, 0x3b, 0x59, 0xea, 0xcf, 0xd5, 0x17, 0xd2, 0xb2, 0x5f, 0x2d, 0xab, 0xad, 0x9e, 0x88, 0x64, 0x55, 0x96, 0x6e, 0xf3, 0x44, 0xa9, 0x11, 0xf5, 0xf8, 0x3a, 0xf1, 0xcd, 0x79, 0x4c, 0x99, 0x6d, 0x23, 0x6a, 0xa0, 0xc2, 0x1a, 0x19, 0x45, 0xb5, 0xd8, 0x95, 0x2f, 0x49, 0xe9, 0x46, 0x39, 
0x26, 0x60, 0x04, 0x15, 0x8b, 0xcc, + /* (2^280)P */ 0x66, 0x0c, 0xf0, 0x54, 0x41, 0x02, 0x91, 0xab, 0xe5, 0x85, 0x8a, 0x44, 0xa6, 0x34, 0x96, 0x32, 0xc0, 0xdf, 0x6c, 0x41, 0x39, 0xd4, 0xc6, 0xe1, 0xe3, 0x81, 0xb0, 0x4c, 0x34, 0x4f, 0xe5, 0xf4, 0x35, 0x46, 0x1f, 0xeb, 0x75, 0xfd, 0x43, 0x37, 0x50, 0x99, 0xab, 0xad, 0xb7, 0x8c, 0xa1, 0x57, 0xcb, 0xe6, 0xce, 0x16, 0x2e, 0x85, 0xcc, 0xf9, + /* (2^281)P */ 0x63, 0xd1, 0x3f, 0x9e, 0xa2, 0x17, 0x2e, 0x1d, 0x3e, 0xce, 0x48, 0x2d, 0xbb, 0x8f, 0x69, 0xc9, 0xa6, 0x3d, 0x4e, 0xfe, 0x09, 0x56, 0xb3, 0x02, 0x5f, 0x99, 0x97, 0x0c, 0x54, 0xda, 0x32, 0x97, 0x9b, 0xf4, 0x95, 0xf1, 0xad, 0xe3, 0x2b, 0x04, 0xa7, 0x9b, 0x3f, 0xbb, 0xe7, 0x87, 0x2e, 0x1f, 0x8b, 0x4b, 0x7a, 0xa4, 0x43, 0x0c, 0x0f, 0x35, + /* (2^282)P */ 0x05, 0xdc, 0xe0, 0x2c, 0xa1, 0xc1, 0xd0, 0xf1, 0x1f, 0x4e, 0xc0, 0x6c, 0x35, 0x7b, 0xca, 0x8f, 0x8b, 0x02, 0xb1, 0xf7, 0xd6, 0x2e, 0xe7, 0x93, 0x80, 0x85, 0x18, 0x88, 0x19, 0xb9, 0xb4, 0x4a, 0xbc, 0xeb, 0x5a, 0x78, 0x38, 0xed, 0xc6, 0x27, 0x2a, 0x74, 0x76, 0xf0, 0x1b, 0x79, 0x92, 0x2f, 0xd2, 0x81, 0x98, 0xdf, 0xa9, 0x50, 0x19, 0xeb, + /* (2^283)P */ 0xb5, 0xe7, 0xb4, 0x11, 0x3a, 0x81, 0xb6, 0xb4, 0xf8, 0xa2, 0xb3, 0x6c, 0xfc, 0x9d, 0xe0, 0xc0, 0xe0, 0x59, 0x7f, 0x05, 0x37, 0xef, 0x2c, 0xa9, 0x3a, 0x24, 0xac, 0x7b, 0x25, 0xa0, 0x55, 0xd2, 0x44, 0x82, 0x82, 0x6e, 0x64, 0xa3, 0x58, 0xc8, 0x67, 0xae, 0x26, 0xa7, 0x0f, 0x42, 0x63, 0xe1, 0x93, 0x01, 0x52, 0x19, 0xaf, 0x49, 0x3e, 0x33, + /* (2^284)P */ 0x05, 0x85, 0xe6, 0x66, 0xaf, 0x5f, 0xdf, 0xbf, 0x9d, 0x24, 0x62, 0x60, 0x90, 0xe2, 0x4c, 0x7d, 0x4e, 0xc3, 0x74, 0x5d, 0x4f, 0x53, 0xf3, 0x63, 0x13, 0xf4, 0x74, 0x28, 0x6b, 0x7d, 0x57, 0x0c, 0x9d, 0x84, 0xa7, 0x1a, 0xff, 0xa0, 0x79, 0xdf, 0xfc, 0x65, 0x98, 0x8e, 0x22, 0x0d, 0x62, 0x7e, 0xf2, 0x34, 0x60, 0x83, 0x05, 0x14, 0xb1, 0xc1, + /* (2^285)P */ 0x64, 0x22, 0xcc, 0xdf, 0x5c, 0xbc, 0x88, 0x68, 0x4c, 0xd9, 0xbc, 0x0e, 0xc9, 0x8b, 0xb4, 0x23, 0x52, 0xad, 0xb0, 0xb3, 0xf1, 0x17, 0xd8, 0x15, 0x04, 0x6b, 0x99, 0xf0, 0xc4, 0x7d, 0x48, 0x22, 0x4a, 0xf8, 0x6f, 0xaa, 0x88, 0x0d, 0xc5, 0x5e, 0xa9, 0x1c, 0x61, 0x3d, 0x95, 0xa9, 0x7b, 0x6a, 0x79, 0x33, 0x0a, 0x2b, 0x99, 0xe3, 0x4e, 0x48, + /* (2^286)P */ 0x6b, 0x9b, 0x6a, 0x2a, 0xf1, 0x60, 0x31, 0xb4, 0x73, 0xd1, 0x87, 0x45, 0x9c, 0x15, 0x58, 0x4b, 0x91, 0x6d, 0x94, 0x1c, 0x41, 0x11, 0x4a, 0x83, 0xec, 0xaf, 0x65, 0xbc, 0x34, 0xaa, 0x26, 0xe2, 0xaf, 0xed, 0x46, 0x05, 0x4e, 0xdb, 0xc6, 0x4e, 0x10, 0x28, 0x4e, 0x72, 0xe5, 0x31, 0xa3, 0x20, 0xd7, 0xb1, 0x96, 0x64, 0xf6, 0xce, 0x08, 0x08, + /* (2^287)P */ 0x16, 0xa9, 0x5c, 0x9f, 0x9a, 0xb4, 0xb8, 0xc8, 0x32, 0x78, 0xc0, 0x3a, 0xd9, 0x5f, 0x94, 0xac, 0x3a, 0x42, 0x1f, 0x43, 0xd6, 0x80, 0x47, 0x2c, 0xdc, 0x76, 0x27, 0xfa, 0x50, 0xe5, 0xa1, 0xe4, 0xc3, 0xcb, 0x61, 0x31, 0xe1, 0x2e, 0xde, 0x81, 0x3b, 0x77, 0x1c, 0x39, 0x3c, 0xdb, 0xda, 0x87, 0x4b, 0x84, 0x12, 0xeb, 0xdd, 0x54, 0xbf, 0xe7, + /* (2^288)P */ 0xbf, 0xcb, 0x73, 0x21, 0x3d, 0x7e, 0x13, 0x8c, 0xa6, 0x34, 0x21, 0x2b, 0xa5, 0xe4, 0x9f, 0x8e, 0x9c, 0x01, 0x9c, 0x43, 0xd9, 0xc7, 0xb9, 0xf1, 0xbe, 0x7f, 0x45, 0x51, 0x97, 0xa1, 0x8e, 0x01, 0xf8, 0xbd, 0xd2, 0xbf, 0x81, 0x3a, 0x8b, 0xab, 0xe4, 0x89, 0xb7, 0xbd, 0xf2, 0xcd, 0xa9, 0x8a, 0x8a, 0xde, 0xfb, 0x8a, 0x55, 0x12, 0x7b, 0x17, + /* (2^289)P */ 0x1b, 0x95, 0x58, 0x4d, 0xe6, 0x51, 0x31, 0x52, 0x1c, 0xd8, 0x15, 0x84, 0xb1, 0x0d, 0x36, 0x25, 0x88, 0x91, 0x46, 0x71, 0x42, 0x56, 0xe2, 0x90, 0x08, 0x9e, 0x77, 0x1b, 0xee, 0x22, 0x3f, 0xec, 0xee, 0x8c, 0x7b, 0x2e, 0x79, 0xc4, 0x6c, 0x07, 0xa1, 0x7e, 0x52, 0xf5, 0x26, 0x5c, 0x84, 0x2a, 0x50, 0x6e, 0x82, 0xb3, 0x76, 0xda, 
0x35, 0x16, + /* (2^290)P */ 0x0a, 0x6f, 0x99, 0x87, 0xc0, 0x7d, 0x8a, 0xb2, 0xca, 0xae, 0xe8, 0x65, 0x98, 0x0f, 0xb3, 0x44, 0xe1, 0xdc, 0x52, 0x79, 0x75, 0xec, 0x8f, 0x95, 0x87, 0x45, 0xd1, 0x32, 0x18, 0x55, 0x15, 0xce, 0x64, 0x9b, 0x08, 0x4f, 0x2c, 0xea, 0xba, 0x1c, 0x57, 0x06, 0x63, 0xc8, 0xb1, 0xfd, 0xc5, 0x67, 0xe7, 0x1f, 0x87, 0x9e, 0xde, 0x72, 0x7d, 0xec, + /* (2^291)P */ 0x36, 0x8b, 0x4d, 0x2c, 0xc2, 0x46, 0xe8, 0x96, 0xac, 0x0b, 0x8c, 0xc5, 0x09, 0x10, 0xfc, 0xf2, 0xda, 0xea, 0x22, 0xb2, 0xd3, 0x89, 0xeb, 0xb2, 0x85, 0x0f, 0xff, 0x59, 0x50, 0x2c, 0x99, 0x5a, 0x1f, 0xec, 0x2a, 0x6f, 0xec, 0xcf, 0xe9, 0xce, 0x12, 0x6b, 0x19, 0xd8, 0xde, 0x9b, 0xce, 0x0e, 0x6a, 0xaa, 0xe1, 0x32, 0xea, 0x4c, 0xfe, 0x92, + /* (2^292)P */ 0x5f, 0x17, 0x70, 0x53, 0x26, 0x03, 0x0b, 0xab, 0xd1, 0xc1, 0x42, 0x0b, 0xab, 0x2b, 0x3d, 0x31, 0xa4, 0xd5, 0x2b, 0x5e, 0x00, 0xd5, 0x9a, 0x22, 0x34, 0xe0, 0x53, 0x3f, 0x59, 0x7f, 0x2c, 0x6d, 0x72, 0x9a, 0xa4, 0xbe, 0x3d, 0x42, 0x05, 0x1b, 0xf2, 0x7f, 0x88, 0x56, 0xd1, 0x7c, 0x7d, 0x6b, 0x9f, 0x43, 0xfe, 0x65, 0x19, 0xae, 0x9c, 0x4c, + /* (2^293)P */ 0xf3, 0x7c, 0x20, 0xa9, 0xfc, 0xf2, 0xf2, 0x3b, 0x3c, 0x57, 0x41, 0x94, 0xe5, 0xcc, 0x6a, 0x37, 0x5d, 0x09, 0xf2, 0xab, 0xc2, 0xca, 0x60, 0x38, 0x6b, 0x7a, 0xe1, 0x78, 0x2b, 0xc1, 0x1d, 0xe8, 0xfd, 0xbc, 0x3d, 0x5c, 0xa2, 0xdb, 0x49, 0x20, 0x79, 0xe6, 0x1b, 0x9b, 0x65, 0xd9, 0x6d, 0xec, 0x57, 0x1d, 0xd2, 0xe9, 0x90, 0xeb, 0x43, 0x7b, + /* (2^294)P */ 0x2a, 0x8b, 0x2e, 0x19, 0x18, 0x10, 0xb8, 0x83, 0xe7, 0x7d, 0x2d, 0x9a, 0x3a, 0xe5, 0xd1, 0xe4, 0x7c, 0x38, 0xe5, 0x59, 0x2a, 0x6e, 0xd9, 0x01, 0x29, 0x3d, 0x23, 0xf7, 0x52, 0xba, 0x61, 0x04, 0x9a, 0xde, 0xc4, 0x31, 0x50, 0xeb, 0x1b, 0xaa, 0xde, 0x39, 0x58, 0xd8, 0x1b, 0x1e, 0xfc, 0x57, 0x9a, 0x28, 0x43, 0x9e, 0x97, 0x5e, 0xaa, 0xa3, + /* (2^295)P */ 0x97, 0x0a, 0x74, 0xc4, 0x39, 0x99, 0x6b, 0x40, 0xc7, 0x3e, 0x8c, 0xa7, 0xb1, 0x4e, 0x9a, 0x59, 0x6e, 0x1c, 0xfe, 0xfc, 0x2a, 0x5e, 0x73, 0x2b, 0x8c, 0xa9, 0x71, 0xf5, 0xda, 0x6b, 0x15, 0xab, 0xf7, 0xbe, 0x2a, 0x44, 0x5f, 0xba, 0xae, 0x67, 0x93, 0xc5, 0x86, 0xc1, 0xb8, 0xdf, 0xdc, 0xcb, 0xd7, 0xff, 0xb1, 0x71, 0x7c, 0x6f, 0x88, 0xf8, + /* (2^296)P */ 0x3f, 0x89, 0xb1, 0xbf, 0x24, 0x16, 0xac, 0x56, 0xfe, 0xdf, 0x94, 0x71, 0xbf, 0xd6, 0x57, 0x0c, 0xb4, 0x77, 0x37, 0xaa, 0x2a, 0x70, 0x76, 0x49, 0xaf, 0x0c, 0x97, 0x8e, 0x78, 0x2a, 0x67, 0xc9, 0x3b, 0x3d, 0x5b, 0x01, 0x2f, 0xda, 0xd5, 0xa8, 0xde, 0x02, 0xa9, 0xac, 0x76, 0x00, 0x0b, 0x46, 0xc6, 0x2d, 0xdc, 0x08, 0xf4, 0x10, 0x2c, 0xbe, + /* (2^297)P */ 0xcb, 0x07, 0xf9, 0x91, 0xc6, 0xd5, 0x3e, 0x54, 0x63, 0xae, 0xfc, 0x10, 0xbe, 0x3a, 0x20, 0x73, 0x4e, 0x65, 0x0e, 0x2d, 0x86, 0x77, 0x83, 0x9d, 0xe2, 0x0a, 0xe9, 0xac, 0x22, 0x52, 0x76, 0xd4, 0x6e, 0xfa, 0xe0, 0x09, 0xef, 0x78, 0x82, 0x9f, 0x26, 0xf9, 0x06, 0xb5, 0xe7, 0x05, 0x0e, 0xf2, 0x46, 0x72, 0x93, 0xd3, 0x24, 0xbd, 0x87, 0x60, + /* (2^298)P */ 0x14, 0x55, 0x84, 0x7b, 0x6c, 0x60, 0x80, 0x73, 0x8c, 0xbe, 0x2d, 0xd6, 0x69, 0xd6, 0x17, 0x26, 0x44, 0x9f, 0x88, 0xa2, 0x39, 0x7c, 0x89, 0xbc, 0x6d, 0x9e, 0x46, 0xb6, 0x68, 0x66, 0xea, 0xdc, 0x31, 0xd6, 0x21, 0x51, 0x9f, 0x28, 0x28, 0xaf, 0x9e, 0x47, 0x2c, 0x4c, 0x8f, 0xf3, 0xaf, 0x1f, 0xe4, 0xab, 0xac, 0xe9, 0x0c, 0x91, 0x3a, 0x61, + /* (2^299)P */ 0xb0, 0x37, 0x55, 0x4b, 0xe9, 0xc3, 0xb1, 0xce, 0x42, 0xe6, 0xc5, 0x11, 0x7f, 0x2c, 0x11, 0xfc, 0x4e, 0x71, 0x17, 0x00, 0x74, 0x7f, 0xbf, 0x07, 0x4d, 0xfd, 0x40, 0xb2, 0x87, 0xb0, 0xef, 0x1f, 0x35, 0x2c, 0x2d, 0xd7, 0xe1, 0xe4, 0xad, 0x0e, 0x7f, 0x63, 0x66, 0x62, 0x23, 0x41, 0xf6, 0xc1, 0x14, 0xa6, 0xd7, 0xa9, 0x11, 0x56, 0x9d, 0x1b, + /* 
(2^300)P */ 0x02, 0x82, 0x42, 0x18, 0x4f, 0x1b, 0xc9, 0x5d, 0x78, 0x5f, 0xee, 0xed, 0x01, 0x49, 0x8f, 0xf2, 0xa0, 0xe2, 0x6e, 0xbb, 0x6b, 0x04, 0x8d, 0xb2, 0x41, 0xae, 0xc8, 0x1b, 0x59, 0x34, 0xb8, 0x2a, 0xdb, 0x1f, 0xd2, 0x52, 0xdf, 0x3f, 0x35, 0x00, 0x8b, 0x61, 0xbc, 0x97, 0xa0, 0xc4, 0x77, 0xd1, 0xe4, 0x2c, 0x59, 0x68, 0xff, 0x30, 0xf2, 0xe2, + /* (2^301)P */ 0x79, 0x08, 0xb1, 0xdb, 0x55, 0xae, 0xd0, 0xed, 0xda, 0xa0, 0xec, 0x6c, 0xae, 0x68, 0xf2, 0x0b, 0x61, 0xb3, 0xf5, 0x21, 0x69, 0x87, 0x0b, 0x03, 0xea, 0x8a, 0x15, 0xd9, 0x7e, 0xca, 0xf7, 0xcd, 0xf3, 0x33, 0xb3, 0x4c, 0x5b, 0x23, 0x4e, 0x6f, 0x90, 0xad, 0x91, 0x4b, 0x4f, 0x46, 0x37, 0xe5, 0xe8, 0xb7, 0xeb, 0xd5, 0xca, 0x34, 0x4e, 0x23, + /* (2^302)P */ 0x09, 0x02, 0xdd, 0xfd, 0x70, 0xac, 0x56, 0x80, 0x36, 0x5e, 0x49, 0xd0, 0x3f, 0xc2, 0xe0, 0xba, 0x46, 0x7f, 0x5c, 0xf7, 0xc5, 0xbd, 0xd5, 0x55, 0x7d, 0x3f, 0xd5, 0x7d, 0x06, 0xdf, 0x27, 0x20, 0x4f, 0xe9, 0x30, 0xec, 0x1b, 0xa0, 0x0c, 0xd4, 0x2c, 0xe1, 0x2b, 0x65, 0x73, 0xea, 0x75, 0x35, 0xe8, 0xe6, 0x56, 0xd6, 0x07, 0x15, 0x99, 0xdf, + /* (2^303)P */ 0x4e, 0x10, 0xb7, 0xd0, 0x63, 0x8c, 0xcf, 0x16, 0x00, 0x7c, 0x58, 0xdf, 0x86, 0xdc, 0x4e, 0xca, 0x9c, 0x40, 0x5a, 0x42, 0xfd, 0xec, 0x98, 0xa4, 0x42, 0x53, 0xae, 0x16, 0x9d, 0xfd, 0x75, 0x5a, 0x12, 0x56, 0x1e, 0xc6, 0x57, 0xcc, 0x79, 0x27, 0x96, 0x00, 0xcf, 0x80, 0x4f, 0x8a, 0x36, 0x5c, 0xbb, 0xe9, 0x12, 0xdb, 0xb6, 0x2b, 0xad, 0x96, + /* (2^304)P */ 0x92, 0x32, 0x1f, 0xfd, 0xc6, 0x02, 0x94, 0x08, 0x1b, 0x60, 0x6a, 0x9f, 0x8b, 0xd6, 0xc8, 0xad, 0xd5, 0x1b, 0x27, 0x4e, 0xa4, 0x4d, 0x4a, 0x00, 0x10, 0x5f, 0x86, 0x11, 0xf5, 0xe3, 0x14, 0x32, 0x43, 0xee, 0xb9, 0xc7, 0xab, 0xf4, 0x6f, 0xe5, 0x66, 0x0c, 0x06, 0x0d, 0x96, 0x79, 0x28, 0xaf, 0x45, 0x2b, 0x56, 0xbe, 0xe4, 0x4a, 0x52, 0xd6, + /* (2^305)P */ 0x15, 0x16, 0x69, 0xef, 0x60, 0xca, 0x82, 0x25, 0x0f, 0xc6, 0x30, 0xa0, 0x0a, 0xd1, 0x83, 0x29, 0xcd, 0xb6, 0x89, 0x6c, 0xf5, 0xb2, 0x08, 0x38, 0xe6, 0xca, 0x6b, 0x19, 0x93, 0xc6, 0x5f, 0x75, 0x8e, 0x60, 0x34, 0x23, 0xc4, 0x13, 0x17, 0x69, 0x55, 0xcc, 0x72, 0x9c, 0x2b, 0x6c, 0x80, 0xf4, 0x4b, 0x8b, 0xb6, 0x97, 0x65, 0x07, 0xb6, 0xfb, + /* (2^306)P */ 0x01, 0x99, 0x74, 0x28, 0xa6, 0x67, 0xa3, 0xe5, 0x25, 0xfb, 0xdf, 0x82, 0x93, 0xe7, 0x35, 0x74, 0xce, 0xe3, 0x15, 0x1c, 0x1d, 0x79, 0x52, 0x84, 0x08, 0x04, 0x2f, 0x5c, 0xb8, 0xcd, 0x7f, 0x89, 0xb0, 0x39, 0x93, 0x63, 0xc9, 0x5d, 0x06, 0x01, 0x59, 0xf7, 0x7e, 0xf1, 0x4c, 0x3d, 0x12, 0x8d, 0x69, 0x1d, 0xb7, 0x21, 0x5e, 0x88, 0x82, 0xa2, + /* (2^307)P */ 0x8e, 0x69, 0xaf, 0x9a, 0x41, 0x0d, 0x9d, 0xcf, 0x8e, 0x8d, 0x5c, 0x51, 0x6e, 0xde, 0x0e, 0x48, 0x23, 0x89, 0xe5, 0x37, 0x80, 0xd6, 0x9d, 0x72, 0x32, 0x26, 0x38, 0x2d, 0x63, 0xa0, 0xfa, 0xd3, 0x40, 0xc0, 0x8c, 0x68, 0x6f, 0x2b, 0x1e, 0x9a, 0x39, 0x51, 0x78, 0x74, 0x9a, 0x7b, 0x4a, 0x8f, 0x0c, 0xa0, 0x88, 0x60, 0xa5, 0x21, 0xcd, 0xc7, + /* (2^308)P */ 0x3a, 0x7f, 0x73, 0x14, 0xbf, 0x89, 0x6a, 0x4c, 0x09, 0x5d, 0xf2, 0x93, 0x20, 0x2d, 0xc4, 0x29, 0x86, 0x06, 0x95, 0xab, 0x22, 0x76, 0x4c, 0x54, 0xe1, 0x7e, 0x80, 0x6d, 0xab, 0x29, 0x61, 0x87, 0x77, 0xf6, 0xc0, 0x3e, 0xda, 0xab, 0x65, 0x7e, 0x39, 0x12, 0xa1, 0x6b, 0x42, 0xf7, 0xc5, 0x97, 0x77, 0xec, 0x6f, 0x22, 0xbe, 0x44, 0xc7, 0x03, + /* (2^309)P */ 0xa5, 0x23, 0x90, 0x41, 0xa3, 0xc5, 0x3e, 0xe0, 0xa5, 0x32, 0x49, 0x1f, 0x39, 0x78, 0xb1, 0xd8, 0x24, 0xea, 0xd4, 0x87, 0x53, 0x42, 0x51, 0xf4, 0xd9, 0x46, 0x25, 0x2f, 0x62, 0xa9, 0x90, 0x9a, 0x4a, 0x25, 0x8a, 0xd2, 0x10, 0xe7, 0x3c, 0xbc, 0x58, 0x8d, 0x16, 0x14, 0x96, 0xa4, 0x6f, 0xf8, 0x12, 0x69, 0x91, 0x73, 0xe2, 0xfa, 0xf4, 0x57, + /* (2^310)P */ 0x51, 0x45, 
0x3f, 0x96, 0xdc, 0x97, 0x38, 0xa6, 0x01, 0x63, 0x09, 0xea, 0xc2, 0x13, 0x30, 0xb0, 0x00, 0xb8, 0x0a, 0xce, 0xd1, 0x8f, 0x3e, 0x69, 0x62, 0x46, 0x33, 0x9c, 0xbf, 0x4b, 0xcb, 0x0c, 0x90, 0x1c, 0x45, 0xcf, 0x37, 0x5b, 0xf7, 0x4b, 0x5e, 0x95, 0xc3, 0x28, 0x9f, 0x08, 0x83, 0x53, 0x74, 0xab, 0x0c, 0xb4, 0xc0, 0xa1, 0xbc, 0x89, + /* (2^311)P */ 0x06, 0xb1, 0x51, 0x15, 0x65, 0x60, 0x21, 0x17, 0x7a, 0x20, 0x65, 0xee, 0x12, 0x35, 0x4d, 0x46, 0xf4, 0xf8, 0xd0, 0xb1, 0xca, 0x09, 0x30, 0x08, 0x89, 0x23, 0x3b, 0xe7, 0xab, 0x8b, 0x77, 0xa6, 0xad, 0x25, 0xdd, 0xea, 0x3c, 0x7d, 0xa5, 0x24, 0xb3, 0xe8, 0xfa, 0xfb, 0xc9, 0xf2, 0x71, 0xe9, 0xfa, 0xf2, 0xdc, 0x54, 0xdd, 0x55, 0x2e, 0x2f, + /* (2^312)P */ 0x7f, 0x96, 0x96, 0xfb, 0x52, 0x86, 0xcf, 0xea, 0x62, 0x18, 0xf1, 0x53, 0x1f, 0x61, 0x2a, 0x9f, 0x8c, 0x51, 0xca, 0x2c, 0xde, 0x6d, 0xce, 0xab, 0x58, 0x32, 0x0b, 0x33, 0x9b, 0x99, 0xb4, 0x5c, 0x88, 0x2a, 0x76, 0xcc, 0x3e, 0x54, 0x1e, 0x9d, 0xa2, 0x89, 0xe4, 0x19, 0xba, 0x80, 0xc8, 0x39, 0x32, 0x7f, 0x0f, 0xc7, 0x84, 0xbb, 0x43, 0x56, + /* (2^313)P */ 0x9b, 0x07, 0xb4, 0x42, 0xa9, 0xa0, 0x78, 0x4f, 0x28, 0x70, 0x2b, 0x7e, 0x61, 0xe0, 0xdd, 0x02, 0x98, 0xfc, 0xed, 0x31, 0x80, 0xf1, 0x15, 0x52, 0x89, 0x23, 0xcd, 0x5d, 0x2b, 0xc5, 0x19, 0x32, 0xfb, 0x70, 0x50, 0x7a, 0x97, 0x6b, 0x42, 0xdb, 0xca, 0xdb, 0xc4, 0x59, 0x99, 0xe0, 0x12, 0x1f, 0x17, 0xba, 0x8b, 0xf0, 0xc4, 0x38, 0x5d, 0x27, + /* (2^314)P */ 0x29, 0x1d, 0xdc, 0x2b, 0xf6, 0x5b, 0x04, 0x61, 0x36, 0x76, 0xa0, 0x56, 0x36, 0x6e, 0xd7, 0x24, 0x4d, 0xe7, 0xef, 0x44, 0xd2, 0xd5, 0x07, 0xcd, 0xc4, 0x9d, 0x80, 0x48, 0xc3, 0x38, 0xcf, 0xd8, 0xa3, 0xdd, 0xb2, 0x5e, 0xb5, 0x70, 0x15, 0xbb, 0x36, 0x85, 0x8a, 0xd7, 0xfb, 0x56, 0x94, 0x73, 0x9c, 0x81, 0xbe, 0xb1, 0x44, 0x28, 0xf1, 0x37, + /* (2^315)P */ 0xbf, 0xcf, 0x5c, 0xd2, 0xe2, 0xea, 0xc2, 0xcd, 0x70, 0x7a, 0x9d, 0xcb, 0x81, 0xc1, 0xe9, 0xf1, 0x56, 0x71, 0x52, 0xf7, 0x1b, 0x87, 0xc6, 0xd8, 0xcc, 0xb2, 0x69, 0xf3, 0xb0, 0xbd, 0xba, 0x83, 0x12, 0x26, 0xc4, 0xce, 0x72, 0xde, 0x3b, 0x21, 0x28, 0x9e, 0x5a, 0x94, 0xf5, 0x04, 0xa3, 0xc8, 0x0f, 0x5e, 0xbc, 0x71, 0xf9, 0x0d, 0xce, 0xf5, + /* (2^316)P */ 0x93, 0x97, 0x00, 0x85, 0xf4, 0xb4, 0x40, 0xec, 0xd9, 0x2b, 0x6c, 0xd6, 0x63, 0x9e, 0x93, 0x0a, 0x5a, 0xf4, 0xa7, 0x9a, 0xe3, 0x3c, 0xf0, 0x55, 0xd1, 0x96, 0x6c, 0xf5, 0x2a, 0xce, 0xd7, 0x95, 0x72, 0xbf, 0xc5, 0x0c, 0xce, 0x79, 0xa2, 0x0a, 0x78, 0xe0, 0x72, 0xd0, 0x66, 0x28, 0x05, 0x75, 0xd3, 0x23, 0x09, 0x91, 0xed, 0x7e, 0xc4, 0xbc, + /* (2^317)P */ 0x77, 0xc2, 0x9a, 0xf7, 0xa6, 0xe6, 0x18, 0xb4, 0xe7, 0xf6, 0xda, 0xec, 0x44, 0x6d, 0xfb, 0x08, 0xee, 0x65, 0xa8, 0x92, 0x85, 0x1f, 0xba, 0x38, 0x93, 0x20, 0x5c, 0x4d, 0xd2, 0x18, 0x0f, 0x24, 0xbe, 0x1a, 0x96, 0x44, 0x7d, 0xeb, 0xb3, 0xda, 0x95, 0xf4, 0xaf, 0x6c, 0x06, 0x0f, 0x47, 0x37, 0xc8, 0x77, 0x63, 0xe1, 0x29, 0xef, 0xff, 0xa5, + /* (2^318)P */ 0x16, 0x12, 0xd9, 0x47, 0x90, 0x22, 0x9b, 0x05, 0xf2, 0xa5, 0x9a, 0xae, 0x83, 0x98, 0xb5, 0xac, 0xab, 0x29, 0xaa, 0xdc, 0x5f, 0xde, 0xcd, 0xf7, 0x42, 0xad, 0x3b, 0x96, 0xd6, 0x3e, 0x6e, 0x52, 0x47, 0xb1, 0xab, 0x51, 0xde, 0x49, 0x7c, 0x87, 0x8d, 0x86, 0xe2, 0x70, 0x13, 0x21, 0x51, 0x1c, 0x0c, 0x25, 0xc1, 0xb0, 0xe6, 0x19, 0xcf, 0x12, + /* (2^319)P */ 0xf0, 0xbc, 0x97, 0x8f, 0x4b, 0x2f, 0xd1, 0x1f, 0x8c, 0x57, 0xed, 0x3c, 0xf4, 0x26, 0x19, 0xbb, 0x60, 0xca, 0x24, 0xc5, 0xd9, 0x97, 0xe2, 0x5f, 0x76, 0x49, 0x39, 0x7e, 0x2d, 0x12, 0x21, 0x98, 0xda, 0xe6, 0xdb, 0xd2, 0xd8, 0x9f, 0x18, 0xd8, 0x83, 0x6c, 0xba, 0x89, 0x8d, 0x29, 0xfa, 0x46, 0x33, 0x8c, 0x28, 0xdf, 0x6a, 0xb3, 0x69, 0x28, + /* (2^320)P */ 0x86, 0x17, 0xbc, 0xd6, 0x7c, 0xba, 
0x1e, 0x83, 0xbb, 0x84, 0xb5, 0x8c, 0xad, 0xdf, 0xa1, 0x24, 0x81, 0x70, 0x40, 0x0f, 0xad, 0xad, 0x3b, 0x23, 0xd0, 0x93, 0xa0, 0x49, 0x5c, 0x4b, 0x51, 0xbe, 0x20, 0x49, 0x4e, 0xda, 0x2d, 0xd3, 0xad, 0x1b, 0x74, 0x08, 0x41, 0xf0, 0xef, 0x19, 0xe9, 0x45, 0x5d, 0x02, 0xae, 0x26, 0x25, 0xd9, 0xd1, 0xc2, + /* (2^321)P */ 0x48, 0x81, 0x3e, 0xb2, 0x83, 0xf8, 0x4d, 0xb3, 0xd0, 0x4c, 0x75, 0xb3, 0xa0, 0x52, 0x26, 0xf2, 0xaf, 0x5d, 0x36, 0x70, 0x72, 0xd6, 0xb7, 0x88, 0x08, 0x69, 0xbd, 0x15, 0x25, 0xb1, 0x45, 0x1b, 0xb7, 0x0b, 0x5f, 0x71, 0x5d, 0x83, 0x49, 0xb9, 0x84, 0x3b, 0x7c, 0xc1, 0x50, 0x93, 0x05, 0x53, 0xe0, 0x61, 0xea, 0xc1, 0xef, 0xdb, 0x82, 0x97, + /* (2^322)P */ 0x00, 0xd5, 0xc3, 0x3a, 0x4d, 0x8a, 0x23, 0x7a, 0xef, 0xff, 0x37, 0xef, 0xf3, 0xbc, 0xa9, 0xb6, 0xae, 0xd7, 0x3a, 0x7b, 0xfd, 0x3e, 0x8e, 0x9b, 0xab, 0x44, 0x54, 0x60, 0x28, 0x6c, 0xbf, 0x15, 0x24, 0x4a, 0x56, 0x60, 0x7f, 0xa9, 0x7a, 0x28, 0x59, 0x2c, 0x8a, 0xd1, 0x7d, 0x6b, 0x00, 0xfd, 0xa5, 0xad, 0xbc, 0x19, 0x3f, 0xcb, 0x73, 0xe0, + /* (2^323)P */ 0xcf, 0x9e, 0x66, 0x06, 0x4d, 0x2b, 0xf5, 0x9c, 0xc2, 0x9d, 0x9e, 0xed, 0x5a, 0x5c, 0x2d, 0x00, 0xbf, 0x29, 0x90, 0x88, 0xe4, 0x5d, 0xfd, 0xe2, 0xf0, 0x38, 0xec, 0x4d, 0x26, 0xea, 0x54, 0xf0, 0x3c, 0x84, 0x10, 0x6a, 0xf9, 0x66, 0x9c, 0xe7, 0x21, 0xfd, 0x0f, 0xc7, 0x13, 0x50, 0x81, 0xb6, 0x50, 0xf9, 0x04, 0x7f, 0xa4, 0x37, 0x85, 0x14, + /* (2^324)P */ 0xdb, 0x87, 0x49, 0xc7, 0xa8, 0x39, 0x0c, 0x32, 0x98, 0x0c, 0xb9, 0x1a, 0x1b, 0x4d, 0xe0, 0x8a, 0x9a, 0x8e, 0x8f, 0xab, 0x5a, 0x17, 0x3d, 0x04, 0x21, 0xce, 0x3e, 0x2c, 0xf9, 0xa3, 0x97, 0xe4, 0x77, 0x95, 0x0e, 0xb6, 0xa5, 0x15, 0xad, 0x3a, 0x1e, 0x46, 0x53, 0x17, 0x09, 0x83, 0x71, 0x4e, 0x86, 0x38, 0xd5, 0x23, 0x44, 0x16, 0x8d, 0xc8, + /* (2^325)P */ 0x05, 0x5e, 0x99, 0x08, 0xbb, 0xc3, 0xc0, 0xb7, 0x6c, 0x12, 0xf2, 0xf3, 0xf4, 0x7c, 0x6a, 0x4d, 0x9e, 0xeb, 0x3d, 0xb9, 0x63, 0x94, 0xce, 0x81, 0xd8, 0x11, 0xcb, 0x55, 0x69, 0x4a, 0x20, 0x0b, 0x4c, 0x2e, 0x14, 0xb8, 0xd4, 0x6a, 0x7c, 0xf0, 0xed, 0xfc, 0x8f, 0xef, 0xa0, 0xeb, 0x6c, 0x01, 0xe2, 0xdc, 0x10, 0x22, 0xa2, 0x01, 0x85, 0x64, + /* (2^326)P */ 0x58, 0xe1, 0x9c, 0x27, 0x55, 0xc6, 0x25, 0xa6, 0x7d, 0x67, 0x88, 0x65, 0x99, 0x6c, 0xcb, 0xdb, 0x27, 0x4f, 0x44, 0x29, 0xf5, 0x4a, 0x23, 0x10, 0xbc, 0x03, 0x3f, 0x36, 0x1e, 0xef, 0xb0, 0xba, 0x75, 0xe8, 0x74, 0x5f, 0x69, 0x3e, 0x26, 0x40, 0xb4, 0x2f, 0xdc, 0x43, 0xbf, 0xa1, 0x8b, 0xbd, 0xca, 0x6e, 0xc1, 0x6e, 0x21, 0x79, 0xa0, 0xd0, + /* (2^327)P */ 0x78, 0x93, 0x4a, 0x2d, 0x22, 0x6e, 0x6e, 0x7d, 0x74, 0xd2, 0x66, 0x58, 0xce, 0x7b, 0x1d, 0x97, 0xb1, 0xf2, 0xda, 0x1c, 0x79, 0xfb, 0xba, 0xd1, 0xc0, 0xc5, 0x6e, 0xc9, 0x11, 0x89, 0xd2, 0x41, 0x8d, 0x70, 0xb9, 0xcc, 0xea, 0x6a, 0xb3, 0x45, 0xb6, 0x05, 0x2e, 0xf2, 0x17, 0xf1, 0x27, 0xb8, 0xed, 0x06, 0x1f, 0xdb, 0x9d, 0x1f, 0x69, 0x28, + /* (2^328)P */ 0x93, 0x12, 0xa8, 0x11, 0xe1, 0x92, 0x30, 0x8d, 0xac, 0xe1, 0x1c, 0x60, 0x7c, 0xed, 0x2d, 0x2e, 0xd3, 0x03, 0x5c, 0x9c, 0xc5, 0xbd, 0x64, 0x4a, 0x8c, 0xba, 0x76, 0xfe, 0xc6, 0xc1, 0xea, 0xc2, 0x4f, 0xbe, 0x70, 0x3d, 0x64, 0xcf, 0x8e, 0x18, 0xcb, 0xcd, 0x57, 0xa7, 0xf7, 0x36, 0xa9, 0x6b, 0x3e, 0xb8, 0x69, 0xee, 0x47, 0xa2, 0x7e, 0xb2, + /* (2^329)P */ 0x96, 0xaf, 0x3a, 0xf5, 0xed, 0xcd, 0xaf, 0xf7, 0x82, 0xaf, 0x59, 0x62, 0x0b, 0x36, 0x85, 0xf9, 0xaf, 0xd6, 0x38, 0xff, 0x87, 0x2e, 0x1d, 0x6c, 0x8b, 0xaf, 0x3b, 0xdf, 0x28, 0xa2, 0xd6, 0x4d, 0x80, 0x92, 0xc3, 0x0f, 0x34, 0xa8, 0xae, 0x69, 0x5d, 0x7b, 0x9d, 0xbc, 0xf5, 0xfd, 0x1d, 0xb1, 0x96, 0x55, 0x86, 0xe1, 0x5c, 0xb6, 0xac, 0xb9, + /* (2^330)P */ 0x50, 0x9e, 0x37, 0x28, 0x7d, 0xa8, 0x33, 0x63, 0xda, 0x3f, 
0x20, 0x98, 0x0e, 0x09, 0xa8, 0x77, 0x3b, 0x7a, 0xfc, 0x16, 0x85, 0x44, 0x64, 0x77, 0x65, 0x68, 0x92, 0x41, 0xc6, 0x1f, 0xdf, 0x27, 0xf9, 0xec, 0xa0, 0x61, 0x22, 0xea, 0x19, 0xe7, 0x75, 0x8b, 0x4e, 0xe5, 0x0f, 0xb7, 0xf7, 0xd2, 0x53, 0xf4, 0xdd, 0x4a, 0xaa, 0x78, 0x40, 0xb7, + /* (2^331)P */ 0xd4, 0x89, 0xe3, 0x79, 0xba, 0xb6, 0xc3, 0xda, 0xe6, 0x78, 0x65, 0x7d, 0x6e, 0x22, 0x62, 0xb1, 0x3d, 0xea, 0x90, 0x84, 0x30, 0x5e, 0xd4, 0x39, 0x84, 0x78, 0xd9, 0x75, 0xd6, 0xce, 0x2a, 0x11, 0x29, 0x69, 0xa4, 0x5e, 0xaa, 0x2a, 0x98, 0x5a, 0xe5, 0x91, 0x8f, 0xb2, 0xfb, 0xda, 0x97, 0xe8, 0x83, 0x6f, 0x04, 0xb9, 0x5d, 0xaf, 0xe1, 0x9b, + /* (2^332)P */ 0x8b, 0xe4, 0xe1, 0x48, 0x9c, 0xc4, 0x83, 0x89, 0xdf, 0x65, 0xd3, 0x35, 0x55, 0x13, 0xf4, 0x1f, 0x36, 0x92, 0x33, 0x38, 0xcb, 0xed, 0x15, 0xe6, 0x60, 0x2d, 0x25, 0xf5, 0x36, 0x60, 0x3a, 0x37, 0x9b, 0x71, 0x9d, 0x42, 0xb0, 0x14, 0xc8, 0xba, 0x62, 0xa3, 0x49, 0xb0, 0x88, 0xc1, 0x72, 0x73, 0xdd, 0x62, 0x40, 0xa9, 0x62, 0x88, 0x99, 0xca, + /* (2^333)P */ 0x47, 0x7b, 0xea, 0xda, 0x46, 0x2f, 0x45, 0xc6, 0xe3, 0xb4, 0x4d, 0x8d, 0xac, 0x0b, 0x54, 0x22, 0x06, 0x31, 0x16, 0x66, 0x3e, 0xe4, 0x38, 0x12, 0xcd, 0xf3, 0xe7, 0x99, 0x37, 0xd9, 0x62, 0x24, 0x4b, 0x05, 0xf2, 0x58, 0xe6, 0x29, 0x4b, 0x0d, 0xf6, 0xc1, 0xba, 0xa0, 0x1e, 0x0f, 0xcb, 0x1f, 0xc6, 0x2b, 0x19, 0xfc, 0x82, 0x01, 0xd0, 0x86, + /* (2^334)P */ 0xa2, 0xae, 0x77, 0x20, 0xfb, 0xa8, 0x18, 0xb4, 0x61, 0xef, 0xe8, 0x52, 0x79, 0xbb, 0x86, 0x90, 0x5d, 0x2e, 0x76, 0xed, 0x66, 0x60, 0x5d, 0x00, 0xb5, 0xa4, 0x00, 0x40, 0x89, 0xec, 0xd1, 0xd2, 0x0d, 0x26, 0xb9, 0x30, 0xb2, 0xd2, 0xb8, 0xe8, 0x0e, 0x56, 0xf9, 0x67, 0x94, 0x2e, 0x62, 0xe1, 0x79, 0x48, 0x2b, 0xa9, 0xfa, 0xea, 0xdb, 0x28, + /* (2^335)P */ 0x35, 0xf1, 0xb0, 0x43, 0xbd, 0x27, 0xef, 0x18, 0x44, 0xa2, 0x04, 0xb4, 0x69, 0xa1, 0x97, 0x1f, 0x8c, 0x04, 0x82, 0x9b, 0x00, 0x6d, 0xf8, 0xbf, 0x7d, 0xc1, 0x5b, 0xab, 0xe8, 0xb2, 0x34, 0xbd, 0xaf, 0x7f, 0xb2, 0x0d, 0xf3, 0xed, 0xfc, 0x5b, 0x50, 0xee, 0xe7, 0x4a, 0x20, 0xd9, 0xf5, 0xc6, 0x9a, 0x97, 0x6d, 0x07, 0x2f, 0xb9, 0x31, 0x02, + /* (2^336)P */ 0xf9, 0x54, 0x4a, 0xc5, 0x61, 0x7e, 0x1d, 0xa6, 0x0e, 0x1a, 0xa8, 0xd3, 0x8c, 0x36, 0x7d, 0xf1, 0x06, 0xb1, 0xac, 0x93, 0xcd, 0xe9, 0x8f, 0x61, 0x6c, 0x5d, 0x03, 0x23, 0xdf, 0x85, 0x53, 0x39, 0x63, 0x5e, 0xeb, 0xf3, 0xd3, 0xd3, 0x75, 0x97, 0x9b, 0x62, 0x9b, 0x01, 0xb3, 0x19, 0xd8, 0x2b, 0x36, 0xf2, 0x2c, 0x2c, 0x6f, 0x36, 0xc6, 0x3c, + /* (2^337)P */ 0x05, 0x74, 0x43, 0x10, 0xb6, 0xb0, 0xf8, 0xbf, 0x02, 0x46, 0x9a, 0xee, 0xc1, 0xaf, 0xc1, 0xe5, 0x5a, 0x2e, 0xbb, 0xe1, 0xdc, 0xc6, 0xce, 0x51, 0x29, 0x50, 0xbf, 0x1b, 0xde, 0xff, 0xba, 0x4d, 0x8d, 0x8b, 0x7e, 0xe7, 0xbd, 0x5b, 0x8f, 0xbe, 0xe3, 0x75, 0x71, 0xff, 0x37, 0x05, 0x5a, 0x10, 0xeb, 0x54, 0x7e, 0x44, 0x72, 0x2c, 0xd4, 0xfc, + /* (2^338)P */ 0x03, 0x12, 0x1c, 0xb2, 0x08, 0x90, 0xa1, 0x2d, 0x50, 0xa0, 0xad, 0x7f, 0x8d, 0xa6, 0x97, 0xc1, 0xbd, 0xdc, 0xc3, 0xa7, 0xad, 0x31, 0xdf, 0xb8, 0x03, 0x84, 0xc3, 0xb9, 0x29, 0x3d, 0x92, 0x2e, 0xc3, 0x90, 0x07, 0xe8, 0xa7, 0xc7, 0xbc, 0x61, 0xe9, 0x3e, 0xa0, 0x35, 0xda, 0x1d, 0xab, 0x48, 0xfe, 0x50, 0xc9, 0x25, 0x59, 0x23, 0x69, 0x3f, + /* (2^339)P */ 0x8e, 0x91, 0xab, 0x6b, 0x91, 0x4f, 0x89, 0x76, 0x67, 0xad, 0xb2, 0x65, 0x9d, 0xad, 0x02, 0x36, 0xdc, 0xac, 0x96, 0x93, 0x97, 0x21, 0x14, 0xd0, 0xe8, 0x11, 0x60, 0x1e, 0xeb, 0x96, 0x06, 0xf2, 0x53, 0xf2, 0x6d, 0xb7, 0x93, 0x6f, 0x26, 0x91, 0x23, 0xe3, 0x34, 0x04, 0x92, 0x91, 0x37, 0x08, 0x50, 0xd6, 0x28, 0x09, 0x27, 0xa1, 0x0c, 0x00, + /* (2^340)P */ 0x1f, 0xbb, 0x21, 0x26, 0x33, 0xcb, 0xa4, 0xd1, 0xee, 0x85, 0xf9, 0xd9, 0x3c, 0x90, 
0xc3, 0xd1, 0x26, 0xa2, 0x25, 0x93, 0x43, 0x61, 0xed, 0x91, 0x6e, 0x54, 0x03, 0x2e, 0x42, 0x9d, 0xf7, 0xa6, 0x02, 0x0f, 0x2f, 0x9c, 0x7a, 0x8d, 0x12, 0xc2, 0x18, 0xfc, 0x41, 0xff, 0x85, 0x26, 0x1a, 0x44, 0x55, 0x0b, 0x89, 0xab, 0x6f, 0x62, 0x33, 0x8c, + /* (2^341)P */ 0xe0, 0x3c, 0x5d, 0x70, 0x64, 0x87, 0x81, 0x35, 0xf2, 0x37, 0xa6, 0x24, 0x3e, 0xe0, 0x62, 0xd5, 0x71, 0xe7, 0x93, 0xfb, 0xac, 0xc3, 0xe7, 0xc7, 0x04, 0xe2, 0x70, 0xd3, 0x29, 0x5b, 0x21, 0xbf, 0xf4, 0x26, 0x5d, 0xf3, 0x95, 0xb4, 0x2a, 0x6a, 0x07, 0x55, 0xa6, 0x4b, 0x3b, 0x15, 0xf2, 0x25, 0x8a, 0x95, 0x3f, 0x63, 0x2f, 0x7a, 0x23, 0x96, + /* (2^342)P */ 0x0d, 0x3d, 0xd9, 0x13, 0xa7, 0xb3, 0x5e, 0x67, 0xf7, 0x02, 0x23, 0xee, 0x84, 0xff, 0x99, 0xda, 0xb9, 0x53, 0xf8, 0xf0, 0x0e, 0x39, 0x2f, 0x3c, 0x64, 0x34, 0xe3, 0x09, 0xfd, 0x2b, 0x33, 0xc7, 0xfe, 0x62, 0x2b, 0x84, 0xdf, 0x2b, 0xd2, 0x7c, 0x26, 0x01, 0x70, 0x66, 0x5b, 0x85, 0xc2, 0xbe, 0x88, 0x37, 0xf1, 0x30, 0xac, 0xb8, 0x76, 0xa3, + /* (2^343)P */ 0x6e, 0x01, 0xf0, 0x55, 0x35, 0xe4, 0xbd, 0x43, 0x62, 0x9d, 0xd6, 0x11, 0xef, 0x6f, 0xb8, 0x8c, 0xaa, 0x98, 0x87, 0xc6, 0x6d, 0xc4, 0xcc, 0x74, 0x92, 0x53, 0x4a, 0xdf, 0xe4, 0x08, 0x89, 0x17, 0xd0, 0x0f, 0xf4, 0x00, 0x60, 0x78, 0x08, 0x44, 0xb5, 0xda, 0x18, 0xed, 0x98, 0xc8, 0x61, 0x3d, 0x39, 0xdb, 0xcf, 0x1d, 0x49, 0x40, 0x65, 0x75, + /* (2^344)P */ 0x8e, 0x10, 0xae, 0x5f, 0x06, 0xd2, 0x95, 0xfd, 0x20, 0x16, 0x49, 0x5b, 0x57, 0xbe, 0x22, 0x8b, 0x43, 0xfb, 0xe6, 0xcc, 0x26, 0xa5, 0x5d, 0xd3, 0x68, 0xc5, 0xf9, 0x5a, 0x86, 0x24, 0x87, 0x27, 0x05, 0xfd, 0xe2, 0xff, 0xb3, 0xa3, 0x7b, 0x37, 0x59, 0xc5, 0x4e, 0x14, 0x94, 0xf9, 0x3b, 0xcb, 0x7c, 0xed, 0xca, 0x1d, 0xb2, 0xac, 0x05, 0x4a, + /* (2^345)P */ 0xf4, 0xd1, 0x81, 0xeb, 0x89, 0xbf, 0xfe, 0x1e, 0x41, 0x92, 0x29, 0xee, 0xe1, 0x43, 0xf5, 0x86, 0x1d, 0x2f, 0xbb, 0x1e, 0x84, 0x5d, 0x7b, 0x8d, 0xd5, 0xda, 0xee, 0x1e, 0x8a, 0xd0, 0x27, 0xf2, 0x60, 0x51, 0x59, 0x82, 0xf4, 0x84, 0x2b, 0x5b, 0x14, 0x2d, 0x81, 0x82, 0x3e, 0x2b, 0xb4, 0x6d, 0x51, 0x4f, 0xc5, 0xcb, 0xbf, 0x74, 0xe3, 0xb4, + /* (2^346)P */ 0x19, 0x2f, 0x22, 0xb3, 0x04, 0x5f, 0x81, 0xca, 0x05, 0x60, 0xb9, 0xaa, 0xee, 0x0e, 0x2f, 0x48, 0x38, 0xf9, 0x91, 0xb4, 0x66, 0xe4, 0x57, 0x28, 0x54, 0x10, 0xe9, 0x61, 0x9d, 0xd4, 0x90, 0x75, 0xb1, 0x39, 0x23, 0xb6, 0xfc, 0x82, 0xe0, 0xfa, 0xbb, 0x5c, 0x6e, 0xc3, 0x44, 0x13, 0x00, 0x83, 0x55, 0x9e, 0x8e, 0x10, 0x61, 0x81, 0x91, 0x04, + /* (2^347)P */ 0x5f, 0x2a, 0xd7, 0x81, 0xd9, 0x9c, 0xbb, 0x79, 0xbc, 0x62, 0x56, 0x98, 0x03, 0x5a, 0x18, 0x85, 0x2a, 0x9c, 0xd0, 0xfb, 0xd2, 0xb1, 0xaf, 0xef, 0x0d, 0x24, 0xc5, 0xfa, 0x39, 0xbb, 0x6b, 0xed, 0xa4, 0xdf, 0xe4, 0x87, 0xcd, 0x41, 0xd3, 0x72, 0x32, 0xc6, 0x28, 0x21, 0xb1, 0xba, 0x8b, 0xa3, 0x91, 0x79, 0x76, 0x22, 0x25, 0x10, 0x61, 0xd1, + /* (2^348)P */ 0x73, 0xb5, 0x32, 0x97, 0xdd, 0xeb, 0xdd, 0x22, 0x22, 0xf1, 0x33, 0x3c, 0x77, 0x56, 0x7d, 0x6b, 0x48, 0x2b, 0x05, 0x81, 0x03, 0x03, 0x91, 0x9a, 0xe3, 0x5e, 0xd4, 0xee, 0x3f, 0xf8, 0xbb, 0x50, 0x21, 0x32, 0x4c, 0x4a, 0x58, 0x49, 0xde, 0x0c, 0xde, 0x30, 0x82, 0x3d, 0x92, 0xf0, 0x6c, 0xcc, 0x32, 0x3e, 0xd2, 0x78, 0x8a, 0x6e, 0x2c, 0xd0, + /* (2^349)P */ 0xf0, 0xf7, 0xa1, 0x0b, 0xc1, 0x74, 0x85, 0xa8, 0xe9, 0xdd, 0x48, 0xa1, 0xc0, 0x16, 0xd8, 0x2b, 0x61, 0x08, 0xc2, 0x2b, 0x30, 0x26, 0x79, 0xce, 0x9e, 0xfd, 0x39, 0xd7, 0x81, 0xa4, 0x63, 0x8c, 0xd5, 0x74, 0xa0, 0x88, 0xfa, 0x03, 0x30, 0xe9, 0x7f, 0x2b, 0xc6, 0x02, 0xc9, 0x5e, 0xe4, 0xd5, 0x4d, 0x92, 0xd0, 0xf6, 0xf2, 0x5b, 0x79, 0x08, + /* (2^350)P */ 0x34, 0x89, 0x81, 0x43, 0xd1, 0x94, 0x2c, 0x10, 0x54, 0x9b, 0xa0, 0xe5, 0x44, 0xe8, 0xc2, 0x2f, 0x3e, 0x0e, 
0x74, 0xae, 0xba, 0xe2, 0xac, 0x85, 0x6b, 0xd3, 0x5c, 0x97, 0xf7, 0x90, 0xf1, 0x12, 0xc0, 0x03, 0xc8, 0x1f, 0x37, 0x72, 0x8c, 0x9b, 0x9c, 0x17, 0x96, 0x9d, 0xc7, 0xbf, 0xa3, 0x3f, 0x44, 0x3d, 0x87, 0x81, 0xbd, 0x81, 0xa6, 0x5f, + /* (2^351)P */ 0xe4, 0xff, 0x78, 0x62, 0x82, 0x5b, 0x76, 0x58, 0xf5, 0x5b, 0xa6, 0xc4, 0x53, 0x11, 0x3b, 0x7b, 0xaa, 0x67, 0xf8, 0xea, 0x3b, 0x5d, 0x9a, 0x2e, 0x04, 0xeb, 0x4a, 0x24, 0xfb, 0x56, 0xf0, 0xa8, 0xd4, 0x14, 0xed, 0x0f, 0xfd, 0xc5, 0x26, 0x17, 0x2a, 0xf0, 0xb9, 0x13, 0x8c, 0xbd, 0x65, 0x14, 0x24, 0x95, 0x27, 0x12, 0x63, 0x2a, 0x09, 0x18, + /* (2^352)P */ 0xe1, 0x5c, 0xe7, 0xe0, 0x00, 0x6a, 0x96, 0xf2, 0x49, 0x6a, 0x39, 0xa5, 0xe0, 0x17, 0x79, 0x4a, 0x63, 0x07, 0x62, 0x09, 0x61, 0x1b, 0x6e, 0xa9, 0xb5, 0x62, 0xb7, 0xde, 0xdf, 0x80, 0x4c, 0x5a, 0x99, 0x73, 0x59, 0x9d, 0xfb, 0xb1, 0x5e, 0xbe, 0xb8, 0xb7, 0x63, 0x93, 0xe8, 0xad, 0x5e, 0x1f, 0xae, 0x59, 0x1c, 0xcd, 0xb4, 0xc2, 0xb3, 0x8a, + /* (2^353)P */ 0x78, 0x53, 0xa1, 0x4c, 0x70, 0x9c, 0x63, 0x7e, 0xb3, 0x12, 0x40, 0x5f, 0xbb, 0x23, 0xa7, 0xf7, 0x77, 0x96, 0x5b, 0x4d, 0x91, 0x10, 0x52, 0x85, 0x9e, 0xa5, 0x38, 0x0b, 0xfd, 0x25, 0x01, 0x4b, 0xfa, 0x4d, 0xd3, 0x3f, 0x78, 0x74, 0x42, 0xff, 0x62, 0x2d, 0x27, 0xdc, 0x9d, 0xd1, 0x29, 0x76, 0x2e, 0x78, 0xb3, 0x35, 0xfa, 0x15, 0xd5, 0x38, + /* (2^354)P */ 0x8b, 0xc7, 0x43, 0xce, 0xf0, 0x5e, 0xf1, 0x0d, 0x02, 0x38, 0xe8, 0x82, 0xc9, 0x25, 0xad, 0x2d, 0x27, 0xa4, 0x54, 0x18, 0xb2, 0x30, 0x73, 0xa4, 0x41, 0x08, 0xe4, 0x86, 0xe6, 0x8c, 0xe9, 0x2a, 0x34, 0xb3, 0xd6, 0x61, 0x8f, 0x66, 0x26, 0x08, 0xb6, 0x06, 0x33, 0xaa, 0x12, 0xac, 0x72, 0xec, 0x2e, 0x52, 0xa3, 0x25, 0x3e, 0xd7, 0x62, 0xe8, + /* (2^355)P */ 0xc4, 0xbb, 0x89, 0xc8, 0x40, 0xcc, 0x84, 0xec, 0x4a, 0xd9, 0xc4, 0x55, 0x78, 0x00, 0xcf, 0xd8, 0xe9, 0x24, 0x59, 0xdc, 0x5e, 0xf0, 0x66, 0xa1, 0x83, 0xae, 0x97, 0x18, 0xc5, 0x54, 0x27, 0xa2, 0x21, 0x52, 0x03, 0x31, 0x5b, 0x11, 0x67, 0xf6, 0x12, 0x00, 0x87, 0x2f, 0xff, 0x59, 0x70, 0x8f, 0x6d, 0x71, 0xab, 0xab, 0x24, 0xb8, 0xba, 0x35, + /* (2^356)P */ 0x69, 0x43, 0xa7, 0x14, 0x06, 0x96, 0xe9, 0xc2, 0xe3, 0x2b, 0x45, 0x22, 0xc0, 0xd0, 0x2f, 0x34, 0xd1, 0x01, 0x99, 0xfc, 0x99, 0x38, 0xa1, 0x25, 0x2e, 0x59, 0x6c, 0x27, 0xc9, 0xeb, 0x7b, 0xdc, 0x4e, 0x26, 0x68, 0xba, 0xfa, 0xec, 0x02, 0x05, 0x64, 0x80, 0x30, 0x20, 0x5c, 0x26, 0x7f, 0xaf, 0x95, 0x17, 0x3d, 0x5c, 0x9e, 0x96, 0x96, 0xaf, + /* (2^357)P */ 0xa6, 0xba, 0x21, 0x29, 0x32, 0xe2, 0x98, 0xde, 0x9b, 0x6d, 0x0b, 0x44, 0x91, 0xa8, 0x3e, 0xd4, 0xb8, 0x04, 0x6c, 0xf6, 0x04, 0x39, 0xbd, 0x52, 0x05, 0x15, 0x27, 0x78, 0x8e, 0x55, 0xac, 0x79, 0xc5, 0xe6, 0x00, 0x7f, 0x90, 0xa2, 0xdd, 0x07, 0x13, 0xe0, 0x24, 0x70, 0x5c, 0x0f, 0x4d, 0xa9, 0xf9, 0xae, 0xcb, 0x34, 0x10, 0x9d, 0x89, 0x9d, + /* (2^358)P */ 0x12, 0xe0, 0xb3, 0x9f, 0xc4, 0x96, 0x1d, 0xcf, 0xed, 0x99, 0x64, 0x28, 0x8d, 0xc7, 0x31, 0x82, 0xee, 0x5e, 0x75, 0x48, 0xff, 0x3a, 0xf2, 0x09, 0x34, 0x03, 0x93, 0x52, 0x19, 0xb2, 0xc5, 0x81, 0x93, 0x45, 0x5e, 0x59, 0x21, 0x2b, 0xec, 0x89, 0xba, 0x36, 0x6e, 0xf9, 0x82, 0x75, 0x7e, 0x82, 0x3f, 0xaa, 0xe2, 0xe3, 0x3b, 0x94, 0xfd, 0x98, + /* (2^359)P */ 0x7c, 0xdb, 0x75, 0x31, 0x61, 0xfb, 0x15, 0x28, 0x94, 0xd7, 0xc3, 0x5a, 0xa9, 0xa1, 0x0a, 0x66, 0x0f, 0x2b, 0x13, 0x3e, 0x42, 0xb5, 0x28, 0x3a, 0xca, 0x83, 0xf3, 0x61, 0x22, 0xf4, 0x40, 0xc5, 0xdf, 0xe7, 0x31, 0x9f, 0x7e, 0x51, 0x75, 0x06, 0x9d, 0x51, 0xc8, 0xe7, 0x9f, 0xc3, 0x71, 0x4f, 0x3d, 0x5b, 0xfb, 0xe9, 0x8e, 0x08, 0x40, 0x8e, + /* (2^360)P */ 0xf7, 0x31, 0xad, 0x50, 0x5d, 0x25, 0x93, 0x73, 0x68, 0xf6, 0x7c, 0x89, 0x5a, 0x3d, 0x9f, 0x9b, 0x05, 0x82, 0xe7, 0x70, 0x4b, 0x19, 
0xaa, 0xcf, 0xff, 0xde, 0x50, 0x8f, 0x2f, 0x69, 0xd3, 0xf0, 0x99, 0x51, 0x6b, 0x9d, 0xb6, 0x56, 0x6f, 0xf8, 0x4c, 0x74, 0x8b, 0x4c, 0x91, 0xf9, 0xa9, 0xb1, 0x3e, 0x07, 0xdf, 0x0b, 0x27, 0x8a, 0xb1, 0xed, + /* (2^361)P */ 0xfb, 0x67, 0xd9, 0x48, 0xd2, 0xe4, 0x44, 0x9b, 0x43, 0x15, 0x8a, 0xeb, 0x00, 0x53, 0xad, 0x25, 0xc7, 0x7e, 0x19, 0x30, 0x87, 0xb7, 0xd5, 0x5f, 0x04, 0xf8, 0xaa, 0xdd, 0x57, 0xae, 0x34, 0x75, 0xe2, 0x84, 0x4b, 0x54, 0x60, 0x37, 0x95, 0xe4, 0xd3, 0xec, 0xac, 0xef, 0x47, 0x31, 0xa3, 0xc8, 0x31, 0x22, 0xdb, 0x26, 0xe7, 0x6a, 0xb5, 0xad, + /* (2^362)P */ 0x44, 0x09, 0x5c, 0x95, 0xe4, 0x72, 0x3c, 0x1a, 0xd1, 0xac, 0x42, 0x51, 0x99, 0x6f, 0xfa, 0x1f, 0xf2, 0x22, 0xbe, 0xff, 0x7b, 0x66, 0xf5, 0x6c, 0xb3, 0x66, 0xc7, 0x4d, 0x78, 0x31, 0x83, 0x80, 0xf5, 0x41, 0xe9, 0x7f, 0xbe, 0xf7, 0x23, 0x49, 0x6b, 0x84, 0x4e, 0x7e, 0x47, 0x07, 0x6e, 0x74, 0xdf, 0xe5, 0x9d, 0x9e, 0x56, 0x2a, 0xc0, 0xbc, + /* (2^363)P */ 0xac, 0x10, 0x80, 0x8c, 0x7c, 0xfa, 0x83, 0xdf, 0xb3, 0xd0, 0xc4, 0xbe, 0xfb, 0x9f, 0xac, 0xc9, 0xc3, 0x40, 0x95, 0x0b, 0x09, 0x23, 0xda, 0x63, 0x67, 0xcf, 0xe7, 0x9f, 0x7d, 0x7b, 0x6b, 0xe2, 0xe6, 0x6d, 0xdb, 0x87, 0x9e, 0xa6, 0xff, 0x6d, 0xab, 0xbd, 0xfb, 0x54, 0x84, 0x68, 0xcf, 0x89, 0xf1, 0xd0, 0xe2, 0x85, 0x61, 0xdc, 0x22, 0xd1, + /* (2^364)P */ 0xa8, 0x48, 0xfb, 0x8c, 0x6a, 0x63, 0x01, 0x72, 0x43, 0x43, 0xeb, 0x21, 0xa3, 0x00, 0x8a, 0xc0, 0x87, 0x51, 0x9e, 0x86, 0x75, 0x16, 0x79, 0xf9, 0x6b, 0x11, 0x80, 0x62, 0xc2, 0x9d, 0xb8, 0x8c, 0x30, 0x8e, 0x8d, 0x03, 0x52, 0x7e, 0x31, 0x59, 0x38, 0xf9, 0x25, 0xc7, 0x0f, 0xc7, 0xa8, 0x2b, 0x5c, 0x80, 0xfa, 0x90, 0xa2, 0x63, 0xca, 0xe7, + /* (2^365)P */ 0xf1, 0x5d, 0xb5, 0xd9, 0x20, 0x10, 0x7d, 0x0f, 0xc5, 0x50, 0x46, 0x07, 0xff, 0x02, 0x75, 0x2b, 0x4a, 0xf3, 0x39, 0x91, 0x72, 0xb7, 0xd5, 0xcc, 0x38, 0xb8, 0xe7, 0x36, 0x26, 0x5e, 0x11, 0x97, 0x25, 0xfb, 0x49, 0x68, 0xdc, 0xb4, 0x46, 0x87, 0x5c, 0xc2, 0x7f, 0xaa, 0x7d, 0x36, 0x23, 0xa6, 0xc6, 0x53, 0xec, 0xbc, 0x57, 0x47, 0xc1, 0x2b, + /* (2^366)P */ 0x25, 0x5d, 0x7d, 0x95, 0xda, 0x0b, 0x8f, 0x78, 0x1e, 0x19, 0x09, 0xfa, 0x67, 0xe0, 0xa0, 0x17, 0x24, 0x76, 0x6c, 0x30, 0x1f, 0x62, 0x3d, 0xbe, 0x45, 0x70, 0xcc, 0xb6, 0x1e, 0x68, 0x06, 0x25, 0x68, 0x16, 0x1a, 0x33, 0x3f, 0x90, 0xc7, 0x78, 0x2d, 0x98, 0x3c, 0x2f, 0xb9, 0x2d, 0x94, 0x0b, 0xfb, 0x49, 0x56, 0x30, 0xd7, 0xc1, 0xe6, 0x48, + /* (2^367)P */ 0x7a, 0xd1, 0xe0, 0x8e, 0x67, 0xfc, 0x0b, 0x50, 0x1f, 0x84, 0x98, 0xfa, 0xaf, 0xae, 0x2e, 0x31, 0x27, 0xcf, 0x3f, 0xf2, 0x6e, 0x8d, 0x81, 0x8f, 0xd2, 0x5f, 0xde, 0xd3, 0x5e, 0xe9, 0xe7, 0x13, 0x48, 0x83, 0x5a, 0x4e, 0x84, 0xd1, 0x58, 0xcf, 0x6b, 0x84, 0xdf, 0x13, 0x1d, 0x91, 0x85, 0xe8, 0xcb, 0x29, 0x79, 0xd2, 0xca, 0xac, 0x6a, 0x93, + /* (2^368)P */ 0x53, 0x82, 0xce, 0x61, 0x96, 0x88, 0x6f, 0xe1, 0x4a, 0x4c, 0x1e, 0x30, 0x73, 0xe8, 0x74, 0xde, 0x40, 0x2b, 0xe0, 0xc4, 0xb5, 0xd8, 0x7c, 0x15, 0xe7, 0xe1, 0xb1, 0xe0, 0xd6, 0x88, 0xb1, 0x6a, 0x57, 0x19, 0x6a, 0x22, 0x66, 0x57, 0xf6, 0x8d, 0xfd, 0xc0, 0xf2, 0xa3, 0x03, 0x56, 0xfb, 0x2e, 0x75, 0x5e, 0xc7, 0x8e, 0x22, 0x96, 0x5c, 0x06, + /* (2^369)P */ 0x98, 0x7e, 0xbf, 0x3e, 0xbf, 0x24, 0x9d, 0x15, 0xd3, 0xf6, 0xd3, 0xd2, 0xf0, 0x11, 0xf2, 0xdb, 0x36, 0x23, 0x38, 0xf7, 0x1d, 0x71, 0x20, 0xd2, 0x54, 0x7f, 0x1e, 0x24, 0x8f, 0xe2, 0xaa, 0xf7, 0x3f, 0x6b, 0x41, 0x4e, 0xdc, 0x0e, 0xec, 0xe8, 0x35, 0x0a, 0x08, 0x6d, 0x89, 0x5b, 0x32, 0x91, 0x01, 0xb6, 0xe0, 0x2c, 0xc6, 0xa1, 0xbe, 0xb4, + /* (2^370)P */ 0x29, 0xf2, 0x1e, 0x1c, 0xdc, 0x68, 0x8a, 0x43, 0x87, 0x2c, 0x48, 0xb3, 0x9e, 0xed, 0xd2, 0x82, 0x46, 0xac, 0x2f, 0xef, 0x93, 0x34, 0x37, 0xca, 0x64, 0x8d, 
0xc9, 0x06, 0x90, 0xbb, 0x78, 0x0a, 0x3c, 0x4c, 0xcf, 0x35, 0x7a, 0x0f, 0xf7, 0xa7, 0xf4, 0x2f, 0x45, 0x69, 0x3f, 0xa9, 0x5d, 0xce, 0x7b, 0x8a, 0x84, 0xc3, 0xae, 0xf4, 0xda, 0xd5, + /* (2^371)P */ 0xca, 0xba, 0x95, 0x43, 0x05, 0x7b, 0x06, 0xd9, 0x5c, 0x0a, 0x18, 0x5f, 0x6a, 0x6a, 0xce, 0xc0, 0x3d, 0x95, 0x51, 0x0e, 0x1a, 0xbe, 0x85, 0x7a, 0xf2, 0x69, 0xec, 0xc0, 0x8c, 0xca, 0xa3, 0x32, 0x0a, 0x76, 0x50, 0xc6, 0x76, 0x61, 0x00, 0x89, 0xbf, 0x6e, 0x0f, 0x48, 0x90, 0x31, 0x93, 0xec, 0x34, 0x70, 0xf0, 0xc3, 0x8d, 0xf0, 0x0f, 0xb5, + /* (2^372)P */ 0xbe, 0x23, 0xe2, 0x18, 0x99, 0xf1, 0xed, 0x8a, 0xf6, 0xc9, 0xac, 0xb8, 0x1e, 0x9a, 0x3c, 0x15, 0xae, 0xd7, 0x6d, 0xb3, 0x04, 0xee, 0x5b, 0x0d, 0x1e, 0x79, 0xb7, 0xf9, 0xf9, 0x8d, 0xad, 0xf9, 0x8f, 0x5a, 0x6a, 0x7b, 0xd7, 0x9b, 0xca, 0x62, 0xfe, 0x9c, 0xc0, 0x6f, 0x6d, 0x9d, 0x76, 0xa3, 0x69, 0xb9, 0x4c, 0xa1, 0xc4, 0x0c, 0x76, 0xaa, + /* (2^373)P */ 0x1c, 0x06, 0xfe, 0x3f, 0x45, 0x70, 0xcd, 0x97, 0xa9, 0xa2, 0xb1, 0xd3, 0xf2, 0xa5, 0x0c, 0x49, 0x2c, 0x75, 0x73, 0x1f, 0xcf, 0x00, 0xaf, 0xd5, 0x2e, 0xde, 0x0d, 0x8f, 0x8f, 0x7c, 0xc4, 0x58, 0xce, 0xd4, 0xf6, 0x24, 0x19, 0x2e, 0xd8, 0xc5, 0x1d, 0x1a, 0x3f, 0xb8, 0x4f, 0xbc, 0x7d, 0xbd, 0x68, 0xe3, 0x81, 0x98, 0x1b, 0xa8, 0xc9, 0xd9, + /* (2^374)P */ 0x39, 0x95, 0x78, 0x24, 0x6c, 0x38, 0xe4, 0xe7, 0xd0, 0x8d, 0xb9, 0x38, 0x71, 0x5e, 0xc1, 0x62, 0x80, 0xcc, 0xcb, 0x8c, 0x97, 0xca, 0xf8, 0xb9, 0xd9, 0x9c, 0xce, 0x72, 0x7b, 0x70, 0xee, 0x5f, 0xea, 0xa2, 0xdf, 0xa9, 0x14, 0x10, 0xf9, 0x6e, 0x59, 0x9f, 0x9c, 0xe0, 0x0c, 0xb2, 0x07, 0x97, 0xcd, 0xd2, 0x89, 0x16, 0xfd, 0x9c, 0xa8, 0xa5, + /* (2^375)P */ 0x5a, 0x61, 0xf1, 0x59, 0x7c, 0x38, 0xda, 0xe2, 0x85, 0x99, 0x68, 0xe9, 0xc9, 0xf7, 0x32, 0x7e, 0xc4, 0xca, 0xb7, 0x11, 0x08, 0x69, 0x2b, 0x66, 0x02, 0xf7, 0x2e, 0x18, 0xc3, 0x8e, 0xe1, 0xf9, 0xc5, 0x19, 0x9a, 0x0a, 0x9c, 0x07, 0xba, 0xc7, 0x9c, 0x03, 0x34, 0x89, 0x99, 0x67, 0x0b, 0x16, 0x4b, 0x07, 0x36, 0x16, 0x36, 0x2c, 0xe2, 0xa1, + /* (2^376)P */ 0x70, 0x10, 0x91, 0x27, 0xa8, 0x24, 0x8e, 0x29, 0x04, 0x6f, 0x79, 0x1f, 0xd3, 0xa5, 0x68, 0xd3, 0x0b, 0x7d, 0x56, 0x4d, 0x14, 0x57, 0x7b, 0x2e, 0x00, 0x9f, 0x9a, 0xfd, 0x6c, 0x63, 0x18, 0x81, 0xdb, 0x9d, 0xb7, 0xd7, 0xa4, 0x1e, 0xe8, 0x40, 0xf1, 0x4c, 0xa3, 0x01, 0xd5, 0x4b, 0x75, 0xea, 0xdd, 0x97, 0xfd, 0x5b, 0xb2, 0x66, 0x6a, 0x24, + /* (2^377)P */ 0x72, 0x11, 0xfe, 0x73, 0x1b, 0xd3, 0xea, 0x7f, 0x93, 0x15, 0x15, 0x05, 0xfe, 0x40, 0xe8, 0x28, 0xd8, 0x50, 0x47, 0x66, 0xfa, 0xb7, 0xb5, 0x04, 0xba, 0x35, 0x1e, 0x32, 0x9f, 0x5f, 0x32, 0xba, 0x3d, 0xd1, 0xed, 0x9a, 0x76, 0xca, 0xa3, 0x3e, 0x77, 0xd8, 0xd8, 0x7c, 0x5f, 0x68, 0x42, 0xb5, 0x86, 0x7f, 0x3b, 0xc9, 0xc1, 0x89, 0x64, 0xda, + /* (2^378)P */ 0xd5, 0xd4, 0x17, 0x31, 0xfc, 0x6a, 0xfd, 0xb8, 0xe8, 0xe5, 0x3e, 0x39, 0x06, 0xe4, 0xd1, 0x90, 0x2a, 0xca, 0xf6, 0x54, 0x6c, 0x1b, 0x2f, 0x49, 0x97, 0xb1, 0x2a, 0x82, 0x43, 0x3d, 0x1f, 0x8b, 0xe2, 0x47, 0xc5, 0x24, 0xa8, 0xd5, 0x53, 0x29, 0x7d, 0xc6, 0x87, 0xa6, 0x25, 0x3a, 0x64, 0xdd, 0x71, 0x08, 0x9e, 0xcd, 0xe9, 0x45, 0xc7, 0xba, + /* (2^379)P */ 0x37, 0x72, 0x6d, 0x13, 0x7a, 0x8d, 0x04, 0x31, 0xe6, 0xe3, 0x9e, 0x36, 0x71, 0x3e, 0xc0, 0x1e, 0xe3, 0x71, 0xd3, 0x49, 0x4e, 0x4a, 0x36, 0x42, 0x68, 0x68, 0x61, 0xc7, 0x3c, 0xdb, 0x81, 0x49, 0xf7, 0x91, 0x4d, 0xea, 0x4c, 0x4f, 0x98, 0xc6, 0x7e, 0x60, 0x84, 0x4b, 0x6a, 0x37, 0xbb, 0x52, 0xf7, 0xce, 0x02, 0xe4, 0xad, 0xd1, 0x3c, 0xa7, + /* (2^380)P */ 0x51, 0x06, 0x2d, 0xf8, 0x08, 0xe8, 0xf1, 0x0c, 0xe5, 0xa9, 0xac, 0x29, 0x73, 0x3b, 0xed, 0x98, 0x5f, 0x55, 0x08, 0x38, 0x51, 0x44, 0x36, 0x5d, 0xea, 0xc3, 0xb8, 0x0e, 0xa0, 0x4f, 
0xd2, 0x79, 0xe9, 0x98, 0xc3, 0xf5, 0x00, 0xb9, 0x26, 0x27, 0x42, 0xa8, 0x07, 0xc1, 0x12, 0x31, 0xc1, 0xc3, 0x3c, 0x3b, 0x7a, 0x72, 0x97, 0xc2, 0x70, 0x3a, + /* (2^381)P */ 0xf4, 0xb2, 0xba, 0x32, 0xbc, 0xa9, 0x2f, 0x87, 0xc7, 0x3c, 0x45, 0xcd, 0xae, 0xe2, 0x13, 0x6d, 0x3a, 0xf2, 0xf5, 0x66, 0x97, 0x29, 0xaf, 0x53, 0x9f, 0xda, 0xea, 0x14, 0xdf, 0x04, 0x98, 0x19, 0x95, 0x9e, 0x2a, 0x00, 0x5c, 0x9d, 0x1d, 0xf0, 0x39, 0x23, 0xff, 0xfc, 0xca, 0x36, 0xb7, 0xde, 0xdf, 0x37, 0x78, 0x52, 0x21, 0xfa, 0x19, 0x10, + /* (2^382)P */ 0x50, 0x20, 0x73, 0x74, 0x62, 0x21, 0xf2, 0xf7, 0x9b, 0x66, 0x85, 0x34, 0x74, 0xd4, 0x9d, 0x60, 0xd7, 0xbc, 0xc8, 0x46, 0x3b, 0xb8, 0x80, 0x42, 0x15, 0x0a, 0x6c, 0x35, 0x1a, 0x69, 0xf0, 0x1d, 0x4b, 0x29, 0x54, 0x5a, 0x9a, 0x48, 0xec, 0x9f, 0x37, 0x74, 0x91, 0xd0, 0xd1, 0x9e, 0x00, 0xc2, 0x76, 0x56, 0xd6, 0xa0, 0x15, 0x14, 0x83, 0x59, + /* (2^383)P */ 0xc2, 0xf8, 0x22, 0x20, 0x23, 0x07, 0xbd, 0x1d, 0x6f, 0x1e, 0x8c, 0x56, 0x06, 0x6a, 0x4b, 0x9f, 0xe2, 0xa9, 0x92, 0x46, 0x4b, 0x46, 0x59, 0xd7, 0xe1, 0xda, 0x14, 0x98, 0x07, 0x65, 0x7e, 0x28, 0x20, 0xf2, 0x9d, 0x4f, 0x36, 0x5c, 0x92, 0xe0, 0x9d, 0xfe, 0x3e, 0xda, 0xe4, 0x47, 0x19, 0x3c, 0x00, 0x7f, 0x22, 0xf2, 0x9e, 0x51, 0xae, 0x4d, + /* (2^384)P */ 0xbe, 0x8c, 0x1b, 0x10, 0xb6, 0xad, 0xcc, 0xcc, 0xd8, 0x5e, 0x21, 0xa6, 0xfb, 0xf1, 0xf6, 0xbd, 0x0a, 0x24, 0x67, 0xb4, 0x57, 0x7a, 0xbc, 0xe8, 0xe9, 0xff, 0xee, 0x0a, 0x1f, 0xee, 0xbd, 0xc8, 0x44, 0xed, 0x2b, 0xbb, 0x55, 0x1f, 0xdd, 0x7c, 0xb3, 0xeb, 0x3f, 0x63, 0xa1, 0x28, 0x91, 0x21, 0xab, 0x71, 0xc6, 0x4c, 0xd0, 0xe9, 0xb0, 0x21, + /* (2^385)P */ 0xad, 0xc9, 0x77, 0x2b, 0xee, 0x89, 0xa4, 0x7b, 0xfd, 0xf9, 0xf6, 0x14, 0xe4, 0xed, 0x1a, 0x16, 0x9b, 0x78, 0x41, 0x43, 0xa8, 0x83, 0x72, 0x06, 0x2e, 0x7c, 0xdf, 0xeb, 0x7e, 0xdd, 0xd7, 0x8b, 0xea, 0x9a, 0x2b, 0x03, 0xba, 0x57, 0xf3, 0xf1, 0xd9, 0xe5, 0x09, 0xc5, 0x98, 0x61, 0x1c, 0x51, 0x6d, 0x5d, 0x6e, 0xfb, 0x5e, 0x95, 0x9f, 0xb5, + /* (2^386)P */ 0x23, 0xe2, 0x1e, 0x95, 0xa3, 0x5e, 0x42, 0x10, 0xc7, 0xc3, 0x70, 0xbf, 0x4b, 0x6b, 0x83, 0x36, 0x93, 0xb7, 0x68, 0x47, 0x88, 0x3a, 0x10, 0x88, 0x48, 0x7f, 0x8c, 0xae, 0x54, 0x10, 0x02, 0xa4, 0x52, 0x8f, 0x8d, 0xf7, 0x26, 0x4f, 0x50, 0xc3, 0x6a, 0xe2, 0x4e, 0x3b, 0x4c, 0xb9, 0x8a, 0x14, 0x15, 0x6d, 0x21, 0x29, 0xb3, 0x6e, 0x4e, 0xd0, + /* (2^387)P */ 0x4c, 0x8a, 0x18, 0x3f, 0xb7, 0x20, 0xfd, 0x3e, 0x54, 0xca, 0x68, 0x3c, 0xea, 0x6f, 0xf4, 0x6b, 0xa2, 0xbd, 0x01, 0xbd, 0xfe, 0x08, 0xa8, 0xd8, 0xc2, 0x20, 0x36, 0x05, 0xcd, 0xe9, 0xf3, 0x9e, 0xfa, 0x85, 0x66, 0x8f, 0x4b, 0x1d, 0x8c, 0x64, 0x4f, 0xb8, 0xc6, 0x0f, 0x5b, 0x57, 0xd8, 0x24, 0x19, 0x5a, 0x14, 0x4b, 0x92, 0xd3, 0x96, 0xbc, + /* (2^388)P */ 0xa9, 0x3f, 0xc9, 0x6c, 0xca, 0x64, 0x1e, 0x6f, 0xdf, 0x65, 0x7f, 0x9a, 0x47, 0x6b, 0x8a, 0x60, 0x31, 0xa6, 0x06, 0xac, 0x69, 0x30, 0xe6, 0xea, 0x63, 0x42, 0x26, 0x5f, 0xdb, 0xd0, 0xf2, 0x8e, 0x34, 0x0a, 0x3a, 0xeb, 0xf3, 0x79, 0xc8, 0xb7, 0x60, 0x56, 0x5c, 0x37, 0x95, 0x71, 0xf8, 0x7f, 0x49, 0x3e, 0x9e, 0x01, 0x26, 0x1e, 0x80, 0x9f, + /* (2^389)P */ 0xf8, 0x16, 0x9a, 0xaa, 0xb0, 0x28, 0xb5, 0x8e, 0xd0, 0x60, 0xe5, 0x26, 0xa9, 0x47, 0xc4, 0x5c, 0xa9, 0x39, 0xfe, 0x0a, 0xd8, 0x07, 0x2b, 0xb3, 0xce, 0xf1, 0xea, 0x1a, 0xf4, 0x7b, 0x98, 0x31, 0x3d, 0x13, 0x29, 0x80, 0xe8, 0x0d, 0xcf, 0x56, 0x39, 0x86, 0x50, 0x0c, 0xb3, 0x18, 0xf4, 0xc5, 0xca, 0xf2, 0x6f, 0xcd, 0x8d, 0xd5, 0x02, 0xb0, + /* (2^390)P */ 0xbf, 0x39, 0x3f, 0xac, 0x6d, 0x1a, 0x6a, 0xe4, 0x42, 0x24, 0xd6, 0x41, 0x9d, 0xb9, 0x5b, 0x46, 0x73, 0x93, 0x76, 0xaa, 0xb7, 0x37, 0x36, 0xa6, 0x09, 0xe5, 0x04, 0x3b, 0x66, 0xc4, 0x29, 0x3e, 0x41, 0xc2, 
0xcb, 0xe5, 0x17, 0xd7, 0x34, 0x67, 0x1d, 0x2c, 0x12, 0xec, 0x24, 0x7a, 0x40, 0xa2, 0x45, 0x41, 0xf0, 0x75, 0xed, 0x43, 0x30, 0xc9, + /* (2^391)P */ 0x80, 0xf6, 0x47, 0x5b, 0xad, 0x54, 0x02, 0xbc, 0xdd, 0xa4, 0xb2, 0xd7, 0x42, 0x95, 0xf2, 0x0d, 0x1b, 0xef, 0x37, 0xa7, 0xb4, 0x34, 0x04, 0x08, 0x71, 0x1b, 0xd3, 0xdf, 0xa1, 0xf0, 0x2b, 0xfa, 0xc0, 0x1f, 0xf3, 0x44, 0xb5, 0xc6, 0x47, 0x3d, 0x65, 0x67, 0x45, 0x4d, 0x2f, 0xde, 0x52, 0x73, 0xfc, 0x30, 0x01, 0x6b, 0xc1, 0x03, 0xd8, 0xd7, + /* (2^392)P */ 0x1c, 0x67, 0x55, 0x3e, 0x01, 0x17, 0x0f, 0x3e, 0xe5, 0x34, 0x58, 0xfc, 0xcb, 0x71, 0x24, 0x74, 0x5d, 0x36, 0x1e, 0x89, 0x2a, 0x63, 0xf8, 0xf8, 0x9f, 0x50, 0x9f, 0x32, 0x92, 0x29, 0xd8, 0x1a, 0xec, 0x76, 0x57, 0x6c, 0x67, 0x12, 0x6a, 0x6e, 0xef, 0x97, 0x1f, 0xc3, 0x77, 0x60, 0x3c, 0x22, 0xcb, 0xc7, 0x04, 0x1a, 0x89, 0x2d, 0x10, 0xa6, + /* (2^393)P */ 0x12, 0xf5, 0xa9, 0x26, 0x16, 0xd9, 0x3c, 0x65, 0x5d, 0x83, 0xab, 0xd1, 0x70, 0x6b, 0x1c, 0xdb, 0xe7, 0x86, 0x0d, 0xfb, 0xe7, 0xf8, 0x2a, 0x58, 0x6e, 0x7a, 0x66, 0x13, 0x53, 0x3a, 0x6f, 0x8d, 0x43, 0x5f, 0x14, 0x23, 0x14, 0xff, 0x3d, 0x52, 0x7f, 0xee, 0xbd, 0x7a, 0x34, 0x8b, 0x35, 0x24, 0xc3, 0x7a, 0xdb, 0xcf, 0x22, 0x74, 0x9a, 0x8f, + /* (2^394)P */ 0xdb, 0x20, 0xfc, 0xe5, 0x39, 0x4e, 0x7d, 0x78, 0xee, 0x0b, 0xbf, 0x1d, 0x80, 0xd4, 0x05, 0x4f, 0xb9, 0xd7, 0x4e, 0x94, 0x88, 0x9a, 0x50, 0x78, 0x1a, 0x70, 0x8c, 0xcc, 0x25, 0xb6, 0x61, 0x09, 0xdc, 0x7b, 0xea, 0x3f, 0x7f, 0xea, 0x2a, 0x0d, 0x47, 0x1c, 0x8e, 0xa6, 0x5b, 0xd2, 0xa3, 0x61, 0x93, 0x3c, 0x68, 0x9f, 0x8b, 0xea, 0xb0, 0xcb, + /* (2^395)P */ 0xff, 0x54, 0x02, 0x19, 0xae, 0x8b, 0x4c, 0x2c, 0x3a, 0xe0, 0xe4, 0xac, 0x87, 0xf7, 0x51, 0x45, 0x41, 0x43, 0xdc, 0xaa, 0xcd, 0xcb, 0xdc, 0x40, 0xe3, 0x44, 0x3b, 0x1d, 0x9e, 0x3d, 0xb9, 0x82, 0xcc, 0x7a, 0xc5, 0x12, 0xf8, 0x1e, 0xdd, 0xdb, 0x8d, 0xb0, 0x2a, 0xe8, 0xe6, 0x6c, 0x94, 0x3b, 0xb7, 0x2d, 0xba, 0x79, 0x3b, 0xb5, 0x86, 0xfb, + /* (2^396)P */ 0x82, 0x88, 0x13, 0xdd, 0x6c, 0xcd, 0x85, 0x2b, 0x90, 0x86, 0xb7, 0xac, 0x16, 0xa6, 0x6e, 0x6a, 0x94, 0xd8, 0x1e, 0x4e, 0x41, 0x0f, 0xce, 0x81, 0x6a, 0xa8, 0x26, 0x56, 0x43, 0x52, 0x52, 0xe6, 0xff, 0x88, 0xcf, 0x47, 0x05, 0x1d, 0xff, 0xf3, 0xa0, 0x10, 0xb2, 0x97, 0x87, 0xeb, 0x47, 0xbb, 0xfa, 0x1f, 0xe8, 0x4c, 0xce, 0xc4, 0xcd, 0x93, + /* (2^397)P */ 0xf4, 0x11, 0xf5, 0x8d, 0x89, 0x29, 0x79, 0xb3, 0x59, 0x0b, 0x29, 0x7d, 0x9c, 0x12, 0x4a, 0x65, 0x72, 0x3a, 0xf9, 0xec, 0x37, 0x18, 0x86, 0xef, 0x44, 0x07, 0x25, 0x74, 0x76, 0x53, 0xed, 0x51, 0x01, 0xc6, 0x28, 0xc5, 0xc3, 0x4a, 0x0f, 0x99, 0xec, 0xc8, 0x40, 0x5a, 0x83, 0x30, 0x79, 0xa2, 0x3e, 0x63, 0x09, 0x2d, 0x6f, 0x23, 0x54, 0x1c, + /* (2^398)P */ 0x5c, 0x6f, 0x3b, 0x1c, 0x30, 0x77, 0x7e, 0x87, 0x66, 0x83, 0x2e, 0x7e, 0x85, 0x50, 0xfd, 0xa0, 0x7a, 0xc2, 0xf5, 0x0f, 0xc1, 0x64, 0xe7, 0x0b, 0xbd, 0x59, 0xa7, 0xe7, 0x65, 0x53, 0xc3, 0xf5, 0x55, 0x5b, 0xe1, 0x82, 0x30, 0x5a, 0x61, 0xcd, 0xa0, 0x89, 0x32, 0xdb, 0x87, 0xfc, 0x21, 0x8a, 0xab, 0x6d, 0x82, 0xa8, 0x42, 0x81, 0x4f, 0xf2, + /* (2^399)P */ 0xb3, 0xeb, 0x88, 0x18, 0xf6, 0x56, 0x96, 0xbf, 0xba, 0x5d, 0x71, 0xa1, 0x5a, 0xd1, 0x04, 0x7b, 0xd5, 0x46, 0x01, 0x74, 0xfe, 0x15, 0x25, 0xb7, 0xff, 0x0c, 0x24, 0x47, 0xac, 0xfd, 0xab, 0x47, 0x32, 0xe1, 0x6a, 0x4e, 0xca, 0xcf, 0x7f, 0xdd, 0xf8, 0xd2, 0x4b, 0x3b, 0xf5, 0x17, 0xba, 0xba, 0x8b, 0xa1, 0xec, 0x28, 0x3f, 0x97, 0xab, 0x2a, + /* (2^400)P */ 0x51, 0x38, 0xc9, 0x5e, 0xc6, 0xb3, 0x64, 0xf2, 0x24, 0x4d, 0x04, 0x7d, 0xc8, 0x39, 0x0c, 0x4a, 0xc9, 0x73, 0x74, 0x1b, 0x5c, 0xb2, 0xc5, 0x41, 0x62, 0xa0, 0x4c, 0x6d, 0x8d, 0x91, 0x9a, 0x7b, 0x88, 0xab, 0x9c, 0x7e, 0x23, 0xdb, 
0x6f, 0xb5, 0x72, 0xd6, 0x47, 0x40, 0xef, 0x22, 0x58, 0x62, 0x19, 0x6c, 0x38, 0xba, 0x5b, 0x00, 0x30, 0x9f, + /* (2^401)P */ 0x65, 0xbb, 0x3b, 0x9b, 0xe9, 0xae, 0xbf, 0xbe, 0xe4, 0x13, 0x95, 0xf3, 0xe3, 0x77, 0xcb, 0xe4, 0x9a, 0x22, 0xb5, 0x4a, 0x08, 0x9d, 0xb3, 0x9e, 0x27, 0xe0, 0x15, 0x6c, 0x9f, 0x7e, 0x9a, 0x5e, 0x15, 0x45, 0x25, 0x8d, 0x01, 0x0a, 0xd2, 0x2b, 0xbd, 0x48, 0x06, 0x0d, 0x18, 0x97, 0x4b, 0xdc, 0xbc, 0xf0, 0xcd, 0xb2, 0x52, 0x3c, 0xac, 0xf5, + /* (2^402)P */ 0x3e, 0xed, 0x47, 0x6b, 0x5c, 0xf6, 0x76, 0xd0, 0xe9, 0x15, 0xa3, 0xcb, 0x36, 0x00, 0x21, 0xa3, 0x79, 0x20, 0xa5, 0x3e, 0x88, 0x03, 0xcb, 0x7e, 0x63, 0xbb, 0xed, 0xa9, 0x13, 0x35, 0x16, 0xaf, 0x2e, 0xb4, 0x70, 0x14, 0x93, 0xfb, 0xc4, 0x9b, 0xd8, 0xb1, 0xbe, 0x43, 0xd1, 0x85, 0xb8, 0x97, 0xef, 0xea, 0x88, 0xa1, 0x25, 0x52, 0x62, 0x75, + /* (2^403)P */ 0x8e, 0x4f, 0xaa, 0x23, 0x62, 0x7e, 0x2b, 0x37, 0x89, 0x00, 0x11, 0x30, 0xc5, 0x33, 0x4a, 0x89, 0x8a, 0xe2, 0xfc, 0x5c, 0x6a, 0x75, 0xe5, 0xf7, 0x02, 0x4a, 0x9b, 0xf7, 0xb5, 0x6a, 0x85, 0x31, 0xd3, 0x5a, 0xcf, 0xc3, 0xf8, 0xde, 0x2f, 0xcf, 0xb5, 0x24, 0xf4, 0xe3, 0xa1, 0xad, 0x42, 0xae, 0x09, 0xb9, 0x2e, 0x04, 0x2d, 0x01, 0x22, 0x3f, + /* (2^404)P */ 0x41, 0x16, 0xfb, 0x7d, 0x50, 0xfd, 0xb5, 0xba, 0x88, 0x24, 0xba, 0xfd, 0x3d, 0xb2, 0x90, 0x15, 0xb7, 0xfa, 0xa2, 0xe1, 0x4c, 0x7d, 0xb9, 0xc6, 0xff, 0x81, 0x57, 0xb6, 0xc2, 0x9e, 0xcb, 0xc4, 0x35, 0xbd, 0x01, 0xb7, 0xaa, 0xce, 0xd0, 0xe9, 0xb5, 0xd6, 0x72, 0xbf, 0xd2, 0xee, 0xc7, 0xac, 0x94, 0xff, 0x29, 0x57, 0x02, 0x49, 0x09, 0xad, + /* (2^405)P */ 0x27, 0xa5, 0x78, 0x1b, 0xbf, 0x6b, 0xaf, 0x0b, 0x8c, 0xd9, 0xa8, 0x37, 0xb0, 0x67, 0x18, 0xb6, 0xc7, 0x05, 0x8a, 0x67, 0x03, 0x30, 0x62, 0x6e, 0x56, 0x82, 0xa9, 0x54, 0x3e, 0x0c, 0x4e, 0x07, 0xe1, 0x5a, 0x38, 0xed, 0xfa, 0xc8, 0x55, 0x6b, 0x08, 0xa3, 0x6b, 0x64, 0x2a, 0x15, 0xd6, 0x39, 0x6f, 0x47, 0x99, 0x42, 0x3f, 0x33, 0x84, 0x8f, + /* (2^406)P */ 0xbc, 0x45, 0x29, 0x81, 0x0e, 0xa4, 0xc5, 0x72, 0x3a, 0x10, 0xe1, 0xc4, 0x1e, 0xda, 0xc3, 0xfe, 0xb0, 0xce, 0xd2, 0x13, 0x34, 0x67, 0x21, 0xc6, 0x7e, 0xf9, 0x8c, 0xff, 0x39, 0x50, 0xae, 0x92, 0x60, 0x35, 0x2f, 0x8b, 0x6e, 0xc9, 0xc1, 0x27, 0x3a, 0x94, 0x66, 0x3e, 0x26, 0x84, 0x93, 0xc8, 0x6c, 0xcf, 0xd2, 0x03, 0xa1, 0x10, 0xcf, 0xb7, + /* (2^407)P */ 0x64, 0xda, 0x19, 0xf6, 0xc5, 0x73, 0x17, 0x44, 0x88, 0x81, 0x07, 0x0d, 0x34, 0xb2, 0x75, 0xf9, 0xd9, 0xe2, 0xe0, 0x8b, 0x71, 0xcf, 0x72, 0x34, 0x83, 0xb4, 0xce, 0xfc, 0xd7, 0x29, 0x09, 0x5a, 0x98, 0xbf, 0x14, 0xac, 0x77, 0x55, 0x38, 0x47, 0x5b, 0x0f, 0x40, 0x24, 0xe5, 0xa5, 0xa6, 0xac, 0x2d, 0xa6, 0xff, 0x9c, 0x73, 0xfe, 0x5c, 0x7e, + /* (2^408)P */ 0x1e, 0x33, 0xcc, 0x68, 0xb2, 0xbc, 0x8c, 0x93, 0xaf, 0xcc, 0x38, 0xf8, 0xd9, 0x16, 0x72, 0x50, 0xac, 0xd9, 0xb5, 0x0b, 0x9a, 0xbe, 0x46, 0x7a, 0xf1, 0xee, 0xf1, 0xad, 0xec, 0x5b, 0x59, 0x27, 0x9c, 0x05, 0xa3, 0x87, 0xe0, 0x37, 0x2c, 0x83, 0xce, 0xb3, 0x65, 0x09, 0x8e, 0xc3, 0x9c, 0xbf, 0x6a, 0xa2, 0x00, 0xcc, 0x12, 0x36, 0xc5, 0x95, + /* (2^409)P */ 0x36, 0x11, 0x02, 0x14, 0x9c, 0x3c, 0xeb, 0x2f, 0x23, 0x5b, 0x6b, 0x2b, 0x08, 0x54, 0x53, 0xac, 0xb2, 0xa3, 0xe0, 0x26, 0x62, 0x3c, 0xe4, 0xe1, 0x81, 0xee, 0x13, 0x3e, 0xa4, 0x97, 0xef, 0xf9, 0x92, 0x27, 0x01, 0xce, 0x54, 0x8b, 0x3e, 0x31, 0xbe, 0xa7, 0x88, 0xcf, 0x47, 0x99, 0x3c, 0x10, 0x6f, 0x60, 0xb3, 0x06, 0x4e, 0xee, 0x1b, 0xf0, + /* (2^410)P */ 0x59, 0x49, 0x66, 0xcf, 0x22, 0xe6, 0xf6, 0x73, 0xfe, 0xa3, 0x1c, 0x09, 0xfa, 0x5f, 0x65, 0xa8, 0xf0, 0x82, 0xc2, 0xef, 0x16, 0x63, 0x6e, 0x79, 0x69, 0x51, 0x39, 0x07, 0x65, 0xc4, 0x81, 0xec, 0x73, 0x0f, 0x15, 0x93, 0xe1, 0x30, 0x33, 0xe9, 0x37, 0x86, 
0x42, 0x4c, 0x1f, 0x9b, 0xad, 0xee, 0x3f, 0xf1, 0x2a, 0x8e, 0x6a, 0xa3, 0xc8, 0x35, + /* (2^411)P */ 0x1e, 0x49, 0xf1, 0xdd, 0xd2, 0x9c, 0x8e, 0x78, 0xb2, 0x06, 0xe4, 0x6a, 0xab, 0x3a, 0xdc, 0xcd, 0xf4, 0xeb, 0xe1, 0xe7, 0x2f, 0xaa, 0xeb, 0x40, 0x31, 0x9f, 0xb9, 0xab, 0x13, 0xa9, 0x78, 0xbf, 0x38, 0x89, 0x0e, 0x85, 0x14, 0x8b, 0x46, 0x76, 0x14, 0xda, 0xcf, 0x33, 0xc8, 0x79, 0xd3, 0xd5, 0xa3, 0x6a, 0x69, 0x45, 0x70, 0x34, 0xc3, 0xe9, + /* (2^412)P */ 0x5e, 0xe7, 0x78, 0xe9, 0x24, 0xcc, 0xe9, 0xf4, 0xc8, 0x6b, 0xe0, 0xfb, 0x3a, 0xbe, 0xcc, 0x42, 0x4a, 0x00, 0x22, 0xf8, 0xe6, 0x32, 0xbe, 0x6d, 0x18, 0x55, 0x60, 0xe9, 0x72, 0x69, 0x50, 0x56, 0xca, 0x04, 0x18, 0x38, 0xa1, 0xee, 0xd8, 0x38, 0x3c, 0xa7, 0x70, 0xe2, 0xb9, 0x4c, 0xa0, 0xc8, 0x89, 0x72, 0xcf, 0x49, 0x7f, 0xdf, 0xbc, 0x67, + /* (2^413)P */ 0x1d, 0x17, 0xcb, 0x0b, 0xbd, 0xb2, 0x36, 0xe3, 0xa8, 0x99, 0x31, 0xb6, 0x26, 0x9c, 0x0c, 0x74, 0xaf, 0x4d, 0x24, 0x61, 0xcf, 0x31, 0x7b, 0xed, 0xdd, 0xc3, 0xf6, 0x32, 0x70, 0xfe, 0x17, 0xf6, 0x51, 0x37, 0x65, 0xce, 0x5d, 0xaf, 0xa5, 0x2f, 0x2a, 0xfe, 0x00, 0x71, 0x7c, 0x50, 0xbe, 0x21, 0xc7, 0xed, 0xc6, 0xfc, 0x67, 0xcf, 0x9c, 0xdd, + /* (2^414)P */ 0x26, 0x3e, 0xf8, 0xbb, 0xd0, 0xb1, 0x01, 0xd8, 0xeb, 0x0b, 0x62, 0x87, 0x35, 0x4c, 0xde, 0xca, 0x99, 0x9c, 0x6d, 0xf7, 0xb6, 0xf0, 0x57, 0x0a, 0x52, 0x29, 0x6a, 0x3f, 0x26, 0x31, 0x04, 0x07, 0x2a, 0xc9, 0xfa, 0x9b, 0x0e, 0x62, 0x8e, 0x72, 0xf2, 0xad, 0xce, 0xb6, 0x35, 0x7a, 0xc1, 0xae, 0x35, 0xc7, 0xa3, 0x14, 0xcf, 0x0c, 0x28, 0xb7, + /* (2^415)P */ 0xa6, 0xf1, 0x32, 0x3a, 0x20, 0xd2, 0x24, 0x97, 0xcf, 0x5d, 0x37, 0x99, 0xaf, 0x33, 0x7a, 0x5b, 0x7a, 0xcc, 0x4e, 0x41, 0x38, 0xb1, 0x4e, 0xad, 0xc9, 0xd9, 0x71, 0x7e, 0xb2, 0xf5, 0xd5, 0x01, 0x6c, 0x4d, 0xfd, 0xa1, 0xda, 0x03, 0x38, 0x9b, 0x3d, 0x92, 0x92, 0xf2, 0xca, 0xbf, 0x1f, 0x24, 0xa4, 0xbb, 0x30, 0x6a, 0x74, 0x56, 0xc8, 0xce, + /* (2^416)P */ 0x27, 0xf4, 0xed, 0xc9, 0xc3, 0xb1, 0x79, 0x85, 0xbe, 0xf6, 0xeb, 0xf3, 0x55, 0xc7, 0xaa, 0xa6, 0xe9, 0x07, 0x5d, 0xf4, 0xeb, 0xa6, 0x81, 0xe3, 0x0e, 0xcf, 0xa3, 0xc1, 0xef, 0xe7, 0x34, 0xb2, 0x03, 0x73, 0x8a, 0x91, 0xf1, 0xad, 0x05, 0xc7, 0x0b, 0x43, 0x99, 0x12, 0x31, 0xc8, 0xc7, 0xc5, 0xa4, 0x3d, 0xcd, 0xe5, 0x4e, 0x6d, 0x24, 0xdd, + /* (2^417)P */ 0x61, 0x54, 0xd0, 0x95, 0x2c, 0x45, 0x75, 0xac, 0xb5, 0x1a, 0x9d, 0x11, 0xeb, 0xed, 0x6b, 0x57, 0xa3, 0xe6, 0xcd, 0x77, 0xd4, 0x83, 0x8e, 0x39, 0xf1, 0x0f, 0x98, 0xcb, 0x40, 0x02, 0x6e, 0x10, 0x82, 0x9e, 0xb4, 0x93, 0x76, 0xd7, 0x97, 0xa3, 0x53, 0x12, 0x86, 0xc6, 0x15, 0x78, 0x73, 0x93, 0xe7, 0x7f, 0xcf, 0x1f, 0xbf, 0xcd, 0xd2, 0x7a, + /* (2^418)P */ 0xc2, 0x21, 0xdc, 0xd5, 0x69, 0xff, 0xca, 0x49, 0x3a, 0xe1, 0xc3, 0x69, 0x41, 0x56, 0xc1, 0x76, 0x63, 0x24, 0xbd, 0x64, 0x1b, 0x3d, 0x92, 0xf9, 0x13, 0x04, 0x25, 0xeb, 0x27, 0xa6, 0xef, 0x39, 0x3a, 0x80, 0xe0, 0xf8, 0x27, 0xee, 0xc9, 0x49, 0x77, 0xef, 0x3f, 0x29, 0x3d, 0x5e, 0xe6, 0x66, 0x83, 0xd1, 0xf6, 0xfe, 0x9d, 0xbc, 0xf1, 0x96, + /* (2^419)P */ 0x6b, 0xc6, 0x99, 0x26, 0x3c, 0xf3, 0x63, 0xf9, 0xc7, 0x29, 0x8c, 0x52, 0x62, 0x2d, 0xdc, 0x8a, 0x66, 0xce, 0x2c, 0xa7, 0xe4, 0xf0, 0xd7, 0x37, 0x17, 0x1e, 0xe4, 0xa3, 0x53, 0x7b, 0x29, 0x8e, 0x60, 0x99, 0xf9, 0x0c, 0x7c, 0x6f, 0xa2, 0xcc, 0x9f, 0x80, 0xdd, 0x5e, 0x46, 0xaa, 0x0d, 0x6c, 0xc9, 0x6c, 0xf7, 0x78, 0x5b, 0x38, 0xe3, 0x24, + /* (2^420)P */ 0x4b, 0x75, 0x6a, 0x2f, 0x08, 0xe1, 0x72, 0x76, 0xab, 0x82, 0x96, 0xdf, 0x3b, 0x1f, 0x9b, 0xd8, 0xed, 0xdb, 0xcd, 0x15, 0x09, 0x5a, 0x1e, 0xb7, 0xc5, 0x26, 0x72, 0x07, 0x0c, 0x50, 0xcd, 0x3b, 0x4d, 0x3f, 0xa2, 0x67, 0xc2, 0x02, 0x61, 0x2e, 0x68, 0xe9, 0x6f, 0xf0, 0x21, 0x2a, 
0xa7, 0x3b, 0x88, 0x04, 0x11, 0x64, 0x49, 0x0d, 0xb4, 0x46, + /* (2^421)P */ 0x63, 0x85, 0xf3, 0xc5, 0x2b, 0x5a, 0x9f, 0xf0, 0x17, 0xcb, 0x45, 0x0a, 0xf3, 0x6e, 0x7e, 0xb0, 0x7c, 0xbc, 0xf0, 0x4f, 0x3a, 0xb0, 0xbc, 0x36, 0x36, 0x52, 0x51, 0xcb, 0xfe, 0x9a, 0xcb, 0xe8, 0x7e, 0x4b, 0x06, 0x7f, 0xaa, 0x35, 0xc8, 0x0e, 0x7a, 0x30, 0xa3, 0xb1, 0x09, 0xbb, 0x86, 0x4c, 0xbe, 0xb8, 0xbd, 0xe0, 0x32, 0xa5, 0xd4, 0xf7, + /* (2^422)P */ 0x7d, 0x50, 0x37, 0x68, 0x4e, 0x22, 0xb2, 0x2c, 0xd5, 0x0f, 0x2b, 0x6d, 0xb1, 0x51, 0xf2, 0x82, 0xe9, 0x98, 0x7c, 0x50, 0xc7, 0x96, 0x7e, 0x0e, 0xdc, 0xb1, 0x0e, 0xb2, 0x63, 0x8c, 0x30, 0x37, 0x72, 0x21, 0x9c, 0x61, 0xc2, 0xa7, 0x33, 0xd9, 0xb2, 0x63, 0x93, 0xd1, 0x6b, 0x6a, 0x73, 0xa5, 0x58, 0x80, 0xff, 0x04, 0xc7, 0x83, 0x21, 0x29, + /* (2^423)P */ 0x29, 0x04, 0xbc, 0x99, 0x39, 0xc9, 0x58, 0xc9, 0x6b, 0x17, 0xe8, 0x90, 0xb3, 0xe6, 0xa9, 0xb6, 0x28, 0x9b, 0xcb, 0x3b, 0x28, 0x90, 0x68, 0x71, 0xff, 0xcf, 0x08, 0x78, 0xc9, 0x8d, 0xa8, 0x4e, 0x43, 0xd1, 0x1c, 0x9e, 0xa4, 0xe3, 0xdf, 0xbf, 0x92, 0xf4, 0xf9, 0x41, 0xba, 0x4d, 0x1c, 0xf9, 0xdd, 0x74, 0x76, 0x1c, 0x6e, 0x3e, 0x94, 0x87, + /* (2^424)P */ 0xe4, 0xda, 0xc5, 0xd7, 0xfb, 0x87, 0xc5, 0x4d, 0x6b, 0x19, 0xaa, 0xb9, 0xbc, 0x8c, 0xf2, 0x8a, 0xd8, 0x5d, 0xdb, 0x4d, 0xef, 0xa6, 0xf2, 0x65, 0xf1, 0x22, 0x9c, 0xf1, 0x46, 0x30, 0x71, 0x7c, 0xe4, 0x53, 0x8e, 0x55, 0x2e, 0x9c, 0x9a, 0x31, 0x2a, 0xc3, 0xab, 0x0f, 0xde, 0xe4, 0xbe, 0xd8, 0x96, 0x50, 0x6e, 0x0c, 0x54, 0x49, 0xe6, 0xec, + /* (2^425)P */ 0x3c, 0x1d, 0x5a, 0xa5, 0xda, 0xad, 0xdd, 0xc2, 0xae, 0xac, 0x6f, 0x86, 0x75, 0x31, 0x91, 0x64, 0x45, 0x9d, 0xa4, 0xf0, 0x81, 0xf1, 0x0e, 0xba, 0x74, 0xaf, 0x7b, 0xcd, 0x6f, 0xfe, 0xac, 0x4e, 0xdb, 0x4e, 0x45, 0x35, 0x36, 0xc5, 0xc0, 0x6c, 0x3d, 0x64, 0xf4, 0xd8, 0x07, 0x62, 0xd1, 0xec, 0xf3, 0xfc, 0x93, 0xc9, 0x28, 0x0c, 0x2c, 0xf3, + /* (2^426)P */ 0x0c, 0x69, 0x2b, 0x5c, 0xb6, 0x41, 0x69, 0xf1, 0xa4, 0xf1, 0x5b, 0x75, 0x4c, 0x42, 0x8b, 0x47, 0xeb, 0x69, 0xfb, 0xa8, 0xe6, 0xf9, 0x7b, 0x48, 0x50, 0xaf, 0xd3, 0xda, 0xb2, 0x35, 0x10, 0xb5, 0x5b, 0x40, 0x90, 0x39, 0xc9, 0x07, 0x06, 0x73, 0x26, 0x20, 0x95, 0x01, 0xa4, 0x2d, 0xf0, 0xe7, 0x2e, 0x00, 0x7d, 0x41, 0x09, 0x68, 0x13, 0xc4, + /* (2^427)P */ 0xbe, 0x38, 0x78, 0xcf, 0xc9, 0x4f, 0x36, 0xca, 0x09, 0x61, 0x31, 0x3c, 0x57, 0x2e, 0xec, 0x17, 0xa4, 0x7d, 0x19, 0x2b, 0x9b, 0x5b, 0xbe, 0x8f, 0xd6, 0xc5, 0x2f, 0x86, 0xf2, 0x64, 0x76, 0x17, 0x00, 0x6e, 0x1a, 0x8c, 0x67, 0x1b, 0x68, 0xeb, 0x15, 0xa2, 0xd6, 0x09, 0x91, 0xdd, 0x23, 0x0d, 0x98, 0xb2, 0x10, 0x19, 0x55, 0x9b, 0x63, 0xf2, + /* (2^428)P */ 0x51, 0x1f, 0x93, 0xea, 0x2a, 0x3a, 0xfa, 0x41, 0xc0, 0x57, 0xfb, 0x74, 0xa6, 0x65, 0x09, 0x56, 0x14, 0xb6, 0x12, 0xaa, 0xb3, 0x1a, 0x8d, 0x3b, 0x76, 0x91, 0x7a, 0x23, 0x56, 0x9c, 0x6a, 0xc0, 0xe0, 0x3c, 0x3f, 0xb5, 0x1a, 0xf4, 0x57, 0x71, 0x93, 0x2b, 0xb1, 0xa7, 0x70, 0x57, 0x22, 0x80, 0xf5, 0xb8, 0x07, 0x77, 0x87, 0x0c, 0xbe, 0x83, + /* (2^429)P */ 0x07, 0x9b, 0x0e, 0x52, 0x38, 0x63, 0x13, 0x86, 0x6a, 0xa6, 0xb4, 0xd2, 0x60, 0x68, 0x9a, 0x99, 0x82, 0x0a, 0x04, 0x5f, 0x89, 0x7a, 0x1a, 0x2a, 0xae, 0x2d, 0x35, 0x0c, 0x1e, 0xad, 0xef, 0x4f, 0x9a, 0xfc, 0xc8, 0xd9, 0xcf, 0x9d, 0x48, 0x71, 0xa5, 0x55, 0x79, 0x73, 0x39, 0x1b, 0xd8, 0x73, 0xec, 0x9b, 0x03, 0x16, 0xd8, 0x82, 0xf7, 0x67, + /* (2^430)P */ 0x52, 0x67, 0x42, 0x21, 0xc9, 0x40, 0x78, 0x82, 0x2b, 0x95, 0x2d, 0x20, 0x92, 0xd1, 0xe2, 0x61, 0x25, 0xb0, 0xc6, 0x9c, 0x20, 0x59, 0x8e, 0x28, 0x6f, 0xf3, 0xfd, 0xd3, 0xc1, 0x32, 0x43, 0xc9, 0xa6, 0x08, 0x7a, 0x77, 0x9c, 0x4c, 0x8c, 0x33, 0x71, 0x13, 0x69, 0xe3, 0x52, 0x30, 0xa7, 0xf5, 0x07, 0x67, 
0xac, 0xad, 0x46, 0x8a, 0x26, 0x25, + /* (2^431)P */ 0xda, 0x86, 0xc4, 0xa2, 0x71, 0x56, 0xdd, 0xd2, 0x48, 0xd3, 0xde, 0x42, 0x63, 0x01, 0xa7, 0x2c, 0x92, 0x83, 0x6f, 0x2e, 0xd8, 0x1e, 0x3f, 0xc1, 0xc5, 0x42, 0x4e, 0x34, 0x19, 0x54, 0x6e, 0x35, 0x2c, 0x51, 0x2e, 0xfd, 0x0f, 0x9a, 0x45, 0x66, 0x5e, 0x4a, 0x83, 0xda, 0x0a, 0x53, 0x68, 0x63, 0xfa, 0xce, 0x47, 0x20, 0xd3, 0x34, 0xba, 0x0d, + /* (2^432)P */ 0xd0, 0xe9, 0x64, 0xa4, 0x61, 0x4b, 0x86, 0xe5, 0x93, 0x6f, 0xda, 0x0e, 0x31, 0x7e, 0x6e, 0xe3, 0xc6, 0x73, 0xd8, 0xa3, 0x08, 0x57, 0x52, 0xcd, 0x51, 0x63, 0x1d, 0x9f, 0x93, 0x00, 0x62, 0x91, 0x26, 0x21, 0xa7, 0xdd, 0x25, 0x0f, 0x09, 0x0d, 0x35, 0xad, 0xcf, 0x11, 0x8e, 0x6e, 0xe8, 0xae, 0x1d, 0x95, 0xcb, 0x88, 0xf8, 0x70, 0x7b, 0x91, + /* (2^433)P */ 0x0c, 0x19, 0x5c, 0xd9, 0x8d, 0xda, 0x9d, 0x2c, 0x90, 0x54, 0x65, 0xe8, 0xb6, 0x35, 0x50, 0xae, 0xea, 0xae, 0x43, 0xb7, 0x1e, 0x99, 0x8b, 0x4c, 0x36, 0x4e, 0xe4, 0x1e, 0xc4, 0x64, 0x43, 0xb6, 0xeb, 0xd4, 0xe9, 0x60, 0x22, 0xee, 0xcf, 0xb8, 0x52, 0x1b, 0xf0, 0x04, 0xce, 0xbc, 0x2b, 0xf0, 0xbe, 0xcd, 0x44, 0x74, 0x1e, 0x1f, 0x63, 0xf9, + /* (2^434)P */ 0xe1, 0x3f, 0x95, 0x94, 0xb2, 0xb6, 0x31, 0xa9, 0x1b, 0xdb, 0xfd, 0x0e, 0xdb, 0xdd, 0x1a, 0x22, 0x78, 0x60, 0x9f, 0x75, 0x5f, 0x93, 0x06, 0x0c, 0xd8, 0xbb, 0xa2, 0x85, 0x2b, 0x5e, 0xc0, 0x9b, 0xa8, 0x5d, 0xaf, 0x93, 0x91, 0x91, 0x47, 0x41, 0x1a, 0xfc, 0xb4, 0x51, 0x85, 0xad, 0x69, 0x4d, 0x73, 0x69, 0xd5, 0x4e, 0x82, 0xfb, 0x66, 0xcb, + /* (2^435)P */ 0x7c, 0xbe, 0xc7, 0x51, 0xc4, 0x74, 0x6e, 0xab, 0xfd, 0x41, 0x4f, 0x76, 0x4f, 0x24, 0x03, 0xd6, 0x2a, 0xb7, 0x42, 0xb4, 0xda, 0x41, 0x2c, 0x82, 0x48, 0x4c, 0x7f, 0x6f, 0x25, 0x5d, 0x36, 0xd4, 0x69, 0xf5, 0xef, 0x02, 0x81, 0xea, 0x6f, 0x19, 0x69, 0xe8, 0x6f, 0x5b, 0x2f, 0x14, 0x0e, 0x6f, 0x89, 0xb4, 0xb5, 0xd8, 0xae, 0xef, 0x7b, 0x87, + /* (2^436)P */ 0xe9, 0x91, 0xa0, 0x8b, 0xc9, 0xe0, 0x01, 0x90, 0x37, 0xc1, 0x6f, 0xdc, 0x5e, 0xf7, 0xbf, 0x43, 0x00, 0xaa, 0x10, 0x76, 0x76, 0x18, 0x6e, 0x19, 0x1e, 0x94, 0x50, 0x11, 0x0a, 0xd1, 0xe2, 0xdb, 0x08, 0x21, 0xa0, 0x1f, 0xdb, 0x54, 0xfe, 0xea, 0x6e, 0xa3, 0x68, 0x56, 0x87, 0x0b, 0x22, 0x4e, 0x66, 0xf3, 0x82, 0x82, 0x00, 0xcd, 0xd4, 0x12, + /* (2^437)P */ 0x25, 0x8e, 0x24, 0x77, 0x64, 0x4c, 0xe0, 0xf8, 0x18, 0xc0, 0xdc, 0xc7, 0x1b, 0x35, 0x65, 0xde, 0x67, 0x41, 0x5e, 0x6f, 0x90, 0x82, 0xa7, 0x2e, 0x6d, 0xf1, 0x47, 0xb4, 0x92, 0x9c, 0xfd, 0x6a, 0x9a, 0x41, 0x36, 0x20, 0x24, 0x58, 0xc3, 0x59, 0x07, 0x9a, 0xfa, 0x9f, 0x03, 0xcb, 0xc7, 0x69, 0x37, 0x60, 0xe1, 0xab, 0x13, 0x72, 0xee, 0xa2, + /* (2^438)P */ 0x74, 0x78, 0xfb, 0x13, 0xcb, 0x8e, 0x37, 0x1a, 0xf6, 0x1d, 0x17, 0x83, 0x06, 0xd4, 0x27, 0x06, 0x21, 0xe8, 0xda, 0xdf, 0x6b, 0xf3, 0x83, 0x6b, 0x34, 0x8a, 0x8c, 0xee, 0x01, 0x05, 0x5b, 0xed, 0xd3, 0x1b, 0xc9, 0x64, 0x83, 0xc9, 0x49, 0xc2, 0x57, 0x1b, 0xdd, 0xcf, 0xf1, 0x9d, 0x63, 0xee, 0x1c, 0x0d, 0xa0, 0x0a, 0x73, 0x1f, 0x5b, 0x32, + /* (2^439)P */ 0x29, 0xce, 0x1e, 0xc0, 0x6a, 0xf5, 0xeb, 0x99, 0x5a, 0x39, 0x23, 0xe9, 0xdd, 0xac, 0x44, 0x88, 0xbc, 0x80, 0x22, 0xde, 0x2c, 0xcb, 0xa8, 0x3b, 0xff, 0xf7, 0x6f, 0xc7, 0x71, 0x72, 0xa8, 0xa3, 0xf6, 0x4d, 0xc6, 0x75, 0xda, 0x80, 0xdc, 0xd9, 0x30, 0xd9, 0x07, 0x50, 0x5a, 0x54, 0x7d, 0xda, 0x39, 0x6f, 0x78, 0x94, 0xbf, 0x25, 0x98, 0xdc, + /* (2^440)P */ 0x01, 0x26, 0x62, 0x44, 0xfb, 0x0f, 0x11, 0x72, 0x73, 0x0a, 0x16, 0xc7, 0x16, 0x9c, 0x9b, 0x37, 0xd8, 0xff, 0x4f, 0xfe, 0x57, 0xdb, 0xae, 0xef, 0x7d, 0x94, 0x30, 0x04, 0x70, 0x83, 0xde, 0x3c, 0xd4, 0xb5, 0x70, 0xda, 0xa7, 0x55, 0xc8, 0x19, 0xe1, 0x36, 0x15, 0x61, 0xe7, 0x3b, 0x7d, 0x85, 0xbb, 0xf3, 0x42, 0x5a, 0x94, 0xf4, 
0x53, 0x2a, + /* (2^441)P */ 0x14, 0x60, 0xa6, 0x0b, 0x83, 0xe1, 0x23, 0x77, 0xc0, 0xce, 0x50, 0xed, 0x35, 0x8d, 0x98, 0x99, 0x7d, 0xf5, 0x8d, 0xce, 0x94, 0x25, 0xc8, 0x0f, 0x6d, 0xfa, 0x4a, 0xa4, 0x3a, 0x1f, 0x66, 0xfb, 0x5a, 0x64, 0xaf, 0x8b, 0x54, 0x54, 0x44, 0x3f, 0x5b, 0x88, 0x61, 0xe4, 0x48, 0x45, 0x26, 0x20, 0xbe, 0x0d, 0x06, 0xbb, 0x65, 0x59, 0xe1, 0x36, + /* (2^442)P */ 0xb7, 0x98, 0xce, 0xa3, 0xe3, 0xee, 0x11, 0x1b, 0x9e, 0x24, 0x59, 0x75, 0x31, 0x37, 0x44, 0x6f, 0x6b, 0x9e, 0xec, 0xb7, 0x44, 0x01, 0x7e, 0xab, 0xbb, 0x69, 0x5d, 0x11, 0xb0, 0x30, 0x64, 0xea, 0x91, 0xb4, 0x7a, 0x8c, 0x02, 0x4c, 0xb9, 0x10, 0xa7, 0xc7, 0x79, 0xe6, 0xdc, 0x77, 0xe3, 0xc8, 0xef, 0x3e, 0xf9, 0x38, 0x81, 0xce, 0x9a, 0xb2, + /* (2^443)P */ 0x91, 0x12, 0x76, 0xd0, 0x10, 0xb4, 0xaf, 0xe1, 0x89, 0x3a, 0x93, 0x6b, 0x5c, 0x19, 0x5f, 0x24, 0xed, 0x04, 0x92, 0xc7, 0xf0, 0x00, 0x08, 0xc1, 0x92, 0xff, 0x90, 0xdb, 0xb2, 0xbf, 0xdf, 0x49, 0xcd, 0xbd, 0x5c, 0x6e, 0xbf, 0x16, 0xbb, 0x61, 0xf9, 0x20, 0x33, 0x35, 0x93, 0x11, 0xbc, 0x59, 0x69, 0xce, 0x18, 0x9f, 0xf8, 0x7b, 0xa1, 0x6e, + /* (2^444)P */ 0xa1, 0xf4, 0xaf, 0xad, 0xf8, 0xe6, 0x99, 0xd2, 0xa1, 0x4d, 0xde, 0x56, 0xc9, 0x7b, 0x0b, 0x11, 0x3e, 0xbf, 0x89, 0x1a, 0x9a, 0x90, 0xe5, 0xe2, 0xa6, 0x37, 0x88, 0xa1, 0x68, 0x59, 0xae, 0x8c, 0xec, 0x02, 0x14, 0x8d, 0xb7, 0x2e, 0x25, 0x75, 0x7f, 0x76, 0x1a, 0xd3, 0x4d, 0xad, 0x8a, 0x00, 0x6c, 0x96, 0x49, 0xa4, 0xc3, 0x2e, 0x5c, 0x7b, + /* (2^445)P */ 0x26, 0x53, 0xf7, 0xda, 0xa8, 0x01, 0x14, 0xb1, 0x63, 0xe3, 0xc3, 0x89, 0x88, 0xb0, 0x85, 0x40, 0x2b, 0x26, 0x9a, 0x10, 0x1a, 0x70, 0x33, 0xf4, 0x50, 0x9d, 0x4d, 0xd8, 0x64, 0xc6, 0x0f, 0xe1, 0x17, 0xc8, 0x10, 0x4b, 0xfc, 0xa0, 0xc9, 0xba, 0x2c, 0x98, 0x09, 0xf5, 0x84, 0xb6, 0x7c, 0x4e, 0xa3, 0xe3, 0x81, 0x1b, 0x32, 0x60, 0x02, 0xdd, + /* (2^446)P */ 0xa3, 0xe5, 0x86, 0xd4, 0x43, 0xa8, 0xd1, 0x98, 0x9d, 0x9d, 0xdb, 0x04, 0xcf, 0x6e, 0x35, 0x05, 0x30, 0x53, 0x3b, 0xbc, 0x90, 0x00, 0x4a, 0xc5, 0x40, 0x2a, 0x0f, 0xde, 0x1a, 0xd7, 0x36, 0x27, 0x44, 0x62, 0xa6, 0xac, 0x9d, 0xd2, 0x70, 0x69, 0x14, 0x39, 0x9b, 0xd1, 0xc3, 0x0a, 0x3a, 0x82, 0x0e, 0xf1, 0x94, 0xd7, 0x42, 0x94, 0xd5, 0x7d, + /* (2^447)P */ 0x04, 0xc0, 0x6e, 0x12, 0x90, 0x70, 0xf9, 0xdf, 0xf7, 0xc9, 0x86, 0xc0, 0xe6, 0x92, 0x8b, 0x0a, 0xa1, 0xc1, 0x3b, 0xcc, 0x33, 0xb7, 0xf0, 0xeb, 0x51, 0x50, 0x80, 0x20, 0x69, 0x1c, 0x4f, 0x89, 0x05, 0x1e, 0xe4, 0x7a, 0x0a, 0xc2, 0xf0, 0xf5, 0x78, 0x91, 0x76, 0x34, 0x45, 0xdc, 0x24, 0x53, 0x24, 0x98, 0xe2, 0x73, 0x6f, 0xe6, 0x46, 0x67, +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/BUILD.bazel b/vendor/github.com/cloudflare/circl/ecc/goldilocks/BUILD.bazel new file mode 100644 index 00000000..8b7a0a89 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/BUILD.bazel @@ -0,0 +1,25 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "goldilocks", + srcs = [ + "constants.go", + "curve.go", + "isogeny.go", + "point.go", + "scalar.go", + "twist.go", + "twistPoint.go", + "twistTables.go", + "twist_basemult.go", + ], + importmap = "peridot.resf.org/vendor/github.com/cloudflare/circl/ecc/goldilocks", + importpath = "github.com/cloudflare/circl/ecc/goldilocks", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/cloudflare/circl/internal/conv", + "//vendor/github.com/cloudflare/circl/math", + "//vendor/github.com/cloudflare/circl/math/fp448", + "//vendor/github.com/cloudflare/circl/math/mlsbset", + ], +) diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/constants.go 
b/vendor/github.com/cloudflare/circl/ecc/goldilocks/constants.go new file mode 100644 index 00000000..b6b236e5 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/constants.go @@ -0,0 +1,71 @@ +package goldilocks + +import fp "github.com/cloudflare/circl/math/fp448" + +var ( + // genX is the x-coordinate of the generator of Goldilocks curve. + genX = fp.Elt{ + 0x5e, 0xc0, 0x0c, 0xc7, 0x2b, 0xa8, 0x26, 0x26, + 0x8e, 0x93, 0x00, 0x8b, 0xe1, 0x80, 0x3b, 0x43, + 0x11, 0x65, 0xb6, 0x2a, 0xf7, 0x1a, 0xae, 0x12, + 0x64, 0xa4, 0xd3, 0xa3, 0x24, 0xe3, 0x6d, 0xea, + 0x67, 0x17, 0x0f, 0x47, 0x70, 0x65, 0x14, 0x9e, + 0xda, 0x36, 0xbf, 0x22, 0xa6, 0x15, 0x1d, 0x22, + 0xed, 0x0d, 0xed, 0x6b, 0xc6, 0x70, 0x19, 0x4f, + } + // genY is the y-coordinate of the generator of Goldilocks curve. + genY = fp.Elt{ + 0x14, 0xfa, 0x30, 0xf2, 0x5b, 0x79, 0x08, 0x98, + 0xad, 0xc8, 0xd7, 0x4e, 0x2c, 0x13, 0xbd, 0xfd, + 0xc4, 0x39, 0x7c, 0xe6, 0x1c, 0xff, 0xd3, 0x3a, + 0xd7, 0xc2, 0xa0, 0x05, 0x1e, 0x9c, 0x78, 0x87, + 0x40, 0x98, 0xa3, 0x6c, 0x73, 0x73, 0xea, 0x4b, + 0x62, 0xc7, 0xc9, 0x56, 0x37, 0x20, 0x76, 0x88, + 0x24, 0xbc, 0xb6, 0x6e, 0x71, 0x46, 0x3f, 0x69, + } + // paramD is -39081 in Fp. + paramD = fp.Elt{ + 0x56, 0x67, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + } + // order is 2^446-0x8335dc163bb124b65129c96fde933d8d723a70aadc873d6d54a7bb0d, + // which is the number of points in the prime subgroup. + order = Scalar{ + 0xf3, 0x44, 0x58, 0xab, 0x92, 0xc2, 0x78, 0x23, + 0x55, 0x8f, 0xc5, 0x8d, 0x72, 0xc2, 0x6c, 0x21, + 0x90, 0x36, 0xd6, 0xae, 0x49, 0xdb, 0x4e, 0xc4, + 0xe9, 0x23, 0xca, 0x7c, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, + } + // residue448 is 2^448 mod order. + residue448 = [4]uint64{ + 0x721cf5b5529eec34, 0x7a4cf635c8e9c2ab, 0xeec492d944a725bf, 0x20cd77058, + } + // invFour is 1/4 mod order. + invFour = Scalar{ + 0x3d, 0x11, 0xd6, 0xaa, 0xa4, 0x30, 0xde, 0x48, + 0xd5, 0x63, 0x71, 0xa3, 0x9c, 0x30, 0x5b, 0x08, + 0xa4, 0x8d, 0xb5, 0x6b, 0xd2, 0xb6, 0x13, 0x71, + 0xfa, 0x88, 0x32, 0xdf, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f, + } + // paramDTwist is -39082 in Fp. The D parameter of the twist curve. + paramDTwist = fp.Elt{ + 0x55, 0x67, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + } +) diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go new file mode 100644 index 00000000..5a939100 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go @@ -0,0 +1,80 @@ +// Package goldilocks provides elliptic curve operations over the goldilocks curve. 
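// NOTE (editor's sketch, not part of the vendored file): the magic constants in
// constants.go above can be spot-checked with the standard library alone. For
// example, paramD encodes -39081 mod p in little-endian with p = 2^448 - 2^224 - 1,
// which is why its two lowest bytes are 0x56, 0x67:
//
//	p := new(big.Int).Lsh(big.NewInt(1), 448)      // p = 2^448
//	p.Sub(p, new(big.Int).Lsh(big.NewInt(1), 224)) //   - 2^224
//	p.Sub(p, big.NewInt(1))                        //   - 1
//	d := new(big.Int).Sub(p, big.NewInt(39081))    // d = -39081 mod p
//	be := make([]byte, 56)
//	d.FillBytes(be)                                // big-endian encoding
//	fmt.Printf("%#02x %#02x\n", be[55], be[54])    // prints 0x56 0x67
//
// The same check with 39082 reproduces the leading 0x55, 0x67 of paramDTwist.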
+package goldilocks + +import fp "github.com/cloudflare/circl/math/fp448" + +// Curve is the Goldilocks curve x^2+y^2=z^2-39081x^2y^2. +type Curve struct{} + +// Identity returns the identity point. +func (Curve) Identity() *Point { + return &Point{ + y: fp.One(), + z: fp.One(), + } +} + +// IsOnCurve returns true if the point lies on the curve. +func (Curve) IsOnCurve(P *Point) bool { + x2, y2, t, t2, z2 := &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{} + rhs, lhs := &fp.Elt{}, &fp.Elt{} + fp.Mul(t, &P.ta, &P.tb) // t = ta*tb + fp.Sqr(x2, &P.x) // x^2 + fp.Sqr(y2, &P.y) // y^2 + fp.Sqr(z2, &P.z) // z^2 + fp.Sqr(t2, t) // t^2 + fp.Add(lhs, x2, y2) // x^2 + y^2 + fp.Mul(rhs, t2, &paramD) // dt^2 + fp.Add(rhs, rhs, z2) // z^2 + dt^2 + fp.Sub(lhs, lhs, rhs) // x^2 + y^2 - (z^2 + dt^2) + eq0 := fp.IsZero(lhs) + + fp.Mul(lhs, &P.x, &P.y) // xy + fp.Mul(rhs, t, &P.z) // tz + fp.Sub(lhs, lhs, rhs) // xy - tz + eq1 := fp.IsZero(lhs) + return eq0 && eq1 +} + +// Generator returns the generator point. +func (Curve) Generator() *Point { + return &Point{ + x: genX, + y: genY, + z: fp.One(), + ta: genX, + tb: genY, + } +} + +// Order returns the number of points in the prime subgroup. +func (Curve) Order() Scalar { return order } + +// Double returns 2P. +func (Curve) Double(P *Point) *Point { R := *P; R.Double(); return &R } + +// Add returns P+Q. +func (Curve) Add(P, Q *Point) *Point { R := *P; R.Add(Q); return &R } + +// ScalarMult returns kP. This function runs in constant time. +func (e Curve) ScalarMult(k *Scalar, P *Point) *Point { + k4 := &Scalar{} + k4.divBy4(k) + return e.pull(twistCurve{}.ScalarMult(k4, e.push(P))) +} + +// ScalarBaseMult returns kG where G is the generator point. This function runs in constant time. +func (e Curve) ScalarBaseMult(k *Scalar) *Point { + k4 := &Scalar{} + k4.divBy4(k) + return e.pull(twistCurve{}.ScalarBaseMult(k4)) +} + +// CombinedMult returns mG+nP, where G is the generator point. This function is non-constant time. +func (e Curve) CombinedMult(m, n *Scalar, P *Point) *Point { + m4 := &Scalar{} + n4 := &Scalar{} + m4.divBy4(m) + n4.divBy4(n) + return e.pull(twistCurve{}.CombinedMult(m4, n4, twistCurve{}.pull(P))) +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/isogeny.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/isogeny.go new file mode 100644 index 00000000..b1daab85 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/isogeny.go @@ -0,0 +1,52 @@ +package goldilocks + +import fp "github.com/cloudflare/circl/math/fp448" + +func (Curve) pull(P *twistPoint) *Point { return twistCurve{}.push(P) } +func (twistCurve) pull(P *Point) *twistPoint { return Curve{}.push(P) } + +// push sends a point on the Goldilocks curve to a point on the twist curve. +func (Curve) push(P *Point) *twistPoint { + Q := &twistPoint{} + Px, Py, Pz := &P.x, &P.y, &P.z + a, b, c, d, e, f, g, h := &Q.x, &Q.y, &Q.z, &fp.Elt{}, &Q.ta, &Q.x, &Q.y, &Q.tb + fp.Add(e, Px, Py) // x+y + fp.Sqr(a, Px) // A = x^2 + fp.Sqr(b, Py) // B = y^2 + fp.Sqr(c, Pz) // z^2 + fp.Add(c, c, c) // C = 2*z^2 + *d = *a // D = A + fp.Sqr(e, e) // (x+y)^2 + fp.Sub(e, e, a) // (x+y)^2-A + fp.Sub(e, e, b) // E = (x+y)^2-A-B + fp.Add(h, b, d) // H = B+D + fp.Sub(g, b, d) // G = B-D + fp.Sub(f, c, h) // F = C-H + fp.Mul(&Q.z, f, g) // Z = F * G + fp.Mul(&Q.x, e, f) // X = E * F + fp.Mul(&Q.y, g, h) // Y = G * H, T = E * H + return Q +} + +// push sends a point on the twist curve to a point on the Goldilocks curve.
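// Example (editor's sketch, not part of the vendored file): exercising the
// public API defined in curve.go above. CombinedMult(m, n, P) should agree
// with computing mG and nP separately and adding them; the scalar bytes are
// arbitrary test values.
//
//	var c goldilocks.Curve
//	var m, n goldilocks.Scalar
//	m.FromBytes([]byte{0x2a}) // m = 42
//	n.FromBytes([]byte{0x07}) // n = 7
//	P := c.ScalarBaseMult(&n)                                // P = nG
//	got := c.CombinedMult(&m, &n, P)                         // mG + nP, variable time
//	want := c.Add(c.ScalarBaseMult(&m), c.ScalarMult(&n, P)) // same result, constant time
//	fmt.Println(got.IsEqual(want), c.IsOnCurve(got))         // expected: true true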
+func (twistCurve) push(P *twistPoint) *Point { + Q := &Point{} + Px, Py, Pz := &P.x, &P.y, &P.z + a, b, c, d, e, f, g, h := &Q.x, &Q.y, &Q.z, &fp.Elt{}, &Q.ta, &Q.x, &Q.y, &Q.tb + fp.Add(e, Px, Py) // x+y + fp.Sqr(a, Px) // A = x^2 + fp.Sqr(b, Py) // B = y^2 + fp.Sqr(c, Pz) // z^2 + fp.Add(c, c, c) // C = 2*z^2 + fp.Neg(d, a) // D = -A + fp.Sqr(e, e) // (x+y)^2 + fp.Sub(e, e, a) // (x+y)^2-A + fp.Sub(e, e, b) // E = (x+y)^2-A-B + fp.Add(h, b, d) // H = B+D + fp.Sub(g, b, d) // G = B-D + fp.Sub(f, c, h) // F = C-H + fp.Mul(&Q.z, f, g) // Z = F * G + fp.Mul(&Q.x, e, f) // X = E * F + fp.Mul(&Q.y, g, h) // Y = G * H, T = E * H + return Q +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/point.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/point.go new file mode 100644 index 00000000..11f73de0 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/point.go @@ -0,0 +1,171 @@ +package goldilocks + +import ( + "errors" + "fmt" + + fp "github.com/cloudflare/circl/math/fp448" +) + +// Point is a point on the Goldilocks Curve. +type Point struct{ x, y, z, ta, tb fp.Elt } + +func (P Point) String() string { + return fmt.Sprintf("x: %v\ny: %v\nz: %v\nta: %v\ntb: %v", P.x, P.y, P.z, P.ta, P.tb) +} + +// FromAffine creates a point from affine coordinates. +func FromAffine(x, y *fp.Elt) (*Point, error) { + P := &Point{ + x: *x, + y: *y, + z: fp.One(), + ta: *x, + tb: *y, + } + if !(Curve{}).IsOnCurve(P) { + return P, errors.New("point not on curve") + } + return P, nil +} + +// isLessThan returns true if 0 <= x < y, and assumes that slices are of the +// same length and are interpreted in little-endian order. +func isLessThan(x, y []byte) bool { + i := len(x) - 1 + for i > 0 && x[i] == y[i] { + i-- + } + return x[i] < y[i] +} + +// FromBytes returns a point from the input buffer. +func FromBytes(in []byte) (*Point, error) { + if len(in) < fp.Size+1 { + return nil, errors.New("wrong input length") + } + err := errors.New("invalid decoding") + P := &Point{} + signX := in[fp.Size] >> 7 + copy(P.y[:], in[:fp.Size]) + p := fp.P() + if !isLessThan(P.y[:], p[:]) { + return nil, err + } + + u, v := &fp.Elt{}, &fp.Elt{} + one := fp.One() + fp.Sqr(u, &P.y) // u = y^2 + fp.Mul(v, u, &paramD) // v = dy^2 + fp.Sub(u, u, &one) // u = y^2-1 + fp.Sub(v, v, &one) // v = dy^2-1 + isQR := fp.InvSqrt(&P.x, u, v) // x = sqrt(u/v) + if !isQR { + return nil, err + } + fp.Modp(&P.x) // x = x mod p + if fp.IsZero(&P.x) && signX == 1 { + return nil, err + } + if signX != (P.x[0] & 1) { + fp.Neg(&P.x, &P.x) + } + P.ta = P.x + P.tb = P.y + P.z = fp.One() + return P, nil +} + +// IsIdentity returns true if P is the identity Point. +func (P *Point) IsIdentity() bool { + return fp.IsZero(&P.x) && !fp.IsZero(&P.y) && !fp.IsZero(&P.z) && P.y == P.z +} + +// IsEqual returns true if P is equivalent to Q. +func (P *Point) IsEqual(Q *Point) bool { + l, r := &fp.Elt{}, &fp.Elt{} + fp.Mul(l, &P.x, &Q.z) + fp.Mul(r, &Q.x, &P.z) + fp.Sub(l, l, r) + b := fp.IsZero(l) + fp.Mul(l, &P.y, &Q.z) + fp.Mul(r, &Q.y, &P.z) + fp.Sub(l, l, r) + b = b && fp.IsZero(l) + fp.Mul(l, &P.ta, &P.tb) + fp.Mul(l, l, &Q.z) + fp.Mul(r, &Q.ta, &Q.tb) + fp.Mul(r, r, &P.z) + fp.Sub(l, l, r) + b = b && fp.IsZero(l) + return b +} + +// Neg obtains the inverse of the Point. +func (P *Point) Neg() { fp.Neg(&P.x, &P.x); fp.Neg(&P.ta, &P.ta) } + +// ToAffine returns the x,y affine coordinates of P.
+func (P *Point) ToAffine() (x, y fp.Elt) { + fp.Inv(&P.z, &P.z) // 1/z + fp.Mul(&P.x, &P.x, &P.z) // x/z + fp.Mul(&P.y, &P.y, &P.z) // y/z + fp.Modp(&P.x) + fp.Modp(&P.y) + fp.SetOne(&P.z) + P.ta = P.x + P.tb = P.y + return P.x, P.y +} + +// ToBytes stores P into a slice of bytes. +func (P *Point) ToBytes(out []byte) error { + if len(out) < fp.Size+1 { + return errors.New("invalid decoding") + } + x, y := P.ToAffine() + out[fp.Size] = (x[0] & 1) << 7 + return fp.ToBytes(out[:fp.Size], &y) +} + +// MarshalBinary encodes the receiver into a binary form and returns the result. +func (P *Point) MarshalBinary() (data []byte, err error) { + data = make([]byte, fp.Size+1) + err = P.ToBytes(data[:fp.Size+1]) + return data, err +} + +// UnmarshalBinary must be able to decode the form generated by MarshalBinary. +func (P *Point) UnmarshalBinary(data []byte) error { Q, err := FromBytes(data); *P = *Q; return err } + +// Double sets P = 2P. +func (P *Point) Double() { P.Add(P) } + +// Add sets P = P+Q. +func (P *Point) Add(Q *Point) { + // This is formula (5) from "Twisted Edwards Curves Revisited" by + // Hisil H., Wong K.KH., Carter G., Dawson E. (2008) + // https://doi.org/10.1007/978-3-540-89255-7_20 + x1, y1, z1, ta1, tb1 := &P.x, &P.y, &P.z, &P.ta, &P.tb + x2, y2, z2, ta2, tb2 := &Q.x, &Q.y, &Q.z, &Q.ta, &Q.tb + x3, y3, z3, E, H := &P.x, &P.y, &P.z, &P.ta, &P.tb + A, B, C, D := &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{} + t1, t2, F, G := C, D, &fp.Elt{}, &fp.Elt{} + fp.Mul(t1, ta1, tb1) // t1 = ta1*tb1 + fp.Mul(t2, ta2, tb2) // t2 = ta2*tb2 + fp.Mul(A, x1, x2) // A = x1*x2 + fp.Mul(B, y1, y2) // B = y1*y2 + fp.Mul(C, t1, t2) // t1*t2 + fp.Mul(C, C, &paramD) // C = d*t1*t2 + fp.Mul(D, z1, z2) // D = z1*z2 + fp.Add(F, x1, y1) // x1+y1 + fp.Add(E, x2, y2) // x2+y2 + fp.Mul(E, E, F) // (x1+y1)*(x2+y2) + fp.Sub(E, E, A) // (x1+y1)*(x2+y2)-A + fp.Sub(E, E, B) // E = (x1+y1)*(x2+y2)-A-B + fp.Sub(F, D, C) // F = D-C + fp.Add(G, D, C) // G = D+C + fp.Sub(H, B, A) // H = B-A + fp.Mul(z3, F, G) // Z = F * G + fp.Mul(x3, E, F) // X = E * F + fp.Mul(y3, G, H) // Y = G * H, T = E * H +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/scalar.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/scalar.go new file mode 100644 index 00000000..f98117b2 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/scalar.go @@ -0,0 +1,203 @@ +package goldilocks + +import ( + "encoding/binary" + "math/bits" +) + +// ScalarSize is the size (in bytes) of scalars. +const ScalarSize = 56 // 448 / 8 + +// _N is the number of 64-bit words to store scalars. +const _N = 7 // 448 / 64 + +// Scalar represents a positive integer stored in little-endian order.
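// Example (editor's sketch, not part of the vendored file): round-tripping the
// 57-byte encoding implemented above, i.e. 56 bytes of y followed by the sign
// of x in the top bit of the last byte.
//
//	var k goldilocks.Scalar
//	k.FromBytes([]byte{0x05})
//	P := goldilocks.Curve{}.ScalarBaseMult(&k)
//	buf, _ := P.MarshalBinary() // len(buf) == fp.Size+1 == 57
//	Q, err := goldilocks.FromBytes(buf)
//	fmt.Println(err == nil && P.IsEqual(Q)) // expected: true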
+type Scalar [ScalarSize]byte + +type scalar64 [_N]uint64 + +func (z *scalar64) fromScalar(x *Scalar) { + z[0] = binary.LittleEndian.Uint64(x[0*8 : 1*8]) + z[1] = binary.LittleEndian.Uint64(x[1*8 : 2*8]) + z[2] = binary.LittleEndian.Uint64(x[2*8 : 3*8]) + z[3] = binary.LittleEndian.Uint64(x[3*8 : 4*8]) + z[4] = binary.LittleEndian.Uint64(x[4*8 : 5*8]) + z[5] = binary.LittleEndian.Uint64(x[5*8 : 6*8]) + z[6] = binary.LittleEndian.Uint64(x[6*8 : 7*8]) +} + +func (z *scalar64) toScalar(x *Scalar) { + binary.LittleEndian.PutUint64(x[0*8:1*8], z[0]) + binary.LittleEndian.PutUint64(x[1*8:2*8], z[1]) + binary.LittleEndian.PutUint64(x[2*8:3*8], z[2]) + binary.LittleEndian.PutUint64(x[3*8:4*8], z[3]) + binary.LittleEndian.PutUint64(x[4*8:5*8], z[4]) + binary.LittleEndian.PutUint64(x[5*8:6*8], z[5]) + binary.LittleEndian.PutUint64(x[6*8:7*8], z[6]) +} + +// add calculates z = x + y. Assumes len(z) > max(len(x),len(y)). +func add(z, x, y []uint64) uint64 { + l, L, zz := len(x), len(y), y + if l > L { + l, L, zz = L, l, x + } + c := uint64(0) + for i := 0; i < l; i++ { + z[i], c = bits.Add64(x[i], y[i], c) + } + for i := l; i < L; i++ { + z[i], c = bits.Add64(zz[i], 0, c) + } + return c +} + +// sub calculates z = x - y. Assumes len(z) > max(len(x),len(y)). +func sub(z, x, y []uint64) uint64 { + l, L, zz := len(x), len(y), y + if l > L { + l, L, zz = L, l, x + } + c := uint64(0) + for i := 0; i < l; i++ { + z[i], c = bits.Sub64(x[i], y[i], c) + } + for i := l; i < L; i++ { + z[i], c = bits.Sub64(zz[i], 0, c) + } + return c +} + +// mulWord calculates z = x * y. Assumes len(z) >= len(x)+1. +func mulWord(z, x []uint64, y uint64) { + for i := range z { + z[i] = 0 + } + carry := uint64(0) + for i := range x { + hi, lo := bits.Mul64(x[i], y) + lo, cc := bits.Add64(lo, z[i], 0) + hi, _ = bits.Add64(hi, 0, cc) + z[i], cc = bits.Add64(lo, carry, 0) + carry, _ = bits.Add64(hi, 0, cc) + } + z[len(x)] = carry +} + +// Cmov moves x into z if b=1. +func (z *scalar64) Cmov(b uint64, x *scalar64) { + m := uint64(0) - b + for i := range z { + z[i] = (z[i] &^ m) | (x[i] & m) + } +} + +// leftShift shifts to the left the words of z returning the more significant word. +func (z *scalar64) leftShift(low uint64) uint64 { + high := z[_N-1] + for i := _N - 1; i > 0; i-- { + z[i] = z[i-1] + } + z[0] = low + return high +} + +// reduceOneWord calculates z = z + 2^448*x such that the result fits in a Scalar. +func (z *scalar64) reduceOneWord(x uint64) { + prod := (&scalar64{})[:] + mulWord(prod, residue448[:], x) + cc := add(z[:], z[:], prod) + mulWord(prod, residue448[:], cc) + add(z[:], z[:], prod) +} + +// modOrder reduces z mod order. +func (z *scalar64) modOrder() { + var o64, x scalar64 + o64.fromScalar(&order) + // Performs: while (z >= order) { z = z-order } + // At most 8 (eight) iterations reduce 3 bits by subtracting. + for i := 0; i < 8; i++ { + c := sub(x[:], z[:], o64[:]) // (c || x) = z-order + z.Cmov(1-c, &x) // if c != 0 { z = x } + } +} + +// FromBytes stores z = x mod order, where x is a number stored in little-endian order. +func (z *Scalar) FromBytes(x []byte) { + n := len(x) + nCeil := (n + 7) >> 3 + for i := range z { + z[i] = 0 + } + if nCeil < _N { + copy(z[:], x) + return + } + copy(z[:], x[8*(nCeil-_N):]) + var z64 scalar64 + z64.fromScalar(z) + for i := nCeil - _N - 1; i >= 0; i-- { + low := binary.LittleEndian.Uint64(x[8*i:]) + high := z64.leftShift(low) + z64.reduceOneWord(high) + } + z64.modOrder() + z64.toScalar(z) +} + +// divBy4 calculates z = x/4 mod order. 
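// NOTE (editor's sketch, not part of the vendored file): FromBytes above
// reduces 56-byte-and-longer inputs mod the group order, so feeding it the 56
// little-endian bytes of the order itself yields zero. orderBytes below is a
// hypothetical copy of the `order` value from constants.go:
//
//	var s goldilocks.Scalar
//	s.FromBytes(orderBytes[:])
//	fmt.Println(s == goldilocks.Scalar{}) // expected: true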
+func (z *Scalar) divBy4(x *Scalar) { z.Mul(x, &invFour) } + +// Red reduces z mod order. +func (z *Scalar) Red() { var t scalar64; t.fromScalar(z); t.modOrder(); t.toScalar(z) } + +// Neg calculates z = -z mod order. +func (z *Scalar) Neg() { z.Sub(&order, z) } + +// Add calculates z = x+y mod order. +func (z *Scalar) Add(x, y *Scalar) { + var z64, x64, y64, t scalar64 + x64.fromScalar(x) + y64.fromScalar(y) + c := add(z64[:], x64[:], y64[:]) + add(t[:], z64[:], residue448[:]) + z64.Cmov(c, &t) + z64.modOrder() + z64.toScalar(z) +} + +// Sub calculates z = x-y mod order. +func (z *Scalar) Sub(x, y *Scalar) { + var z64, x64, y64, t scalar64 + x64.fromScalar(x) + y64.fromScalar(y) + c := sub(z64[:], x64[:], y64[:]) + sub(t[:], z64[:], residue448[:]) + z64.Cmov(c, &t) + z64.modOrder() + z64.toScalar(z) +} + +// Mul calculates z = x*y mod order. +func (z *Scalar) Mul(x, y *Scalar) { + var z64, x64, y64 scalar64 + prod := (&[_N + 1]uint64{})[:] + x64.fromScalar(x) + y64.fromScalar(y) + mulWord(prod, x64[:], y64[_N-1]) + copy(z64[:], prod[:_N]) + z64.reduceOneWord(prod[_N]) + for i := _N - 2; i >= 0; i-- { + h := z64.leftShift(0) + z64.reduceOneWord(h) + mulWord(prod, x64[:], y64[i]) + c := add(z64[:], z64[:], prod[:_N]) + z64.reduceOneWord(prod[_N] + c) + } + z64.modOrder() + z64.toScalar(z) +} + +// IsZero returns true if z=0. +func (z *Scalar) IsZero() bool { z.Red(); return *z == Scalar{} } diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go new file mode 100644 index 00000000..83d7cdad --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go @@ -0,0 +1,138 @@ +package goldilocks + +import ( + "crypto/subtle" + "math/bits" + + "github.com/cloudflare/circl/internal/conv" + "github.com/cloudflare/circl/math" + fp "github.com/cloudflare/circl/math/fp448" +) + +// twistCurve is -x^2+y^2=1-39082x^2y^2 and is 4-isogenous to Goldilocks. +type twistCurve struct{} + +// Identity returns the identity point. +func (twistCurve) Identity() *twistPoint { + return &twistPoint{ + y: fp.One(), + z: fp.One(), + } +} + +// subYDiv16 updates x = (x - y) / 16. +func subYDiv16(x *scalar64, y int64) { + s := uint64(y >> 63) + x0, b0 := bits.Sub64((*x)[0], uint64(y), 0) + x1, b1 := bits.Sub64((*x)[1], s, b0) + x2, b2 := bits.Sub64((*x)[2], s, b1) + x3, b3 := bits.Sub64((*x)[3], s, b2) + x4, b4 := bits.Sub64((*x)[4], s, b3) + x5, b5 := bits.Sub64((*x)[5], s, b4) + x6, _ := bits.Sub64((*x)[6], s, b5) + x[0] = (x0 >> 4) | (x1 << 60) + x[1] = (x1 >> 4) | (x2 << 60) + x[2] = (x2 >> 4) | (x3 << 60) + x[3] = (x3 >> 4) | (x4 << 60) + x[4] = (x4 >> 4) | (x5 << 60) + x[5] = (x5 >> 4) | (x6 << 60) + x[6] = (x6 >> 4) +} + +func recodeScalar(d *[113]int8, k *Scalar) { + var k64 scalar64 + k64.fromScalar(k) + for i := 0; i < 112; i++ { + d[i] = int8((k64[0] & 0x1f) - 16) + subYDiv16(&k64, int64(d[i])) + } + d[112] = int8(k64[0]) +} + +// ScalarMult returns kP.
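// Example (editor's sketch, not part of the vendored file): the mod-order
// arithmetic defined above satisfies the usual identities, e.g. k + (-k) = 0:
//
//	var k, mk, sum goldilocks.Scalar
//	k.FromBytes([]byte{0x09})
//	mk = k
//	mk.Neg()                  // mk = -k mod order
//	sum.Add(&k, &mk)          // sum = k + (-k)
//	fmt.Println(sum.IsZero()) // expected: true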
+func (e twistCurve) ScalarMult(k *Scalar, P *twistPoint) *twistPoint { + var TabP [8]preTwistPointProy + var S preTwistPointProy + var d [113]int8 + + var isZero int + if k.IsZero() { + isZero = 1 + } + subtle.ConstantTimeCopy(isZero, k[:], order[:]) + + minusK := *k + isEven := 1 - int(k[0]&0x1) + minusK.Neg() + subtle.ConstantTimeCopy(isEven, k[:], minusK[:]) + recodeScalar(&d, k) + + P.oddMultiples(TabP[:]) + Q := e.Identity() + for i := 112; i >= 0; i-- { + Q.Double() + Q.Double() + Q.Double() + Q.Double() + mask := d[i] >> 7 + absDi := (d[i] + mask) ^ mask + inx := int32((absDi - 1) >> 1) + sig := int((d[i] >> 7) & 0x1) + for j := range TabP { + S.cmov(&TabP[j], uint(subtle.ConstantTimeEq(inx, int32(j)))) + } + S.cneg(sig) + Q.mixAdd(&S) + } + Q.cneg(uint(isEven)) + return Q +} + +const ( + omegaFix = 7 + omegaVar = 5 +) + +// CombinedMult returns mG+nP. +func (e twistCurve) CombinedMult(m, n *Scalar, P *twistPoint) *twistPoint { + nafFix := math.OmegaNAF(conv.BytesLe2BigInt(m[:]), omegaFix) + nafVar := math.OmegaNAF(conv.BytesLe2BigInt(n[:]), omegaVar) + + if len(nafFix) > len(nafVar) { + nafVar = append(nafVar, make([]int32, len(nafFix)-len(nafVar))...) + } else if len(nafFix) < len(nafVar) { + nafFix = append(nafFix, make([]int32, len(nafVar)-len(nafFix))...) + } + + var TabQ [1 << (omegaVar - 2)]preTwistPointProy + P.oddMultiples(TabQ[:]) + Q := e.Identity() + for i := len(nafFix) - 1; i >= 0; i-- { + Q.Double() + // Generator point + if nafFix[i] != 0 { + idxM := absolute(nafFix[i]) >> 1 + R := tabVerif[idxM] + if nafFix[i] < 0 { + R.neg() + } + Q.mixAddZ1(&R) + } + // Variable input point + if nafVar[i] != 0 { + idxN := absolute(nafVar[i]) >> 1 + S := TabQ[idxN] + if nafVar[i] < 0 { + S.neg() + } + Q.mixAdd(&S) + } + } + return Q +} + +// absolute returns always a positive value. +func absolute(x int32) int32 { + mask := x >> 31 + return (x + mask) ^ mask +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistPoint.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistPoint.go new file mode 100644 index 00000000..c55db77b --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistPoint.go @@ -0,0 +1,135 @@ +package goldilocks + +import ( + "fmt" + + fp "github.com/cloudflare/circl/math/fp448" +) + +type twistPoint struct{ x, y, z, ta, tb fp.Elt } + +type preTwistPointAffine struct{ addYX, subYX, dt2 fp.Elt } + +type preTwistPointProy struct { + preTwistPointAffine + z2 fp.Elt +} + +func (P *twistPoint) String() string { + return fmt.Sprintf("x: %v\ny: %v\nz: %v\nta: %v\ntb: %v", P.x, P.y, P.z, P.ta, P.tb) +} + +// cneg conditionally negates the point if b=1. +func (P *twistPoint) cneg(b uint) { + t := &fp.Elt{} + fp.Neg(t, &P.x) + fp.Cmov(&P.x, t, b) + fp.Neg(t, &P.ta) + fp.Cmov(&P.ta, t, b) +} + +// Double updates P with 2P. +func (P *twistPoint) Double() { + // This is formula (7) from "Twisted Edwards Curves Revisited" by + // Hisil H., Wong K.KH., Carter G., Dawson E. 
(2008) + // https://doi.org/10.1007/978-3-540-89255-7_20 + Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb + a, b, c, e, f, g, h := Px, Py, Pz, Pta, Px, Py, Ptb + fp.Add(e, Px, Py) // x+y + fp.Sqr(a, Px) // A = x^2 + fp.Sqr(b, Py) // B = y^2 + fp.Sqr(c, Pz) // z^2 + fp.Add(c, c, c) // C = 2*z^2 + fp.Add(h, a, b) // H = A+B + fp.Sqr(e, e) // (x+y)^2 + fp.Sub(e, e, h) // E = (x+y)^2-A-B + fp.Sub(g, b, a) // G = B-A + fp.Sub(f, c, g) // F = C-G + fp.Mul(Pz, f, g) // Z = F * G + fp.Mul(Px, e, f) // X = E * F + fp.Mul(Py, g, h) // Y = G * H, T = E * H +} + +// mixAddZ1 calculates P = P+Q, where Q is a precomputed point with Z_Q = 1. +func (P *twistPoint) mixAddZ1(Q *preTwistPointAffine) { + fp.Add(&P.z, &P.z, &P.z) // D = 2*z1 (z2=1) + P.coreAddition(Q) +} + +// coreAddition calculates P=P+Q for curves with A=-1. +func (P *twistPoint) coreAddition(Q *preTwistPointAffine) { + // This is the formula following (5) from "Twisted Edwards Curves Revisited" by + // Hisil H., Wong K.KH., Carter G., Dawson E. (2008) + // https://doi.org/10.1007/978-3-540-89255-7_20 + Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb + addYX2, subYX2, dt2 := &Q.addYX, &Q.subYX, &Q.dt2 + a, b, c, d, e, f, g, h := Px, Py, &fp.Elt{}, Pz, Pta, Px, Py, Ptb + fp.Mul(c, Pta, Ptb) // t1 = ta*tb + fp.Sub(h, Py, Px) // y1-x1 + fp.Add(b, Py, Px) // y1+x1 + fp.Mul(a, h, subYX2) // A = (y1-x1)*(y2-x2) + fp.Mul(b, b, addYX2) // B = (y1+x1)*(y2+x2) + fp.Mul(c, c, dt2) // C = 2*D*t1*t2 + fp.Sub(e, b, a) // E = B-A + fp.Add(h, b, a) // H = B+A + fp.Sub(f, d, c) // F = D-C + fp.Add(g, d, c) // G = D+C + fp.Mul(Pz, f, g) // Z = F * G + fp.Mul(Px, e, f) // X = E * F + fp.Mul(Py, g, h) // Y = G * H, T = E * H +} + +func (P *preTwistPointAffine) neg() { + P.addYX, P.subYX = P.subYX, P.addYX + fp.Neg(&P.dt2, &P.dt2) +} + +func (P *preTwistPointAffine) cneg(b int) { + t := &fp.Elt{} + fp.Cswap(&P.addYX, &P.subYX, uint(b)) + fp.Neg(t, &P.dt2) + fp.Cmov(&P.dt2, t, uint(b)) +} + +func (P *preTwistPointAffine) cmov(Q *preTwistPointAffine, b uint) { + fp.Cmov(&P.addYX, &Q.addYX, b) + fp.Cmov(&P.subYX, &Q.subYX, b) + fp.Cmov(&P.dt2, &Q.dt2, b) +} + +// mixAdd calculates P = P+Q, where Q is a precomputed point with Z_Q != 1. +func (P *twistPoint) mixAdd(Q *preTwistPointProy) { + fp.Mul(&P.z, &P.z, &Q.z2) // D = 2*z1*z2 + P.coreAddition(&Q.preTwistPointAffine) +} + +// oddMultiples calculates T[i] = (2*i+1)P for 0 <= i < len(T). +func (P *twistPoint) oddMultiples(T []preTwistPointProy) { + if n := len(T); n > 0 { + T[0].FromTwistPoint(P) + _2P := *P + _2P.Double() + R := &preTwistPointProy{} + R.FromTwistPoint(&_2P) + for i := 1; i < n; i++ { + P.mixAdd(R) + T[i].FromTwistPoint(P) + } + } +} + +// cmov conditionally moves Q into P if b=1. +func (P *preTwistPointProy) cmov(Q *preTwistPointProy, b uint) { + P.preTwistPointAffine.cmov(&Q.preTwistPointAffine, b) + fp.Cmov(&P.z2, &Q.z2, b) +} + +// FromTwistPoint precomputes some coordinates of Q for mixed addition.
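// NOTE (editor's sketch, not part of the vendored file): oddMultiples above
// fills a window table with T[i] = (2i+1)P. Given any *goldilocks.Point P, the
// same table can be built against the public API with one doubling and
// repeated additions:
//
//	var c goldilocks.Curve
//	T := make([]*goldilocks.Point, 8) // 1P, 3P, 5P, ..., 15P
//	T[0] = P
//	P2 := c.Double(P)
//	for i := 1; i < len(T); i++ {
//		T[i] = c.Add(T[i-1], P2)
//	}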
+func (P *preTwistPointProy) FromTwistPoint(Q *twistPoint) { + fp.Add(&P.addYX, &Q.y, &Q.x) // addYX = X + Y + fp.Sub(&P.subYX, &Q.y, &Q.x) // subYX = Y - X + fp.Mul(&P.dt2, &Q.ta, &Q.tb) // T = ta*tb + fp.Mul(&P.dt2, &P.dt2, &paramDTwist) // D*T + fp.Add(&P.dt2, &P.dt2, &P.dt2) // dt2 = 2*D*T + fp.Add(&P.z2, &Q.z, &Q.z) // z2 = 2*Z +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistTables.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistTables.go new file mode 100644 index 00000000..ed432e02 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistTables.go @@ -0,0 +1,216 @@ +package goldilocks + +import fp "github.com/cloudflare/circl/math/fp448" + +var tabFixMult = [fxV][fx2w1]preTwistPointAffine{ + { + { + addYX: fp.Elt{0x65, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2b, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05}, + subYX: fp.Elt{0x64, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2d, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05}, + dt2: fp.Elt{0x1a, 0x33, 0xea, 0x64, 0x45, 0x1c, 0xdf, 0x17, 0x1d, 0x16, 0x34, 0x28, 0xd6, 0x61, 0x19, 0x67, 0x79, 0xb4, 0x13, 0xcf, 0x3e, 0x7c, 0x0e, 0x72, 0xda, 0xf1, 0x5f, 0xda, 0xe6, 0xcf, 0x42, 0xd3, 0xb6, 0x17, 0xc2, 0x68, 0x13, 0x2d, 0xd9, 0x60, 0x3e, 0xae, 0xf0, 0x5b, 0x96, 0xf0, 0xcd, 0xaf, 0xea, 0xb7, 0x0d, 0x59, 0x16, 0xa7, 0xff, 0x55}, + }, + { + addYX: fp.Elt{0xca, 0xd8, 0x7d, 0x86, 0x1a, 0xef, 0xad, 0x11, 0xe3, 0x27, 0x41, 0x7e, 0x7f, 0x3e, 0xa9, 0xd2, 0xb5, 0x4e, 0x50, 0xe0, 0x77, 0x91, 0xc2, 0x13, 0x52, 0x73, 0x41, 0x09, 0xa6, 0x57, 0x9a, 0xc8, 0xa8, 0x90, 0x9d, 0x26, 0x14, 0xbb, 0xa1, 0x2a, 0xf7, 0x45, 0x43, 0x4e, 0xea, 0x35, 0x62, 0xe1, 0x08, 0x85, 0x46, 0xb8, 0x24, 0x05, 0x2d, 0xab}, + subYX: fp.Elt{0x9b, 0xe6, 0xd3, 0xe5, 0xfe, 0x50, 0x36, 0x3c, 0x3c, 0x6d, 0x74, 0x1d, 0x74, 0xc0, 0xde, 0x5b, 0x45, 0x27, 0xe5, 0x12, 0xee, 0x63, 0x35, 0x6b, 0x13, 0xe2, 0x41, 0x6b, 0x3a, 0x05, 0x2b, 0xb1, 0x89, 0x26, 0xb6, 0xc6, 0xd1, 0x84, 0xff, 0x0e, 0x9b, 0xa3, 0xfb, 0x21, 0x36, 0x6b, 0x01, 0xf7, 0x9f, 0x7c, 0xeb, 0xf5, 0x18, 0x7a, 0x2a, 0x70}, + dt2: fp.Elt{0x09, 0xad, 0x99, 0x1a, 0x38, 0xd3, 0xdf, 0x22, 0x37, 0x32, 0x61, 0x8b, 0xf3, 0x19, 0x48, 0x08, 0xe8, 0x49, 0xb6, 0x4a, 0xa7, 0xed, 0xa4, 0xa2, 0xee, 0x86, 0xd7, 0x31, 0x5e, 0xce, 0x95, 0x76, 0x86, 0x42, 0x1c, 0x9d, 0x07, 0x14, 0x8c, 0x34, 0x18, 0x9c, 0x6d, 0x3a, 0xdf, 0xa9, 0xe8, 0x36, 0x7e, 0xe4, 0x95, 0xbe, 0xb5, 0x09, 0xf8, 0x9c}, + }, + { + addYX: fp.Elt{0x51, 0xdb, 0x49, 0xa8, 0x9f, 0xe3, 0xd7, 0xec, 0x0d, 0x0f, 0x49, 0xe8, 0xb6, 0xc5, 0x0f, 0x5a, 0x1c, 0xce, 0x54, 0x0d, 0xb1, 0x8d, 0x5b, 0xbf, 0xf4, 0xaa, 0x34, 0x77, 0xc4, 0x5d, 0x59, 0xb6, 0xc5, 0x0e, 0x5a, 0xd8, 0x5b, 0x30, 0xc2, 0x1d, 0xec, 0x85, 0x1c, 0x42, 0xbe, 0x24, 0x2e, 0x50, 0x55, 0x44, 0xb2, 0x3a, 0x01, 0xaa, 0x98, 0xfb}, + subYX: fp.Elt{0xe7, 0x29, 0xb7, 0xd0, 0xaa, 0x4f, 0x32, 0x53, 0x56, 0xde, 0xbc, 0xd1, 0x92, 0x5d, 0x19, 0xbe, 0xa3, 0xe3, 0x75, 0x48, 0xe0, 0x7a, 0x1b, 0x54, 0x7a, 0xb7, 0x41, 0x77, 0x84, 0x38, 0xdd, 0x14, 0x9f, 0xca, 0x3f, 0xa3, 0xc8, 0xa7, 0x04, 0x70, 0xf1, 0x4d, 0x3d, 0xb3, 0x84, 0x79, 0xcb, 0xdb,
0xe4, 0xc5, 0x42, 0x9b, 0x57, 0x19, 0xf1, 0x2d}, + dt2: fp.Elt{0x20, 0xb4, 0x94, 0x9e, 0xdf, 0x31, 0x44, 0x0b, 0xc9, 0x7b, 0x75, 0x40, 0x9d, 0xd1, 0x96, 0x39, 0x70, 0x71, 0x15, 0xc8, 0x93, 0xd5, 0xc5, 0xe5, 0xba, 0xfe, 0xee, 0x08, 0x6a, 0x98, 0x0a, 0x1b, 0xb2, 0xaa, 0x3a, 0xf4, 0xa4, 0x79, 0xf9, 0x8e, 0x4d, 0x65, 0x10, 0x9b, 0x3a, 0x6e, 0x7c, 0x87, 0x94, 0x92, 0x11, 0x65, 0xbf, 0x1a, 0x09, 0xde}, + }, + { + addYX: fp.Elt{0xf3, 0x84, 0x76, 0x77, 0xa5, 0x6b, 0x27, 0x3b, 0x83, 0x3d, 0xdf, 0xa0, 0xeb, 0x32, 0x6d, 0x58, 0x81, 0x57, 0x64, 0xc2, 0x21, 0x7c, 0x9b, 0xea, 0xe6, 0xb0, 0x93, 0xf9, 0xe7, 0xc3, 0xed, 0x5a, 0x8e, 0xe2, 0xb4, 0x72, 0x76, 0x66, 0x0f, 0x22, 0x29, 0x94, 0x3e, 0x63, 0x48, 0x5e, 0x80, 0xcb, 0xac, 0xfa, 0x95, 0xb6, 0x4b, 0xc4, 0x95, 0x33}, + subYX: fp.Elt{0x0c, 0x55, 0xd1, 0x5e, 0x5f, 0xbf, 0xbf, 0xe2, 0x4c, 0xfc, 0x37, 0x4a, 0xc4, 0xb1, 0xf4, 0x83, 0x61, 0x93, 0x60, 0x8e, 0x9f, 0x31, 0xf0, 0xa0, 0x41, 0xff, 0x1d, 0xe2, 0x7f, 0xca, 0x40, 0xd6, 0x88, 0xe8, 0x91, 0x61, 0xe2, 0x11, 0x18, 0x83, 0xf3, 0x25, 0x2f, 0x3f, 0x49, 0x40, 0xd4, 0x83, 0xe2, 0xd7, 0x74, 0x6a, 0x16, 0x86, 0x4e, 0xab}, + dt2: fp.Elt{0xdd, 0x58, 0x65, 0xd8, 0x9f, 0xdd, 0x70, 0x7f, 0x0f, 0xec, 0xbd, 0x5c, 0x5c, 0x9b, 0x7e, 0x1b, 0x9f, 0x79, 0x36, 0x1f, 0xfd, 0x79, 0x10, 0x1c, 0x52, 0xf3, 0x22, 0xa4, 0x1f, 0x71, 0x6e, 0x63, 0x14, 0xf4, 0xa7, 0x3e, 0xbe, 0xad, 0x43, 0x30, 0x38, 0x8c, 0x29, 0xc6, 0xcf, 0x50, 0x75, 0x21, 0xe5, 0x78, 0xfd, 0xb0, 0x9a, 0xc4, 0x6d, 0xd4}, + }, + }, + { + { + addYX: fp.Elt{0x7a, 0xa1, 0x38, 0xa6, 0xfd, 0x0e, 0x96, 0xd5, 0x26, 0x76, 0x86, 0x70, 0x80, 0x30, 0xa6, 0x67, 0xeb, 0xf4, 0x39, 0xdb, 0x22, 0xf5, 0x9f, 0x98, 0xe4, 0xb5, 0x3a, 0x0c, 0x59, 0xbf, 0x85, 0xc6, 0xf0, 0x0b, 0x1c, 0x41, 0x38, 0x09, 0x01, 0xdb, 0xd6, 0x3c, 0xb7, 0xf1, 0x08, 0x6b, 0x4b, 0x9e, 0x63, 0x53, 0x83, 0xd3, 0xab, 0xa3, 0x72, 0x0d}, + subYX: fp.Elt{0x84, 0x68, 0x25, 0xe8, 0xe9, 0x8f, 0x91, 0xbf, 0xf7, 0xa4, 0x30, 0xae, 0xea, 0x9f, 0xdd, 0x56, 0x64, 0x09, 0xc9, 0x54, 0x68, 0x4e, 0x33, 0xc5, 0x6f, 0x7b, 0x2d, 0x52, 0x2e, 0x42, 0xbe, 0xbe, 0xf5, 0x64, 0xbf, 0x77, 0x54, 0xdf, 0xb0, 0x10, 0xd2, 0x16, 0x5d, 0xce, 0xaf, 0x9f, 0xfb, 0xa3, 0x63, 0x50, 0xcb, 0xc0, 0xd0, 0x88, 0x44, 0xa3}, + dt2: fp.Elt{0xc3, 0x8b, 0xa5, 0xf1, 0x44, 0xe4, 0x41, 0xcd, 0x75, 0xe3, 0x17, 0x69, 0x5b, 0xb9, 0xbb, 0xee, 0x82, 0xbb, 0xce, 0x57, 0xdf, 0x2a, 0x9c, 0x12, 0xab, 0x66, 0x08, 0x68, 0x05, 0x1b, 0x87, 0xee, 0x5d, 0x1e, 0x18, 0x14, 0x22, 0x4b, 0x99, 0x61, 0x75, 0x28, 0xe7, 0x65, 0x1c, 0x36, 0xb6, 0x18, 0x09, 0xa8, 0xdf, 0xef, 0x30, 0x35, 0xbc, 0x58}, + }, + { + addYX: fp.Elt{0xc5, 0xd3, 0x0e, 0x6f, 0xaf, 0x06, 0x69, 0xc4, 0x07, 0x9e, 0x58, 0x6e, 0x3f, 0x49, 0xd9, 0x0a, 0x3c, 0x2c, 0x37, 0xcd, 0x27, 0x4d, 0x87, 0x91, 0x7a, 0xb0, 0x28, 0xad, 0x2f, 0x68, 0x92, 0x05, 0x97, 0xf1, 0x30, 0x5f, 0x4c, 0x10, 0x20, 0x30, 0xd3, 0x08, 0x3f, 0xc1, 0xc6, 0xb7, 0xb5, 0xd1, 0x71, 0x7b, 0xa8, 0x0a, 0xd8, 0xf5, 0x17, 0xcf}, + subYX: fp.Elt{0x64, 0xd4, 0x8f, 0x91, 0x40, 0xab, 0x6e, 0x1a, 0x62, 0x83, 0xdc, 0xd7, 0x30, 0x1a, 0x4a, 0x2a, 0x4c, 0x54, 0x86, 0x19, 0x81, 0x5d, 0x04, 0x52, 0xa3, 0xca, 0x82, 0x38, 0xdc, 0x1e, 0xf0, 0x7a, 0x78, 0x76, 0x49, 0x4f, 0x71, 0xc4, 0x74, 0x2f, 0xf0, 0x5b, 0x2e, 0x5e, 0xac, 0xef, 0x17, 0xe4, 0x8e, 0x6e, 0xed, 0x43, 0x23, 0x61, 0x99, 0x49}, + dt2: fp.Elt{0x64, 0x90, 0x72, 0x76, 0xf8, 0x2c, 0x7d, 0x57, 0xf9, 0x30, 0x5e, 0x7a, 0x10, 0x74, 0x19, 0x39, 0xd9, 0xaf, 0x0a, 0xf1, 0x43, 0xed, 0x88, 0x9c, 0x8b, 0xdc, 0x9b, 0x1c, 0x90, 0xe7, 0xf7, 0xa3, 0xa5, 0x0d, 0xc6, 0xbc, 0x30, 0xfb, 0x91, 0x1a, 0x51, 0xba, 0x2d, 0xbe, 0x89, 0xdf, 0x1d, 
0xdc, 0x53, 0xa8, 0x82, 0x8a, 0xd3, 0x8d, 0x16, 0x68}, + }, + { + addYX: fp.Elt{0xef, 0x5c, 0xe3, 0x74, 0xbf, 0x13, 0x4a, 0xbf, 0x66, 0x73, 0x64, 0xb7, 0xd4, 0xce, 0x98, 0x82, 0x05, 0xfa, 0x98, 0x0c, 0x0a, 0xae, 0xe5, 0x6b, 0x9f, 0xac, 0xbb, 0x6e, 0x1f, 0xcf, 0xff, 0xa6, 0x71, 0x9a, 0xa8, 0x7a, 0x9e, 0x64, 0x1f, 0x20, 0x4a, 0x61, 0xa2, 0xd6, 0x50, 0xe3, 0xba, 0x81, 0x0c, 0x50, 0x59, 0x69, 0x59, 0x15, 0x55, 0xdb}, + subYX: fp.Elt{0xe8, 0x77, 0x4d, 0xe8, 0x66, 0x3d, 0xc1, 0x00, 0x3c, 0xf2, 0x25, 0x00, 0xdc, 0xb2, 0xe5, 0x9b, 0x12, 0x89, 0xf3, 0xd6, 0xea, 0x85, 0x60, 0xfe, 0x67, 0x91, 0xfd, 0x04, 0x7c, 0xe0, 0xf1, 0x86, 0x06, 0x11, 0x66, 0xee, 0xd4, 0xd5, 0xbe, 0x3b, 0x0f, 0xe3, 0x59, 0xb3, 0x4f, 0x00, 0xb6, 0xce, 0x80, 0xc1, 0x61, 0xf7, 0xaf, 0x04, 0x6a, 0x3c}, + dt2: fp.Elt{0x00, 0xd7, 0x32, 0x93, 0x67, 0x70, 0x6f, 0xd7, 0x69, 0xab, 0xb1, 0xd3, 0xdc, 0xd6, 0xa8, 0xdd, 0x35, 0x25, 0xca, 0xd3, 0x8a, 0x6d, 0xce, 0xfb, 0xfd, 0x2b, 0x83, 0xf0, 0xd4, 0xac, 0x66, 0xfb, 0x72, 0x87, 0x7e, 0x55, 0xb7, 0x91, 0x58, 0x10, 0xc3, 0x11, 0x7e, 0x15, 0xfe, 0x7c, 0x55, 0x90, 0xa3, 0x9e, 0xed, 0x9a, 0x7f, 0xa7, 0xb7, 0xeb}, + }, + { + addYX: fp.Elt{0x25, 0x0f, 0xc2, 0x09, 0x9c, 0x10, 0xc8, 0x7c, 0x93, 0xa7, 0xbe, 0xe9, 0x26, 0x25, 0x7c, 0x21, 0xfe, 0xe7, 0x5f, 0x3c, 0x02, 0x83, 0xa7, 0x9e, 0xdf, 0xc0, 0x94, 0x2b, 0x7d, 0x1a, 0xd0, 0x1d, 0xcc, 0x2e, 0x7d, 0xd4, 0x85, 0xe7, 0xc1, 0x15, 0x66, 0xd6, 0xd6, 0x32, 0xb8, 0xf7, 0x63, 0xaa, 0x3b, 0xa5, 0xea, 0x49, 0xad, 0x88, 0x9b, 0x66}, + subYX: fp.Elt{0x09, 0x97, 0x79, 0x36, 0x41, 0x56, 0x9b, 0xdf, 0x15, 0xd8, 0x43, 0x28, 0x17, 0x5b, 0x96, 0xc9, 0xcf, 0x39, 0x1f, 0x13, 0xf7, 0x4d, 0x1d, 0x1f, 0xda, 0x51, 0x56, 0xe7, 0x0a, 0x5a, 0x65, 0xb6, 0x2a, 0x87, 0x49, 0x86, 0xc2, 0x2b, 0xcd, 0xfe, 0x07, 0xf6, 0x4c, 0xe2, 0x1d, 0x9b, 0xd8, 0x82, 0x09, 0x5b, 0x11, 0x10, 0x62, 0x56, 0x89, 0xbd}, + dt2: fp.Elt{0xd9, 0x15, 0x73, 0xf2, 0x96, 0x35, 0x53, 0xb0, 0xe7, 0xa8, 0x0b, 0x93, 0x35, 0x0b, 0x3a, 0x00, 0xf5, 0x18, 0xb1, 0xc3, 0x12, 0x3f, 0x91, 0x17, 0xc1, 0x4c, 0x15, 0x5a, 0x86, 0x92, 0x11, 0xbd, 0x44, 0x40, 0x5a, 0x7b, 0x15, 0x89, 0xba, 0xc1, 0xc1, 0xbc, 0x43, 0x45, 0xe6, 0x52, 0x02, 0x73, 0x0a, 0xd0, 0x2a, 0x19, 0xda, 0x47, 0xa8, 0xff}, + }, + }, +} + +// tabVerif contains the odd multiples of P. The entry T[i] = (2i+1)P, where +// P = phi(G) and G is the generator of the Goldilocks curve, and phi is a +// 4-degree isogeny. 
+var tabVerif = [1 << (omegaFix - 2)]preTwistPointAffine{ + { /* 1P*/ + addYX: fp.Elt{0x65, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2b, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05}, + subYX: fp.Elt{0x64, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2d, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05}, + dt2: fp.Elt{0x1a, 0x33, 0xea, 0x64, 0x45, 0x1c, 0xdf, 0x17, 0x1d, 0x16, 0x34, 0x28, 0xd6, 0x61, 0x19, 0x67, 0x79, 0xb4, 0x13, 0xcf, 0x3e, 0x7c, 0x0e, 0x72, 0xda, 0xf1, 0x5f, 0xda, 0xe6, 0xcf, 0x42, 0xd3, 0xb6, 0x17, 0xc2, 0x68, 0x13, 0x2d, 0xd9, 0x60, 0x3e, 0xae, 0xf0, 0x5b, 0x96, 0xf0, 0xcd, 0xaf, 0xea, 0xb7, 0x0d, 0x59, 0x16, 0xa7, 0xff, 0x55}, + }, + { /* 3P*/ + addYX: fp.Elt{0xd1, 0xe9, 0xa8, 0x33, 0x20, 0x76, 0x18, 0x08, 0x45, 0x2a, 0xc9, 0x67, 0x2a, 0xc3, 0x15, 0x24, 0xf9, 0x74, 0x21, 0x30, 0x99, 0x59, 0x8b, 0xb2, 0xf0, 0xa4, 0x07, 0xe2, 0x6a, 0x36, 0x8d, 0xd9, 0xd2, 0x4a, 0x7f, 0x73, 0x50, 0x39, 0x3d, 0xaa, 0xa7, 0x51, 0x73, 0x0d, 0x2b, 0x8b, 0x96, 0x47, 0xac, 0x3c, 0x5d, 0xaa, 0x39, 0x9c, 0xcf, 0xd5}, + subYX: fp.Elt{0x6b, 0x11, 0x5d, 0x1a, 0xf9, 0x41, 0x9d, 0xc5, 0x30, 0x3e, 0xad, 0x25, 0x2c, 0x04, 0x45, 0xea, 0xcc, 0x67, 0x07, 0x85, 0xe9, 0xda, 0x0e, 0xb5, 0x40, 0xb7, 0x32, 0xb4, 0x49, 0xdd, 0xff, 0xaa, 0xfc, 0xbb, 0x19, 0xca, 0x8b, 0x79, 0x2b, 0x8f, 0x8d, 0x00, 0x33, 0xc2, 0xad, 0xe9, 0xd3, 0x12, 0xa8, 0xaa, 0x87, 0x62, 0xad, 0x2d, 0xff, 0xa4}, + dt2: fp.Elt{0xb0, 0xaf, 0x3b, 0xea, 0xf0, 0x42, 0x0b, 0x5e, 0x88, 0xd3, 0x98, 0x08, 0x87, 0x59, 0x72, 0x0a, 0xc2, 0xdf, 0xcb, 0x7f, 0x59, 0xb5, 0x4c, 0x63, 0x68, 0xe8, 0x41, 0x38, 0x67, 0x4f, 0xe9, 0xc6, 0xb2, 0x6b, 0x08, 0xa7, 0xf7, 0x0e, 0xcd, 0xea, 0xca, 0x3d, 0xaf, 0x8e, 0xda, 0x4b, 0x2e, 0xd2, 0x88, 0x64, 0x8d, 0xc5, 0x5f, 0x76, 0x0f, 0x3d}, + }, + { /* 5P*/ + addYX: fp.Elt{0xe5, 0x65, 0xc9, 0xe2, 0x75, 0xf0, 0x7d, 0x1a, 0xba, 0xa4, 0x40, 0x4b, 0x93, 0x12, 0xa2, 0x80, 0x95, 0x0d, 0x03, 0x93, 0xe8, 0xa5, 0x4d, 0xe2, 0x3d, 0x81, 0xf5, 0xce, 0xd4, 0x2d, 0x25, 0x59, 0x16, 0x5c, 0xe7, 0xda, 0xc7, 0x45, 0xd2, 0x7e, 0x2c, 0x38, 0xd4, 0x37, 0x64, 0xb2, 0xc2, 0x28, 0xc5, 0x72, 0x16, 0x32, 0x45, 0x36, 0x6f, 0x9f}, + subYX: fp.Elt{0x09, 0xf4, 0x7e, 0xbd, 0x89, 0xdb, 0x19, 0x58, 0xe1, 0x08, 0x00, 0x8a, 0xf4, 0x5f, 0x2a, 0x32, 0x40, 0xf0, 0x2c, 0x3f, 0x5d, 0xe4, 0xfc, 0x89, 0x11, 0x24, 0xb4, 0x2f, 0x97, 0xad, 0xac, 0x8f, 0x19, 0xab, 0xfa, 0x12, 0xe5, 0xf9, 0x50, 0x4e, 0x50, 0x6f, 0x32, 0x30, 0x88, 0xa6, 0xe5, 0x48, 0x28, 0xa2, 0x1b, 0x9f, 0xcd, 0xe2, 0x43, 0x38}, + dt2: fp.Elt{0xa9, 0xcc, 0x53, 0x39, 0x86, 0x02, 0x60, 0x75, 0x34, 0x99, 0x57, 0xbd, 0xfc, 0x5a, 0x8e, 0xce, 0x5e, 0x98, 0x22, 0xd0, 0xa5, 0x24, 0xff, 0x90, 0x28, 0x9f, 0x58, 0xf3, 0x39, 0xe9, 0xba, 0x36, 0x23, 0xfb, 0x7f, 0x41, 0xcc, 0x2b, 0x5a, 0x25, 0x3f, 0x4c, 0x2a, 0xf1, 0x52, 0x6f, 0x2f, 0x07, 0xe3, 0x88, 0x81, 0x77, 0xdd, 0x7c, 0x88, 0x82}, + }, + { /* 7P*/ + addYX: fp.Elt{0xf7, 0xee, 0x88, 0xfd, 0x3a, 0xbf, 0x7e, 0x28, 0x39, 0x23, 0x79, 0xe6, 0x5c, 0x56, 0xcb, 0xb5, 0x48, 0x6a, 0x80, 0x6d, 0x37, 0x60, 0x6c, 0x10, 0x35, 0x49, 0x4b, 0x46, 0x60, 0xd4, 0x79, 0xd4, 0x53, 0xd3, 0x67, 0x88, 0xd0, 0x41, 0xd5, 0x43, 0x85, 
0xc8, 0x71, 0xe3, 0x1c, 0xb6, 0xda, 0x22, 0x64, 0x8f, 0x80, 0xac, 0xad, 0x7d, 0xd5, 0x82}, + subYX: fp.Elt{0x92, 0x40, 0xc1, 0x83, 0x21, 0x9b, 0xd5, 0x7d, 0x3f, 0x29, 0xb6, 0x26, 0xef, 0x12, 0xb9, 0x27, 0x39, 0x42, 0x37, 0x97, 0x09, 0x9a, 0x08, 0xe1, 0x68, 0xb6, 0x7a, 0x3f, 0x9f, 0x45, 0xf8, 0x37, 0x19, 0x83, 0x97, 0xe6, 0x73, 0x30, 0x32, 0x35, 0xcf, 0xae, 0x5c, 0x12, 0x68, 0xdf, 0x6e, 0x2b, 0xde, 0x83, 0xa0, 0x44, 0x74, 0x2e, 0x4a, 0xe9}, + dt2: fp.Elt{0xcb, 0x22, 0x0a, 0xda, 0x6b, 0xc1, 0x8a, 0x29, 0xa1, 0xac, 0x8b, 0x5b, 0x8b, 0x32, 0x20, 0xf2, 0x21, 0xae, 0x0c, 0x43, 0xc4, 0xd7, 0x19, 0x37, 0x3d, 0x79, 0x25, 0x98, 0x6c, 0x9c, 0x22, 0x31, 0x2a, 0x55, 0x9f, 0xda, 0x5e, 0xa8, 0x13, 0xdb, 0x8e, 0x2e, 0x16, 0x39, 0xf4, 0x91, 0x6f, 0xec, 0x71, 0x71, 0xc9, 0x10, 0xf2, 0xa4, 0x8f, 0x11}, + }, + { /* 9P*/ + addYX: fp.Elt{0x85, 0xdd, 0x37, 0x62, 0x74, 0x8e, 0x33, 0x5b, 0x25, 0x12, 0x1b, 0xe7, 0xdf, 0x47, 0xe5, 0x12, 0xfd, 0x3a, 0x3a, 0xf5, 0x5d, 0x4c, 0xa2, 0x29, 0x3c, 0x5c, 0x2f, 0xee, 0x18, 0x19, 0x0a, 0x2b, 0xef, 0x67, 0x50, 0x7a, 0x0d, 0x29, 0xae, 0x55, 0x82, 0xcd, 0xd6, 0x41, 0x90, 0xb4, 0x13, 0x31, 0x5d, 0x11, 0xb8, 0xaa, 0x12, 0x86, 0x08, 0xac}, + subYX: fp.Elt{0xcc, 0x37, 0x8d, 0x83, 0x5f, 0xfd, 0xde, 0xd5, 0xf7, 0xf1, 0xae, 0x0a, 0xa7, 0x0b, 0xeb, 0x6d, 0x19, 0x8a, 0xb6, 0x1a, 0x59, 0xd8, 0xff, 0x3c, 0xbc, 0xbc, 0xef, 0x9c, 0xda, 0x7b, 0x75, 0x12, 0xaf, 0x80, 0x8f, 0x2c, 0x3c, 0xaa, 0x0b, 0x17, 0x86, 0x36, 0x78, 0x18, 0xc8, 0x8a, 0xf6, 0xb8, 0x2c, 0x2f, 0x57, 0x2c, 0x62, 0x57, 0xf6, 0x90}, + dt2: fp.Elt{0x83, 0xbc, 0xa2, 0x07, 0xa5, 0x38, 0x96, 0xea, 0xfe, 0x11, 0x46, 0x1d, 0x3b, 0xcd, 0x42, 0xc5, 0xee, 0x67, 0x04, 0x72, 0x08, 0xd8, 0xd9, 0x96, 0x07, 0xf7, 0xac, 0xc3, 0x64, 0xf1, 0x98, 0x2c, 0x55, 0xd7, 0x7d, 0xc8, 0x6c, 0xbd, 0x2c, 0xff, 0x15, 0xd6, 0x6e, 0xb8, 0x17, 0x8e, 0xa8, 0x27, 0x66, 0xb1, 0x73, 0x79, 0x96, 0xff, 0x29, 0x10}, + }, + { /* 11P*/ + addYX: fp.Elt{0x76, 0xcb, 0x9b, 0x0c, 0x5b, 0xfe, 0xe1, 0x2a, 0xdd, 0x6f, 0x6c, 0xdd, 0x6f, 0xb4, 0xc0, 0xc2, 0x1b, 0x4b, 0x38, 0xe8, 0x66, 0x8c, 0x1e, 0x31, 0x63, 0xb9, 0x94, 0xcd, 0xc3, 0x8c, 0x44, 0x25, 0x7b, 0xd5, 0x39, 0x80, 0xfc, 0x01, 0xaa, 0xf7, 0x2a, 0x61, 0x8a, 0x25, 0xd2, 0x5f, 0xc5, 0x66, 0x38, 0xa4, 0x17, 0xcf, 0x3e, 0x11, 0x0f, 0xa3}, + subYX: fp.Elt{0xe0, 0xb6, 0xd1, 0x9c, 0x71, 0x49, 0x2e, 0x7b, 0xde, 0x00, 0xda, 0x6b, 0xf1, 0xec, 0xe6, 0x7a, 0x15, 0x38, 0x71, 0xe9, 0x7b, 0xdb, 0xf8, 0x98, 0xc0, 0x91, 0x2e, 0x53, 0xee, 0x92, 0x87, 0x25, 0xc9, 0xb0, 0xbb, 0x33, 0x15, 0x46, 0x7f, 0xfd, 0x4f, 0x8b, 0x77, 0x05, 0x96, 0xb6, 0xe2, 0x08, 0xdb, 0x0d, 0x09, 0xee, 0x5b, 0xd1, 0x2a, 0x63}, + dt2: fp.Elt{0x8f, 0x7b, 0x57, 0x8c, 0xbf, 0x06, 0x0d, 0x43, 0x21, 0x92, 0x94, 0x2d, 0x6a, 0x38, 0x07, 0x0f, 0xa0, 0xf1, 0xe3, 0xd8, 0x2a, 0xbf, 0x46, 0xc6, 0x9e, 0x1f, 0x8f, 0x2b, 0x46, 0x84, 0x0b, 0x74, 0xed, 0xff, 0xf8, 0xa5, 0x94, 0xae, 0xf1, 0x67, 0xb1, 0x9b, 0xdd, 0x4a, 0xd0, 0xdb, 0xc2, 0xb5, 0x58, 0x49, 0x0c, 0xa9, 0x1d, 0x7d, 0xa9, 0xd3}, + }, + { /* 13P*/ + addYX: fp.Elt{0x73, 0x84, 0x2e, 0x31, 0x1f, 0xdc, 0xed, 0x9f, 0x74, 0xfa, 0xe0, 0x35, 0xb1, 0x85, 0x6a, 0x8d, 0x86, 0xd0, 0xff, 0xd6, 0x08, 0x43, 0x73, 0x1a, 0xd5, 0xf8, 0x43, 0xd4, 0xb3, 0xe5, 0x3f, 0xa8, 0x84, 0x17, 0x59, 0x65, 0x4e, 0xe6, 0xee, 0x54, 0x9c, 0xda, 0x5e, 0x7e, 0x98, 0x29, 0x6d, 0x73, 0x34, 0x1f, 0x99, 0x80, 0x54, 0x54, 0x81, 0x0b}, + subYX: fp.Elt{0xb1, 0xe5, 0xbb, 0x80, 0x22, 0x9c, 0x81, 0x6d, 0xaf, 0x27, 0x65, 0x6f, 0x7e, 0x9c, 0xb6, 0x8d, 0x35, 0x5c, 0x2e, 0x20, 0x48, 0x7a, 0x28, 0xf0, 0x97, 0xfe, 0xb7, 0x71, 0xce, 0xd6, 0xad, 0x3a, 0x81, 0xf6, 0x74, 0x5e, 0xf3, 
0xfd, 0x1b, 0xd4, 0x1e, 0x7c, 0xc2, 0xb7, 0xc8, 0xa6, 0xc9, 0x89, 0x03, 0x47, 0xec, 0x24, 0xd6, 0x0e, 0xec, 0x9c}, + dt2: fp.Elt{0x91, 0x0a, 0x43, 0x34, 0x20, 0xc2, 0x64, 0xf7, 0x4e, 0x48, 0xc8, 0xd2, 0x95, 0x83, 0xd1, 0xa4, 0xfb, 0x4e, 0x41, 0x3b, 0x0d, 0xd5, 0x07, 0xd9, 0xf1, 0x13, 0x16, 0x78, 0x54, 0x57, 0xd0, 0xf1, 0x4f, 0x20, 0xac, 0xcf, 0x9c, 0x3b, 0x33, 0x0b, 0x99, 0x54, 0xc3, 0x7f, 0x3e, 0x57, 0x26, 0x86, 0xd5, 0xa5, 0x2b, 0x8d, 0xe3, 0x19, 0x36, 0xf7}, + }, + { /* 15P*/ + addYX: fp.Elt{0x23, 0x69, 0x47, 0x14, 0xf9, 0x9a, 0x50, 0xff, 0x64, 0xd1, 0x50, 0x35, 0xc3, 0x11, 0xd3, 0x19, 0xcf, 0x87, 0xda, 0x30, 0x0b, 0x50, 0xda, 0xc0, 0xe0, 0x25, 0x00, 0xe5, 0x68, 0x93, 0x04, 0xc2, 0xaf, 0xbd, 0x2f, 0x36, 0x5f, 0x47, 0x96, 0x10, 0xa8, 0xbd, 0xe4, 0x88, 0xac, 0x80, 0x52, 0x61, 0x73, 0xe9, 0x63, 0xdd, 0x99, 0xad, 0x20, 0x5b}, + subYX: fp.Elt{0x1b, 0x5e, 0xa2, 0x2a, 0x25, 0x0f, 0x86, 0xc0, 0xb1, 0x2e, 0x0c, 0x13, 0x40, 0x8d, 0xf0, 0xe6, 0x00, 0x55, 0x08, 0xc5, 0x7d, 0xf4, 0xc9, 0x31, 0x25, 0x3a, 0x99, 0x69, 0xdd, 0x67, 0x63, 0x9a, 0xd6, 0x89, 0x2e, 0xa1, 0x19, 0xca, 0x2c, 0xd9, 0x59, 0x5f, 0x5d, 0xc3, 0x6e, 0x62, 0x36, 0x12, 0x59, 0x15, 0xe1, 0xdc, 0xa4, 0xad, 0xc9, 0xd0}, + dt2: fp.Elt{0xbc, 0xea, 0xfc, 0xaf, 0x66, 0x23, 0xb7, 0x39, 0x6b, 0x2a, 0x96, 0xa8, 0x54, 0x43, 0xe9, 0xaa, 0x32, 0x40, 0x63, 0x92, 0x5e, 0xdf, 0x35, 0xc2, 0x9f, 0x24, 0x0c, 0xed, 0xfc, 0xde, 0x73, 0x8f, 0xa7, 0xd5, 0xa3, 0x2b, 0x18, 0x1f, 0xb0, 0xf8, 0xeb, 0x55, 0xd9, 0xc3, 0xfd, 0x28, 0x7c, 0x4f, 0xce, 0x0d, 0xf7, 0xae, 0xc2, 0x83, 0xc3, 0x78}, + }, + { /* 17P*/ + addYX: fp.Elt{0x71, 0xe6, 0x60, 0x93, 0x37, 0xdb, 0x01, 0xa5, 0x4c, 0xba, 0xe8, 0x8e, 0xd5, 0xf9, 0xd3, 0x98, 0xe5, 0xeb, 0xab, 0x3a, 0x15, 0x8b, 0x35, 0x60, 0xbe, 0xe5, 0x9c, 0x2d, 0x10, 0x9b, 0x2e, 0xcf, 0x65, 0x64, 0xea, 0x8f, 0x72, 0xce, 0xf5, 0x18, 0xe5, 0xe2, 0xf0, 0x0e, 0xae, 0x04, 0xec, 0xa0, 0x20, 0x65, 0x63, 0x07, 0xb1, 0x9f, 0x03, 0x97}, + subYX: fp.Elt{0x9e, 0x41, 0x64, 0x30, 0x95, 0x7f, 0x3a, 0x89, 0x7b, 0x0a, 0x79, 0x59, 0x23, 0x9a, 0x3b, 0xfe, 0xa4, 0x13, 0x08, 0xb2, 0x2e, 0x04, 0x50, 0x10, 0x30, 0xcd, 0x2e, 0xa4, 0x91, 0x71, 0x50, 0x36, 0x4a, 0x02, 0xf4, 0x8d, 0xa3, 0x36, 0x1b, 0xf4, 0x52, 0xba, 0x15, 0x04, 0x8b, 0x80, 0x25, 0xd9, 0xae, 0x67, 0x20, 0xd9, 0x88, 0x8f, 0x97, 0xa6}, + dt2: fp.Elt{0xb5, 0xe7, 0x46, 0xbd, 0x55, 0x23, 0xa0, 0x68, 0xc0, 0x12, 0xd9, 0xf1, 0x0a, 0x75, 0xe2, 0xda, 0xf4, 0x6b, 0xca, 0x14, 0xe4, 0x9f, 0x0f, 0xb5, 0x3c, 0xa6, 0xa5, 0xa2, 0x63, 0x94, 0xd1, 0x1c, 0x39, 0x58, 0x57, 0x02, 0x27, 0x98, 0xb6, 0x47, 0xc6, 0x61, 0x4b, 0x5c, 0xab, 0x6f, 0x2d, 0xab, 0xe3, 0xc1, 0x69, 0xf9, 0x12, 0xb0, 0xc8, 0xd5}, + }, + { /* 19P*/ + addYX: fp.Elt{0x19, 0x7d, 0xd5, 0xac, 0x79, 0xa2, 0x82, 0x9b, 0x28, 0x31, 0x22, 0xc0, 0x73, 0x02, 0x76, 0x17, 0x10, 0x70, 0x79, 0x57, 0xc9, 0x84, 0x62, 0x8e, 0x04, 0x04, 0x61, 0x67, 0x08, 0x48, 0xb4, 0x4b, 0xde, 0x53, 0x8c, 0xff, 0x36, 0x1b, 0x62, 0x86, 0x5d, 0xe1, 0x9b, 0xb1, 0xe5, 0xe8, 0x44, 0x64, 0xa1, 0x68, 0x3f, 0xa8, 0x45, 0x52, 0x91, 0xed}, + subYX: fp.Elt{0x42, 0x1a, 0x36, 0x1f, 0x90, 0x15, 0x24, 0x8d, 0x24, 0x80, 0xe6, 0xfe, 0x1e, 0xf0, 0xad, 0xaf, 0x6a, 0x93, 0xf0, 0xa6, 0x0d, 0x5d, 0xea, 0xf6, 0x62, 0x96, 0x7a, 0x05, 0x76, 0x85, 0x74, 0x32, 0xc7, 0xc8, 0x64, 0x53, 0x62, 0xe7, 0x54, 0x84, 0xe0, 0x40, 0x66, 0x19, 0x70, 0x40, 0x95, 0x35, 0x68, 0x64, 0x43, 0xcd, 0xba, 0x29, 0x32, 0xa8}, + dt2: fp.Elt{0x3e, 0xf6, 0xd6, 0xe4, 0x99, 0xeb, 0x20, 0x66, 0x08, 0x2e, 0x26, 0x64, 0xd7, 0x76, 0xf3, 0xb4, 0xc5, 0xa4, 0x35, 0x92, 0xd2, 0x99, 0x70, 0x5a, 0x1a, 0xe9, 0xe9, 0x3d, 0x3b, 0xe1, 0xcd, 0x0e, 0xee, 
0x24, 0x13, 0x03, 0x22, 0xd6, 0xd6, 0x72, 0x08, 0x2b, 0xde, 0xfd, 0x93, 0xed, 0x0c, 0x7f, 0x5e, 0x31, 0x22, 0x4d, 0x80, 0x78, 0xc0, 0x48}, + }, + { /* 21P*/ + addYX: fp.Elt{0x8f, 0x72, 0xd2, 0x9e, 0xc4, 0xcd, 0x2c, 0xbf, 0xa8, 0xd3, 0x24, 0x62, 0x28, 0xee, 0x39, 0x0a, 0x19, 0x3a, 0x58, 0xff, 0x21, 0x2e, 0x69, 0x6c, 0x6e, 0x18, 0xd0, 0xcd, 0x61, 0xc1, 0x18, 0x02, 0x5a, 0xe9, 0xe3, 0xef, 0x1f, 0x8e, 0x10, 0xe8, 0x90, 0x2b, 0x48, 0xcd, 0xee, 0x38, 0xbd, 0x3a, 0xca, 0xbc, 0x2d, 0xe2, 0x3a, 0x03, 0x71, 0x02}, + subYX: fp.Elt{0xf8, 0xa4, 0x32, 0x26, 0x66, 0xaf, 0x3b, 0x53, 0xe7, 0xb0, 0x91, 0x92, 0xf5, 0x3c, 0x74, 0xce, 0xf2, 0xdd, 0x68, 0xa9, 0xf4, 0xcd, 0x5f, 0x60, 0xab, 0x71, 0xdf, 0xcd, 0x5c, 0x5d, 0x51, 0x72, 0x3a, 0x96, 0xea, 0xd6, 0xde, 0x54, 0x8e, 0x55, 0x4c, 0x08, 0x4c, 0x60, 0xdd, 0x34, 0xa9, 0x6f, 0xf3, 0x04, 0x02, 0xa8, 0xa6, 0x4e, 0x4d, 0x62}, + dt2: fp.Elt{0x76, 0x4a, 0xae, 0x38, 0x62, 0x69, 0x72, 0xdc, 0xe8, 0x43, 0xbe, 0x1d, 0x61, 0xde, 0x31, 0xc3, 0x42, 0x8f, 0x33, 0x9d, 0xca, 0xc7, 0x9c, 0xec, 0x6a, 0xe2, 0xaa, 0x01, 0x49, 0x78, 0x8d, 0x72, 0x4f, 0x38, 0xea, 0x52, 0xc2, 0xd3, 0xc9, 0x39, 0x71, 0xba, 0xb9, 0x09, 0x9b, 0xa3, 0x7f, 0x45, 0x43, 0x65, 0x36, 0x29, 0xca, 0xe7, 0x5c, 0x5f}, + }, + { /* 23P*/ + addYX: fp.Elt{0x89, 0x42, 0x35, 0x48, 0x6d, 0x74, 0xe5, 0x1f, 0xc3, 0xdd, 0x28, 0x5b, 0x84, 0x41, 0x33, 0x9f, 0x42, 0xf3, 0x1d, 0x5d, 0x15, 0x6d, 0x76, 0x33, 0x36, 0xaf, 0xe9, 0xdd, 0xfa, 0x63, 0x4f, 0x7a, 0x9c, 0xeb, 0x1c, 0x4f, 0x34, 0x65, 0x07, 0x54, 0xbb, 0x4c, 0x8b, 0x62, 0x9d, 0xd0, 0x06, 0x99, 0xb3, 0xe9, 0xda, 0x85, 0x19, 0xb0, 0x3d, 0x3c}, + subYX: fp.Elt{0xbb, 0x99, 0xf6, 0xbf, 0xaf, 0x2c, 0x22, 0x0d, 0x7a, 0xaa, 0x98, 0x6f, 0x01, 0x82, 0x99, 0xcf, 0x88, 0xbd, 0x0e, 0x3a, 0x89, 0xe0, 0x9c, 0x8c, 0x17, 0x20, 0xc4, 0xe0, 0xcf, 0x43, 0x7a, 0xef, 0x0d, 0x9f, 0x87, 0xd4, 0xfb, 0xf2, 0x96, 0xb8, 0x03, 0xe8, 0xcb, 0x5c, 0xec, 0x65, 0x5f, 0x49, 0xa4, 0x7c, 0x85, 0xb4, 0xf6, 0xc7, 0xdb, 0xa3}, + dt2: fp.Elt{0x11, 0xf3, 0x32, 0xa3, 0xa7, 0xb2, 0x7d, 0x51, 0x82, 0x44, 0xeb, 0xa2, 0x7d, 0x72, 0xcb, 0xc6, 0xf6, 0xc7, 0xb2, 0x38, 0x0e, 0x0f, 0x4f, 0x29, 0x00, 0xe4, 0x5b, 0x94, 0x46, 0x86, 0x66, 0xa1, 0x83, 0xb3, 0xeb, 0x15, 0xb6, 0x31, 0x50, 0x28, 0xeb, 0xed, 0x0d, 0x32, 0x39, 0xe9, 0x23, 0x81, 0x99, 0x3e, 0xff, 0x17, 0x4c, 0x11, 0x43, 0xd1}, + }, + { /* 25P*/ + addYX: fp.Elt{0xce, 0xe7, 0xf8, 0x94, 0x8f, 0x96, 0xf8, 0x96, 0xe6, 0x72, 0x20, 0x44, 0x2c, 0xa7, 0xfc, 0xba, 0xc8, 0xe1, 0xbb, 0xc9, 0x16, 0x85, 0xcd, 0x0b, 0xe5, 0xb5, 0x5a, 0x7f, 0x51, 0x43, 0x63, 0x8b, 0x23, 0x8e, 0x1d, 0x31, 0xff, 0x46, 0x02, 0x66, 0xcc, 0x9e, 0x4d, 0xa2, 0xca, 0xe2, 0xc7, 0xfd, 0x22, 0xb1, 0xdb, 0xdf, 0x6f, 0xe6, 0xa5, 0x82}, + subYX: fp.Elt{0xd0, 0xf5, 0x65, 0x40, 0xec, 0x8e, 0x65, 0x42, 0x78, 0xc1, 0x65, 0xe4, 0x10, 0xc8, 0x0b, 0x1b, 0xdd, 0x96, 0x68, 0xce, 0xee, 0x45, 0x55, 0xd8, 0x6e, 0xd3, 0xe6, 0x77, 0x19, 0xae, 0xc2, 0x8d, 0x8d, 0x3e, 0x14, 0x3f, 0x6d, 0x00, 0x2f, 0x9b, 0xd1, 0x26, 0x60, 0x28, 0x0f, 0x3a, 0x47, 0xb3, 0xe6, 0x68, 0x28, 0x24, 0x25, 0xca, 0xc8, 0x06}, + dt2: fp.Elt{0x54, 0xbb, 0x60, 0x92, 0xdb, 0x8f, 0x0f, 0x38, 0xe0, 0xe6, 0xe4, 0xc9, 0xcc, 0x14, 0x62, 0x01, 0xc4, 0x2b, 0x0f, 0xcf, 0xed, 0x7d, 0x8e, 0xa4, 0xd9, 0x73, 0x0b, 0xba, 0x0c, 0xaf, 0x0c, 0xf9, 0xe2, 0xeb, 0x29, 0x2a, 0x53, 0xdf, 0x2c, 0x5a, 0xfa, 0x8f, 0xc1, 0x01, 0xd7, 0xb1, 0x45, 0x73, 0x92, 0x32, 0x83, 0x85, 0x12, 0x74, 0x89, 0x44}, + }, + { /* 27P*/ + addYX: fp.Elt{0x0b, 0x73, 0x3c, 0xc2, 0xb1, 0x2e, 0xe1, 0xa7, 0xf5, 0xc9, 0x7a, 0xfb, 0x3d, 0x2d, 0xac, 0x59, 0xdb, 0xfa, 0x36, 0x11, 0xd1, 0x13, 0x04, 0x51, 0x1d, 0xab, 
0x9b, 0x6b, 0x93, 0xfe, 0xda, 0xb0, 0x8e, 0xb4, 0x79, 0x11, 0x21, 0x0f, 0x65, 0xb9, 0xbb, 0x79, 0x96, 0x2a, 0xfd, 0x30, 0xe0, 0xb4, 0x2d, 0x9a, 0x55, 0x25, 0x5d, 0xd4, 0xad, 0x2a}, + subYX: fp.Elt{0x9e, 0xc5, 0x04, 0xfe, 0xec, 0x3c, 0x64, 0x1c, 0xed, 0x95, 0xed, 0xae, 0xaf, 0x5c, 0x6e, 0x08, 0x9e, 0x02, 0x29, 0x59, 0x7e, 0x5f, 0xc4, 0x9a, 0xd5, 0x32, 0x72, 0x86, 0xe1, 0x4e, 0x3c, 0xce, 0x99, 0x69, 0x3b, 0xc4, 0xdd, 0x4d, 0xb7, 0xbb, 0xda, 0x3b, 0x1a, 0x99, 0xaa, 0x62, 0x15, 0xc1, 0xf0, 0xb6, 0x6c, 0xec, 0x56, 0xc1, 0xff, 0x0c}, + dt2: fp.Elt{0x2f, 0xf1, 0x3f, 0x7a, 0x2d, 0x56, 0x19, 0x7f, 0xea, 0xbe, 0x59, 0x2e, 0x13, 0x67, 0x81, 0xfb, 0xdb, 0xc8, 0xa3, 0x1d, 0xd5, 0xe9, 0x13, 0x8b, 0x29, 0xdf, 0xcf, 0x9f, 0xe7, 0xd9, 0x0b, 0x70, 0xd3, 0x15, 0x57, 0x4a, 0xe9, 0x50, 0x12, 0x1b, 0x81, 0x4b, 0x98, 0x98, 0xa8, 0x31, 0x1d, 0x27, 0x47, 0x38, 0xed, 0x57, 0x99, 0x26, 0xb2, 0xee}, + }, + { /* 29P*/ + addYX: fp.Elt{0x1c, 0xb2, 0xb2, 0x67, 0x3b, 0x8b, 0x3d, 0x5a, 0x30, 0x7e, 0x38, 0x7e, 0x3c, 0x3d, 0x28, 0x56, 0x59, 0xd8, 0x87, 0x53, 0x8b, 0xe6, 0x6c, 0x5d, 0xe5, 0x0a, 0x33, 0x10, 0xce, 0xa2, 0x17, 0x0d, 0xe8, 0x76, 0xee, 0x68, 0xa8, 0x72, 0x54, 0xbd, 0xa6, 0x24, 0x94, 0x6e, 0x77, 0xc7, 0x53, 0xb7, 0x89, 0x1c, 0x7a, 0xe9, 0x78, 0x9a, 0x74, 0x5f}, + subYX: fp.Elt{0x76, 0x96, 0x1c, 0xcf, 0x08, 0x55, 0xd8, 0x1e, 0x0d, 0xa3, 0x59, 0x95, 0x32, 0xf4, 0xc2, 0x8e, 0x84, 0x5e, 0x4b, 0x04, 0xda, 0x71, 0xc9, 0x78, 0x52, 0xde, 0x14, 0xb4, 0x31, 0xf4, 0xd4, 0xb8, 0x58, 0xc5, 0x20, 0xe8, 0xdd, 0x15, 0xb5, 0xee, 0xea, 0x61, 0xe0, 0xf5, 0xd6, 0xae, 0x55, 0x59, 0x05, 0x3e, 0xaf, 0x74, 0xac, 0x1f, 0x17, 0x82}, + dt2: fp.Elt{0x59, 0x24, 0xcd, 0xfc, 0x11, 0x7e, 0x85, 0x18, 0x3d, 0x69, 0xf7, 0x71, 0x31, 0x66, 0x98, 0x42, 0x95, 0x00, 0x8c, 0xb2, 0xae, 0x39, 0x7e, 0x85, 0xd6, 0xb0, 0x02, 0xec, 0xce, 0xfc, 0x25, 0xb2, 0xe3, 0x99, 0x8e, 0x5b, 0x61, 0x96, 0x2e, 0x6d, 0x96, 0x57, 0x71, 0xa5, 0x93, 0x41, 0x0e, 0x6f, 0xfd, 0x0a, 0xbf, 0xa9, 0xf7, 0x56, 0xa9, 0x3e}, + }, + { /* 31P*/ + addYX: fp.Elt{0xa2, 0x2e, 0x0c, 0x17, 0x4d, 0xcc, 0x85, 0x2c, 0x18, 0xa0, 0xd2, 0x08, 0xba, 0x11, 0xfa, 0x47, 0x71, 0x86, 0xaf, 0x36, 0x6a, 0xd7, 0xfe, 0xb9, 0xb0, 0x2f, 0x89, 0x98, 0x49, 0x69, 0xf8, 0x6a, 0xad, 0x27, 0x5e, 0x0a, 0x22, 0x60, 0x5e, 0x5d, 0xca, 0x06, 0x51, 0x27, 0x99, 0x29, 0x85, 0x68, 0x98, 0xe1, 0xc4, 0x21, 0x50, 0xa0, 0xe9, 0xc1}, + subYX: fp.Elt{0x4d, 0x70, 0xee, 0x91, 0x92, 0x3f, 0xb7, 0xd3, 0x1d, 0xdb, 0x8d, 0x6e, 0x16, 0xf5, 0x65, 0x7d, 0x5f, 0xb5, 0x6c, 0x59, 0x26, 0x70, 0x4b, 0xf2, 0xfc, 0xe7, 0xdf, 0x86, 0xfe, 0xa5, 0xa7, 0xa6, 0x5d, 0xfb, 0x06, 0xe9, 0xf9, 0xcc, 0xc0, 0x37, 0xcc, 0xd8, 0x09, 0x04, 0xd2, 0xa5, 0x1d, 0xd7, 0xb7, 0xce, 0x92, 0xac, 0x3c, 0xad, 0xfb, 0xae}, + dt2: fp.Elt{0x17, 0xa3, 0x9a, 0xc7, 0x86, 0x2a, 0x51, 0xf7, 0x96, 0x79, 0x49, 0x22, 0x2e, 0x5a, 0x01, 0x5c, 0xb5, 0x95, 0xd4, 0xe8, 0xcb, 0x00, 0xca, 0x2d, 0x55, 0xb6, 0x34, 0x36, 0x0b, 0x65, 0x46, 0xf0, 0x49, 0xfc, 0x87, 0x86, 0xe5, 0xc3, 0x15, 0xdb, 0x32, 0xcd, 0xf2, 0xd3, 0x82, 0x4c, 0xe6, 0x61, 0x8a, 0xaf, 0xd4, 0x9e, 0x0f, 0x5a, 0xf2, 0x81}, + }, + { /* 33P*/ + addYX: fp.Elt{0x88, 0x10, 0xc0, 0xcb, 0xf5, 0x77, 0xae, 0xa5, 0xbe, 0xf6, 0xcd, 0x2e, 0x8b, 0x7e, 0xbd, 0x79, 0x62, 0x4a, 0xeb, 0x69, 0xc3, 0x28, 0xaa, 0x72, 0x87, 0xa9, 0x25, 0x87, 0x46, 0xea, 0x0e, 0x62, 0xa3, 0x6a, 0x1a, 0xe2, 0xba, 0xdc, 0x81, 0x10, 0x33, 0x01, 0xf6, 0x16, 0x89, 0x80, 0xc6, 0xcd, 0xdb, 0xdc, 0xba, 0x0e, 0x09, 0x4a, 0x35, 0x4a}, + subYX: fp.Elt{0x86, 0xb2, 0x2b, 0xd0, 0xb8, 0x4a, 0x6d, 0x66, 0x7b, 0x32, 0xdf, 0x3b, 0x1a, 0x19, 0x1f, 0x63, 0xee, 0x1f, 0x3d, 0x1c, 0x5c, 0x14, 
0x60, 0x5b, 0x72, 0x49, 0x07, 0xb1, 0x0d, 0x72, 0xc6, 0x35, 0xf0, 0xbc, 0x5e, 0xda, 0x80, 0x6b, 0x64, 0x5b, 0xe5, 0x34, 0x54, 0x39, 0xdd, 0xe6, 0x3c, 0xcb, 0xe5, 0x29, 0x32, 0x06, 0xc6, 0xb1, 0x96, 0x34}, + dt2: fp.Elt{0x85, 0x86, 0xf5, 0x84, 0x86, 0xe6, 0x77, 0x8a, 0x71, 0x85, 0x0c, 0x4f, 0x81, 0x5b, 0x29, 0x06, 0xb5, 0x2e, 0x26, 0x71, 0x07, 0x78, 0x07, 0xae, 0xbc, 0x95, 0x46, 0xc3, 0x65, 0xac, 0xe3, 0x76, 0x51, 0x7d, 0xd4, 0x85, 0x31, 0xe3, 0x43, 0xf3, 0x1b, 0x7c, 0xf7, 0x6b, 0x2c, 0xf8, 0x1c, 0xbb, 0x8d, 0xca, 0xab, 0x4b, 0xba, 0x7f, 0xa4, 0xe2}, + }, + { /* 35P*/ + addYX: fp.Elt{0x1a, 0xee, 0xe7, 0xa4, 0x8a, 0x9d, 0x53, 0x80, 0xc6, 0xb8, 0x4e, 0xdc, 0x89, 0xe0, 0xc4, 0x2b, 0x60, 0x52, 0x6f, 0xec, 0x81, 0xd2, 0x55, 0x6b, 0x1b, 0x6f, 0x17, 0x67, 0x8e, 0x42, 0x26, 0x4c, 0x65, 0x23, 0x29, 0xc6, 0x7b, 0xcd, 0x9f, 0xad, 0x4b, 0x42, 0xd3, 0x0c, 0x75, 0xc3, 0x8a, 0xf5, 0xbe, 0x9e, 0x55, 0xf7, 0x47, 0x5d, 0xbd, 0x3a}, + subYX: fp.Elt{0x0d, 0xa8, 0x3b, 0xf9, 0xc7, 0x7e, 0xc6, 0x86, 0x94, 0xc0, 0x01, 0xff, 0x27, 0xce, 0x43, 0xac, 0xe5, 0xe1, 0xd2, 0x8d, 0xc1, 0x22, 0x31, 0xbe, 0xe1, 0xaf, 0xf9, 0x4a, 0x78, 0xa1, 0x0c, 0xaa, 0xd4, 0x80, 0xe4, 0x09, 0x8d, 0xfb, 0x1d, 0x52, 0xc8, 0x60, 0x2d, 0xf2, 0xa2, 0x89, 0x02, 0x56, 0x3d, 0x56, 0x27, 0x85, 0xc7, 0xf0, 0x2b, 0x9a}, + dt2: fp.Elt{0x62, 0x7c, 0xc7, 0x6b, 0x2c, 0x9d, 0x0a, 0x7c, 0xe5, 0x50, 0x3c, 0xe6, 0x87, 0x1c, 0x82, 0x30, 0x67, 0x3c, 0x39, 0xb6, 0xa0, 0x31, 0xfb, 0x03, 0x7b, 0xa1, 0x58, 0xdf, 0x12, 0x76, 0x5d, 0x5d, 0x0a, 0x8f, 0x9b, 0x37, 0x32, 0xc3, 0x60, 0x33, 0xea, 0x9f, 0x0a, 0x99, 0xfa, 0x20, 0xd0, 0x33, 0x21, 0xc3, 0x94, 0xd4, 0x86, 0x49, 0x7c, 0x4e}, + }, + { /* 37P*/ + addYX: fp.Elt{0xc7, 0x0c, 0x71, 0xfe, 0x55, 0xd1, 0x95, 0x8f, 0x43, 0xbb, 0x6b, 0x74, 0x30, 0xbd, 0xe8, 0x6f, 0x1c, 0x1b, 0x06, 0x62, 0xf5, 0xfc, 0x65, 0xa0, 0xeb, 0x81, 0x12, 0xc9, 0x64, 0x66, 0x61, 0xde, 0xf3, 0x6d, 0xd4, 0xae, 0x8e, 0xb1, 0x72, 0xe0, 0xcd, 0x37, 0x01, 0x28, 0x52, 0xd7, 0x39, 0x46, 0x0c, 0x55, 0xcf, 0x47, 0x70, 0xef, 0xa1, 0x17}, + subYX: fp.Elt{0x8d, 0x58, 0xde, 0x83, 0x88, 0x16, 0x0e, 0x12, 0x42, 0x03, 0x50, 0x60, 0x4b, 0xdf, 0xbf, 0x95, 0xcc, 0x7d, 0x18, 0x17, 0x7e, 0x31, 0x5d, 0x8a, 0x66, 0xc1, 0xcf, 0x14, 0xea, 0xf4, 0xf4, 0xe5, 0x63, 0x2d, 0x32, 0x86, 0x9b, 0xed, 0x1f, 0x4f, 0x03, 0xaf, 0x33, 0x92, 0xcb, 0xaf, 0x9c, 0x05, 0x0d, 0x47, 0x1b, 0x42, 0xba, 0x13, 0x22, 0x98}, + dt2: fp.Elt{0xb5, 0x48, 0xeb, 0x7d, 0x3d, 0x10, 0x9f, 0x59, 0xde, 0xf8, 0x1c, 0x4f, 0x7d, 0x9d, 0x40, 0x4d, 0x9e, 0x13, 0x24, 0xb5, 0x21, 0x09, 0xb7, 0xee, 0x98, 0x5c, 0x56, 0xbc, 0x5e, 0x2b, 0x78, 0x38, 0x06, 0xac, 0xe3, 0xe0, 0xfa, 0x2e, 0xde, 0x4f, 0xd2, 0xb3, 0xfb, 0x2d, 0x71, 0x84, 0xd1, 0x9d, 0x12, 0x5b, 0x35, 0xc8, 0x03, 0x68, 0x67, 0xc7}, + }, + { /* 39P*/ + addYX: fp.Elt{0xb6, 0x65, 0xfb, 0xa7, 0x06, 0x35, 0xbb, 0xe0, 0x31, 0x8d, 0x91, 0x40, 0x98, 0xab, 0x30, 0xe4, 0xca, 0x12, 0x59, 0x89, 0xed, 0x65, 0x5d, 0x7f, 0xae, 0x69, 0xa0, 0xa4, 0xfa, 0x78, 0xb4, 0xf7, 0xed, 0xae, 0x86, 0x78, 0x79, 0x64, 0x24, 0xa6, 0xd4, 0xe1, 0xf6, 0xd3, 0xa0, 0x89, 0xba, 0x20, 0xf4, 0x54, 0x0d, 0x8f, 0xdb, 0x1a, 0x79, 0xdb}, + subYX: fp.Elt{0xe1, 0x82, 0x0c, 0x4d, 0xde, 0x9f, 0x40, 0xf0, 0xc1, 0xbd, 0x8b, 0xd3, 0x24, 0x03, 0xcd, 0xf2, 0x92, 0x7d, 0xe2, 0x68, 0x7f, 0xf1, 0xbe, 0x69, 0xde, 0x34, 0x67, 0x4c, 0x85, 0x3b, 0xec, 0x98, 0xcc, 0x4d, 0x3e, 0xc0, 0x96, 0x27, 0xe6, 0x75, 0xfc, 0xdf, 0x37, 0xc0, 0x1e, 0x27, 0xe0, 0xf6, 0xc2, 0xbd, 0xbc, 0x3d, 0x9b, 0x39, 0xdc, 0xe2}, + dt2: fp.Elt{0xd8, 0x29, 0xa7, 0x39, 0xe3, 0x9f, 0x2f, 0x0e, 0x4b, 0x24, 0x21, 0x70, 0xef, 0xfd, 0x91, 0xea, 0xbf, 0xe1, 
0x72, 0x90, 0xcc, 0xc9, 0x84, 0x0e, 0xad, 0xd5, 0xe6, 0xbb, 0xc5, 0x99, 0x7f, 0xa4, 0xf0, 0x2e, 0xcc, 0x95, 0x64, 0x27, 0x19, 0xd8, 0x4c, 0x27, 0x0d, 0xff, 0xb6, 0x29, 0xe2, 0x6c, 0xfa, 0xbb, 0x4d, 0x9c, 0xbb, 0xaf, 0xa5, 0xec}, + }, + { /* 41P*/ + addYX: fp.Elt{0xd6, 0x33, 0x3f, 0x9f, 0xcf, 0xfd, 0x4c, 0xd1, 0xfe, 0xe5, 0xeb, 0x64, 0x27, 0xae, 0x7a, 0xa2, 0x82, 0x50, 0x6d, 0xaa, 0xe3, 0x5d, 0xe2, 0x48, 0x60, 0xb3, 0x76, 0x04, 0xd9, 0x19, 0xa7, 0xa1, 0x73, 0x8d, 0x38, 0xa9, 0xaf, 0x45, 0xb5, 0xb2, 0x62, 0x9b, 0xf1, 0x35, 0x7b, 0x84, 0x66, 0xeb, 0x06, 0xef, 0xf1, 0xb2, 0x2d, 0x6a, 0x61, 0x15}, + subYX: fp.Elt{0x86, 0x50, 0x42, 0xf7, 0xda, 0x59, 0xb2, 0xcf, 0x0d, 0x3d, 0xee, 0x8e, 0x53, 0x5d, 0xf7, 0x9e, 0x6a, 0x26, 0x2d, 0xc7, 0x8c, 0x8e, 0x18, 0x50, 0x6d, 0xb7, 0x51, 0x4c, 0xa7, 0x52, 0x6e, 0x0e, 0x0a, 0x16, 0x74, 0xb2, 0x81, 0x8b, 0x56, 0x27, 0x22, 0x84, 0xf4, 0x56, 0xc5, 0x06, 0xe1, 0x8b, 0xca, 0x2d, 0xdb, 0x9a, 0xf6, 0x10, 0x9c, 0x51}, + dt2: fp.Elt{0x1f, 0x16, 0xa2, 0x78, 0x96, 0x1b, 0x85, 0x9c, 0x76, 0x49, 0xd4, 0x0f, 0xac, 0xb0, 0xf4, 0xd0, 0x06, 0x2c, 0x7e, 0x6d, 0x6e, 0x8e, 0xc7, 0x9f, 0x18, 0xad, 0xfc, 0x88, 0x0c, 0x0c, 0x09, 0x05, 0x05, 0xa0, 0x79, 0x72, 0x32, 0x72, 0x87, 0x0f, 0x49, 0x87, 0x0c, 0xb4, 0x12, 0xc2, 0x09, 0xf8, 0x9f, 0x30, 0x72, 0xa9, 0x47, 0x13, 0x93, 0x49}, + }, + { /* 43P*/ + addYX: fp.Elt{0xcc, 0xb1, 0x4c, 0xd3, 0xc0, 0x9e, 0x9e, 0x4d, 0x6d, 0x28, 0x0b, 0xa5, 0x94, 0xa7, 0x2e, 0xc2, 0xc7, 0xaf, 0x29, 0x73, 0xc9, 0x68, 0xea, 0x0f, 0x34, 0x37, 0x8d, 0x96, 0x8f, 0x3a, 0x3d, 0x73, 0x1e, 0x6d, 0x9f, 0xcf, 0x8d, 0x83, 0xb5, 0x71, 0xb9, 0xe1, 0x4b, 0x67, 0x71, 0xea, 0xcf, 0x56, 0xe5, 0xeb, 0x72, 0x15, 0x2f, 0x9e, 0xa8, 0xaa}, + subYX: fp.Elt{0xf4, 0x3e, 0x85, 0x1c, 0x1a, 0xef, 0x50, 0xd1, 0xb4, 0x20, 0xb2, 0x60, 0x05, 0x98, 0xfe, 0x47, 0x3b, 0xc1, 0x76, 0xca, 0x2c, 0x4e, 0x5a, 0x42, 0xa3, 0xf7, 0x20, 0xaa, 0x57, 0x39, 0xee, 0x34, 0x1f, 0xe1, 0x68, 0xd3, 0x7e, 0x06, 0xc4, 0x6c, 0xc7, 0x76, 0x2b, 0xe4, 0x1c, 0x48, 0x44, 0xe6, 0xe5, 0x44, 0x24, 0x8d, 0xb3, 0xb6, 0x88, 0x32}, + dt2: fp.Elt{0x18, 0xa7, 0xba, 0xd0, 0x44, 0x6f, 0x33, 0x31, 0x00, 0xf8, 0xf6, 0x12, 0xe3, 0xc5, 0xc7, 0xb5, 0x91, 0x9c, 0x91, 0xb5, 0x75, 0x18, 0x18, 0x8a, 0xab, 0xed, 0x24, 0x11, 0x2e, 0xce, 0x5a, 0x0f, 0x94, 0x5f, 0x2e, 0xca, 0xd3, 0x80, 0xea, 0xe5, 0x34, 0x96, 0x67, 0x8b, 0x6a, 0x26, 0x5e, 0xc8, 0x9d, 0x2c, 0x5e, 0x6c, 0xa2, 0x0c, 0xbf, 0xf0}, + }, + { /* 45P*/ + addYX: fp.Elt{0xb3, 0xbf, 0xa3, 0x85, 0xee, 0xf6, 0x58, 0x02, 0x78, 0xc4, 0x30, 0xd6, 0x57, 0x59, 0x8c, 0x88, 0x08, 0x7c, 0xbc, 0xbe, 0x0a, 0x74, 0xa9, 0xde, 0x69, 0xe7, 0x41, 0xd8, 0xbf, 0x66, 0x8d, 0x3d, 0x28, 0x00, 0x8c, 0x47, 0x65, 0x34, 0xfe, 0x86, 0x9e, 0x6a, 0xf2, 0x41, 0x6a, 0x94, 0xc4, 0x88, 0x75, 0x23, 0x0d, 0x52, 0x69, 0xee, 0x07, 0x89}, + subYX: fp.Elt{0x22, 0x3c, 0xa1, 0x70, 0x58, 0x97, 0x93, 0xbe, 0x59, 0xa8, 0x0b, 0x8a, 0x46, 0x2a, 0x38, 0x1e, 0x08, 0x6b, 0x61, 0x9f, 0xf2, 0x4a, 0x8b, 0x80, 0x68, 0x6e, 0xc8, 0x92, 0x60, 0xf3, 0xc9, 0x89, 0xb2, 0x6d, 0x63, 0xb0, 0xeb, 0x83, 0x15, 0x63, 0x0e, 0x64, 0xbb, 0xb8, 0xfe, 0xb4, 0x81, 0x90, 0x01, 0x28, 0x10, 0xb9, 0x74, 0x6e, 0xde, 0xa4}, + dt2: fp.Elt{0x1a, 0x23, 0x45, 0xa8, 0x6f, 0x4e, 0xa7, 0x4a, 0x0c, 0xeb, 0xb0, 0x43, 0xf9, 0xef, 0x99, 0x60, 0x5b, 0xdb, 0x66, 0xc0, 0x86, 0x71, 0x43, 0xb1, 0x22, 0x7b, 0x1c, 0xe7, 0x8d, 0x09, 0x1d, 0x83, 0x76, 0x9c, 0xd3, 0x5a, 0xdd, 0x42, 0xd9, 0x2f, 0x2d, 0xba, 0x7a, 0xc2, 0xd9, 0x6b, 0xd4, 0x7a, 0xf1, 0xd5, 0x5f, 0x6b, 0x85, 0xbf, 0x0b, 0xf1}, + }, + { /* 47P*/ + addYX: fp.Elt{0xb2, 0x83, 0xfa, 0x1f, 0xd2, 0xce, 0xb6, 0xf2, 0x2d, 0xea, 0x1b, 
0xe5, 0x29, 0xa5, 0x72, 0xf9, 0x25, 0x48, 0x4e, 0xf2, 0x50, 0x1b, 0x39, 0xda, 0x34, 0xc5, 0x16, 0x13, 0xb4, 0x0c, 0xa1, 0x00, 0x79, 0x7a, 0xf5, 0x8b, 0xf3, 0x70, 0x14, 0xb6, 0xfc, 0x9a, 0x47, 0x68, 0x1e, 0x42, 0x70, 0x64, 0x2a, 0x84, 0x3e, 0x3d, 0x20, 0x58, 0xf9, 0x6a}, + subYX: fp.Elt{0xd9, 0xee, 0xc0, 0xc4, 0xf5, 0xc2, 0x86, 0xaf, 0x45, 0xd2, 0xd2, 0x87, 0x1b, 0x64, 0xd5, 0xe0, 0x8c, 0x44, 0x00, 0x4f, 0x43, 0x89, 0x04, 0x48, 0x4a, 0x0b, 0xca, 0x94, 0x06, 0x2f, 0x23, 0x5b, 0x6c, 0x8d, 0x44, 0x66, 0x53, 0xf5, 0x5a, 0x20, 0x72, 0x28, 0x58, 0x84, 0xcc, 0x73, 0x22, 0x5e, 0xd1, 0x0b, 0x56, 0x5e, 0x6a, 0xa3, 0x11, 0x91}, + dt2: fp.Elt{0x6e, 0x9f, 0x88, 0xa8, 0x68, 0x2f, 0x12, 0x37, 0x88, 0xfc, 0x92, 0x8f, 0x24, 0xeb, 0x5b, 0x2a, 0x2a, 0xd0, 0x14, 0x40, 0x4c, 0xa9, 0xa4, 0x03, 0x0c, 0x45, 0x48, 0x13, 0xe8, 0xa6, 0x37, 0xab, 0xc0, 0x06, 0x38, 0x6c, 0x96, 0x73, 0x40, 0x6c, 0xc6, 0xea, 0x56, 0xc6, 0xe9, 0x1a, 0x69, 0xeb, 0x7a, 0xd1, 0x33, 0x69, 0x58, 0x2b, 0xea, 0x2f}, + }, + { /* 49P*/ + addYX: fp.Elt{0x58, 0xa8, 0x05, 0x41, 0x00, 0x9d, 0xaa, 0xd9, 0x98, 0xcf, 0xb9, 0x41, 0xb5, 0x4a, 0x8d, 0xe2, 0xe7, 0xc0, 0x72, 0xef, 0xc8, 0x28, 0x6b, 0x68, 0x9d, 0xc9, 0xdf, 0x05, 0x8b, 0xd0, 0x04, 0x74, 0x79, 0x45, 0x52, 0x05, 0xa3, 0x6e, 0x35, 0x3a, 0xe3, 0xef, 0xb2, 0xdc, 0x08, 0x6f, 0x4e, 0x76, 0x85, 0x67, 0xba, 0x23, 0x8f, 0xdd, 0xaf, 0x09}, + subYX: fp.Elt{0xb4, 0x38, 0xc8, 0xff, 0x4f, 0x65, 0x2a, 0x7e, 0xad, 0xb1, 0xc6, 0xb9, 0x3d, 0xd6, 0xf7, 0x14, 0xcf, 0xf6, 0x98, 0x75, 0xbb, 0x47, 0x83, 0x90, 0xe7, 0xe1, 0xf6, 0x14, 0x99, 0x7e, 0xfa, 0xe4, 0x77, 0x24, 0xe3, 0xe7, 0xf0, 0x1e, 0xdb, 0x27, 0x4e, 0x16, 0x04, 0xf2, 0x08, 0x52, 0xfc, 0xec, 0x55, 0xdb, 0x2e, 0x67, 0xe1, 0x94, 0x32, 0x89}, + dt2: fp.Elt{0x00, 0xad, 0x03, 0x35, 0x1a, 0xb1, 0x88, 0xf0, 0xc9, 0x11, 0xe4, 0x12, 0x52, 0x61, 0xfd, 0x8a, 0x1b, 0x6a, 0x0a, 0x4c, 0x42, 0x46, 0x22, 0x0e, 0xa5, 0xf9, 0xe2, 0x50, 0xf2, 0xb2, 0x1f, 0x20, 0x78, 0x10, 0xf6, 0xbf, 0x7f, 0x0c, 0x9c, 0xad, 0x40, 0x8b, 0x82, 0xd4, 0xba, 0x69, 0x09, 0xac, 0x4b, 0x6d, 0xc4, 0x49, 0x17, 0x81, 0x57, 0x3b}, + }, + { /* 51P*/ + addYX: fp.Elt{0x0d, 0xfe, 0xb4, 0x35, 0x11, 0xbd, 0x1d, 0x6b, 0xc2, 0xc5, 0x3b, 0xd2, 0x23, 0x2c, 0x72, 0xe3, 0x48, 0xb1, 0x48, 0x73, 0xfb, 0xa3, 0x21, 0x6e, 0xc0, 0x09, 0x69, 0xac, 0xe1, 0x60, 0xbc, 0x24, 0x03, 0x99, 0x63, 0x0a, 0x00, 0xf0, 0x75, 0xf6, 0x92, 0xc5, 0xd6, 0xdb, 0x51, 0xd4, 0x7d, 0xe6, 0xf4, 0x11, 0x79, 0xd7, 0xc3, 0xaf, 0x48, 0xd0}, + subYX: fp.Elt{0xf4, 0x4f, 0xaf, 0x31, 0xe3, 0x10, 0x89, 0x95, 0xf0, 0x8a, 0xf6, 0x31, 0x9f, 0x48, 0x02, 0xba, 0x42, 0x2b, 0x3c, 0x22, 0x8b, 0xcc, 0x12, 0x98, 0x6e, 0x7a, 0x64, 0x3a, 0xc4, 0xca, 0x32, 0x2a, 0x72, 0xf8, 0x2c, 0xcf, 0x78, 0x5e, 0x7a, 0x75, 0x6e, 0x72, 0x46, 0x48, 0x62, 0x28, 0xac, 0x58, 0x1a, 0xc6, 0x59, 0x88, 0x2a, 0x44, 0x9e, 0x83}, + dt2: fp.Elt{0xb3, 0xde, 0x36, 0xfd, 0xeb, 0x1b, 0xd4, 0x24, 0x1b, 0x08, 0x8c, 0xfe, 0xa9, 0x41, 0xa1, 0x64, 0xf2, 0x6d, 0xdb, 0xf9, 0x94, 0xae, 0x86, 0x71, 0xab, 0x10, 0xbf, 0xa3, 0xb2, 0xa0, 0xdf, 0x10, 0x8c, 0x74, 0xce, 0xb3, 0xfc, 0xdb, 0xba, 0x15, 0xf6, 0x91, 0x7a, 0x9c, 0x36, 0x1e, 0x45, 0x07, 0x3c, 0xec, 0x1a, 0x61, 0x26, 0x93, 0xe3, 0x50}, + }, + { /* 53P*/ + addYX: fp.Elt{0xc5, 0x50, 0xc5, 0x83, 0xb0, 0xbd, 0xd9, 0xf6, 0x6d, 0x15, 0x5e, 0xc1, 0x1a, 0x33, 0xa0, 0xce, 0x13, 0x70, 0x3b, 0xe1, 0x31, 0xc6, 0xc4, 0x02, 0xec, 0x8c, 0xd5, 0x9c, 0x97, 0xd3, 0x12, 0xc4, 0xa2, 0xf9, 0xd5, 0xfb, 0x22, 0x69, 0x94, 0x09, 0x2f, 0x59, 0xce, 0xdb, 0xf2, 0xf2, 0x00, 0xe0, 0xa9, 0x08, 0x44, 0x2e, 0x8b, 0x6b, 0xf5, 0xb3}, + subYX: fp.Elt{0x90, 0xdd, 0xec, 0xa2, 0x65, 0xb7, 0x61, 
0xbc, 0xaa, 0x70, 0xa2, 0x15, 0xd8, 0xb0, 0xf8, 0x8e, 0x23, 0x3d, 0x9f, 0x46, 0xa3, 0x29, 0x20, 0xd1, 0xa1, 0x15, 0x81, 0xc6, 0xb6, 0xde, 0xbe, 0x60, 0x63, 0x24, 0xac, 0x15, 0xfb, 0xeb, 0xd3, 0xea, 0x57, 0x13, 0x86, 0x38, 0x1e, 0x22, 0xf4, 0x8c, 0x5d, 0xaf, 0x1b, 0x27, 0x21, 0x4f, 0xa3, 0x63}, + dt2: fp.Elt{0x07, 0x15, 0x87, 0xc4, 0xfd, 0xa1, 0x97, 0x7a, 0x07, 0x1f, 0x56, 0xcc, 0xe3, 0x6a, 0x01, 0x90, 0xce, 0xf9, 0xfa, 0x50, 0xb2, 0xe0, 0x87, 0x8b, 0x6c, 0x63, 0x6c, 0xf6, 0x2a, 0x09, 0xef, 0xef, 0xd2, 0x31, 0x40, 0x25, 0xf6, 0x84, 0xcb, 0xe0, 0xc4, 0x23, 0xc1, 0xcb, 0xe2, 0x02, 0x83, 0x2d, 0xed, 0x74, 0x74, 0x8b, 0xf8, 0x7c, 0x81, 0x18}, + }, + { /* 55P*/ + addYX: fp.Elt{0x9e, 0xe5, 0x59, 0x95, 0x63, 0x2e, 0xac, 0x8b, 0x03, 0x3c, 0xc1, 0x8e, 0xe1, 0x5b, 0x56, 0x3c, 0x16, 0x41, 0xe4, 0xc2, 0x60, 0x0c, 0x6d, 0x65, 0x9f, 0xfc, 0x27, 0x68, 0x43, 0x44, 0x05, 0x12, 0x6c, 0xda, 0x04, 0xef, 0xcf, 0xcf, 0xdc, 0x0a, 0x1a, 0x7f, 0x12, 0xd3, 0xeb, 0x02, 0xb6, 0x04, 0xca, 0xd6, 0xcb, 0xf0, 0x22, 0xba, 0x35, 0x6d}, + subYX: fp.Elt{0x09, 0x6d, 0xf9, 0x64, 0x4c, 0xe6, 0x41, 0xff, 0x01, 0x4d, 0xce, 0x1e, 0xfa, 0x38, 0xa2, 0x25, 0x62, 0xff, 0x03, 0x39, 0x18, 0x91, 0xbb, 0x9d, 0xce, 0x02, 0xf0, 0xf1, 0x3c, 0x55, 0x18, 0xa9, 0xab, 0x4d, 0xd2, 0x35, 0xfd, 0x8d, 0xa9, 0xb2, 0xad, 0xb7, 0x06, 0x6e, 0xc6, 0x69, 0x49, 0xd6, 0x98, 0x98, 0x0b, 0x22, 0x81, 0x6b, 0xbd, 0xa0}, + dt2: fp.Elt{0x22, 0xf4, 0x85, 0x5d, 0x2b, 0xf1, 0x55, 0xa5, 0xd6, 0x27, 0x86, 0x57, 0x12, 0x1f, 0x16, 0x0a, 0x5a, 0x9b, 0xf2, 0x38, 0xb6, 0x28, 0xd8, 0x99, 0x0c, 0x89, 0x1d, 0x7f, 0xca, 0x21, 0x17, 0x1a, 0x0b, 0x02, 0x5f, 0x77, 0x2f, 0x73, 0x30, 0x7c, 0xc8, 0xd7, 0x2b, 0xcc, 0xe7, 0xf3, 0x21, 0xac, 0x53, 0xa7, 0x11, 0x5d, 0xd8, 0x1d, 0x9b, 0xf5}, + }, + { /* 57P*/ + addYX: fp.Elt{0x94, 0x63, 0x5d, 0xef, 0xfd, 0x6d, 0x25, 0x4e, 0x6d, 0x29, 0x03, 0xed, 0x24, 0x28, 0x27, 0x57, 0x47, 0x3e, 0x6a, 0x1a, 0xfe, 0x37, 0xee, 0x5f, 0x83, 0x29, 0x14, 0xfd, 0x78, 0x25, 0x8a, 0xe1, 0x02, 0x38, 0xd8, 0xca, 0x65, 0x55, 0x40, 0x7d, 0x48, 0x2c, 0x7c, 0x7e, 0x60, 0xb6, 0x0c, 0x6d, 0xf7, 0xe8, 0xb3, 0x62, 0x53, 0xd6, 0x9c, 0x2b}, + subYX: fp.Elt{0x47, 0x25, 0x70, 0x62, 0xf5, 0x65, 0x93, 0x62, 0x08, 0xac, 0x59, 0x66, 0xdb, 0x08, 0xd9, 0x1a, 0x19, 0xaf, 0xf4, 0xef, 0x02, 0xa2, 0x78, 0xa9, 0x55, 0x1c, 0xfa, 0x08, 0x11, 0xcb, 0xa3, 0x71, 0x74, 0xb1, 0x62, 0xe7, 0xc7, 0xf3, 0x5a, 0xb5, 0x8b, 0xd4, 0xf6, 0x10, 0x57, 0x79, 0x72, 0x2f, 0x13, 0x86, 0x7b, 0x44, 0x5f, 0x48, 0xfd, 0x88}, + dt2: fp.Elt{0x10, 0x02, 0xcd, 0x05, 0x9a, 0xc3, 0x32, 0x6d, 0x10, 0x3a, 0x74, 0xba, 0x06, 0xc4, 0x3b, 0x34, 0xbc, 0x36, 0xed, 0xa3, 0xba, 0x9a, 0xdb, 0x6d, 0xd4, 0x69, 0x99, 0x97, 0xd0, 0xe4, 0xdd, 0xf5, 0xd4, 0x7c, 0xd3, 0x4e, 0xab, 0xd1, 0x3b, 0xbb, 0xe9, 0xc7, 0x6a, 0x94, 0x25, 0x61, 0xf0, 0x06, 0xc5, 0x12, 0xa8, 0x86, 0xe5, 0x35, 0x46, 0xeb}, + }, + { /* 59P*/ + addYX: fp.Elt{0x9e, 0x95, 0x11, 0xc6, 0xc7, 0xe8, 0xee, 0x5a, 0x26, 0xa0, 0x72, 0x72, 0x59, 0x91, 0x59, 0x16, 0x49, 0x99, 0x7e, 0xbb, 0xd7, 0x15, 0xb4, 0xf2, 0x40, 0xf9, 0x5a, 0x4d, 0xc8, 0xa0, 0xe2, 0x34, 0x7b, 0x34, 0xf3, 0x99, 0xbf, 0xa9, 0xf3, 0x79, 0xc1, 0x1a, 0x0c, 0xf4, 0x86, 0x74, 0x4e, 0xcb, 0xbc, 0x90, 0xad, 0xb6, 0x51, 0x6d, 0xaa, 0x33}, + subYX: fp.Elt{0x9f, 0xd1, 0xc5, 0xa2, 0x6c, 0x24, 0x88, 0x15, 0x71, 0x68, 0xf6, 0x07, 0x45, 0x02, 0xc4, 0x73, 0x7e, 0x75, 0x87, 0xca, 0x7c, 0xf0, 0x92, 0x00, 0x75, 0xd6, 0x5a, 0xdd, 0xe0, 0x64, 0x16, 0x9d, 0x62, 0x80, 0x33, 0x9f, 0xf4, 0x8e, 0x1a, 0x15, 0x1c, 0xd3, 0x0f, 0x4d, 0x4f, 0x62, 0x2d, 0xd7, 0xa5, 0x77, 0xe3, 0xea, 0xf0, 0xfb, 0x1a, 0xdb}, + dt2: fp.Elt{0x6a, 0xa2, 0xb1, 
0xaa, 0xfb, 0x5a, 0x32, 0x4e, 0xff, 0x47, 0x06, 0xd5, 0x9a, 0x4f, 0xce, 0x83, 0x5b, 0x82, 0x34, 0x3e, 0x47, 0xb8, 0xf8, 0xe9, 0x7c, 0x67, 0x69, 0x8d, 0x9c, 0xb7, 0xde, 0x57, 0xf4, 0x88, 0x41, 0x56, 0x0c, 0x87, 0x1e, 0xc9, 0x2f, 0x54, 0xbf, 0x5c, 0x68, 0x2c, 0xd9, 0xc4, 0xef, 0x53, 0x73, 0x1e, 0xa6, 0x38, 0x02, 0x10}, + }, + { /* 61P*/ + addYX: fp.Elt{0x08, 0x80, 0x4a, 0xc9, 0xb7, 0xa8, 0x88, 0xd9, 0xfc, 0x6a, 0xc0, 0x3e, 0xc2, 0x33, 0x4d, 0x2b, 0x2a, 0xa3, 0x6d, 0x72, 0x3e, 0xdc, 0x34, 0x68, 0x08, 0xbf, 0x27, 0xef, 0xf4, 0xff, 0xe2, 0x0c, 0x31, 0x0c, 0xa2, 0x0a, 0x1f, 0x65, 0xc1, 0x4c, 0x61, 0xd3, 0x1b, 0xbc, 0x25, 0xb1, 0xd0, 0xd4, 0x89, 0xb2, 0x53, 0xfb, 0x43, 0xa5, 0xaf, 0x04}, + subYX: fp.Elt{0xe3, 0xe1, 0x37, 0xad, 0x58, 0xa9, 0x55, 0x81, 0xee, 0x64, 0x21, 0xb9, 0xf5, 0x4c, 0x35, 0xea, 0x4a, 0xd3, 0x26, 0xaa, 0x90, 0xd4, 0x60, 0x46, 0x09, 0x4b, 0x4a, 0x62, 0xf9, 0xcd, 0xe1, 0xee, 0xbb, 0xc2, 0x09, 0x0b, 0xb0, 0x96, 0x8e, 0x43, 0x77, 0xaf, 0x25, 0x20, 0x5e, 0x47, 0xe4, 0x1d, 0x50, 0x69, 0x74, 0x08, 0xd7, 0xb9, 0x90, 0x13}, + dt2: fp.Elt{0x51, 0x91, 0x95, 0x64, 0x03, 0x16, 0xfd, 0x6e, 0x26, 0x94, 0x6b, 0x61, 0xe7, 0xd9, 0xe0, 0x4a, 0x6d, 0x7c, 0xfa, 0xc0, 0xe2, 0x43, 0x23, 0x53, 0x70, 0xf5, 0x6f, 0x73, 0x8b, 0x81, 0xb0, 0x0c, 0xee, 0x2e, 0x46, 0xf2, 0x8d, 0xa6, 0xfb, 0xb5, 0x1c, 0x33, 0xbf, 0x90, 0x59, 0xc9, 0x7c, 0xb8, 0x6f, 0xad, 0x75, 0x02, 0x90, 0x8e, 0x59, 0x75}, + }, + { /* 63P*/ + addYX: fp.Elt{0x36, 0x4d, 0x77, 0x04, 0xb8, 0x7d, 0x4a, 0xd1, 0xc5, 0xbb, 0x7b, 0x50, 0x5f, 0x8d, 0x9d, 0x62, 0x0f, 0x66, 0x71, 0xec, 0x87, 0xc5, 0x80, 0x82, 0xc8, 0xf4, 0x6a, 0x94, 0x92, 0x5b, 0xb0, 0x16, 0x9b, 0xb2, 0xc9, 0x6f, 0x2b, 0x2d, 0xee, 0x95, 0x73, 0x2e, 0xc2, 0x1b, 0xc5, 0x55, 0x36, 0x86, 0x24, 0xf8, 0x20, 0x05, 0x0d, 0x93, 0xd7, 0x76}, + subYX: fp.Elt{0x7f, 0x01, 0xeb, 0x2e, 0x48, 0x4d, 0x1d, 0xf1, 0x06, 0x7e, 0x7c, 0x2a, 0x43, 0xbf, 0x28, 0xac, 0xe9, 0x58, 0x13, 0xc8, 0xbf, 0x8e, 0xc0, 0xef, 0xe8, 0x4f, 0x46, 0x8a, 0xe7, 0xc0, 0xf6, 0x0f, 0x0a, 0x03, 0x48, 0x91, 0x55, 0x39, 0x2a, 0xe3, 0xdc, 0xf6, 0x22, 0x9d, 0x4d, 0x71, 0x55, 0x68, 0x25, 0x6e, 0x95, 0x52, 0xee, 0x4c, 0xd9, 0x01}, + dt2: fp.Elt{0xac, 0x33, 0x3f, 0x7c, 0x27, 0x35, 0x15, 0x91, 0x33, 0x8d, 0xf9, 0xc4, 0xf4, 0xf3, 0x90, 0x09, 0x75, 0x69, 0x62, 0x9f, 0x61, 0x35, 0x83, 0x92, 0x04, 0xef, 0x96, 0x38, 0x80, 0x9e, 0x88, 0xb3, 0x67, 0x95, 0xbe, 0x79, 0x3c, 0x35, 0xd8, 0xdc, 0xb2, 0x3e, 0x2d, 0xe6, 0x46, 0xbe, 0x81, 0xf3, 0x32, 0x0e, 0x37, 0x23, 0x75, 0x2a, 0x3d, 0xa0}, + }, +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist_basemult.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist_basemult.go new file mode 100644 index 00000000..f6ac5edb --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist_basemult.go @@ -0,0 +1,62 @@ +package goldilocks + +import ( + "crypto/subtle" + + mlsb "github.com/cloudflare/circl/math/mlsbset" +) + +const ( + // MLSBRecoding parameters + fxT = 448 + fxV = 2 + fxW = 3 + fx2w1 = 1 << (uint(fxW) - 1) +) + +// ScalarBaseMult returns kG where G is the generator point. 
+func (e twistCurve) ScalarBaseMult(k *Scalar) *twistPoint { + m, err := mlsb.New(fxT, fxV, fxW) + if err != nil { + panic(err) + } + if m.IsExtended() { + panic("not extended") + } + + var isZero int + if k.IsZero() { + isZero = 1 + } + subtle.ConstantTimeCopy(isZero, k[:], order[:]) + + minusK := *k + isEven := 1 - int(k[0]&0x1) + minusK.Neg() + subtle.ConstantTimeCopy(isEven, k[:], minusK[:]) + c, err := m.Encode(k[:]) + if err != nil { + panic(err) + } + + gP := c.Exp(groupMLSB{}) + P := gP.(*twistPoint) + P.cneg(uint(isEven)) + return P +} + +type groupMLSB struct{} + +func (e groupMLSB) ExtendedEltP() mlsb.EltP { return nil } +func (e groupMLSB) Sqr(x mlsb.EltG) { x.(*twistPoint).Double() } +func (e groupMLSB) Mul(x mlsb.EltG, y mlsb.EltP) { x.(*twistPoint).mixAddZ1(y.(*preTwistPointAffine)) } +func (e groupMLSB) Identity() mlsb.EltG { return twistCurve{}.Identity() } +func (e groupMLSB) NewEltP() mlsb.EltP { return &preTwistPointAffine{} } +func (e groupMLSB) Lookup(a mlsb.EltP, v uint, s, u int32) { + Tabj := &tabFixMult[v] + P := a.(*preTwistPointAffine) + for k := range Tabj { + P.cmov(&Tabj[k], uint(subtle.ConstantTimeEq(int32(k), u))) + } + P.cneg(int(s >> 31)) +} diff --git a/vendor/github.com/cloudflare/circl/internal/conv/BUILD.bazel b/vendor/github.com/cloudflare/circl/internal/conv/BUILD.bazel new file mode 100644 index 00000000..2e89718c --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/conv/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "conv", + srcs = ["conv.go"], + importmap = "peridot.resf.org/vendor/github.com/cloudflare/circl/internal/conv", + importpath = "github.com/cloudflare/circl/internal/conv", + visibility = ["//vendor/github.com/cloudflare/circl:__subpackages__"], +) diff --git a/vendor/github.com/cloudflare/circl/internal/conv/conv.go b/vendor/github.com/cloudflare/circl/internal/conv/conv.go new file mode 100644 index 00000000..649a8e93 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/conv/conv.go @@ -0,0 +1,140 @@ +package conv + +import ( + "encoding/binary" + "fmt" + "math/big" + "strings" +) + +// BytesLe2Hex returns an hexadecimal string of a number stored in a +// little-endian order slice x. +func BytesLe2Hex(x []byte) string { + b := &strings.Builder{} + b.Grow(2*len(x) + 2) + fmt.Fprint(b, "0x") + if len(x) == 0 { + fmt.Fprint(b, "00") + } + for i := len(x) - 1; i >= 0; i-- { + fmt.Fprintf(b, "%02x", x[i]) + } + return b.String() +} + +// BytesLe2BigInt converts a little-endian slice x into a big-endian +// math/big.Int. +func BytesLe2BigInt(x []byte) *big.Int { + n := len(x) + b := new(big.Int) + if len(x) > 0 { + y := make([]byte, n) + for i := 0; i < n; i++ { + y[n-1-i] = x[i] + } + b.SetBytes(y) + } + return b +} + +// BytesBe2Uint64Le converts a big-endian slice x to a little-endian slice of uint64. +func BytesBe2Uint64Le(x []byte) []uint64 { + l := len(x) + z := make([]uint64, (l+7)/8) + blocks := l / 8 + for i := 0; i < blocks; i++ { + z[i] = binary.BigEndian.Uint64(x[l-8*(i+1):]) + } + remBytes := l % 8 + for i := 0; i < remBytes; i++ { + z[blocks] |= uint64(x[l-1-8*blocks-i]) << uint(8*i) + } + return z +} + +// BigInt2BytesLe stores a positive big.Int number x into a little-endian slice z. +// The slice is modified if the bitlength of x <= 8*len(z) (padding with zeros). +// If x does not fit in the slice or is negative, z is not modified. 
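
The conversion helpers in this conv file exist because the field code stores limbs little-endian while math/big.Int serializes big-endian. A short round-trip sketch (conv is an internal package, so this compiles only inside the circl module; it is shown here just to pin down the byte order, and BigInt2BytesLe is the function defined immediately below):

package main

import (
	"fmt"

	"github.com/cloudflare/circl/internal/conv"
)

func main() {
	// 0x0a0b0c0d stored little-endian: least-significant byte first.
	le := []byte{0x0d, 0x0c, 0x0b, 0x0a}
	n := conv.BytesLe2BigInt(le)
	fmt.Println(n.Text(16)) // a0b0c0d

	// And back again; the high bytes of z are zero-padded.
	z := make([]byte, 8)
	conv.BigInt2BytesLe(z, n)
	fmt.Println(z) // [13 12 11 10 0 0 0 0]
}
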
+func BigInt2BytesLe(z []byte, x *big.Int) { + xLen := (x.BitLen() + 7) >> 3 + zLen := len(z) + if zLen >= xLen && x.Sign() >= 0 { + y := x.Bytes() + for i := 0; i < xLen; i++ { + z[i] = y[xLen-1-i] + } + for i := xLen; i < zLen; i++ { + z[i] = 0 + } + } +} + +// Uint64Le2BigInt converts a little-endian slice x into a big number. +func Uint64Le2BigInt(x []uint64) *big.Int { + n := len(x) + b := new(big.Int) + var bi big.Int + for i := n - 1; i >= 0; i-- { + bi.SetUint64(x[i]) + b.Lsh(b, 64) + b.Add(b, &bi) + } + return b +} + +// Uint64Le2BytesLe converts a little-endian slice x to a little-endian slice of bytes. +func Uint64Le2BytesLe(x []uint64) []byte { + b := make([]byte, 8*len(x)) + n := len(x) + for i := 0; i < n; i++ { + binary.LittleEndian.PutUint64(b[i*8:], x[i]) + } + return b +} + +// Uint64Le2BytesBe converts a little-endian slice x to a big-endian slice of bytes. +func Uint64Le2BytesBe(x []uint64) []byte { + b := make([]byte, 8*len(x)) + n := len(x) + for i := 0; i < n; i++ { + binary.BigEndian.PutUint64(b[i*8:], x[n-1-i]) + } + return b +} + +// Uint64Le2Hex returns an hexadecimal string of a number stored in a +// little-endian order slice x. +func Uint64Le2Hex(x []uint64) string { + b := new(strings.Builder) + b.Grow(16*len(x) + 2) + fmt.Fprint(b, "0x") + if len(x) == 0 { + fmt.Fprint(b, "00") + } + for i := len(x) - 1; i >= 0; i-- { + fmt.Fprintf(b, "%016x", x[i]) + } + return b.String() +} + +// BigInt2Uint64Le stores a positive big.Int number x into a little-endian slice z. +// The slice is modified if the bitlength of x <= 8*len(z) (padding with zeros). +// If x does not fit in the slice or is negative, z is not modified. +func BigInt2Uint64Le(z []uint64, x *big.Int) { + xLen := (x.BitLen() + 63) >> 6 // number of 64-bit words + zLen := len(z) + if zLen >= xLen && x.Sign() > 0 { + var y, yi big.Int + y.Set(x) + two64 := big.NewInt(1) + two64.Lsh(two64, 64).Sub(two64, big.NewInt(1)) + for i := 0; i < xLen; i++ { + yi.And(&y, two64) + z[i] = yi.Uint64() + y.Rsh(&y, 64) + } + } + for i := xLen; i < zLen; i++ { + z[i] = 0 + } +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/BUILD.bazel b/vendor/github.com/cloudflare/circl/internal/sha3/BUILD.bazel new file mode 100644 index 00000000..037412f3 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/BUILD.bazel @@ -0,0 +1,20 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "sha3", + srcs = [ + "doc.go", + "hashes.go", + "keccakf.go", + "rc.go", + "sha3.go", + "sha3_s390x.s", + "shake.go", + "xor.go", + "xor_generic.go", + "xor_unaligned.go", + ], + importmap = "peridot.resf.org/vendor/github.com/cloudflare/circl/internal/sha3", + importpath = "github.com/cloudflare/circl/internal/sha3", + visibility = ["//vendor/github.com/cloudflare/circl:__subpackages__"], +) diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/doc.go b/vendor/github.com/cloudflare/circl/internal/sha3/doc.go new file mode 100644 index 00000000..7e023090 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/doc.go @@ -0,0 +1,62 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sha3 implements the SHA-3 fixed-output-length hash functions and +// the SHAKE variable-output-length hash functions defined by FIPS-202. +// +// Both types of hash function use the "sponge" construction and the Keccak +// permutation. 
For a detailed specification see http://keccak.noekeon.org/ +// +// # Guidance +// +// If you aren't sure what function you need, use SHAKE256 with at least 64 +// bytes of output. The SHAKE instances are faster than the SHA3 instances; +// the latter have to allocate memory to conform to the hash.Hash interface. +// +// If you need a secret-key MAC (message authentication code), prepend the +// secret key to the input, hash with SHAKE256 and read at least 32 bytes of +// output. +// +// # Security strengths +// +// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security +// strength against preimage attacks of x bits. Since they only produce "x" +// bits of output, their collision-resistance is only "x/2" bits. +// +// The SHAKE-256 and -128 functions have a generic security strength of 256 and +// 128 bits against all attacks, provided that at least 2x bits of their output +// is used. Requesting more than 64 or 32 bytes of output, respectively, does +// not increase the collision-resistance of the SHAKE functions. +// +// # The sponge construction +// +// A sponge builds a pseudo-random function from a public pseudo-random +// permutation, by applying the permutation to a state of "rate + capacity" +// bytes, but hiding "capacity" of the bytes. +// +// A sponge starts out with a zero state. To hash an input using a sponge, up +// to "rate" bytes of the input are XORed into the sponge's state. The sponge +// is then "full" and the permutation is applied to "empty" it. This process is +// repeated until all the input has been "absorbed". The input is then padded. +// The digest is "squeezed" from the sponge in the same way, except that output +// is copied out instead of input being XORed in. +// +// A sponge is parameterized by its generic security strength, which is equal +// to half its capacity; capacity + rate is equal to the permutation's width. +// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means +// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2. +// +// # Recommendations +// +// The SHAKE functions are recommended for most new uses. They can produce +// output of arbitrary length. SHAKE256, with an output length of at least +// 64 bytes, provides 256-bit security against all attacks. The Keccak team +// recommends it for most applications upgrading from SHA2-512. (NIST chose a +// much stronger, but much slower, sponge instance for SHA3-512.) +// +// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions. +// They produce output of the same length, with the same security strengths +// against all attacks. This means, in particular, that SHA3-256 only has +// 128-bit collision resistance, because its output length is 32 bytes. +package sha3 diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/hashes.go b/vendor/github.com/cloudflare/circl/internal/sha3/hashes.go new file mode 100644 index 00000000..7d2365a7 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/hashes.go @@ -0,0 +1,69 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// This file provides functions for creating instances of the SHA-3 +// and SHAKE hash functions, as well as utility functions for hashing +// bytes. + +// New224 creates a new SHA3-224 hash. +// Its generic security strength is 224 bits against preimage attacks, +// and 112 bits against collision attacks. 
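
Each constructor below hard-codes the sponge rate for its digest size. For KeccakF-1600 the state is 200 bytes and the capacity is twice the digest length, so rate = 200 - 2*outputLen; a quick stand-alone check of the four pairs used by New224 through New512:

package main

import "fmt"

func main() {
	outputLen := []int{28, 32, 48, 64} // SHA3-224/256/384/512 digest sizes
	rate := []int{144, 136, 104, 72}   // the rates hard-coded below
	for i := range rate {
		// Capacity 2*outputLen leaves rate = 200 - 2*outputLen bytes.
		fmt.Println(rate[i] == 200-2*outputLen[i]) // true, four times
	}
}
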
+func New224() State { + return State{rate: 144, outputLen: 28, dsbyte: 0x06} +} + +// New256 creates a new SHA3-256 hash. +// Its generic security strength is 256 bits against preimage attacks, +// and 128 bits against collision attacks. +func New256() State { + return State{rate: 136, outputLen: 32, dsbyte: 0x06} +} + +// New384 creates a new SHA3-384 hash. +// Its generic security strength is 384 bits against preimage attacks, +// and 192 bits against collision attacks. +func New384() State { + return State{rate: 104, outputLen: 48, dsbyte: 0x06} +} + +// New512 creates a new SHA3-512 hash. +// Its generic security strength is 512 bits against preimage attacks, +// and 256 bits against collision attacks. +func New512() State { + return State{rate: 72, outputLen: 64, dsbyte: 0x06} +} + +// Sum224 returns the SHA3-224 digest of the data. +func Sum224(data []byte) (digest [28]byte) { + h := New224() + _, _ = h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum256 returns the SHA3-256 digest of the data. +func Sum256(data []byte) (digest [32]byte) { + h := New256() + _, _ = h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum384 returns the SHA3-384 digest of the data. +func Sum384(data []byte) (digest [48]byte) { + h := New384() + _, _ = h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum512 returns the SHA3-512 digest of the data. +func Sum512(data []byte) (digest [64]byte) { + h := New512() + _, _ = h.Write(data) + h.Sum(digest[:0]) + return +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go b/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go new file mode 100644 index 00000000..1755fd1e --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go @@ -0,0 +1,391 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// KeccakF1600 applies the Keccak permutation to a 1600b-wide +// state represented as a slice of 25 uint64s. +// If turbo is true, applies the 12-round variant instead of the +// regular 24-round variant. +// nolint:funlen +func KeccakF1600(a *[25]uint64, turbo bool) { + // Implementation translated from Keccak-inplace.c + // in the keccak reference code. + var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64 + + i := 0 + + if turbo { + i = 12 + } + + for ; i < 24; i += 4 { + // Combines the 5 steps in each round into 2 steps. + // Unrolls 4 rounds per loop and spreads some steps across rounds. 
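+	// In Keccak terms, each unrolled round below is: theta (bc0..bc4 are the
+	// column parities, d0..d4 the deltas xored into every lane), rho (the
+	// t<<r | t>>(64-r) rotations), pi (the scattered a[] indices), chi (the
+	// bcX ^ (bcZ &^ bcY) pattern), and iota (the RC[i] constant xored into a[0]).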
+ + // Round 1 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[6] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[12] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[18] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[24] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i] + a[6] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[16] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[22] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[3] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[10] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[1] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[7] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[19] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[20] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[11] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[23] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[4] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[5] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[2] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[8] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[14] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[15] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + // Round 2 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[16] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[7] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[23] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[14] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i+1] + a[16] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[11] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[2] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[18] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[20] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[6] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[22] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[4] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[15] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ 
bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[1] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[8] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[24] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[10] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[12] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[3] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[19] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[5] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + // Round 3 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[11] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[22] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[8] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[19] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i+2] + a[11] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[1] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[12] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[23] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[15] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[16] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[2] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[24] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[5] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[6] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[3] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[14] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[20] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[7] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[18] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[4] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[10] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + // Round 4 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[1] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[2] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[3] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[4] 
^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i+3] + a[1] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[6] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[7] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[8] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[5] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[11] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[12] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[14] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[10] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[16] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[18] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[19] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[15] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[22] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[23] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[24] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[20] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + } +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/rc.go b/vendor/github.com/cloudflare/circl/internal/sha3/rc.go new file mode 100644 index 00000000..6a3df42f --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/rc.go @@ -0,0 +1,29 @@ +package sha3 + +// RC stores the round constants for use in the ι step. +var RC = [24]uint64{ + 0x0000000000000001, + 0x0000000000008082, + 0x800000000000808A, + 0x8000000080008000, + 0x000000000000808B, + 0x0000000080000001, + 0x8000000080008081, + 0x8000000000008009, + 0x000000000000008A, + 0x0000000000000088, + 0x0000000080008009, + 0x000000008000000A, + 0x000000008000808B, + 0x800000000000008B, + 0x8000000000008089, + 0x8000000000008003, + 0x8000000000008002, + 0x8000000000000080, + 0x000000000000800A, + 0x800000008000000A, + 0x8000000080008081, + 0x8000000000008080, + 0x0000000080000001, + 0x8000000080008008, +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go b/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go new file mode 100644 index 00000000..a0df5aa6 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go @@ -0,0 +1,200 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// spongeDirection indicates the direction bytes are flowing through the sponge. +type spongeDirection int + +const ( + // spongeAbsorbing indicates that the sponge is absorbing input. + spongeAbsorbing spongeDirection = iota + // spongeSqueezing indicates that the sponge is being squeezed. + spongeSqueezing +) + +const ( + // maxRate is the maximum size of the internal buffer. SHAKE-256 + // currently needs the largest buffer. 
+ maxRate = 168 +) + +func (d *State) buf() []byte { + return d.storage.asBytes()[d.bufo:d.bufe] +} + +type State struct { + // Generic sponge components. + a [25]uint64 // main state of the hash + rate int // the number of bytes of state to use + + bufo int // offset of buffer in storage + bufe int // end of buffer in storage + + // dsbyte contains the "domain separation" bits and the first bit of + // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the + // SHA-3 and SHAKE functions by appending bitstrings to the message. + // Using a little-endian bit-ordering convention, these are "01" for SHA-3 + // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the + // padding rule from section 5.1 is applied to pad the message to a multiple + // of the rate, which involves adding a "1" bit, zero or more "0" bits, and + // a final "1" bit. We merge the first "1" bit from the padding into dsbyte, + // giving 00000110b (0x06) and 00011111b (0x1f). + // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf + // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and + // Extendable-Output Functions (May 2014)" + dsbyte byte + + storage storageBuf + + // Specific to SHA-3 and SHAKE. + outputLen int // the default output size in bytes + state spongeDirection // whether the sponge is absorbing or squeezing + turbo bool // Whether we're using 12 rounds instead of 24 +} + +// BlockSize returns the rate of sponge underlying this hash function. +func (d *State) BlockSize() int { return d.rate } + +// Size returns the output size of the hash function in bytes. +func (d *State) Size() int { return d.outputLen } + +// Reset clears the internal state by zeroing the sponge state and +// the byte buffer, and setting Sponge.state to absorbing. +func (d *State) Reset() { + // Zero the permutation's state. + for i := range d.a { + d.a[i] = 0 + } + d.state = spongeAbsorbing + d.bufo = 0 + d.bufe = 0 +} + +func (d *State) clone() *State { + ret := *d + return &ret +} + +// permute applies the KeccakF-1600 permutation. It handles +// any input-output buffering. +func (d *State) permute() { + switch d.state { + case spongeAbsorbing: + // If we're absorbing, we need to xor the input into the state + // before applying the permutation. + xorIn(d, d.buf()) + d.bufe = 0 + d.bufo = 0 + KeccakF1600(&d.a, d.turbo) + case spongeSqueezing: + // If we're squeezing, we need to apply the permutation before + // copying more output. + KeccakF1600(&d.a, d.turbo) + d.bufe = d.rate + d.bufo = 0 + copyOut(d, d.buf()) + } +} + +// pads appends the domain separation bits in dsbyte, applies +// the multi-bitrate 10..1 padding rule, and permutes the state. +func (d *State) padAndPermute(dsbyte byte) { + // Pad with this instance's domain-separator bits. We know that there's + // at least one byte of space in d.buf() because, if it were full, + // permute would have been called to empty it. dsbyte also contains the + // first one bit for the padding. See the comment in the state struct. + zerosStart := d.bufe + 1 + d.bufe = d.rate + buf := d.buf() + buf[zerosStart-1] = dsbyte + for i := zerosStart; i < d.rate; i++ { + buf[i] = 0 + } + // This adds the final one bit for the padding. Because of the way that + // bits are numbered from the LSB upwards, the final bit is the MSB of + // the last byte. + buf[d.rate-1] ^= 0x80 + // Apply the permutation + d.permute() + d.state = spongeSqueezing + d.bufe = d.rate + copyOut(d, buf) +} + +// Write absorbs more data into the hash's state. 
It panics if input
+// is written to it after output has been read from it.
+func (d *State) Write(p []byte) (written int, err error) {
+	if d.state != spongeAbsorbing {
+		panic("sha3: write to sponge after read")
+	}
+	written = len(p)
+
+	for len(p) > 0 {
+		bufl := d.bufe - d.bufo
+		if bufl == 0 && len(p) >= d.rate {
+			// The fast path; absorb a full "rate" bytes of input and apply the permutation.
+			xorIn(d, p[:d.rate])
+			p = p[d.rate:]
+			KeccakF1600(&d.a, d.turbo)
+		} else {
+			// The slow path; buffer the input until we can fill the sponge, and then xor it in.
+			todo := d.rate - bufl
+			if todo > len(p) {
+				todo = len(p)
+			}
+			d.bufe += todo
+			buf := d.buf()
+			copy(buf[bufl:], p[:todo])
+			p = p[todo:]
+
+			// If the sponge is full, apply the permutation.
+			if d.bufe == d.rate {
+				d.permute()
+			}
+		}
+	}
+
+	return written, nil
+}
+
+// Read squeezes an arbitrary number of bytes from the sponge.
+func (d *State) Read(out []byte) (n int, err error) {
+	// If we're still absorbing, pad and apply the permutation.
+	if d.state == spongeAbsorbing {
+		d.padAndPermute(d.dsbyte)
+	}
+
+	n = len(out)
+
+	// Now, do the squeezing.
+	for len(out) > 0 {
+		buf := d.buf()
+		n := copy(out, buf)
+		d.bufo += n
+		out = out[n:]
+
+		// Apply the permutation if we've squeezed the sponge dry.
+		if d.bufo == d.bufe {
+			d.permute()
+		}
+	}
+
+	return
+}
+
+// Sum applies padding to the hash state and then squeezes out the desired
+// number of output bytes.
+func (d *State) Sum(in []byte) []byte {
+	// Make a copy of the original hash so that caller can keep writing
+	// and summing.
+	dup := d.clone()
+	hash := make([]byte, dup.outputLen)
+	_, _ = dup.Read(hash)
+	return append(in, hash...)
+}
+
+func (d *State) IsAbsorbing() bool {
+	return d.state == spongeAbsorbing
+}
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/sha3_s390x.s b/vendor/github.com/cloudflare/circl/internal/sha3/sha3_s390x.s
new file mode 100644
index 00000000..8a4458f6
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/internal/sha3/sha3_s390x.s
@@ -0,0 +1,33 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo,!appengine
+
+#include "textflag.h"
+
+// func kimd(function code, chain *[200]byte, src []byte)
+TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40
+	MOVD function+0(FP), R0
+	MOVD chain+8(FP), R1
+	LMG  src+16(FP), R2, R3 // R2=base, R3=len
+
+continue:
+	WORD $0xB93E0002 // KIMD --, R2
+	BVS  continue    // continue if interrupted
+	MOVD $0, R0      // reset R0 for pre-go1.8 compilers
+	RET
+
+// func klmd(function code, chain *[200]byte, dst, src []byte)
+TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64
+	// TODO: SHAKE support
+	MOVD function+0(FP), R0
+	MOVD chain+8(FP), R1
+	LMG  dst+16(FP), R2, R3 // R2=base, R3=len
+	LMG  src+40(FP), R4, R5 // R4=base, R5=len
+
+continue:
+	WORD $0xB93F0024 // KLMD R2, R4
+	BVS  continue    // continue if interrupted
+	MOVD $0, R0      // reset R0 for pre-go1.8 compilers
+	RET
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/shake.go b/vendor/github.com/cloudflare/circl/internal/sha3/shake.go
new file mode 100644
index 00000000..77817f75
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/internal/sha3/shake.go
@@ -0,0 +1,119 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
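
Write and Read above are the raw absorb/squeeze interface that the SHAKE constructors defined just below wrap. A minimal usage sketch (the import path is internal to circl, so outside callers would use a public sha3 package instead; shown only to illustrate the flow):

package main

import (
	"fmt"

	"github.com/cloudflare/circl/internal/sha3"
)

func main() {
	s := sha3.NewShake256()
	_, _ = s.Write([]byte("hello"))

	// Squeeze 64 bytes; further Reads would keep extending the same stream.
	out := make([]byte, 64)
	_, _ = s.Read(out)
	fmt.Printf("%x\n", out[:8])
}
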
+
+package sha3
+
+// This file defines the ShakeHash interface, and provides
+// functions for creating SHAKE and cSHAKE instances, as well as utility
+// functions for hashing bytes to arbitrary-length output.
+//
+//
+// SHAKE implementation is based on FIPS PUB 202 [1]
+// cSHAKE implementation is based on NIST SP 800-185 [2]
+//
+// [1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
+// [2] https://doi.org/10.6028/NIST.SP.800-185

+import (
+	"io"
+)
+
+// ShakeHash defines the interface to hash functions that
+// support arbitrary-length output.
+type ShakeHash interface {
+	// Write absorbs more data into the hash's state. It panics if input is
+	// written to it after output has been read from it.
+	io.Writer
+
+	// Read reads more output from the hash; reading affects the hash's
+	// state. (ShakeHash.Read is thus very different from Hash.Sum)
+	// It never returns an error.
+	io.Reader
+
+	// Clone returns a copy of the ShakeHash in its current state.
+	Clone() ShakeHash
+
+	// Reset resets the ShakeHash to its initial state.
+	Reset()
+}
+
+// Consts for configuring initial SHA-3 state
+const (
+	dsbyteShake = 0x1f
+	rate128     = 168
+	rate256     = 136
+)
+
+// Clone returns a copy of the SHAKE context in its current state.
+func (d *State) Clone() ShakeHash {
+	return d.clone()
+}
+
+// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash.
+// Its generic security strength is 128 bits against all attacks if at
+// least 32 bytes of its output are used.
+func NewShake128() State {
+	return State{rate: rate128, dsbyte: dsbyteShake}
+}
+
+// NewTurboShake128 creates a new TurboSHAKE128 variable-output-length ShakeHash.
+// Its generic security strength is 128 bits against all attacks if at
+// least 32 bytes of its output are used.
+// D is the domain separation byte and must be between 0x01 and 0x7f inclusive.
+func NewTurboShake128(D byte) State {
+	if D == 0 || D > 0x7f {
+		panic("turboshake: D out of range")
+	}
+	return State{rate: rate128, dsbyte: D, turbo: true}
+}
+
+// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash.
+// Its generic security strength is 256 bits against all attacks if
+// at least 64 bytes of its output are used.
+func NewShake256() State {
+	return State{rate: rate256, dsbyte: dsbyteShake}
+}
+
+// NewTurboShake256 creates a new TurboSHAKE256 variable-output-length ShakeHash.
+// Its generic security strength is 256 bits against all attacks if
+// at least 64 bytes of its output are used.
+// D is the domain separation byte and must be between 0x01 and 0x7f inclusive.
+func NewTurboShake256(D byte) State {
+	if D == 0 || D > 0x7f {
+		panic("turboshake: D out of range")
+	}
+	return State{rate: rate256, dsbyte: D, turbo: true}
+}
+
+// ShakeSum128 writes an arbitrary-length digest of data into hash.
+func ShakeSum128(hash, data []byte) {
+	h := NewShake128()
+	_, _ = h.Write(data)
+	_, _ = h.Read(hash)
+}
+
+// ShakeSum256 writes an arbitrary-length digest of data into hash.
+func ShakeSum256(hash, data []byte) {
+	h := NewShake256()
+	_, _ = h.Write(data)
+	_, _ = h.Read(hash)
+}
+
+// TurboShakeSum128 writes an arbitrary-length digest of data into hash.
+func TurboShakeSum128(hash, data []byte, D byte) {
+	h := NewTurboShake128(D)
+	_, _ = h.Write(data)
+	_, _ = h.Read(hash)
+}
+
+// TurboShakeSum256 writes an arbitrary-length digest of data into hash.
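
The one-shot helpers (ShakeSum128/ShakeSum256 above, and the TurboShake variants here) are thin wrappers over that streaming interface, so both forms produce identical output. A sketch of the equivalence, again assuming the internal import path:

package main

import (
	"bytes"
	"fmt"

	"github.com/cloudflare/circl/internal/sha3"
)

func main() {
	msg := []byte("abc")

	// One-shot form.
	oneShot := make([]byte, 32)
	sha3.ShakeSum128(oneShot, msg)

	// Streaming form: same absorb, then squeeze.
	s := sha3.NewShake128()
	_, _ = s.Write(msg)
	streamed := make([]byte, 32)
	_, _ = s.Read(streamed)

	fmt.Println(bytes.Equal(oneShot, streamed)) // true
}
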
+func TurboShakeSum256(hash, data []byte, D byte) { + h := NewTurboShake256(D) + _, _ = h.Write(data) + _, _ = h.Read(hash) +} + +func (d *State) SwitchDS(D byte) { + d.dsbyte = D +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/xor.go b/vendor/github.com/cloudflare/circl/internal/sha3/xor.go new file mode 100644 index 00000000..1e213374 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/xor.go @@ -0,0 +1,15 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (!amd64 && !386 && !ppc64le) || appengine +// +build !amd64,!386,!ppc64le appengine + +package sha3 + +// A storageBuf is an aligned array of maxRate bytes. +type storageBuf [maxRate]byte + +func (b *storageBuf) asBytes() *[maxRate]byte { + return (*[maxRate]byte)(b) +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/xor_generic.go b/vendor/github.com/cloudflare/circl/internal/sha3/xor_generic.go new file mode 100644 index 00000000..2b0c6617 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/xor_generic.go @@ -0,0 +1,33 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (!amd64 || appengine) && (!386 || appengine) && (!ppc64le || appengine) +// +build !amd64 appengine +// +build !386 appengine +// +build !ppc64le appengine + +package sha3 + +import "encoding/binary" + +// xorIn xors the bytes in buf into the state; it +// makes no non-portable assumptions about memory layout +// or alignment. +func xorIn(d *State, buf []byte) { + n := len(buf) / 8 + + for i := 0; i < n; i++ { + a := binary.LittleEndian.Uint64(buf) + d.a[i] ^= a + buf = buf[8:] + } +} + +// copyOut copies uint64s to a byte buffer. +func copyOut(d *State, b []byte) { + for i := 0; len(b) >= 8; i++ { + binary.LittleEndian.PutUint64(b, d.a[i]) + b = b[8:] + } +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go b/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go new file mode 100644 index 00000000..052fc8d3 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go @@ -0,0 +1,61 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (amd64 || 386 || ppc64le) && !appengine +// +build amd64 386 ppc64le +// +build !appengine + +package sha3 + +import "unsafe" + +// A storageBuf is an aligned array of maxRate bytes. +type storageBuf [maxRate / 8]uint64 + +func (b *storageBuf) asBytes() *[maxRate]byte { + return (*[maxRate]byte)(unsafe.Pointer(b)) +} + +// xorIn uses unaligned reads and writes to update d.a to contain d.a +// XOR buf.
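+// The unrolled thresholds below correspond to the standard Keccak rates: +// 72 bytes (SHA3-512), 104 (SHA3-384), 136 (SHA3-256/SHAKE256), +// 144 (SHA3-224), and 168 (SHAKE128).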
+func xorIn(d *State, buf []byte) { + n := len(buf) + bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))[: n/8 : n/8] + if n >= 72 { + d.a[0] ^= bw[0] + d.a[1] ^= bw[1] + d.a[2] ^= bw[2] + d.a[3] ^= bw[3] + d.a[4] ^= bw[4] + d.a[5] ^= bw[5] + d.a[6] ^= bw[6] + d.a[7] ^= bw[7] + d.a[8] ^= bw[8] + } + if n >= 104 { + d.a[9] ^= bw[9] + d.a[10] ^= bw[10] + d.a[11] ^= bw[11] + d.a[12] ^= bw[12] + } + if n >= 136 { + d.a[13] ^= bw[13] + d.a[14] ^= bw[14] + d.a[15] ^= bw[15] + d.a[16] ^= bw[16] + } + if n >= 144 { + d.a[17] ^= bw[17] + } + if n >= 168 { + d.a[18] ^= bw[18] + d.a[19] ^= bw[19] + d.a[20] ^= bw[20] + } +} + +func copyOut(d *State, buf []byte) { + ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0])) + copy(buf, ab[:]) +} diff --git a/vendor/github.com/cloudflare/circl/math/BUILD.bazel b/vendor/github.com/cloudflare/circl/math/BUILD.bazel new file mode 100644 index 00000000..55b828d5 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/BUILD.bazel @@ -0,0 +1,12 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "math", + srcs = [ + "primes.go", + "wnaf.go", + ], + importmap = "peridot.resf.org/vendor/github.com/cloudflare/circl/math", + importpath = "github.com/cloudflare/circl/math", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/BUILD.bazel b/vendor/github.com/cloudflare/circl/math/fp25519/BUILD.bazel new file mode 100644 index 00000000..beb8fb12 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp25519/BUILD.bazel @@ -0,0 +1,24 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "fp25519", + srcs = [ + "fp.go", + "fp_amd64.go", + "fp_amd64.h", + "fp_amd64.s", + "fp_generic.go", + "fp_noasm.go", + ], + importmap = "peridot.resf.org/vendor/github.com/cloudflare/circl/math/fp25519", + importpath = "github.com/cloudflare/circl/math/fp25519", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/cloudflare/circl/internal/conv", + ] + select({ + "@io_bazel_rules_go//go/platform:amd64": [ + "//vendor/golang.org/x/sys/cpu", + ], + "//conditions:default": [], + }), +) diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp.go b/vendor/github.com/cloudflare/circl/math/fp25519/fp.go new file mode 100644 index 00000000..57a50ff5 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp25519/fp.go @@ -0,0 +1,205 @@ +// Package fp25519 provides prime field arithmetic over GF(2^255-19). +package fp25519 + +import ( + "errors" + + "github.com/cloudflare/circl/internal/conv" +) + +// Size in bytes of an element. +const Size = 32 + +// Elt is a prime field element. +type Elt [Size]byte + +func (e Elt) String() string { return conv.BytesLe2Hex(e[:]) } + +// p is the prime modulus 2^255-19. +var p = Elt{ + 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, +} + +// P returns the prime modulus 2^255-19. +func P() Elt { return p } + +// ToBytes stores in b the little-endian byte representation of x. +func ToBytes(b []byte, x *Elt) error { + if len(b) != Size { + return errors.New("wrong size") + } + Modp(x) + copy(b, x[:]) + return nil +} + +// IsZero returns true if x is equal to 0. +func IsZero(x *Elt) bool { Modp(x); return *x == Elt{} } + +// SetOne assigns x=1. +func SetOne(x *Elt) { *x = Elt{}; x[0] = 1 } + +// Neg calculates z = -x. 
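+// Since p - x ≡ -x (mod p), negation costs a single field subtraction +// from the modulus.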
+func Neg(z, x *Elt) { Sub(z, &p, x) } + +// InvSqrt calculates z = sqrt(x/y) iff x/y is a quadratic-residue, which is +// indicated by returning isQR = true. Otherwise, when x/y is a quadratic +// non-residue, z will have an undetermined value and isQR = false. +func InvSqrt(z, x, y *Elt) (isQR bool) { + sqrtMinusOne := &Elt{ + 0xb0, 0xa0, 0x0e, 0x4a, 0x27, 0x1b, 0xee, 0xc4, + 0x78, 0xe4, 0x2f, 0xad, 0x06, 0x18, 0x43, 0x2f, + 0xa7, 0xd7, 0xfb, 0x3d, 0x99, 0x00, 0x4d, 0x2b, + 0x0b, 0xdf, 0xc1, 0x4f, 0x80, 0x24, 0x83, 0x2b, + } + t0, t1, t2, t3 := &Elt{}, &Elt{}, &Elt{}, &Elt{} + + Mul(t0, x, y) // t0 = u*v + Sqr(t1, y) // t1 = v^2 + Mul(t2, t0, t1) // t2 = u*v^3 + Sqr(t0, t1) // t0 = v^4 + Mul(t1, t0, t2) // t1 = u*v^7 + + var Tab [4]*Elt + Tab[0] = &Elt{} + Tab[1] = &Elt{} + Tab[2] = t3 + Tab[3] = t1 + + *Tab[0] = *t1 + Sqr(Tab[0], Tab[0]) + Sqr(Tab[1], Tab[0]) + Sqr(Tab[1], Tab[1]) + Mul(Tab[1], Tab[1], Tab[3]) + Mul(Tab[0], Tab[0], Tab[1]) + Sqr(Tab[0], Tab[0]) + Mul(Tab[0], Tab[0], Tab[1]) + Sqr(Tab[1], Tab[0]) + for i := 0; i < 4; i++ { + Sqr(Tab[1], Tab[1]) + } + Mul(Tab[1], Tab[1], Tab[0]) + Sqr(Tab[2], Tab[1]) + for i := 0; i < 4; i++ { + Sqr(Tab[2], Tab[2]) + } + Mul(Tab[2], Tab[2], Tab[0]) + Sqr(Tab[1], Tab[2]) + for i := 0; i < 14; i++ { + Sqr(Tab[1], Tab[1]) + } + Mul(Tab[1], Tab[1], Tab[2]) + Sqr(Tab[2], Tab[1]) + for i := 0; i < 29; i++ { + Sqr(Tab[2], Tab[2]) + } + Mul(Tab[2], Tab[2], Tab[1]) + Sqr(Tab[1], Tab[2]) + for i := 0; i < 59; i++ { + Sqr(Tab[1], Tab[1]) + } + Mul(Tab[1], Tab[1], Tab[2]) + for i := 0; i < 5; i++ { + Sqr(Tab[1], Tab[1]) + } + Mul(Tab[1], Tab[1], Tab[0]) + Sqr(Tab[2], Tab[1]) + for i := 0; i < 124; i++ { + Sqr(Tab[2], Tab[2]) + } + Mul(Tab[2], Tab[2], Tab[1]) + Sqr(Tab[2], Tab[2]) + Sqr(Tab[2], Tab[2]) + Mul(Tab[2], Tab[2], Tab[3]) + + Mul(z, t3, t2) // z = xy^(p+3)/8 = xy^3*(xy^7)^(p-5)/8 + // Checking whether y z^2 == x + Sqr(t0, z) // t0 = z^2 + Mul(t0, t0, y) // t0 = yz^2 + Sub(t1, t0, x) // t1 = t0-u + Add(t2, t0, x) // t2 = t0+u + if IsZero(t1) { + return true + } else if IsZero(t2) { + Mul(z, z, sqrtMinusOne) // z = z*sqrt(-1) + return true + } else { + return false + } +} + +// Inv calculates z = 1/x mod p. +func Inv(z, x *Elt) { + x0, x1, x2 := &Elt{}, &Elt{}, &Elt{} + Sqr(x1, x) + Sqr(x0, x1) + Sqr(x0, x0) + Mul(x0, x0, x) + Mul(z, x0, x1) + Sqr(x1, z) + Mul(x0, x0, x1) + Sqr(x1, x0) + for i := 0; i < 4; i++ { + Sqr(x1, x1) + } + Mul(x0, x0, x1) + Sqr(x1, x0) + for i := 0; i < 9; i++ { + Sqr(x1, x1) + } + Mul(x1, x1, x0) + Sqr(x2, x1) + for i := 0; i < 19; i++ { + Sqr(x2, x2) + } + Mul(x2, x2, x1) + for i := 0; i < 10; i++ { + Sqr(x2, x2) + } + Mul(x2, x2, x0) + Sqr(x0, x2) + for i := 0; i < 49; i++ { + Sqr(x0, x0) + } + Mul(x0, x0, x2) + Sqr(x1, x0) + for i := 0; i < 99; i++ { + Sqr(x1, x1) + } + Mul(x1, x1, x0) + for i := 0; i < 50; i++ { + Sqr(x1, x1) + } + Mul(x1, x1, x2) + for i := 0; i < 5; i++ { + Sqr(x1, x1) + } + Mul(z, z, x1) +} + +// Cmov assigns y to x if n is 1. +func Cmov(x, y *Elt, n uint) { cmov(x, y, n) } + +// Cswap interchanges x and y if n is 1. +func Cswap(x, y *Elt, n uint) { cswap(x, y, n) } + +// Add calculates z = x+y mod p. +func Add(z, x, y *Elt) { add(z, x, y) } + +// Sub calculates z = x-y mod p. +func Sub(z, x, y *Elt) { sub(z, x, y) } + +// AddSub calculates (x,y) = (x+y mod p, x-y mod p). +func AddSub(x, y *Elt) { addsub(x, y) } + +// Mul calculates z = x*y mod p. +func Mul(z, x, y *Elt) { mul(z, x, y) } + +// Sqr calculates z = x^2 mod p. 
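+// Like the wrappers above, Sqr resolves to the amd64 assembly when +// available and to the portable Go code otherwise (see fp_amd64.go and +// fp_noasm.go); the assembly further selects a BMI2/ADX path at run time +// via hasBmi2Adx.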
+func Sqr(z, x *Elt) { sqr(z, x) } + +// Modp ensures that z is between [0,p-1]. +func Modp(z *Elt) { modp(z) } diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go new file mode 100644 index 00000000..057f0d28 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go @@ -0,0 +1,45 @@ +//go:build amd64 && !purego +// +build amd64,!purego + +package fp25519 + +import ( + "golang.org/x/sys/cpu" +) + +var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX + +var _ = hasBmi2Adx + +func cmov(x, y *Elt, n uint) { cmovAmd64(x, y, n) } +func cswap(x, y *Elt, n uint) { cswapAmd64(x, y, n) } +func add(z, x, y *Elt) { addAmd64(z, x, y) } +func sub(z, x, y *Elt) { subAmd64(z, x, y) } +func addsub(x, y *Elt) { addsubAmd64(x, y) } +func mul(z, x, y *Elt) { mulAmd64(z, x, y) } +func sqr(z, x *Elt) { sqrAmd64(z, x) } +func modp(z *Elt) { modpAmd64(z) } + +//go:noescape +func cmovAmd64(x, y *Elt, n uint) + +//go:noescape +func cswapAmd64(x, y *Elt, n uint) + +//go:noescape +func addAmd64(z, x, y *Elt) + +//go:noescape +func subAmd64(z, x, y *Elt) + +//go:noescape +func addsubAmd64(x, y *Elt) + +//go:noescape +func mulAmd64(z, x, y *Elt) + +//go:noescape +func sqrAmd64(z, x *Elt) + +//go:noescape +func modpAmd64(z *Elt) diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h new file mode 100644 index 00000000..b884b584 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h @@ -0,0 +1,351 @@ +// This code was imported from https://github.com/armfazh/rfc7748_precomputed + +// CHECK_BMI2ADX triggers bmi2adx if supported, +// otherwise it fallbacks to legacy code. +#define CHECK_BMI2ADX(label, legacy, bmi2adx) \ + CMPB ·hasBmi2Adx(SB), $0 \ + JE label \ + bmi2adx \ + RET \ + label: \ + legacy \ + RET + +// cselect is a conditional move +// if b=1: it copies y into x; +// if b=0: x remains with the same value; +// if b<> 0,1: undefined. +// Uses: AX, DX, FLAGS +// Instr: x86_64, cmov +#define cselect(x,y,b) \ + TESTQ b, b \ + MOVQ 0+x, AX; MOVQ 0+y, DX; CMOVQNE DX, AX; MOVQ AX, 0+x; \ + MOVQ 8+x, AX; MOVQ 8+y, DX; CMOVQNE DX, AX; MOVQ AX, 8+x; \ + MOVQ 16+x, AX; MOVQ 16+y, DX; CMOVQNE DX, AX; MOVQ AX, 16+x; \ + MOVQ 24+x, AX; MOVQ 24+y, DX; CMOVQNE DX, AX; MOVQ AX, 24+x; + +// cswap is a conditional swap +// if b=1: x,y <- y,x; +// if b=0: x,y remain with the same values; +// if b<> 0,1: undefined. 
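+// The TESTQ/CMOVQNE sequence below performs the swap without branching, +// so execution time does not depend on the secret selector b.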
+// Uses: AX, DX, R8, FLAGS +// Instr: x86_64, cmov +#define cswap(x,y,b) \ + TESTQ b, b \ + MOVQ 0+x, AX; MOVQ AX, R8; MOVQ 0+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 0+x; MOVQ DX, 0+y; \ + MOVQ 8+x, AX; MOVQ AX, R8; MOVQ 8+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 8+x; MOVQ DX, 8+y; \ + MOVQ 16+x, AX; MOVQ AX, R8; MOVQ 16+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 16+x; MOVQ DX, 16+y; \ + MOVQ 24+x, AX; MOVQ AX, R8; MOVQ 24+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 24+x; MOVQ DX, 24+y; + +// additionLeg adds x and y and stores in z +// Uses: AX, DX, R8-R11, FLAGS +// Instr: x86_64, cmov +#define additionLeg(z,x,y) \ + MOVL $38, AX; \ + MOVL $0, DX; \ + MOVQ 0+x, R8; ADDQ 0+y, R8; \ + MOVQ 8+x, R9; ADCQ 8+y, R9; \ + MOVQ 16+x, R10; ADCQ 16+y, R10; \ + MOVQ 24+x, R11; ADCQ 24+y, R11; \ + CMOVQCS AX, DX; \ + ADDQ DX, R8; \ + ADCQ $0, R9; MOVQ R9, 8+z; \ + ADCQ $0, R10; MOVQ R10, 16+z; \ + ADCQ $0, R11; MOVQ R11, 24+z; \ + MOVL $0, DX; \ + CMOVQCS AX, DX; \ + ADDQ DX, R8; MOVQ R8, 0+z; + +// additionAdx adds x and y and stores in z +// Uses: AX, DX, R8-R11, FLAGS +// Instr: x86_64, cmov, adx +#define additionAdx(z,x,y) \ + MOVL $38, AX; \ + XORL DX, DX; \ + MOVQ 0+x, R8; ADCXQ 0+y, R8; \ + MOVQ 8+x, R9; ADCXQ 8+y, R9; \ + MOVQ 16+x, R10; ADCXQ 16+y, R10; \ + MOVQ 24+x, R11; ADCXQ 24+y, R11; \ + CMOVQCS AX, DX ; \ + XORL AX, AX; \ + ADCXQ DX, R8; \ + ADCXQ AX, R9; MOVQ R9, 8+z; \ + ADCXQ AX, R10; MOVQ R10, 16+z; \ + ADCXQ AX, R11; MOVQ R11, 24+z; \ + MOVL $38, DX; \ + CMOVQCS DX, AX; \ + ADDQ AX, R8; MOVQ R8, 0+z; + +// subtraction subtracts y from x and stores in z +// Uses: AX, DX, R8-R11, FLAGS +// Instr: x86_64, cmov +#define subtraction(z,x,y) \ + MOVL $38, AX; \ + MOVQ 0+x, R8; SUBQ 0+y, R8; \ + MOVQ 8+x, R9; SBBQ 8+y, R9; \ + MOVQ 16+x, R10; SBBQ 16+y, R10; \ + MOVQ 24+x, R11; SBBQ 24+y, R11; \ + MOVL $0, DX; \ + CMOVQCS AX, DX; \ + SUBQ DX, R8; \ + SBBQ $0, R9; MOVQ R9, 8+z; \ + SBBQ $0, R10; MOVQ R10, 16+z; \ + SBBQ $0, R11; MOVQ R11, 24+z; \ + MOVL $0, DX; \ + CMOVQCS AX, DX; \ + SUBQ DX, R8; MOVQ R8, 0+z; + +// integerMulAdx multiplies x and y and stores in z +// Uses: AX, DX, R8-R15, FLAGS +// Instr: x86_64, bmi2, adx +#define integerMulAdx(z,x,y) \ + MOVL $0,R15; \ + MOVQ 0+y, DX; XORL AX, AX; \ + MULXQ 0+x, AX, R8; MOVQ AX, 0+z; \ + MULXQ 8+x, AX, R9; ADCXQ AX, R8; \ + MULXQ 16+x, AX, R10; ADCXQ AX, R9; \ + MULXQ 24+x, AX, R11; ADCXQ AX, R10; \ + MOVL $0, AX;;;;;;;;; ADCXQ AX, R11; \ + MOVQ 8+y, DX; XORL AX, AX; \ + MULXQ 0+x, AX, R12; ADCXQ R8, AX; MOVQ AX, 8+z; \ + MULXQ 8+x, AX, R13; ADCXQ R9, R12; ADOXQ AX, R12; \ + MULXQ 16+x, AX, R14; ADCXQ R10, R13; ADOXQ AX, R13; \ + MULXQ 24+x, AX, R15; ADCXQ R11, R14; ADOXQ AX, R14; \ + MOVL $0, AX;;;;;;;;; ADCXQ AX, R15; ADOXQ AX, R15; \ + MOVQ 16+y, DX; XORL AX, AX; \ + MULXQ 0+x, AX, R8; ADCXQ R12, AX; MOVQ AX, 16+z; \ + MULXQ 8+x, AX, R9; ADCXQ R13, R8; ADOXQ AX, R8; \ + MULXQ 16+x, AX, R10; ADCXQ R14, R9; ADOXQ AX, R9; \ + MULXQ 24+x, AX, R11; ADCXQ R15, R10; ADOXQ AX, R10; \ + MOVL $0, AX;;;;;;;;; ADCXQ AX, R11; ADOXQ AX, R11; \ + MOVQ 24+y, DX; XORL AX, AX; \ + MULXQ 0+x, AX, R12; ADCXQ R8, AX; MOVQ AX, 24+z; \ + MULXQ 8+x, AX, R13; ADCXQ R9, R12; ADOXQ AX, R12; MOVQ R12, 32+z; \ + MULXQ 16+x, AX, R14; ADCXQ R10, R13; ADOXQ AX, R13; MOVQ R13, 40+z; \ + MULXQ 24+x, AX, R15; ADCXQ R11, R14; ADOXQ AX, R14; MOVQ R14, 48+z; \ + MOVL $0, AX;;;;;;;;; ADCXQ AX, R15; ADOXQ AX, R15; MOVQ R15, 56+z; + +// integerMulLeg multiplies x and y and stores in z +// Uses: AX, DX, R8-R15, FLAGS +// Instr: x86_64 +#define 
integerMulLeg(z,x,y) \ + MOVQ 0+y, R8; \ + MOVQ 0+x, AX; MULQ R8; MOVQ AX, 0+z; MOVQ DX, R15; \ + MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \ + MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \ + MOVQ 24+x, AX; MULQ R8; \ + ADDQ R13, R15; \ + ADCQ R14, R10; MOVQ R10, 16+z; \ + ADCQ AX, R11; MOVQ R11, 24+z; \ + ADCQ $0, DX; MOVQ DX, 32+z; \ + MOVQ 8+y, R8; \ + MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \ + MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \ + MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \ + MOVQ 24+x, AX; MULQ R8; \ + ADDQ R12, R15; MOVQ R15, 8+z; \ + ADCQ R13, R9; \ + ADCQ R14, R10; \ + ADCQ AX, R11; \ + ADCQ $0, DX; \ + ADCQ 16+z, R9; MOVQ R9, R15; \ + ADCQ 24+z, R10; MOVQ R10, 24+z; \ + ADCQ 32+z, R11; MOVQ R11, 32+z; \ + ADCQ $0, DX; MOVQ DX, 40+z; \ + MOVQ 16+y, R8; \ + MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \ + MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \ + MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \ + MOVQ 24+x, AX; MULQ R8; \ + ADDQ R12, R15; MOVQ R15, 16+z; \ + ADCQ R13, R9; \ + ADCQ R14, R10; \ + ADCQ AX, R11; \ + ADCQ $0, DX; \ + ADCQ 24+z, R9; MOVQ R9, R15; \ + ADCQ 32+z, R10; MOVQ R10, 32+z; \ + ADCQ 40+z, R11; MOVQ R11, 40+z; \ + ADCQ $0, DX; MOVQ DX, 48+z; \ + MOVQ 24+y, R8; \ + MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \ + MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \ + MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \ + MOVQ 24+x, AX; MULQ R8; \ + ADDQ R12, R15; MOVQ R15, 24+z; \ + ADCQ R13, R9; \ + ADCQ R14, R10; \ + ADCQ AX, R11; \ + ADCQ $0, DX; \ + ADCQ 32+z, R9; MOVQ R9, 32+z; \ + ADCQ 40+z, R10; MOVQ R10, 40+z; \ + ADCQ 48+z, R11; MOVQ R11, 48+z; \ + ADCQ $0, DX; MOVQ DX, 56+z; + +// integerSqrLeg squares x and stores in z +// Uses: AX, CX, DX, R8-R15, FLAGS +// Instr: x86_64 +#define integerSqrLeg(z,x) \ + MOVQ 0+x, R8; \ + MOVQ 8+x, AX; MULQ R8; MOVQ AX, R9; MOVQ DX, R10; /* A[0]*A[1] */ \ + MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; /* A[0]*A[2] */ \ + MOVQ 24+x, AX; MULQ R8; MOVQ AX, R15; MOVQ DX, R12; /* A[0]*A[3] */ \ + MOVQ 24+x, R8; \ + MOVQ 8+x, AX; MULQ R8; MOVQ AX, CX; MOVQ DX, R13; /* A[3]*A[1] */ \ + MOVQ 16+x, AX; MULQ R8; /* A[3]*A[2] */ \ + \ + ADDQ R14, R10;\ + ADCQ R15, R11; MOVL $0, R15;\ + ADCQ CX, R12;\ + ADCQ AX, R13;\ + ADCQ $0, DX; MOVQ DX, R14;\ + MOVQ 8+x, AX; MULQ 16+x;\ + \ + ADDQ AX, R11;\ + ADCQ DX, R12;\ + ADCQ $0, R13;\ + ADCQ $0, R14;\ + ADCQ $0, R15;\ + \ + SHLQ $1, R14, R15; MOVQ R15, 56+z;\ + SHLQ $1, R13, R14; MOVQ R14, 48+z;\ + SHLQ $1, R12, R13; MOVQ R13, 40+z;\ + SHLQ $1, R11, R12; MOVQ R12, 32+z;\ + SHLQ $1, R10, R11; MOVQ R11, 24+z;\ + SHLQ $1, R9, R10; MOVQ R10, 16+z;\ + SHLQ $1, R9; MOVQ R9, 8+z;\ + \ + MOVQ 0+x,AX; MULQ AX; MOVQ AX, 0+z; MOVQ DX, R9;\ + MOVQ 8+x,AX; MULQ AX; MOVQ AX, R10; MOVQ DX, R11;\ + MOVQ 16+x,AX; MULQ AX; MOVQ AX, R12; MOVQ DX, R13;\ + MOVQ 24+x,AX; MULQ AX; MOVQ AX, R14; MOVQ DX, R15;\ + \ + ADDQ 8+z, R9; MOVQ R9, 8+z;\ + ADCQ 16+z, R10; MOVQ R10, 16+z;\ + ADCQ 24+z, R11; MOVQ R11, 24+z;\ + ADCQ 32+z, R12; MOVQ R12, 32+z;\ + ADCQ 40+z, R13; MOVQ R13, 40+z;\ + ADCQ 48+z, R14; MOVQ R14, 48+z;\ + ADCQ 56+z, R15; MOVQ R15, 56+z; + +// integerSqrAdx squares x and stores in z +// Uses: AX, CX, DX, R8-R15, FLAGS +// Instr: x86_64, bmi2, adx +#define integerSqrAdx(z,x) \ + MOVQ 0+x, DX; /* A[0] */ \ + MULXQ 8+x, R8, R14; /* A[1]*A[0] */ XORL R15, R15; \ + MULXQ 16+x, R9, R10; /* A[2]*A[0] */ ADCXQ R14, R9; \ + MULXQ 24+x, AX, CX; /* A[3]*A[0] */ ADCXQ AX, R10; \ + MOVQ 24+x, DX; /* A[3] */ \ + MULXQ 8+x, 
R11, R12; /* A[1]*A[3] */ ADCXQ CX, R11; \ + MULXQ 16+x, AX, R13; /* A[2]*A[3] */ ADCXQ AX, R12; \ + MOVQ 8+x, DX; /* A[1] */ ADCXQ R15, R13; \ + MULXQ 16+x, AX, CX; /* A[2]*A[1] */ MOVL $0, R14; \ + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADCXQ R15, R14; \ + XORL R15, R15; \ + ADOXQ AX, R10; ADCXQ R8, R8; \ + ADOXQ CX, R11; ADCXQ R9, R9; \ + ADOXQ R15, R12; ADCXQ R10, R10; \ + ADOXQ R15, R13; ADCXQ R11, R11; \ + ADOXQ R15, R14; ADCXQ R12, R12; \ + ;;;;;;;;;;;;;;; ADCXQ R13, R13; \ + ;;;;;;;;;;;;;;; ADCXQ R14, R14; \ + MOVQ 0+x, DX; MULXQ DX, AX, CX; /* A[0]^2 */ \ + ;;;;;;;;;;;;;;; MOVQ AX, 0+z; \ + ADDQ CX, R8; MOVQ R8, 8+z; \ + MOVQ 8+x, DX; MULXQ DX, AX, CX; /* A[1]^2 */ \ + ADCQ AX, R9; MOVQ R9, 16+z; \ + ADCQ CX, R10; MOVQ R10, 24+z; \ + MOVQ 16+x, DX; MULXQ DX, AX, CX; /* A[2]^2 */ \ + ADCQ AX, R11; MOVQ R11, 32+z; \ + ADCQ CX, R12; MOVQ R12, 40+z; \ + MOVQ 24+x, DX; MULXQ DX, AX, CX; /* A[3]^2 */ \ + ADCQ AX, R13; MOVQ R13, 48+z; \ + ADCQ CX, R14; MOVQ R14, 56+z; + +// reduceFromDouble finds z congruent to x modulo p such that 0> 63) + // PUT BIT 255 IN CARRY FLAG AND CLEAR + x3 &^= 1 << 63 + + x0, c0 := bits.Add64(x0, cx, 0) + x1, c1 := bits.Add64(x1, 0, c0) + x2, c2 := bits.Add64(x2, 0, c1) + x3, _ = bits.Add64(x3, 0, c2) + + // TEST FOR BIT 255 AGAIN; ONLY TRIGGERED ON OVERFLOW MODULO 2^255-19 + // cx = C[255] ? 0 : 19 + cx = uint64(19) &^ (-(x3 >> 63)) + // CLEAR BIT 255 + x3 &^= 1 << 63 + + x0, c0 = bits.Sub64(x0, cx, 0) + x1, c1 = bits.Sub64(x1, 0, c0) + x2, c2 = bits.Sub64(x2, 0, c1) + x3, _ = bits.Sub64(x3, 0, c2) + + binary.LittleEndian.PutUint64(x[0*8:1*8], x0) + binary.LittleEndian.PutUint64(x[1*8:2*8], x1) + binary.LittleEndian.PutUint64(x[2*8:3*8], x2) + binary.LittleEndian.PutUint64(x[3*8:4*8], x3) +} + +func red64(z *Elt, x0, x1, x2, x3, x4, x5, x6, x7 uint64) { + h0, l0 := bits.Mul64(x4, 38) + h1, l1 := bits.Mul64(x5, 38) + h2, l2 := bits.Mul64(x6, 38) + h3, l3 := bits.Mul64(x7, 38) + + l1, c0 := bits.Add64(h0, l1, 0) + l2, c1 := bits.Add64(h1, l2, c0) + l3, c2 := bits.Add64(h2, l3, c1) + l4, _ := bits.Add64(h3, 0, c2) + + l0, c0 = bits.Add64(l0, x0, 0) + l1, c1 = bits.Add64(l1, x1, c0) + l2, c2 = bits.Add64(l2, x2, c1) + l3, c3 := bits.Add64(l3, x3, c2) + l4, _ = bits.Add64(l4, 0, c3) + + _, l4 = bits.Mul64(l4, 38) + l0, c0 = bits.Add64(l0, l4, 0) + z1, c1 := bits.Add64(l1, 0, c0) + z2, c2 := bits.Add64(l2, 0, c1) + z3, c3 := bits.Add64(l3, 0, c2) + z0, _ := bits.Add64(l0, (-c3)&38, 0) + + binary.LittleEndian.PutUint64(z[0*8:1*8], z0) + binary.LittleEndian.PutUint64(z[1*8:2*8], z1) + binary.LittleEndian.PutUint64(z[2*8:3*8], z2) + binary.LittleEndian.PutUint64(z[3*8:4*8], z3) +} diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go b/vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go new file mode 100644 index 00000000..26ca4d01 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go @@ -0,0 +1,13 @@ +//go:build !amd64 || purego +// +build !amd64 purego + +package fp25519 + +func cmov(x, y *Elt, n uint) { cmovGeneric(x, y, n) } +func cswap(x, y *Elt, n uint) { cswapGeneric(x, y, n) } +func add(z, x, y *Elt) { addGeneric(z, x, y) } +func sub(z, x, y *Elt) { subGeneric(z, x, y) } +func addsub(x, y *Elt) { addsubGeneric(x, y) } +func mul(z, x, y *Elt) { mulGeneric(z, x, y) } +func sqr(z, x *Elt) { sqrGeneric(z, x) } +func modp(z *Elt) { modpGeneric(z) } diff --git a/vendor/github.com/cloudflare/circl/math/fp448/BUILD.bazel b/vendor/github.com/cloudflare/circl/math/fp448/BUILD.bazel new file mode 100644 index 
00000000..19dff708 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp448/BUILD.bazel @@ -0,0 +1,24 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "fp448", + srcs = [ + "fp.go", + "fp_amd64.go", + "fp_amd64.h", + "fp_amd64.s", + "fp_generic.go", + "fp_noasm.go", + ], + importmap = "peridot.resf.org/vendor/github.com/cloudflare/circl/math/fp448", + importpath = "github.com/cloudflare/circl/math/fp448", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/cloudflare/circl/internal/conv", + ] + select({ + "@io_bazel_rules_go//go/platform:amd64": [ + "//vendor/golang.org/x/sys/cpu", + ], + "//conditions:default": [], + }), +) diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp.go b/vendor/github.com/cloudflare/circl/math/fp448/fp.go new file mode 100644 index 00000000..a5e36600 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp448/fp.go @@ -0,0 +1,164 @@ +// Package fp448 provides prime field arithmetic over GF(2^448-2^224-1). +package fp448 + +import ( + "errors" + + "github.com/cloudflare/circl/internal/conv" +) + +// Size in bytes of an element. +const Size = 56 + +// Elt is a prime field element. +type Elt [Size]byte + +func (e Elt) String() string { return conv.BytesLe2Hex(e[:]) } + +// p is the prime modulus 2^448-2^224-1. +var p = Elt{ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +} + +// P returns the prime modulus 2^448-2^224-1. +func P() Elt { return p } + +// ToBytes stores in b the little-endian byte representation of x. +func ToBytes(b []byte, x *Elt) error { + if len(b) != Size { + return errors.New("wrong size") + } + Modp(x) + copy(b, x[:]) + return nil +} + +// IsZero returns true if x is equal to 0. +func IsZero(x *Elt) bool { Modp(x); return *x == Elt{} } + +// IsOne returns true if x is equal to 1. +func IsOne(x *Elt) bool { Modp(x); return *x == Elt{1} } + +// SetOne assigns x=1. +func SetOne(x *Elt) { *x = Elt{1} } + +// One returns the 1 element. +func One() (x Elt) { x = Elt{1}; return } + +// Neg calculates z = -x. +func Neg(z, x *Elt) { Sub(z, &p, x) } + +// Modp ensures that z is between [0,p-1]. +func Modp(z *Elt) { Sub(z, z, &p) } + +// InvSqrt calculates z = sqrt(x/y) iff x/y is a quadratic-residue. If so, +// isQR = true; otherwise, isQR = false, since x/y is a quadratic non-residue, +// and z = sqrt(-x/y). +func InvSqrt(z, x, y *Elt) (isQR bool) { + // First note that x^(2(k+1)) = x^(p-1)/2 * x = legendre(x) * x + // so that's x if x is a quadratic residue and -x otherwise. + // Next, y^(6k+3) = y^(4k+2) * y^(2k+1) = y^(p-1) * y^((p-1)/2) = legendre(y). + // So the z we compute satisfies z^2 y = x^(2(k+1)) y^(6k+3) = legendre(x)*legendre(y). + // Thus if x and y are quadratic residues, then z is indeed sqrt(x/y). + t0, t1 := &Elt{}, &Elt{} + Mul(t0, x, y) // x*y + Sqr(t1, y) // y^2 + Mul(t1, t0, t1) // x*y^3 + powPminus3div4(z, t1) // (x*y^3)^k + Mul(z, z, t0) // z = x*y*(x*y^3)^k = x^(k+1) * y^(3k+1) + + // Check if x/y is a quadratic residue + Sqr(t0, z) // z^2 + Mul(t0, t0, y) // y*z^2 + Sub(t0, t0, x) // y*z^2-x + return IsZero(t0) +} + +// Inv calculates z = 1/x mod p. +func Inv(z, x *Elt) { + // Calculates z = x^(4k+1) = x^(p-3+1) = x^(p-2) = x^-1, where k = (p-3)/4. 
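+	// By Fermat's little theorem, x^(p-2) ≡ 1/x (mod p) for nonzero x, so +	// inversion is exponentiation by a fixed addition chain of squarings +	// and multiplications, with running time independent of the value of x.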
+ t := &Elt{} + powPminus3div4(t, x) // t = x^k + Sqr(t, t) // t = x^2k + Sqr(t, t) // t = x^4k + Mul(z, t, x) // z = x^(4k+1) +} + +// powPminus3div4 calculates z = x^k mod p, where k = (p-3)/4. +func powPminus3div4(z, x *Elt) { + x0, x1 := &Elt{}, &Elt{} + Sqr(z, x) + Mul(z, z, x) + Sqr(x0, z) + Mul(x0, x0, x) + Sqr(z, x0) + Sqr(z, z) + Sqr(z, z) + Mul(z, z, x0) + Sqr(x1, z) + for i := 0; i < 5; i++ { + Sqr(x1, x1) + } + Mul(x1, x1, z) + Sqr(z, x1) + for i := 0; i < 11; i++ { + Sqr(z, z) + } + Mul(z, z, x1) + Sqr(z, z) + Sqr(z, z) + Sqr(z, z) + Mul(z, z, x0) + Sqr(x1, z) + for i := 0; i < 26; i++ { + Sqr(x1, x1) + } + Mul(x1, x1, z) + Sqr(z, x1) + for i := 0; i < 53; i++ { + Sqr(z, z) + } + Mul(z, z, x1) + Sqr(z, z) + Sqr(z, z) + Sqr(z, z) + Mul(z, z, x0) + Sqr(x1, z) + for i := 0; i < 110; i++ { + Sqr(x1, x1) + } + Mul(x1, x1, z) + Sqr(z, x1) + Mul(z, z, x) + for i := 0; i < 223; i++ { + Sqr(z, z) + } + Mul(z, z, x1) +} + +// Cmov assigns y to x if n is 1. +func Cmov(x, y *Elt, n uint) { cmov(x, y, n) } + +// Cswap interchanges x and y if n is 1. +func Cswap(x, y *Elt, n uint) { cswap(x, y, n) } + +// Add calculates z = x+y mod p. +func Add(z, x, y *Elt) { add(z, x, y) } + +// Sub calculates z = x-y mod p. +func Sub(z, x, y *Elt) { sub(z, x, y) } + +// AddSub calculates (x,y) = (x+y mod p, x-y mod p). +func AddSub(x, y *Elt) { addsub(x, y) } + +// Mul calculates z = x*y mod p. +func Mul(z, x, y *Elt) { mul(z, x, y) } + +// Sqr calculates z = x^2 mod p. +func Sqr(z, x *Elt) { sqr(z, x) } diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.go b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.go new file mode 100644 index 00000000..6a12209a --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.go @@ -0,0 +1,43 @@ +//go:build amd64 && !purego +// +build amd64,!purego + +package fp448 + +import ( + "golang.org/x/sys/cpu" +) + +var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX + +var _ = hasBmi2Adx + +func cmov(x, y *Elt, n uint) { cmovAmd64(x, y, n) } +func cswap(x, y *Elt, n uint) { cswapAmd64(x, y, n) } +func add(z, x, y *Elt) { addAmd64(z, x, y) } +func sub(z, x, y *Elt) { subAmd64(z, x, y) } +func addsub(x, y *Elt) { addsubAmd64(x, y) } +func mul(z, x, y *Elt) { mulAmd64(z, x, y) } +func sqr(z, x *Elt) { sqrAmd64(z, x) } + +/* Functions defined in fp_amd64.s */ + +//go:noescape +func cmovAmd64(x, y *Elt, n uint) + +//go:noescape +func cswapAmd64(x, y *Elt, n uint) + +//go:noescape +func addAmd64(z, x, y *Elt) + +//go:noescape +func subAmd64(z, x, y *Elt) + +//go:noescape +func addsubAmd64(x, y *Elt) + +//go:noescape +func mulAmd64(z, x, y *Elt) + +//go:noescape +func sqrAmd64(z, x *Elt) diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h new file mode 100644 index 00000000..536fe5bd --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h @@ -0,0 +1,591 @@ +// This code was imported from https://github.com/armfazh/rfc7748_precomputed + +// CHECK_BMI2ADX triggers bmi2adx if supported, +// otherwise it fallbacks to legacy code. +#define CHECK_BMI2ADX(label, legacy, bmi2adx) \ + CMPB ·hasBmi2Adx(SB), $0 \ + JE label \ + bmi2adx \ + RET \ + label: \ + legacy \ + RET + +// cselect is a conditional move +// if b=1: it copies y into x; +// if b=0: x remains with the same value; +// if b<> 0,1: undefined. 
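+// This is the seven-limb (448-bit) analogue of the fp25519 macro; the +// additional MOVQ/CMOVQNE rows cover offsets 32, 40, and 48.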
+// Uses: AX, DX, FLAGS +// Instr: x86_64, cmov +#define cselect(x,y,b) \ + TESTQ b, b \ + MOVQ 0+x, AX; MOVQ 0+y, DX; CMOVQNE DX, AX; MOVQ AX, 0+x; \ + MOVQ 8+x, AX; MOVQ 8+y, DX; CMOVQNE DX, AX; MOVQ AX, 8+x; \ + MOVQ 16+x, AX; MOVQ 16+y, DX; CMOVQNE DX, AX; MOVQ AX, 16+x; \ + MOVQ 24+x, AX; MOVQ 24+y, DX; CMOVQNE DX, AX; MOVQ AX, 24+x; \ + MOVQ 32+x, AX; MOVQ 32+y, DX; CMOVQNE DX, AX; MOVQ AX, 32+x; \ + MOVQ 40+x, AX; MOVQ 40+y, DX; CMOVQNE DX, AX; MOVQ AX, 40+x; \ + MOVQ 48+x, AX; MOVQ 48+y, DX; CMOVQNE DX, AX; MOVQ AX, 48+x; + +// cswap is a conditional swap +// if b=1: x,y <- y,x; +// if b=0: x,y remain with the same values; +// if b<> 0,1: undefined. +// Uses: AX, DX, R8, FLAGS +// Instr: x86_64, cmov +#define cswap(x,y,b) \ + TESTQ b, b \ + MOVQ 0+x, AX; MOVQ AX, R8; MOVQ 0+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 0+x; MOVQ DX, 0+y; \ + MOVQ 8+x, AX; MOVQ AX, R8; MOVQ 8+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 8+x; MOVQ DX, 8+y; \ + MOVQ 16+x, AX; MOVQ AX, R8; MOVQ 16+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 16+x; MOVQ DX, 16+y; \ + MOVQ 24+x, AX; MOVQ AX, R8; MOVQ 24+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 24+x; MOVQ DX, 24+y; \ + MOVQ 32+x, AX; MOVQ AX, R8; MOVQ 32+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 32+x; MOVQ DX, 32+y; \ + MOVQ 40+x, AX; MOVQ AX, R8; MOVQ 40+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 40+x; MOVQ DX, 40+y; \ + MOVQ 48+x, AX; MOVQ AX, R8; MOVQ 48+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 48+x; MOVQ DX, 48+y; + +// additionLeg adds x and y and stores in z +// Uses: AX, DX, R8-R14, FLAGS +// Instr: x86_64 +#define additionLeg(z,x,y) \ + MOVQ 0+x, R8; ADDQ 0+y, R8; \ + MOVQ 8+x, R9; ADCQ 8+y, R9; \ + MOVQ 16+x, R10; ADCQ 16+y, R10; \ + MOVQ 24+x, R11; ADCQ 24+y, R11; \ + MOVQ 32+x, R12; ADCQ 32+y, R12; \ + MOVQ 40+x, R13; ADCQ 40+y, R13; \ + MOVQ 48+x, R14; ADCQ 48+y, R14; \ + MOVQ $0, AX; ADCQ $0, AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + ADDQ AX, R8; MOVQ $0, AX; \ + ADCQ $0, R9; \ + ADCQ $0, R10; \ + ADCQ DX, R11; \ + ADCQ $0, R12; \ + ADCQ $0, R13; \ + ADCQ $0, R14; \ + ADCQ $0, AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + ADDQ AX, R8; MOVQ R8, 0+z; \ + ADCQ $0, R9; MOVQ R9, 8+z; \ + ADCQ $0, R10; MOVQ R10, 16+z; \ + ADCQ DX, R11; MOVQ R11, 24+z; \ + ADCQ $0, R12; MOVQ R12, 32+z; \ + ADCQ $0, R13; MOVQ R13, 40+z; \ + ADCQ $0, R14; MOVQ R14, 48+z; + + +// additionAdx adds x and y and stores in z +// Uses: AX, DX, R8-R15, FLAGS +// Instr: x86_64, adx +#define additionAdx(z,x,y) \ + MOVL $32, R15; \ + XORL DX, DX; \ + MOVQ 0+x, R8; ADCXQ 0+y, R8; \ + MOVQ 8+x, R9; ADCXQ 8+y, R9; \ + MOVQ 16+x, R10; ADCXQ 16+y, R10; \ + MOVQ 24+x, R11; ADCXQ 24+y, R11; \ + MOVQ 32+x, R12; ADCXQ 32+y, R12; \ + MOVQ 40+x, R13; ADCXQ 40+y, R13; \ + MOVQ 48+x, R14; ADCXQ 48+y, R14; \ + ;;;;;;;;;;;;;;; ADCXQ DX, DX; \ + XORL AX, AX; \ + ADCXQ DX, R8; SHLXQ R15, DX, DX; \ + ADCXQ AX, R9; \ + ADCXQ AX, R10; \ + ADCXQ DX, R11; \ + ADCXQ AX, R12; \ + ADCXQ AX, R13; \ + ADCXQ AX, R14; \ + ADCXQ AX, AX; \ + XORL DX, DX; \ + ADCXQ AX, R8; MOVQ R8, 0+z; SHLXQ R15, AX, AX; \ + ADCXQ DX, R9; MOVQ R9, 8+z; \ + ADCXQ DX, R10; MOVQ R10, 16+z; \ + ADCXQ AX, R11; MOVQ R11, 24+z; \ + ADCXQ DX, R12; MOVQ R12, 32+z; \ + ADCXQ DX, R13; MOVQ R13, 40+z; \ + ADCXQ DX, R14; MOVQ R14, 48+z; + +// subtraction subtracts y from x and stores in z +// Uses: AX, DX, R8-R14, FLAGS +// Instr: x86_64 +#define subtraction(z,x,y) \ + MOVQ 0+x, R8; SUBQ 0+y, R8; \ + MOVQ 8+x, R9; SBBQ 8+y, R9; \ + MOVQ 16+x, R10; SBBQ 16+y, R10; \ + MOVQ 24+x, R11; SBBQ 24+y, R11; \ + MOVQ 
32+x, R12; SBBQ 32+y, R12; \ + MOVQ 40+x, R13; SBBQ 40+y, R13; \ + MOVQ 48+x, R14; SBBQ 48+y, R14; \ + MOVQ $0, AX; SETCS AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + SUBQ AX, R8; MOVQ $0, AX; \ + SBBQ $0, R9; \ + SBBQ $0, R10; \ + SBBQ DX, R11; \ + SBBQ $0, R12; \ + SBBQ $0, R13; \ + SBBQ $0, R14; \ + SETCS AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + SUBQ AX, R8; MOVQ R8, 0+z; \ + SBBQ $0, R9; MOVQ R9, 8+z; \ + SBBQ $0, R10; MOVQ R10, 16+z; \ + SBBQ DX, R11; MOVQ R11, 24+z; \ + SBBQ $0, R12; MOVQ R12, 32+z; \ + SBBQ $0, R13; MOVQ R13, 40+z; \ + SBBQ $0, R14; MOVQ R14, 48+z; + +// maddBmi2Adx multiplies x and y and accumulates in z +// Uses: AX, DX, R15, FLAGS +// Instr: x86_64, bmi2, adx +#define maddBmi2Adx(z,x,y,i,r0,r1,r2,r3,r4,r5,r6) \ + MOVQ i+y, DX; XORL AX, AX; \ + MULXQ 0+x, AX, R8; ADOXQ AX, r0; ADCXQ R8, r1; MOVQ r0,i+z; \ + MULXQ 8+x, AX, r0; ADOXQ AX, r1; ADCXQ r0, r2; MOVQ $0, R8; \ + MULXQ 16+x, AX, r0; ADOXQ AX, r2; ADCXQ r0, r3; \ + MULXQ 24+x, AX, r0; ADOXQ AX, r3; ADCXQ r0, r4; \ + MULXQ 32+x, AX, r0; ADOXQ AX, r4; ADCXQ r0, r5; \ + MULXQ 40+x, AX, r0; ADOXQ AX, r5; ADCXQ r0, r6; \ + MULXQ 48+x, AX, r0; ADOXQ AX, r6; ADCXQ R8, r0; \ + ;;;;;;;;;;;;;;;;;;; ADOXQ R8, r0; + +// integerMulAdx multiplies x and y and stores in z +// Uses: AX, DX, R8-R15, FLAGS +// Instr: x86_64, bmi2, adx +#define integerMulAdx(z,x,y) \ + MOVL $0,R15; \ + MOVQ 0+y, DX; XORL AX, AX; MOVQ $0, R8; \ + MULXQ 0+x, AX, R9; MOVQ AX, 0+z; \ + MULXQ 8+x, AX, R10; ADCXQ AX, R9; \ + MULXQ 16+x, AX, R11; ADCXQ AX, R10; \ + MULXQ 24+x, AX, R12; ADCXQ AX, R11; \ + MULXQ 32+x, AX, R13; ADCXQ AX, R12; \ + MULXQ 40+x, AX, R14; ADCXQ AX, R13; \ + MULXQ 48+x, AX, R15; ADCXQ AX, R14; \ + ;;;;;;;;;;;;;;;;;;;; ADCXQ R8, R15; \ + maddBmi2Adx(z,x,y, 8, R9,R10,R11,R12,R13,R14,R15) \ + maddBmi2Adx(z,x,y,16,R10,R11,R12,R13,R14,R15, R9) \ + maddBmi2Adx(z,x,y,24,R11,R12,R13,R14,R15, R9,R10) \ + maddBmi2Adx(z,x,y,32,R12,R13,R14,R15, R9,R10,R11) \ + maddBmi2Adx(z,x,y,40,R13,R14,R15, R9,R10,R11,R12) \ + maddBmi2Adx(z,x,y,48,R14,R15, R9,R10,R11,R12,R13) \ + MOVQ R15, 56+z; \ + MOVQ R9, 64+z; \ + MOVQ R10, 72+z; \ + MOVQ R11, 80+z; \ + MOVQ R12, 88+z; \ + MOVQ R13, 96+z; \ + MOVQ R14, 104+z; + +// maddLegacy multiplies x and y and accumulates in z +// Uses: AX, DX, R15, FLAGS +// Instr: x86_64 +#define maddLegacy(z,x,y,i) \ + MOVQ i+y, R15; \ + MOVQ 0+x, AX; MULQ R15; MOVQ AX, R8; ;;;;;;;;;;;; MOVQ DX, R9; \ + MOVQ 8+x, AX; MULQ R15; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; \ + MOVQ 16+x, AX; MULQ R15; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \ + MOVQ 24+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \ + MOVQ 32+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \ + MOVQ 40+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \ + MOVQ 48+x, AX; MULQ R15; ADDQ AX, R14; ADCQ $0, DX; \ + ADDQ 0+i+z, R8; MOVQ R8, 0+i+z; \ + ADCQ 8+i+z, R9; MOVQ R9, 8+i+z; \ + ADCQ 16+i+z, R10; MOVQ R10, 16+i+z; \ + ADCQ 24+i+z, R11; MOVQ R11, 24+i+z; \ + ADCQ 32+i+z, R12; MOVQ R12, 32+i+z; \ + ADCQ 40+i+z, R13; MOVQ R13, 40+i+z; \ + ADCQ 48+i+z, R14; MOVQ R14, 48+i+z; \ + ADCQ $0, DX; MOVQ DX, 56+i+z; + +// integerMulLeg multiplies x and y and stores in z +// Uses: AX, DX, R8-R15, FLAGS +// Instr: x86_64 +#define integerMulLeg(z,x,y) \ + MOVQ 0+y, R15; \ + MOVQ 0+x, AX; MULQ R15; MOVQ AX, 0+z; ;;;;;;;;;;;; MOVQ DX, R8; \ + MOVQ 8+x, AX; MULQ R15; ADDQ AX, R8; ADCQ $0, DX; MOVQ DX, R9; MOVQ R8, 8+z; \ + MOVQ 16+x, AX; MULQ R15; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; MOVQ R9, 16+z; \ + MOVQ 24+x, AX; MULQ R15; ADDQ AX, R10; ADCQ 
$0, DX; MOVQ DX, R11; MOVQ R10, 24+z; \ + MOVQ 32+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; MOVQ R11, 32+z; \ + MOVQ 40+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; MOVQ R12, 40+z; \ + MOVQ 48+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX,56+z; MOVQ R13, 48+z; \ + maddLegacy(z,x,y, 8) \ + maddLegacy(z,x,y,16) \ + maddLegacy(z,x,y,24) \ + maddLegacy(z,x,y,32) \ + maddLegacy(z,x,y,40) \ + maddLegacy(z,x,y,48) + +// integerSqrLeg squares x and stores in z +// Uses: AX, CX, DX, R8-R15, FLAGS +// Instr: x86_64 +#define integerSqrLeg(z,x) \ + XORL R15, R15; \ + MOVQ 0+x, CX; \ + MOVQ CX, AX; MULQ CX; MOVQ AX, 0+z; MOVQ DX, R8; \ + ADDQ CX, CX; ADCQ $0, R15; \ + MOVQ 8+x, AX; MULQ CX; ADDQ AX, R8; ADCQ $0, DX; MOVQ DX, R9; MOVQ R8, 8+z; \ + MOVQ 16+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; \ + MOVQ 24+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \ + MOVQ 32+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \ + MOVQ 40+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \ + MOVQ 48+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \ + \ + MOVQ 8+x, CX; \ + MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ + ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ R9,16+z; \ + MOVQ R15, AX; NEGQ AX; ANDQ 8+x, AX; ADDQ AX, DX; ADCQ $0, R11; MOVQ DX, R8; \ + ADDQ 8+x, CX; ADCQ $0, R15; \ + MOVQ 16+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX, R8; MOVQ R10, 24+z; \ + MOVQ 24+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; ADDQ R8, R11; ADCQ $0, DX; MOVQ DX, R8; \ + MOVQ 32+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; \ + MOVQ 40+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; ADDQ R8, R13; ADCQ $0, DX; MOVQ DX, R8; \ + MOVQ 48+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R9; \ + \ + MOVQ 16+x, CX; \ + MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ + ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ R11, 32+z; \ + MOVQ R15, AX; NEGQ AX; ANDQ 16+x,AX; ADDQ AX, DX; ADCQ $0, R13; MOVQ DX, R8; \ + ADDQ 16+x, CX; ADCQ $0, R15; \ + MOVQ 24+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; MOVQ R12, 40+z; \ + MOVQ 32+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; ADDQ R8, R13; ADCQ $0, DX; MOVQ DX, R8; \ + MOVQ 40+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R8; \ + MOVQ 48+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; ADDQ R8, R9; ADCQ $0, DX; MOVQ DX,R10; \ + \ + MOVQ 24+x, CX; \ + MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ + ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ R13, 48+z; \ + MOVQ R15, AX; NEGQ AX; ANDQ 24+x,AX; ADDQ AX, DX; ADCQ $0, R9; MOVQ DX, R8; \ + ADDQ 24+x, CX; ADCQ $0, R15; \ + MOVQ 32+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R8; MOVQ R14, 56+z; \ + MOVQ 40+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; ADDQ R8, R9; ADCQ $0, DX; MOVQ DX, R8; \ + MOVQ 48+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX,R11; \ + \ + MOVQ 32+x, CX; \ + MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ + ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ R9, 64+z; \ + MOVQ R15, AX; NEGQ AX; ANDQ 32+x,AX; ADDQ AX, DX; ADCQ $0, R11; MOVQ DX, R8; \ + ADDQ 32+x, CX; ADCQ $0, R15; \ + MOVQ 40+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX, R8; MOVQ R10, 72+z; \ + MOVQ 48+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; ADDQ R8, R11; ADCQ $0, 
DX; MOVQ DX,R12; \ + \ + XORL R13, R13; \ + XORL R14, R14; \ + MOVQ 40+x, CX; \ + MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ + ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ R11, 80+z; \ + MOVQ R15, AX; NEGQ AX; ANDQ 40+x,AX; ADDQ AX, DX; ADCQ $0, R13; MOVQ DX, R8; \ + ADDQ 40+x, CX; ADCQ $0, R15; \ + MOVQ 48+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; MOVQ R12, 88+z; \ + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADDQ R8, R13; ADCQ $0,R14; \ + \ + XORL R9, R9; \ + MOVQ 48+x, CX; \ + MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ + ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ R13, 96+z; \ + MOVQ R15, AX; NEGQ AX; ANDQ 48+x,AX; ADDQ AX, DX; ADCQ $0, R9; MOVQ DX, R8; \ + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADDQ R8,R14; ADCQ $0, R9; MOVQ R14, 104+z; + + +// integerSqrAdx squares x and stores in z +// Uses: AX, CX, DX, R8-R15, FLAGS +// Instr: x86_64, bmi2, adx +#define integerSqrAdx(z,x) \ + XORL R15, R15; \ + MOVQ 0+x, DX; \ + ;;;;;;;;;;;;;; MULXQ DX, AX, R8; MOVQ AX, 0+z; \ + ADDQ DX, DX; ADCQ $0, R15; CLC; \ + MULXQ 8+x, AX, R9; ADCXQ AX, R8; MOVQ R8, 8+z; \ + MULXQ 16+x, AX, R10; ADCXQ AX, R9; MOVQ $0, R8;\ + MULXQ 24+x, AX, R11; ADCXQ AX, R10; \ + MULXQ 32+x, AX, R12; ADCXQ AX, R11; \ + MULXQ 40+x, AX, R13; ADCXQ AX, R12; \ + MULXQ 48+x, AX, R14; ADCXQ AX, R13; \ + ;;;;;;;;;;;;;;;;;;;; ADCXQ R8, R14; \ + \ + MOVQ 8+x, DX; \ + MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ + MULXQ AX, AX, CX; \ + MOVQ R15, R8; NEGQ R8; ANDQ 8+x, R8; \ + ADDQ AX, R9; MOVQ R9, 16+z; \ + ADCQ CX, R8; \ + ADCQ $0, R11; \ + ADDQ 8+x, DX; \ + ADCQ $0, R15; \ + XORL R9, R9; ;;;;;;;;;;;;;;;;;;;;; ADOXQ R8, R10; \ + MULXQ 16+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; MOVQ R10, 24+z; \ + MULXQ 24+x, AX, CX; ADCXQ AX, R11; ADOXQ CX, R12; MOVQ $0, R10; \ + MULXQ 32+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; \ + MULXQ 40+x, AX, CX; ADCXQ AX, R13; ADOXQ CX, R14; \ + MULXQ 48+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; \ + ;;;;;;;;;;;;;;;;;;; ADCXQ R10, R9; \ + \ + MOVQ 16+x, DX; \ + MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ + MULXQ AX, AX, CX; \ + MOVQ R15, R8; NEGQ R8; ANDQ 16+x, R8; \ + ADDQ AX, R11; MOVQ R11, 32+z; \ + ADCQ CX, R8; \ + ADCQ $0, R13; \ + ADDQ 16+x, DX; \ + ADCQ $0, R15; \ + XORL R11, R11; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R12; \ + MULXQ 24+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; MOVQ R12, 40+z; \ + MULXQ 32+x, AX, CX; ADCXQ AX, R13; ADOXQ CX, R14; MOVQ $0, R12; \ + MULXQ 40+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; \ + MULXQ 48+x, AX, CX; ADCXQ AX, R9; ADOXQ CX, R10; \ + ;;;;;;;;;;;;;;;;;;; ADCXQ R11,R10; \ + \ + MOVQ 24+x, DX; \ + MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ + MULXQ AX, AX, CX; \ + MOVQ R15, R8; NEGQ R8; ANDQ 24+x, R8; \ + ADDQ AX, R13; MOVQ R13, 48+z; \ + ADCQ CX, R8; \ + ADCQ $0, R9; \ + ADDQ 24+x, DX; \ + ADCQ $0, R15; \ + XORL R13, R13; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R14; \ + MULXQ 32+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; MOVQ R14, 56+z; \ + MULXQ 40+x, AX, CX; ADCXQ AX, R9; ADOXQ CX, R10; MOVQ $0, R14; \ + MULXQ 48+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; \ + ;;;;;;;;;;;;;;;;;;; ADCXQ R12,R11; \ + \ + MOVQ 32+x, DX; \ + MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ + MULXQ AX, AX, CX; \ + MOVQ R15, R8; NEGQ R8; ANDQ 32+x, R8; \ + ADDQ AX, R9; MOVQ R9, 64+z; \ + ADCQ CX, R8; \ + ADCQ $0, R11; \ + ADDQ 32+x, DX; \ + ADCQ $0, R15; \ + XORL R9, R9; ;;;;;;;;;;;;;;;;;;;;; ADOXQ R8, R10; \ + MULXQ 40+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; MOVQ 
R10, 72+z; \ + MULXQ 48+x, AX, CX; ADCXQ AX, R11; ADOXQ CX, R12; \ + ;;;;;;;;;;;;;;;;;;; ADCXQ R13,R12; \ + \ + MOVQ 40+x, DX; \ + MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ + MULXQ AX, AX, CX; \ + MOVQ R15, R8; NEGQ R8; ANDQ 40+x, R8; \ + ADDQ AX, R11; MOVQ R11, 80+z; \ + ADCQ CX, R8; \ + ADCQ $0, R13; \ + ADDQ 40+x, DX; \ + ADCQ $0, R15; \ + XORL R11, R11; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R12; \ + MULXQ 48+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; MOVQ R12, 88+z; \ + ;;;;;;;;;;;;;;;;;;; ADCXQ R14,R13; \ + \ + MOVQ 48+x, DX; \ + MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ + MULXQ AX, AX, CX; \ + MOVQ R15, R8; NEGQ R8; ANDQ 48+x, R8; \ + XORL R10, R10; ;;;;;;;;;;;;;; ADOXQ CX, R14; \ + ;;;;;;;;;;;;;; ADCXQ AX, R13; ;;;;;;;;;;;;;; MOVQ R13, 96+z; \ + ;;;;;;;;;;;;;; ADCXQ R8, R14; MOVQ R14, 104+z; + +// reduceFromDoubleLeg finds a z=x modulo p such that z<2^448 and stores in z +// Uses: AX, R8-R15, FLAGS +// Instr: x86_64 +#define reduceFromDoubleLeg(z,x) \ + /* ( ,2C13,2C12,2C11,2C10|C10,C9,C8, C7) + (C6,...,C0) */ \ + /* (r14, r13, r12, r11, r10,r9,r8,r15) */ \ + MOVQ 80+x,AX; MOVQ AX,R10; \ + MOVQ $0xFFFFFFFF00000000, R8; \ + ANDQ R8,R10; \ + \ + MOVQ $0,R14; \ + MOVQ 104+x,R13; SHLQ $1,R13,R14; \ + MOVQ 96+x,R12; SHLQ $1,R12,R13; \ + MOVQ 88+x,R11; SHLQ $1,R11,R12; \ + MOVQ 72+x, R9; SHLQ $1,R10,R11; \ + MOVQ 64+x, R8; SHLQ $1,R10; \ + MOVQ $0xFFFFFFFF,R15; ANDQ R15,AX; ORQ AX,R10; \ + MOVQ 56+x,R15; \ + \ + ADDQ 0+x,R15; MOVQ R15, 0+z; MOVQ 56+x,R15; \ + ADCQ 8+x, R8; MOVQ R8, 8+z; MOVQ 64+x, R8; \ + ADCQ 16+x, R9; MOVQ R9,16+z; MOVQ 72+x, R9; \ + ADCQ 24+x,R10; MOVQ R10,24+z; MOVQ 80+x,R10; \ + ADCQ 32+x,R11; MOVQ R11,32+z; MOVQ 88+x,R11; \ + ADCQ 40+x,R12; MOVQ R12,40+z; MOVQ 96+x,R12; \ + ADCQ 48+x,R13; MOVQ R13,48+z; MOVQ 104+x,R13; \ + ADCQ $0,R14; \ + /* (c10c9,c9c8,c8c7,c7c13,c13c12,c12c11,c11c10) + (c6,...,c0) */ \ + /* ( r9, r8, r15, r13, r12, r11, r10) */ \ + MOVQ R10, AX; \ + SHRQ $32,R11,R10; \ + SHRQ $32,R12,R11; \ + SHRQ $32,R13,R12; \ + SHRQ $32,R15,R13; \ + SHRQ $32, R8,R15; \ + SHRQ $32, R9, R8; \ + SHRQ $32, AX, R9; \ + \ + ADDQ 0+z,R10; \ + ADCQ 8+z,R11; \ + ADCQ 16+z,R12; \ + ADCQ 24+z,R13; \ + ADCQ 32+z,R15; \ + ADCQ 40+z, R8; \ + ADCQ 48+z, R9; \ + ADCQ $0,R14; \ + /* ( c7) + (c6,...,c0) */ \ + /* (r14) */ \ + MOVQ R14, AX; SHLQ $32, AX; \ + ADDQ R14,R10; MOVQ $0,R14; \ + ADCQ $0,R11; \ + ADCQ $0,R12; \ + ADCQ AX,R13; \ + ADCQ $0,R15; \ + ADCQ $0, R8; \ + ADCQ $0, R9; \ + ADCQ $0,R14; \ + /* ( c7) + (c6,...,c0) */ \ + /* (r14) */ \ + MOVQ R14, AX; SHLQ $32,AX; \ + ADDQ R14,R10; MOVQ R10, 0+z; \ + ADCQ $0,R11; MOVQ R11, 8+z; \ + ADCQ $0,R12; MOVQ R12,16+z; \ + ADCQ AX,R13; MOVQ R13,24+z; \ + ADCQ $0,R15; MOVQ R15,32+z; \ + ADCQ $0, R8; MOVQ R8,40+z; \ + ADCQ $0, R9; MOVQ R9,48+z; + +// reduceFromDoubleAdx finds a z=x modulo p such that z<2^448 and stores in z +// Uses: AX, R8-R15, FLAGS +// Instr: x86_64, adx +#define reduceFromDoubleAdx(z,x) \ + /* ( ,2C13,2C12,2C11,2C10|C10,C9,C8, C7) + (C6,...,C0) */ \ + /* (r14, r13, r12, r11, r10,r9,r8,r15) */ \ + MOVQ 80+x,AX; MOVQ AX,R10; \ + MOVQ $0xFFFFFFFF00000000, R8; \ + ANDQ R8,R10; \ + \ + MOVQ $0,R14; \ + MOVQ 104+x,R13; SHLQ $1,R13,R14; \ + MOVQ 96+x,R12; SHLQ $1,R12,R13; \ + MOVQ 88+x,R11; SHLQ $1,R11,R12; \ + MOVQ 72+x, R9; SHLQ $1,R10,R11; \ + MOVQ 64+x, R8; SHLQ $1,R10; \ + MOVQ $0xFFFFFFFF,R15; ANDQ R15,AX; ORQ AX,R10; \ + MOVQ 56+x,R15; \ + \ + XORL AX,AX; \ + ADCXQ 0+x,R15; MOVQ R15, 0+z; MOVQ 56+x,R15; \ + ADCXQ 8+x, R8; MOVQ R8, 8+z; MOVQ 64+x, R8; \ + ADCXQ 16+x, R9; MOVQ R9,16+z; 
MOVQ 72+x, R9; \ + ADCXQ 24+x,R10; MOVQ R10,24+z; MOVQ 80+x,R10; \ + ADCXQ 32+x,R11; MOVQ R11,32+z; MOVQ 88+x,R11; \ + ADCXQ 40+x,R12; MOVQ R12,40+z; MOVQ 96+x,R12; \ + ADCXQ 48+x,R13; MOVQ R13,48+z; MOVQ 104+x,R13; \ + ADCXQ AX,R14; \ + /* (c10c9,c9c8,c8c7,c7c13,c13c12,c12c11,c11c10) + (c6,...,c0) */ \ + /* ( r9, r8, r15, r13, r12, r11, r10) */ \ + MOVQ R10, AX; \ + SHRQ $32,R11,R10; \ + SHRQ $32,R12,R11; \ + SHRQ $32,R13,R12; \ + SHRQ $32,R15,R13; \ + SHRQ $32, R8,R15; \ + SHRQ $32, R9, R8; \ + SHRQ $32, AX, R9; \ + \ + XORL AX,AX; \ + ADCXQ 0+z,R10; \ + ADCXQ 8+z,R11; \ + ADCXQ 16+z,R12; \ + ADCXQ 24+z,R13; \ + ADCXQ 32+z,R15; \ + ADCXQ 40+z, R8; \ + ADCXQ 48+z, R9; \ + ADCXQ AX,R14; \ + /* ( c7) + (c6,...,c0) */ \ + /* (r14) */ \ + MOVQ R14, AX; SHLQ $32, AX; \ + CLC; \ + ADCXQ R14,R10; MOVQ $0,R14; \ + ADCXQ R14,R11; \ + ADCXQ R14,R12; \ + ADCXQ AX,R13; \ + ADCXQ R14,R15; \ + ADCXQ R14, R8; \ + ADCXQ R14, R9; \ + ADCXQ R14,R14; \ + /* ( c7) + (c6,...,c0) */ \ + /* (r14) */ \ + MOVQ R14, AX; SHLQ $32, AX; \ + CLC; \ + ADCXQ R14,R10; MOVQ R10, 0+z; MOVQ $0,R14; \ + ADCXQ R14,R11; MOVQ R11, 8+z; \ + ADCXQ R14,R12; MOVQ R12,16+z; \ + ADCXQ AX,R13; MOVQ R13,24+z; \ + ADCXQ R14,R15; MOVQ R15,32+z; \ + ADCXQ R14, R8; MOVQ R8,40+z; \ + ADCXQ R14, R9; MOVQ R9,48+z; + +// addSub calculates two operations: x,y = x+y,x-y +// Uses: AX, DX, R8-R15, FLAGS +#define addSub(x,y) \ + MOVQ 0+x, R8; ADDQ 0+y, R8; \ + MOVQ 8+x, R9; ADCQ 8+y, R9; \ + MOVQ 16+x, R10; ADCQ 16+y, R10; \ + MOVQ 24+x, R11; ADCQ 24+y, R11; \ + MOVQ 32+x, R12; ADCQ 32+y, R12; \ + MOVQ 40+x, R13; ADCQ 40+y, R13; \ + MOVQ 48+x, R14; ADCQ 48+y, R14; \ + MOVQ $0, AX; ADCQ $0, AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + ADDQ AX, R8; MOVQ $0, AX; \ + ADCQ $0, R9; \ + ADCQ $0, R10; \ + ADCQ DX, R11; \ + ADCQ $0, R12; \ + ADCQ $0, R13; \ + ADCQ $0, R14; \ + ADCQ $0, AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + ADDQ AX, R8; MOVQ 0+x,AX; MOVQ R8, 0+x; MOVQ AX, R8; \ + ADCQ $0, R9; MOVQ 8+x,AX; MOVQ R9, 8+x; MOVQ AX, R9; \ + ADCQ $0, R10; MOVQ 16+x,AX; MOVQ R10, 16+x; MOVQ AX, R10; \ + ADCQ DX, R11; MOVQ 24+x,AX; MOVQ R11, 24+x; MOVQ AX, R11; \ + ADCQ $0, R12; MOVQ 32+x,AX; MOVQ R12, 32+x; MOVQ AX, R12; \ + ADCQ $0, R13; MOVQ 40+x,AX; MOVQ R13, 40+x; MOVQ AX, R13; \ + ADCQ $0, R14; MOVQ 48+x,AX; MOVQ R14, 48+x; MOVQ AX, R14; \ + SUBQ 0+y, R8; \ + SBBQ 8+y, R9; \ + SBBQ 16+y, R10; \ + SBBQ 24+y, R11; \ + SBBQ 32+y, R12; \ + SBBQ 40+y, R13; \ + SBBQ 48+y, R14; \ + MOVQ $0, AX; SETCS AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + SUBQ AX, R8; MOVQ $0, AX; \ + SBBQ $0, R9; \ + SBBQ $0, R10; \ + SBBQ DX, R11; \ + SBBQ $0, R12; \ + SBBQ $0, R13; \ + SBBQ $0, R14; \ + SETCS AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + SUBQ AX, R8; MOVQ R8, 0+y; \ + SBBQ $0, R9; MOVQ R9, 8+y; \ + SBBQ $0, R10; MOVQ R10, 16+y; \ + SBBQ DX, R11; MOVQ R11, 24+y; \ + SBBQ $0, R12; MOVQ R12, 32+y; \ + SBBQ $0, R13; MOVQ R13, 40+y; \ + SBBQ $0, R14; MOVQ R14, 48+y; diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s new file mode 100644 index 00000000..3f1f07c9 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s @@ -0,0 +1,75 @@ +//go:build amd64 && !purego +// +build amd64,!purego + +#include "textflag.h" +#include "fp_amd64.h" + +// func cmovAmd64(x, y *Elt, n uint) +TEXT ·cmovAmd64(SB),NOSPLIT,$0-24 + MOVQ x+0(FP), DI + MOVQ y+8(FP), SI + MOVQ n+16(FP), BX + cselect(0(DI),0(SI),BX) + RET + +// func cswapAmd64(x, y *Elt, n uint) +TEXT ·cswapAmd64(SB),NOSPLIT,$0-24 + MOVQ 
x+0(FP), DI + MOVQ y+8(FP), SI + MOVQ n+16(FP), BX + cswap(0(DI),0(SI),BX) + RET + +// func subAmd64(z, x, y *Elt) +TEXT ·subAmd64(SB),NOSPLIT,$0-24 + MOVQ z+0(FP), DI + MOVQ x+8(FP), SI + MOVQ y+16(FP), BX + subtraction(0(DI),0(SI),0(BX)) + RET + +// func addsubAmd64(x, y *Elt) +TEXT ·addsubAmd64(SB),NOSPLIT,$0-16 + MOVQ x+0(FP), DI + MOVQ y+8(FP), SI + addSub(0(DI),0(SI)) + RET + +#define addLegacy \ + additionLeg(0(DI),0(SI),0(BX)) +#define addBmi2Adx \ + additionAdx(0(DI),0(SI),0(BX)) + +#define mulLegacy \ + integerMulLeg(0(SP),0(SI),0(BX)) \ + reduceFromDoubleLeg(0(DI),0(SP)) +#define mulBmi2Adx \ + integerMulAdx(0(SP),0(SI),0(BX)) \ + reduceFromDoubleAdx(0(DI),0(SP)) + +#define sqrLegacy \ + integerSqrLeg(0(SP),0(SI)) \ + reduceFromDoubleLeg(0(DI),0(SP)) +#define sqrBmi2Adx \ + integerSqrAdx(0(SP),0(SI)) \ + reduceFromDoubleAdx(0(DI),0(SP)) + +// func addAmd64(z, x, y *Elt) +TEXT ·addAmd64(SB),NOSPLIT,$0-24 + MOVQ z+0(FP), DI + MOVQ x+8(FP), SI + MOVQ y+16(FP), BX + CHECK_BMI2ADX(LADD, addLegacy, addBmi2Adx) + +// func mulAmd64(z, x, y *Elt) +TEXT ·mulAmd64(SB),NOSPLIT,$112-24 + MOVQ z+0(FP), DI + MOVQ x+8(FP), SI + MOVQ y+16(FP), BX + CHECK_BMI2ADX(LMUL, mulLegacy, mulBmi2Adx) + +// func sqrAmd64(z, x *Elt) +TEXT ·sqrAmd64(SB),NOSPLIT,$112-16 + MOVQ z+0(FP), DI + MOVQ x+8(FP), SI + CHECK_BMI2ADX(LSQR, sqrLegacy, sqrBmi2Adx) diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_generic.go b/vendor/github.com/cloudflare/circl/math/fp448/fp_generic.go new file mode 100644 index 00000000..47a0b632 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp448/fp_generic.go @@ -0,0 +1,339 @@ +package fp448 + +import ( + "encoding/binary" + "math/bits" +) + +func cmovGeneric(x, y *Elt, n uint) { + m := -uint64(n & 0x1) + x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8]) + x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8]) + x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8]) + x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8]) + x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8]) + x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8]) + x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8]) + + y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8]) + y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8]) + y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8]) + y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8]) + y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8]) + y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8]) + y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8]) + + x0 = (x0 &^ m) | (y0 & m) + x1 = (x1 &^ m) | (y1 & m) + x2 = (x2 &^ m) | (y2 & m) + x3 = (x3 &^ m) | (y3 & m) + x4 = (x4 &^ m) | (y4 & m) + x5 = (x5 &^ m) | (y5 & m) + x6 = (x6 &^ m) | (y6 & m) + + binary.LittleEndian.PutUint64(x[0*8:1*8], x0) + binary.LittleEndian.PutUint64(x[1*8:2*8], x1) + binary.LittleEndian.PutUint64(x[2*8:3*8], x2) + binary.LittleEndian.PutUint64(x[3*8:4*8], x3) + binary.LittleEndian.PutUint64(x[4*8:5*8], x4) + binary.LittleEndian.PutUint64(x[5*8:6*8], x5) + binary.LittleEndian.PutUint64(x[6*8:7*8], x6) +} + +func cswapGeneric(x, y *Elt, n uint) { + m := -uint64(n & 0x1) + x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8]) + x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8]) + x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8]) + x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8]) + x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8]) + x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8]) + x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8]) + + y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8]) + y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8]) + y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8]) + y3 := 
binary.LittleEndian.Uint64(y[3*8 : 4*8]) + y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8]) + y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8]) + y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8]) + + t0 := m & (x0 ^ y0) + t1 := m & (x1 ^ y1) + t2 := m & (x2 ^ y2) + t3 := m & (x3 ^ y3) + t4 := m & (x4 ^ y4) + t5 := m & (x5 ^ y5) + t6 := m & (x6 ^ y6) + x0 ^= t0 + x1 ^= t1 + x2 ^= t2 + x3 ^= t3 + x4 ^= t4 + x5 ^= t5 + x6 ^= t6 + y0 ^= t0 + y1 ^= t1 + y2 ^= t2 + y3 ^= t3 + y4 ^= t4 + y5 ^= t5 + y6 ^= t6 + + binary.LittleEndian.PutUint64(x[0*8:1*8], x0) + binary.LittleEndian.PutUint64(x[1*8:2*8], x1) + binary.LittleEndian.PutUint64(x[2*8:3*8], x2) + binary.LittleEndian.PutUint64(x[3*8:4*8], x3) + binary.LittleEndian.PutUint64(x[4*8:5*8], x4) + binary.LittleEndian.PutUint64(x[5*8:6*8], x5) + binary.LittleEndian.PutUint64(x[6*8:7*8], x6) + + binary.LittleEndian.PutUint64(y[0*8:1*8], y0) + binary.LittleEndian.PutUint64(y[1*8:2*8], y1) + binary.LittleEndian.PutUint64(y[2*8:3*8], y2) + binary.LittleEndian.PutUint64(y[3*8:4*8], y3) + binary.LittleEndian.PutUint64(y[4*8:5*8], y4) + binary.LittleEndian.PutUint64(y[5*8:6*8], y5) + binary.LittleEndian.PutUint64(y[6*8:7*8], y6) +} + +func addGeneric(z, x, y *Elt) { + x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8]) + x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8]) + x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8]) + x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8]) + x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8]) + x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8]) + x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8]) + + y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8]) + y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8]) + y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8]) + y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8]) + y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8]) + y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8]) + y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8]) + + z0, c0 := bits.Add64(x0, y0, 0) + z1, c1 := bits.Add64(x1, y1, c0) + z2, c2 := bits.Add64(x2, y2, c1) + z3, c3 := bits.Add64(x3, y3, c2) + z4, c4 := bits.Add64(x4, y4, c3) + z5, c5 := bits.Add64(x5, y5, c4) + z6, z7 := bits.Add64(x6, y6, c5) + + z0, c0 = bits.Add64(z0, z7, 0) + z1, c1 = bits.Add64(z1, 0, c0) + z2, c2 = bits.Add64(z2, 0, c1) + z3, c3 = bits.Add64(z3, z7<<32, c2) + z4, c4 = bits.Add64(z4, 0, c3) + z5, c5 = bits.Add64(z5, 0, c4) + z6, z7 = bits.Add64(z6, 0, c5) + + z0, c0 = bits.Add64(z0, z7, 0) + z1, c1 = bits.Add64(z1, 0, c0) + z2, c2 = bits.Add64(z2, 0, c1) + z3, c3 = bits.Add64(z3, z7<<32, c2) + z4, c4 = bits.Add64(z4, 0, c3) + z5, c5 = bits.Add64(z5, 0, c4) + z6, _ = bits.Add64(z6, 0, c5) + + binary.LittleEndian.PutUint64(z[0*8:1*8], z0) + binary.LittleEndian.PutUint64(z[1*8:2*8], z1) + binary.LittleEndian.PutUint64(z[2*8:3*8], z2) + binary.LittleEndian.PutUint64(z[3*8:4*8], z3) + binary.LittleEndian.PutUint64(z[4*8:5*8], z4) + binary.LittleEndian.PutUint64(z[5*8:6*8], z5) + binary.LittleEndian.PutUint64(z[6*8:7*8], z6) +} + +func subGeneric(z, x, y *Elt) { + x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8]) + x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8]) + x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8]) + x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8]) + x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8]) + x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8]) + x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8]) + + y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8]) + y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8]) + y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8]) + y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8]) + y4 := 
binary.LittleEndian.Uint64(y[4*8 : 5*8]) + y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8]) + y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8]) + + z0, c0 := bits.Sub64(x0, y0, 0) + z1, c1 := bits.Sub64(x1, y1, c0) + z2, c2 := bits.Sub64(x2, y2, c1) + z3, c3 := bits.Sub64(x3, y3, c2) + z4, c4 := bits.Sub64(x4, y4, c3) + z5, c5 := bits.Sub64(x5, y5, c4) + z6, z7 := bits.Sub64(x6, y6, c5) + + z0, c0 = bits.Sub64(z0, z7, 0) + z1, c1 = bits.Sub64(z1, 0, c0) + z2, c2 = bits.Sub64(z2, 0, c1) + z3, c3 = bits.Sub64(z3, z7<<32, c2) + z4, c4 = bits.Sub64(z4, 0, c3) + z5, c5 = bits.Sub64(z5, 0, c4) + z6, z7 = bits.Sub64(z6, 0, c5) + + z0, c0 = bits.Sub64(z0, z7, 0) + z1, c1 = bits.Sub64(z1, 0, c0) + z2, c2 = bits.Sub64(z2, 0, c1) + z3, c3 = bits.Sub64(z3, z7<<32, c2) + z4, c4 = bits.Sub64(z4, 0, c3) + z5, c5 = bits.Sub64(z5, 0, c4) + z6, _ = bits.Sub64(z6, 0, c5) + + binary.LittleEndian.PutUint64(z[0*8:1*8], z0) + binary.LittleEndian.PutUint64(z[1*8:2*8], z1) + binary.LittleEndian.PutUint64(z[2*8:3*8], z2) + binary.LittleEndian.PutUint64(z[3*8:4*8], z3) + binary.LittleEndian.PutUint64(z[4*8:5*8], z4) + binary.LittleEndian.PutUint64(z[5*8:6*8], z5) + binary.LittleEndian.PutUint64(z[6*8:7*8], z6) +} + +func addsubGeneric(x, y *Elt) { + z := &Elt{} + addGeneric(z, x, y) + subGeneric(y, x, y) + *x = *z +} + +func mulGeneric(z, x, y *Elt) { + x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8]) + x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8]) + x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8]) + x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8]) + x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8]) + x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8]) + x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8]) + + y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8]) + y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8]) + y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8]) + y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8]) + y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8]) + y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8]) + y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8]) + + yy := [7]uint64{y0, y1, y2, y3, y4, y5, y6} + zz := [7]uint64{} + + yi := yy[0] + h0, l0 := bits.Mul64(x0, yi) + h1, l1 := bits.Mul64(x1, yi) + h2, l2 := bits.Mul64(x2, yi) + h3, l3 := bits.Mul64(x3, yi) + h4, l4 := bits.Mul64(x4, yi) + h5, l5 := bits.Mul64(x5, yi) + h6, l6 := bits.Mul64(x6, yi) + + zz[0] = l0 + a0, c0 := bits.Add64(h0, l1, 0) + a1, c1 := bits.Add64(h1, l2, c0) + a2, c2 := bits.Add64(h2, l3, c1) + a3, c3 := bits.Add64(h3, l4, c2) + a4, c4 := bits.Add64(h4, l5, c3) + a5, c5 := bits.Add64(h5, l6, c4) + a6, _ := bits.Add64(h6, 0, c5) + + for i := 1; i < 7; i++ { + yi = yy[i] + h0, l0 = bits.Mul64(x0, yi) + h1, l1 = bits.Mul64(x1, yi) + h2, l2 = bits.Mul64(x2, yi) + h3, l3 = bits.Mul64(x3, yi) + h4, l4 = bits.Mul64(x4, yi) + h5, l5 = bits.Mul64(x5, yi) + h6, l6 = bits.Mul64(x6, yi) + + zz[i], c0 = bits.Add64(a0, l0, 0) + a0, c1 = bits.Add64(a1, l1, c0) + a1, c2 = bits.Add64(a2, l2, c1) + a2, c3 = bits.Add64(a3, l3, c2) + a3, c4 = bits.Add64(a4, l4, c3) + a4, c5 = bits.Add64(a5, l5, c4) + a5, a6 = bits.Add64(a6, l6, c5) + + a0, c0 = bits.Add64(a0, h0, 0) + a1, c1 = bits.Add64(a1, h1, c0) + a2, c2 = bits.Add64(a2, h2, c1) + a3, c3 = bits.Add64(a3, h3, c2) + a4, c4 = bits.Add64(a4, h4, c3) + a5, c5 = bits.Add64(a5, h5, c4) + a6, _ = bits.Add64(a6, h6, c5) + } + red64(z, &zz, &[7]uint64{a0, a1, a2, a3, a4, a5, a6}) +} + +func sqrGeneric(z, x *Elt) { mulGeneric(z, x, x) } + +func red64(z *Elt, l, h *[7]uint64) { + /* (2C13, 2C12, 2C11, 2C10|C10, C9, C8, C7) + (C6,...,C0) */ + h0 := h[0] 
+ h1 := h[1] + h2 := h[2] + h3 := ((h[3] & (0xFFFFFFFF << 32)) << 1) | (h[3] & 0xFFFFFFFF) + h4 := (h[3] >> 63) | (h[4] << 1) + h5 := (h[4] >> 63) | (h[5] << 1) + h6 := (h[5] >> 63) | (h[6] << 1) + h7 := (h[6] >> 63) + + l0, c0 := bits.Add64(h0, l[0], 0) + l1, c1 := bits.Add64(h1, l[1], c0) + l2, c2 := bits.Add64(h2, l[2], c1) + l3, c3 := bits.Add64(h3, l[3], c2) + l4, c4 := bits.Add64(h4, l[4], c3) + l5, c5 := bits.Add64(h5, l[5], c4) + l6, c6 := bits.Add64(h6, l[6], c5) + l7, _ := bits.Add64(h7, 0, c6) + + /* (C10C9, C9C8,C8C7,C7C13,C13C12,C12C11,C11C10) + (C6,...,C0) */ + h0 = (h[3] >> 32) | (h[4] << 32) + h1 = (h[4] >> 32) | (h[5] << 32) + h2 = (h[5] >> 32) | (h[6] << 32) + h3 = (h[6] >> 32) | (h[0] << 32) + h4 = (h[0] >> 32) | (h[1] << 32) + h5 = (h[1] >> 32) | (h[2] << 32) + h6 = (h[2] >> 32) | (h[3] << 32) + + l0, c0 = bits.Add64(l0, h0, 0) + l1, c1 = bits.Add64(l1, h1, c0) + l2, c2 = bits.Add64(l2, h2, c1) + l3, c3 = bits.Add64(l3, h3, c2) + l4, c4 = bits.Add64(l4, h4, c3) + l5, c5 = bits.Add64(l5, h5, c4) + l6, c6 = bits.Add64(l6, h6, c5) + l7, _ = bits.Add64(l7, 0, c6) + + /* (C7) + (C6,...,C0) */ + l0, c0 = bits.Add64(l0, l7, 0) + l1, c1 = bits.Add64(l1, 0, c0) + l2, c2 = bits.Add64(l2, 0, c1) + l3, c3 = bits.Add64(l3, l7<<32, c2) + l4, c4 = bits.Add64(l4, 0, c3) + l5, c5 = bits.Add64(l5, 0, c4) + l6, l7 = bits.Add64(l6, 0, c5) + + /* (C7) + (C6,...,C0) */ + l0, c0 = bits.Add64(l0, l7, 0) + l1, c1 = bits.Add64(l1, 0, c0) + l2, c2 = bits.Add64(l2, 0, c1) + l3, c3 = bits.Add64(l3, l7<<32, c2) + l4, c4 = bits.Add64(l4, 0, c3) + l5, c5 = bits.Add64(l5, 0, c4) + l6, _ = bits.Add64(l6, 0, c5) + + binary.LittleEndian.PutUint64(z[0*8:1*8], l0) + binary.LittleEndian.PutUint64(z[1*8:2*8], l1) + binary.LittleEndian.PutUint64(z[2*8:3*8], l2) + binary.LittleEndian.PutUint64(z[3*8:4*8], l3) + binary.LittleEndian.PutUint64(z[4*8:5*8], l4) + binary.LittleEndian.PutUint64(z[5*8:6*8], l5) + binary.LittleEndian.PutUint64(z[6*8:7*8], l6) +} diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_noasm.go b/vendor/github.com/cloudflare/circl/math/fp448/fp_noasm.go new file mode 100644 index 00000000..a62225d2 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp448/fp_noasm.go @@ -0,0 +1,12 @@ +//go:build !amd64 || purego +// +build !amd64 purego + +package fp448 + +func cmov(x, y *Elt, n uint) { cmovGeneric(x, y, n) } +func cswap(x, y *Elt, n uint) { cswapGeneric(x, y, n) } +func add(z, x, y *Elt) { addGeneric(z, x, y) } +func sub(z, x, y *Elt) { subGeneric(z, x, y) } +func addsub(x, y *Elt) { addsubGeneric(x, y) } +func mul(z, x, y *Elt) { mulGeneric(z, x, y) } +func sqr(z, x *Elt) { sqrGeneric(z, x) } diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fuzzer.go b/vendor/github.com/cloudflare/circl/math/fp448/fuzzer.go new file mode 100644 index 00000000..2d7afc80 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp448/fuzzer.go @@ -0,0 +1,75 @@ +//go:build gofuzz +// +build gofuzz + +// How to run the fuzzer: +// +// $ go get -u github.com/dvyukov/go-fuzz/go-fuzz +// $ go get -u github.com/dvyukov/go-fuzz/go-fuzz-build +// $ go-fuzz-build -libfuzzer -func FuzzReduction -o lib.a +// $ clang -fsanitize=fuzzer lib.a -o fu.exe +// $ ./fu.exe +package fp448 + +import ( + "encoding/binary" + "fmt" + "math/big" + + "github.com/cloudflare/circl/internal/conv" +) + +// FuzzReduction is a fuzzer target for red64 function, which reduces t +// (112 bits) to a number t' (56 bits) congruent modulo p448. 
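The reference loop inside FuzzReduction leans on the shape of the prime: with p = 2^448 − 2^224 − 1 we have 2^448 ≡ 2^224 + 1 (mod p), so a wide value t = hi·2^448 + lo (here 2·Size = 112 bytes, i.e. 896 bits) can be folded to hi·(2^224 + 1) + lo without changing its residue, which is the same fold red64 performs on 64-bit words. A minimal standalone sketch of that fold, using only math/big (all names here are illustrative, not part of the vendored package):

    // Folds a wide integer below 2^448 using 2^448 ≡ 2^224 + 1 (mod p448).
    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        one := big.NewInt(1)
        two448 := new(big.Int).Lsh(one, 448)
        mask448 := new(big.Int).Sub(two448, one)
        fold := new(big.Int).Add(new(big.Int).Lsh(one, 224), one) // 2^224 + 1

        t := new(big.Int).Lsh(one, 895) // an arbitrary 896-bit input
        var lo, hi big.Int
        for t.Cmp(two448) >= 0 {
            lo.And(t, mask448)           // low 448 bits
            hi.Rsh(t, 448)               // everything above
            t.Mul(&hi, fold).Add(t, &lo) // t ≡ hi*(2^224+1) + lo (mod p)
        }
        fmt.Println(t.BitLen() <= 448) // true; t is congruent to the input mod p448
    }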
+func FuzzReduction(data []byte) int { + if len(data) != 2*Size { + return -1 + } + var got, want Elt + var lo, hi [7]uint64 + a := data[:Size] + b := data[Size:] + lo[0] = binary.LittleEndian.Uint64(a[0*8 : 1*8]) + lo[1] = binary.LittleEndian.Uint64(a[1*8 : 2*8]) + lo[2] = binary.LittleEndian.Uint64(a[2*8 : 3*8]) + lo[3] = binary.LittleEndian.Uint64(a[3*8 : 4*8]) + lo[4] = binary.LittleEndian.Uint64(a[4*8 : 5*8]) + lo[5] = binary.LittleEndian.Uint64(a[5*8 : 6*8]) + lo[6] = binary.LittleEndian.Uint64(a[6*8 : 7*8]) + + hi[0] = binary.LittleEndian.Uint64(b[0*8 : 1*8]) + hi[1] = binary.LittleEndian.Uint64(b[1*8 : 2*8]) + hi[2] = binary.LittleEndian.Uint64(b[2*8 : 3*8]) + hi[3] = binary.LittleEndian.Uint64(b[3*8 : 4*8]) + hi[4] = binary.LittleEndian.Uint64(b[4*8 : 5*8]) + hi[5] = binary.LittleEndian.Uint64(b[5*8 : 6*8]) + hi[6] = binary.LittleEndian.Uint64(b[6*8 : 7*8]) + + red64(&got, &lo, &hi) + + t := conv.BytesLe2BigInt(data[:2*Size]) + + two448 := big.NewInt(1) + two448.Lsh(two448, 448) // 2^448 + mask448 := big.NewInt(1) + mask448.Sub(two448, mask448) // 2^448-1 + two224plus1 := big.NewInt(1) + two224plus1.Lsh(two224plus1, 224) + two224plus1.Add(two224plus1, big.NewInt(1)) // 2^224+1 + + var loBig, hiBig big.Int + for t.Cmp(two448) >= 0 { + loBig.And(t, mask448) + hiBig.Rsh(t, 448) + t.Mul(&hiBig, two224plus1) + t.Add(t, &loBig) + } + conv.BigInt2BytesLe(want[:], t) + + if got != want { + fmt.Printf("in: %v\n", conv.BytesLe2BigInt(data[:2*Size])) + fmt.Printf("got: %v\n", got) + fmt.Printf("want: %v\n", want) + panic("error found") + } + return 1 +} diff --git a/vendor/github.com/cloudflare/circl/math/mlsbset/BUILD.bazel b/vendor/github.com/cloudflare/circl/math/mlsbset/BUILD.bazel new file mode 100644 index 00000000..accc7700 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/mlsbset/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "mlsbset", + srcs = [ + "mlsbset.go", + "power.go", + ], + importmap = "peridot.resf.org/vendor/github.com/cloudflare/circl/math/mlsbset", + importpath = "github.com/cloudflare/circl/math/mlsbset", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/cloudflare/circl/internal/conv"], +) diff --git a/vendor/github.com/cloudflare/circl/math/mlsbset/mlsbset.go b/vendor/github.com/cloudflare/circl/math/mlsbset/mlsbset.go new file mode 100644 index 00000000..a43851b8 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/mlsbset/mlsbset.go @@ -0,0 +1,122 @@ +// Package mlsbset provides a constant-time exponentiation method with precomputation. +// +// References: "Efficient and secure algorithms for GLV-based scalar +// multiplication and their implementation on GLV–GLS curves" by (Faz-Hernandez et al.) +// - https://doi.org/10.1007/s13389-014-0085-7 +// - https://eprint.iacr.org/2013/158 +package mlsbset + +import ( + "errors" + "fmt" + "math/big" + + "github.com/cloudflare/circl/internal/conv" +) + +// EltG is a group element. +type EltG interface{} + +// EltP is a precomputed group element. +type EltP interface{} + +// Group defines the operations required by MLSBSet exponentiation method. +type Group interface { + Identity() EltG // Returns the identity of the group. + Sqr(x EltG) // Calculates x = x^2. + Mul(x EltG, y EltP) // Calculates x = x*y. + NewEltP() EltP // Returns an arbitrary precomputed element. + ExtendedEltP() EltP // Returns the precomputed element x^(2^(w*d)). + Lookup(a EltP, v uint, s, u int32) // Sets a = s*T[v][u]. 
+}
+
+// Params contains the parameters of the encoding.
+type Params struct {
+	T uint // T is the maximum size (in bits) of exponents.
+	V uint // V is the number of tables.
+	W uint // W is the window size.
+	E uint // E is the number of digits per table.
+	D uint // D is the number of digits in total.
+	L uint // L is the length of the code.
+}
+
+// Encoder converts integers into valid powers.
+type Encoder struct{ p Params }
+
+// New produces an encoder of the MLSBSet algorithm.
+func New(t, v, w uint) (Encoder, error) {
+	if !(t > 1 && v >= 1 && w >= 2) {
+		return Encoder{}, errors.New("t>1, v>=1, w>=2")
+	}
+	e := (t + w*v - 1) / (w * v)
+	d := e * v
+	l := d * w
+	return Encoder{Params{t, v, w, e, d, l}}, nil
+}
+
+// Encode converts an odd integer k into a valid power for exponentiation.
+func (m Encoder) Encode(k []byte) (*Power, error) {
+	if len(k) == 0 {
+		return nil, errors.New("empty slice")
+	}
+	if !(len(k) <= int(m.p.L+7)>>3) {
+		return nil, errors.New("k too big")
+	}
+	if k[0]%2 == 0 {
+		return nil, errors.New("k must be odd")
+	}
+	ap := int((m.p.L+7)/8) - len(k)
+	k = append(k, make([]byte, ap)...)
+	s := m.signs(k)
+	b := make([]int32, m.p.L-m.p.D)
+	c := conv.BytesLe2BigInt(k)
+	c.Rsh(c, m.p.D)
+	var bi big.Int
+	for i := m.p.D; i < m.p.L; i++ {
+		c0 := int32(c.Bit(0))
+		b[i-m.p.D] = s[i%m.p.D] * c0
+		bi.SetInt64(int64(b[i-m.p.D] >> 1))
+		c.Rsh(c, 1)
+		c.Sub(c, &bi)
+	}
+	carry := int(c.Int64())
+	return &Power{m, s, b, carry}, nil
+}
+
+// signs calculates the set of signs.
+func (m Encoder) signs(k []byte) []int32 {
+	s := make([]int32, m.p.D)
+	s[m.p.D-1] = 1
+	for i := uint(1); i < m.p.D; i++ {
+		ki := int32((k[i>>3] >> (i & 0x7)) & 0x1)
+		s[i-1] = 2*ki - 1
+	}
+	return s
+}
+
+// GetParams returns the complementary parameters of the encoding.
+func (m Encoder) GetParams() Params { return m.p }
+
+// tableSize returns the size of each table.
+func (m Encoder) tableSize() uint { return 1 << (m.p.W - 1) }
+
+// Elts returns the total number of elements that must be precomputed.
+func (m Encoder) Elts() uint { return m.p.V * m.tableSize() }
+
+// IsExtended returns true if the element x^(2^(wd)) must be calculated.
+func (m Encoder) IsExtended() bool { q := m.p.T / (m.p.V * m.p.W); return m.p.T == q*m.p.V*m.p.W }
+
+// Ops returns the number of squares and multiplications executed during an exponentiation.
+func (m Encoder) Ops() (S uint, M uint) {
+	S = m.p.E
+	M = m.p.E * m.p.V
+	if m.IsExtended() {
+		M++
+	}
+	return
+}
+
+func (m Encoder) String() string {
+	return fmt.Sprintf("T: %v W: %v V: %v e: %v d: %v l: %v wv|t: %v",
+		m.p.T, m.p.W, m.p.V, m.p.E, m.p.D, m.p.L, m.IsExtended())
+}
diff --git a/vendor/github.com/cloudflare/circl/math/mlsbset/power.go b/vendor/github.com/cloudflare/circl/math/mlsbset/power.go
new file mode 100644
index 00000000..3f214c30
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/math/mlsbset/power.go
@@ -0,0 +1,64 @@
+package mlsbset
+
+import "fmt"
+
+// Power is a valid exponent produced by the MLSBSet encoding algorithm.
+type Power struct {
+	set Encoder // parameters of code.
+	s   []int32 // set of signs.
+	b   []int32 // set of digits.
+	c   int     // carry is {0,1}.
+}
+
+// Exp calculates x^k, where x is a predetermined element of a group G.
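Before Exp can run, a scalar must pass through Encoder.Encode above. A short sketch of driving the public API (the parameter choices are illustrative, and a real caller must still supply a Group implementation to run the exponentiation):

    package main

    import (
        "fmt"

        "github.com/cloudflare/circl/math/mlsbset"
    )

    func main() {
        // 256-bit exponents, v=2 tables, window size w=3.
        enc, err := mlsbset.New(256, 2, 3)
        if err != nil {
            panic(err)
        }

        k := make([]byte, 32) // little-endian scalar, must be odd
        k[0] = 0x2b
        k[31] = 0x80

        p, err := enc.Encode(k)
        if err != nil {
            panic(err)
        }

        sqrs, muls := enc.Ops()
        fmt.Println(enc.GetParams(), sqrs, muls)
        _ = p // p.Exp(g) runs the exponentiation for a concrete Group g
    }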
+func (p *Power) Exp(G Group) EltG {
+	a, b := G.Identity(), G.NewEltP()
+	for e := int(p.set.p.E - 1); e >= 0; e-- {
+		G.Sqr(a)
+		for v := uint(0); v < p.set.p.V; v++ {
+			sgnElt, idElt := p.Digit(v, uint(e))
+			G.Lookup(b, v, sgnElt, idElt)
+			G.Mul(a, b)
+		}
+	}
+	if p.set.IsExtended() && p.c == 1 {
+		G.Mul(a, G.ExtendedEltP())
+	}
+	return a
+}
+
+// Digit returns the (v,e)-th digit and its sign.
+func (p *Power) Digit(v, e uint) (sgn, dig int32) {
+	sgn = p.bit(0, v, e)
+	dig = 0
+	for i := p.set.p.W - 1; i > 0; i-- {
+		dig = 2*dig + p.bit(i, v, e)
+	}
+	mask := dig >> 31
+	dig = (dig + mask) ^ mask
+	return sgn, dig
+}
+
+// bit returns the (w,v,e)-th bit of the code.
+func (p *Power) bit(w, v, e uint) int32 {
+	if !(w < p.set.p.W &&
+		v < p.set.p.V &&
+		e < p.set.p.E) {
+		panic(fmt.Errorf("indexes outside (%v,%v,%v)", w, v, e))
+	}
+	if w == 0 {
+		return p.s[p.set.p.E*v+e]
+	}
+	return p.b[p.set.p.D*(w-1)+p.set.p.E*v+e]
+}
+
+func (p *Power) String() string {
+	dig := ""
+	for j := uint(0); j < p.set.p.V; j++ {
+		for i := uint(0); i < p.set.p.E; i++ {
+			s, d := p.Digit(j, i)
+			dig += fmt.Sprintf("(%2v,%2v) = %+2v %+2v\n", j, i, s, d)
+		}
+	}
+	return fmt.Sprintf("len: %v\ncarry: %v\ndigits:\n%v", len(p.b)+len(p.s), p.c, dig)
+}
diff --git a/vendor/github.com/cloudflare/circl/math/primes.go b/vendor/github.com/cloudflare/circl/math/primes.go
new file mode 100644
index 00000000..158fd83a
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/math/primes.go
@@ -0,0 +1,34 @@
+package math
+
+import (
+	"crypto/rand"
+	"io"
+	"math/big"
+)
+
+// IsSafePrime reports whether p is (probably) a safe prime.
+// A prime p=2*q+1 is a safe prime if both p and q are prime.
+// Note that ProbablyPrime is not suitable for judging primes
+// that an adversary may have crafted to fool the test.
+func IsSafePrime(p *big.Int) bool {
+	pdiv2 := new(big.Int).Rsh(p, 1)
+	return p.ProbablyPrime(20) && pdiv2.ProbablyPrime(20)
+}
+
+// SafePrime returns a number of the given bit length that is a safe prime with high probability.
+// The number returned p=2*q+1 is a safe prime if both p and q are primes.
+// SafePrime returns an error for any error returned by rand.Read, or if bits < 2.
+func SafePrime(random io.Reader, bits int) (*big.Int, error) {
+	one := big.NewInt(1)
+	p := new(big.Int)
+	for {
+		q, err := rand.Prime(random, bits-1)
+		if err != nil {
+			return nil, err
+		}
+		p.Lsh(q, 1).Add(p, one)
+		if p.ProbablyPrime(20) {
+			return p, nil
+		}
+	}
+}
diff --git a/vendor/github.com/cloudflare/circl/math/wnaf.go b/vendor/github.com/cloudflare/circl/math/wnaf.go
new file mode 100644
index 00000000..94a1ec50
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/math/wnaf.go
@@ -0,0 +1,84 @@
+// Package math provides some utility functions for big integers.
+package math
+
+import "math/big"
+
+// SignedDigit obtains the signed-digit recoding of n and returns a list L of
+// digits such that n = sum( L[i]*2^(i*(w-1)) ), and each L[i] is an odd number
+// in the set {±1, ±3, ..., ±2^(w-1)-1}. The third parameter ensures that the
+// output has ceil(l/(w-1)) digits.
+//
+// Restrictions:
+//   - n is odd and n > 0.
+//   - 1 < w < 32.
+//   - l >= bit length of n.
+//
+// References:
+//   - Alg.6 in "Exponent Recoding and Regular Exponentiation Algorithms"
+//     by Joye-Tunstall. http://doi.org/10.1007/978-3-642-02384-2_21
+//   - Alg.6 in "Selecting Elliptic Curves for Cryptography: An Efficiency and
+//     Security Analysis" by Bos et al.
+//     http://doi.org/10.1007/s13389-015-0097-y
+func SignedDigit(n *big.Int, w, l uint) []int32 {
+	if n.Sign() <= 0 || n.Bit(0) == 0 {
+		panic("n must be non-zero, odd, and positive")
+	}
+	if w <= 1 || w >= 32 {
+		panic("Verify that 1 < w < 32")
+	}
+	if uint(n.BitLen()) > l {
+		panic("n is too big to fit in l digits")
+	}
+	lenN := (l + (w - 1) - 1) / (w - 1) // ceil(l/(w-1))
+	L := make([]int32, lenN+1)
+	var k, v big.Int
+	k.Set(n)
+
+	var i uint
+	for i = 0; i < lenN; i++ {
+		words := k.Bits()
+		value := int32(words[0] & ((1 << w) - 1))
+		value -= int32(1) << (w - 1)
+		L[i] = value
+		v.SetInt64(int64(value))
+		k.Sub(&k, &v)
+		k.Rsh(&k, w-1)
+	}
+	L[i] = int32(k.Int64())
+	return L
+}
+
+// OmegaNAF obtains the window-w Non-Adjacent Form of a positive number n and
+// 1 < w < 32. The returned slice L holds n = sum( L[i]*2^i ).
+//
+// Reference:
+//   - Alg.9 "Efficient arithmetic on Koblitz curves" by Solinas.
+//     http://doi.org/10.1023/A:1008306223194
+func OmegaNAF(n *big.Int, w uint) (L []int32) {
+	if n.Sign() < 0 {
+		panic("n must be positive")
+	}
+	if w <= 1 || w >= 32 {
+		panic("Verify that 1 < w < 32")
+	}
+
+	L = make([]int32, n.BitLen()+1)
+	var k, v big.Int
+	k.Set(n)
+
+	i := 0
+	for ; k.Sign() > 0; i++ {
+		value := int32(0)
+		if k.Bit(0) == 1 {
+			words := k.Bits()
+			value = int32(words[0] & ((1 << w) - 1))
+			if value >= (int32(1) << (w - 1)) {
+				value -= int32(1) << w
+			}
+			v.SetInt64(int64(value))
+			k.Sub(&k, &v)
+		}
+		L[i] = value
+		k.Rsh(&k, 1)
+	}
+	return L[:i]
+}
diff --git a/vendor/github.com/cloudflare/circl/sign/BUILD.bazel b/vendor/github.com/cloudflare/circl/sign/BUILD.bazel
new file mode 100644
index 00000000..288d0314
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/sign/BUILD.bazel
@@ -0,0 +1,9 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "sign",
+    srcs = ["sign.go"],
+    importmap = "peridot.resf.org/vendor/github.com/cloudflare/circl/sign",
+    importpath = "github.com/cloudflare/circl/sign",
+    visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/BUILD.bazel b/vendor/github.com/cloudflare/circl/sign/ed25519/BUILD.bazel
new file mode 100644
index 00000000..127af621
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/sign/ed25519/BUILD.bazel
@@ -0,0 +1,24 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "ed25519",
+    srcs = [
+        "ed25519.go",
+        "modular.go",
+        "mult.go",
+        "point.go",
+        "pubkey.go",
+        "pubkey112.go",
+        "signapi.go",
+        "tables.go",
+    ],
+    importmap = "peridot.resf.org/vendor/github.com/cloudflare/circl/sign/ed25519",
+    importpath = "github.com/cloudflare/circl/sign/ed25519",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//vendor/github.com/cloudflare/circl/internal/conv",
+        "//vendor/github.com/cloudflare/circl/math",
+        "//vendor/github.com/cloudflare/circl/math/fp25519",
+        "//vendor/github.com/cloudflare/circl/sign",
+    ],
+)
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go b/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go
new file mode 100644
index 00000000..2c73c26f
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go
@@ -0,0 +1,453 @@
+// Package ed25519 implements the Ed25519 signature scheme as described in RFC-8032.
+//
+// This package provides optimized implementations of the three signature
+// variants while maintaining close compatibility with crypto/ed25519.
+//
+// | Scheme Name | Sign Function     | Verification  | Context           |
+// |-------------|-------------------|---------------|-------------------|
+// | Ed25519     | Sign              | Verify        | None              |
+// | Ed25519Ph   | SignPh            | VerifyPh      | Yes, can be empty |
+// | Ed25519Ctx  | SignWithCtx       | VerifyWithCtx | Yes, non-empty    |
+// | All above   | (PrivateKey).Sign | VerifyAny     | As above          |
+//
+// Specific functions for sign and verify are defined. A generic signing
+// function for all schemes is available through the crypto.Signer interface,
+// which is implemented by the PrivateKey type. A corresponding all-in-one
+// verification method is provided by the VerifyAny function.
+//
+// Signing with Ed25519Ph or Ed25519Ctx requires a context string for domain
+// separation. This parameter is passed using a SignerOptions struct defined
+// in this package. While Ed25519Ph accepts an empty context, Ed25519Ctx
+// enforces non-empty context strings.
+//
+// # Compatibility with crypto.ed25519
+//
+// These functions are compatible with the “Ed25519” function defined in
+// RFC-8032. However, unlike RFC 8032's formulation, this package's private
+// key representation includes a public key suffix to make multiple signing
+// operations with the same key more efficient. This package refers to the
+// RFC-8032 private key as the “seed”.
+//
+// References
+//
+// - RFC-8032: https://rfc-editor.org/rfc/rfc8032.txt
+// - Ed25519: https://ed25519.cr.yp.to/
+// - EdDSA: High-speed high-security signatures. https://doi.org/10.1007/s13389-012-0027-1
+package ed25519
+
+import (
+	"bytes"
+	"crypto"
+	cryptoRand "crypto/rand"
+	"crypto/sha512"
+	"crypto/subtle"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/cloudflare/circl/sign"
+)
+
+const (
+	// ContextMaxSize is the maximum length (in bytes) allowed for context.
+	ContextMaxSize = 255
+	// PublicKeySize is the size, in bytes, of public keys as used in this package.
+	PublicKeySize = 32
+	// PrivateKeySize is the size, in bytes, of private keys as used in this package.
+	PrivateKeySize = 64
+	// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
+	SignatureSize = 64
+	// SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
+	SeedSize = 32
+)
+
+const (
+	paramB = 256 / 8 // Size of keys in bytes.
+)
+
+// SignerOptions implements crypto.SignerOpts and augments it with parameters
+// that are specific to the Ed25519 signature schemes.
+type SignerOptions struct {
+	// Hash must be crypto.Hash(0) for Ed25519/Ed25519ctx, or crypto.SHA512
+	// for Ed25519ph.
+	crypto.Hash
+
+	// Context is an optional domain separation string for Ed25519ph and is
+	// required for Ed25519ctx. Its length must be at most 255 bytes.
+	Context string
+
+	// Scheme is an identifier for choosing a signature scheme. The zero value
+	// is ED25519.
+	Scheme SchemeID
+}
+
+// SchemeID is an identifier for each signature scheme.
+type SchemeID uint
+
+const (
+	ED25519 SchemeID = iota
+	ED25519Ph
+	ED25519Ctx
+)
+
+// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
+type PrivateKey []byte
+
+// Equal reports whether priv and x have the same value.
+func (priv PrivateKey) Equal(x crypto.PrivateKey) bool {
+	xx, ok := x.(PrivateKey)
+	return ok && subtle.ConstantTimeCompare(priv, xx) == 1
+}
+
+// Public returns the PublicKey corresponding to priv.
+func (priv PrivateKey) Public() crypto.PublicKey { + publicKey := make(PublicKey, PublicKeySize) + copy(publicKey, priv[SeedSize:]) + return publicKey +} + +// Seed returns the private key seed corresponding to priv. It is provided for +// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds +// in this package. +func (priv PrivateKey) Seed() []byte { + seed := make([]byte, SeedSize) + copy(seed, priv[:SeedSize]) + return seed +} + +func (priv PrivateKey) Scheme() sign.Scheme { return sch } + +func (pub PublicKey) Scheme() sign.Scheme { return sch } + +func (priv PrivateKey) MarshalBinary() (data []byte, err error) { + privateKey := make(PrivateKey, PrivateKeySize) + copy(privateKey, priv) + return privateKey, nil +} + +func (pub PublicKey) MarshalBinary() (data []byte, err error) { + publicKey := make(PublicKey, PublicKeySize) + copy(publicKey, pub) + return publicKey, nil +} + +// Equal reports whether pub and x have the same value. +func (pub PublicKey) Equal(x crypto.PublicKey) bool { + xx, ok := x.(PublicKey) + return ok && bytes.Equal(pub, xx) +} + +// Sign creates a signature of a message with priv key. +// This function is compatible with crypto.ed25519 and also supports the +// three signature variants defined in RFC-8032, namely Ed25519 (or pure +// EdDSA), Ed25519Ph, and Ed25519Ctx. +// The opts.HashFunc() must return zero to specify either Ed25519 or Ed25519Ctx +// variant. This can be achieved by passing crypto.Hash(0) as the value for +// opts. +// The opts.HashFunc() must return SHA512 to specify the Ed25519Ph variant. +// This can be achieved by passing crypto.SHA512 as the value for opts. +// Use a SignerOptions struct (defined in this package) to pass a context +// string for signing. +func (priv PrivateKey) Sign( + rand io.Reader, + message []byte, + opts crypto.SignerOpts, +) (signature []byte, err error) { + var ctx string + var scheme SchemeID + if o, ok := opts.(SignerOptions); ok { + ctx = o.Context + scheme = o.Scheme + } + + switch true { + case scheme == ED25519 && opts.HashFunc() == crypto.Hash(0): + return Sign(priv, message), nil + case scheme == ED25519Ph && opts.HashFunc() == crypto.SHA512: + return SignPh(priv, message, ctx), nil + case scheme == ED25519Ctx && opts.HashFunc() == crypto.Hash(0) && len(ctx) > 0: + return SignWithCtx(priv, message, ctx), nil + default: + return nil, errors.New("ed25519: bad hash algorithm") + } +} + +// GenerateKey generates a public/private key pair using entropy from rand. +// If rand is nil, crypto/rand.Reader will be used. +func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { + if rand == nil { + rand = cryptoRand.Reader + } + + seed := make([]byte, SeedSize) + if _, err := io.ReadFull(rand, seed); err != nil { + return nil, nil, err + } + + privateKey := NewKeyFromSeed(seed) + publicKey := make(PublicKey, PublicKeySize) + copy(publicKey, privateKey[SeedSize:]) + + return publicKey, privateKey, nil +} + +// NewKeyFromSeed calculates a private key from a seed. It will panic if +// len(seed) is not SeedSize. This function is provided for interoperability +// with RFC 8032. RFC 8032's private keys correspond to seeds in this +// package. 
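Because a private key is simply seed ‖ public-key, a key pair can be regenerated losslessly from its 32-byte seed. A short sketch of that round trip (passing nil selects crypto/rand.Reader, per GenerateKey's contract):

    package main

    import (
        "fmt"

        "github.com/cloudflare/circl/sign/ed25519"
    )

    func main() {
        pub, priv, err := ed25519.GenerateKey(nil)
        if err != nil {
            panic(err)
        }

        // RFC-8032 interoperability: the seed fully determines the key pair.
        priv2 := ed25519.NewKeyFromSeed(priv.Seed())
        fmt.Println(priv.Equal(priv2))         // true
        fmt.Println(pub.Equal(priv2.Public())) // true
    }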
+func NewKeyFromSeed(seed []byte) PrivateKey { + privateKey := make(PrivateKey, PrivateKeySize) + newKeyFromSeed(privateKey, seed) + return privateKey +} + +func newKeyFromSeed(privateKey, seed []byte) { + if l := len(seed); l != SeedSize { + panic("ed25519: bad seed length: " + strconv.Itoa(l)) + } + var P pointR1 + k := sha512.Sum512(seed) + clamp(k[:]) + reduceModOrder(k[:paramB], false) + P.fixedMult(k[:paramB]) + copy(privateKey[:SeedSize], seed) + _ = P.ToBytes(privateKey[SeedSize:]) +} + +func signAll(signature []byte, privateKey PrivateKey, message, ctx []byte, preHash bool) { + if l := len(privateKey); l != PrivateKeySize { + panic("ed25519: bad private key length: " + strconv.Itoa(l)) + } + + H := sha512.New() + var PHM []byte + + if preHash { + _, _ = H.Write(message) + PHM = H.Sum(nil) + H.Reset() + } else { + PHM = message + } + + // 1. Hash the 32-byte private key using SHA-512. + _, _ = H.Write(privateKey[:SeedSize]) + h := H.Sum(nil) + clamp(h[:]) + prefix, s := h[paramB:], h[:paramB] + + // 2. Compute SHA-512(dom2(F, C) || prefix || PH(M)) + H.Reset() + + writeDom(H, ctx, preHash) + + _, _ = H.Write(prefix) + _, _ = H.Write(PHM) + r := H.Sum(nil) + reduceModOrder(r[:], true) + + // 3. Compute the point [r]B. + var P pointR1 + P.fixedMult(r[:paramB]) + R := (&[paramB]byte{})[:] + if err := P.ToBytes(R); err != nil { + panic(err) + } + + // 4. Compute SHA512(dom2(F, C) || R || A || PH(M)). + H.Reset() + + writeDom(H, ctx, preHash) + + _, _ = H.Write(R) + _, _ = H.Write(privateKey[SeedSize:]) + _, _ = H.Write(PHM) + hRAM := H.Sum(nil) + + reduceModOrder(hRAM[:], true) + + // 5. Compute S = (r + k * s) mod order. + S := (&[paramB]byte{})[:] + calculateS(S, r[:paramB], hRAM[:paramB], s) + + // 6. The signature is the concatenation of R and S. + copy(signature[:paramB], R[:]) + copy(signature[paramB:], S[:]) +} + +// Sign signs the message with privateKey and returns a signature. +// This function supports the signature variant defined in RFC-8032: Ed25519, +// also known as the pure version of EdDSA. +// It will panic if len(privateKey) is not PrivateKeySize. +func Sign(privateKey PrivateKey, message []byte) []byte { + signature := make([]byte, SignatureSize) + signAll(signature, privateKey, message, []byte(""), false) + return signature +} + +// SignPh creates a signature of a message with private key and context. +// This function supports the signature variant defined in RFC-8032: Ed25519ph, +// meaning it internally hashes the message using SHA-512, and optionally +// accepts a context string. +// It will panic if len(privateKey) is not PrivateKeySize. +// Context could be passed to this function, which length should be no more than +// ContextMaxSize=255. It can be empty. +func SignPh(privateKey PrivateKey, message []byte, ctx string) []byte { + if len(ctx) > ContextMaxSize { + panic(fmt.Errorf("ed25519: bad context length: %v", len(ctx))) + } + + signature := make([]byte, SignatureSize) + signAll(signature, privateKey, message, []byte(ctx), true) + return signature +} + +// SignWithCtx creates a signature of a message with private key and context. +// This function supports the signature variant defined in RFC-8032: Ed25519ctx, +// meaning it accepts a non-empty context string. +// It will panic if len(privateKey) is not PrivateKeySize. +// Context must be passed to this function, which length should be no more than +// ContextMaxSize=255 and cannot be empty. 
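The variants differ only in prehashing and context handling, so a single SignerOptions value can drive both the generic crypto.Signer path and VerifyAny. A sketch for Ed25519ph (the message and context strings are illustrative):

    package main

    import (
        "crypto"
        "fmt"

        "github.com/cloudflare/circl/sign/ed25519"
    )

    func main() {
        pub, priv, _ := ed25519.GenerateKey(nil)
        msg := []byte("hello")

        opts := ed25519.SignerOptions{
            Hash:    crypto.SHA512, // SHA-512 selects the prehashed variant
            Context: "example ctx", // optional for Ed25519ph
            Scheme:  ed25519.ED25519Ph,
        }

        sig, err := priv.Sign(nil, msg, opts)
        if err != nil {
            panic(err)
        }
        fmt.Println(ed25519.VerifyAny(pub, msg, sig, opts)) // true

        // Equivalent direct calls:
        sig2 := ed25519.SignPh(priv, msg, "example ctx")
        fmt.Println(ed25519.VerifyPh(pub, msg, sig2, "example ctx")) // true
    }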
+func SignWithCtx(privateKey PrivateKey, message []byte, ctx string) []byte { + if len(ctx) == 0 || len(ctx) > ContextMaxSize { + panic(fmt.Errorf("ed25519: bad context length: %v > %v", len(ctx), ContextMaxSize)) + } + + signature := make([]byte, SignatureSize) + signAll(signature, privateKey, message, []byte(ctx), false) + return signature +} + +func verify(public PublicKey, message, signature, ctx []byte, preHash bool) bool { + if len(public) != PublicKeySize || + len(signature) != SignatureSize || + !isLessThanOrder(signature[paramB:]) { + return false + } + + var P pointR1 + if ok := P.FromBytes(public); !ok { + return false + } + + H := sha512.New() + var PHM []byte + + if preHash { + _, _ = H.Write(message) + PHM = H.Sum(nil) + H.Reset() + } else { + PHM = message + } + + R := signature[:paramB] + + writeDom(H, ctx, preHash) + + _, _ = H.Write(R) + _, _ = H.Write(public) + _, _ = H.Write(PHM) + hRAM := H.Sum(nil) + reduceModOrder(hRAM[:], true) + + var Q pointR1 + encR := (&[paramB]byte{})[:] + P.neg() + Q.doubleMult(&P, signature[paramB:], hRAM[:paramB]) + _ = Q.ToBytes(encR) + return bytes.Equal(R, encR) +} + +// VerifyAny returns true if the signature is valid. Failure cases are invalid +// signature, or when the public key cannot be decoded. +// This function supports all the three signature variants defined in RFC-8032, +// namely Ed25519 (or pure EdDSA), Ed25519Ph, and Ed25519Ctx. +// The opts.HashFunc() must return zero to specify either Ed25519 or Ed25519Ctx +// variant. This can be achieved by passing crypto.Hash(0) as the value for opts. +// The opts.HashFunc() must return SHA512 to specify the Ed25519Ph variant. +// This can be achieved by passing crypto.SHA512 as the value for opts. +// Use a SignerOptions struct to pass a context string for signing. +func VerifyAny(public PublicKey, message, signature []byte, opts crypto.SignerOpts) bool { + var ctx string + var scheme SchemeID + if o, ok := opts.(SignerOptions); ok { + ctx = o.Context + scheme = o.Scheme + } + + switch true { + case scheme == ED25519 && opts.HashFunc() == crypto.Hash(0): + return Verify(public, message, signature) + case scheme == ED25519Ph && opts.HashFunc() == crypto.SHA512: + return VerifyPh(public, message, signature, ctx) + case scheme == ED25519Ctx && opts.HashFunc() == crypto.Hash(0) && len(ctx) > 0: + return VerifyWithCtx(public, message, signature, ctx) + default: + return false + } +} + +// Verify returns true if the signature is valid. Failure cases are invalid +// signature, or when the public key cannot be decoded. +// This function supports the signature variant defined in RFC-8032: Ed25519, +// also known as the pure version of EdDSA. +func Verify(public PublicKey, message, signature []byte) bool { + return verify(public, message, signature, []byte(""), false) +} + +// VerifyPh returns true if the signature is valid. Failure cases are invalid +// signature, or when the public key cannot be decoded. +// This function supports the signature variant defined in RFC-8032: Ed25519ph, +// meaning it internally hashes the message using SHA-512. +// Context could be passed to this function, which length should be no more than +// 255. It can be empty. +func VerifyPh(public PublicKey, message, signature []byte, ctx string) bool { + return verify(public, message, signature, []byte(ctx), true) +} + +// VerifyWithCtx returns true if the signature is valid. Failure cases are invalid +// signature, or when the public key cannot be decoded, or when context is +// not provided. 
+// This function supports the signature variant defined in RFC-8032: Ed25519ctx, +// meaning it does not handle prehashed messages. Non-empty context string must be +// provided, and must not be more than 255 of length. +func VerifyWithCtx(public PublicKey, message, signature []byte, ctx string) bool { + if len(ctx) == 0 || len(ctx) > ContextMaxSize { + return false + } + + return verify(public, message, signature, []byte(ctx), false) +} + +func clamp(k []byte) { + k[0] &= 248 + k[paramB-1] = (k[paramB-1] & 127) | 64 +} + +// isLessThanOrder returns true if 0 <= x < order. +func isLessThanOrder(x []byte) bool { + i := len(order) - 1 + for i > 0 && x[i] == order[i] { + i-- + } + return x[i] < order[i] +} + +func writeDom(h io.Writer, ctx []byte, preHash bool) { + dom2 := "SigEd25519 no Ed25519 collisions" + + if len(ctx) > 0 { + _, _ = h.Write([]byte(dom2)) + if preHash { + _, _ = h.Write([]byte{byte(0x01), byte(len(ctx))}) + } else { + _, _ = h.Write([]byte{byte(0x00), byte(len(ctx))}) + } + _, _ = h.Write(ctx) + } else if preHash { + _, _ = h.Write([]byte(dom2)) + _, _ = h.Write([]byte{0x01, 0x00}) + } +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/modular.go b/vendor/github.com/cloudflare/circl/sign/ed25519/modular.go new file mode 100644 index 00000000..10efafdc --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/modular.go @@ -0,0 +1,175 @@ +package ed25519 + +import ( + "encoding/binary" + "math/bits" +) + +var order = [paramB]byte{ + 0xed, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58, + 0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, +} + +// isLessThan returns true if 0 <= x < y, and assumes that slices have the same length. +func isLessThan(x, y []byte) bool { + i := len(x) - 1 + for i > 0 && x[i] == y[i] { + i-- + } + return x[i] < y[i] +} + +// reduceModOrder calculates k = k mod order of the curve. +func reduceModOrder(k []byte, is512Bit bool) { + var X [((2 * paramB) * 8) / 64]uint64 + numWords := len(k) >> 3 + for i := 0; i < numWords; i++ { + X[i] = binary.LittleEndian.Uint64(k[i*8 : (i+1)*8]) + } + red512(&X, is512Bit) + for i := 0; i < numWords; i++ { + binary.LittleEndian.PutUint64(k[i*8:(i+1)*8], X[i]) + } +} + +// red512 calculates x = x mod Order of the curve. +func red512(x *[8]uint64, full bool) { + // Implementation of Algs.(14.47)+(14.52) of Handbook of Applied + // Cryptography, by A. Menezes, P. van Oorschot, and S. Vanstone. 
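// Note: the constants below encode the group order ℓ = 2^252 + δ, with
// δ = 0x14def9dea2f79cd65812631a5cf5d3ed. ell0/ell1 are the two 64-bit words
// of δ, and ell160/ell161/ell162 are the words of 16·δ, so a high 256-bit
// chunk q can be folded in via q·2^256 ≡ −q·(16·δ) (mod ℓ).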
+ const ( + ell0 = uint64(0x5812631a5cf5d3ed) + ell1 = uint64(0x14def9dea2f79cd6) + ell160 = uint64(0x812631a5cf5d3ed0) + ell161 = uint64(0x4def9dea2f79cd65) + ell162 = uint64(0x0000000000000001) + ) + + var c0, c1, c2, c3 uint64 + r0, r1, r2, r3, r4 := x[0], x[1], x[2], x[3], uint64(0) + + if full { + q0, q1, q2, q3 := x[4], x[5], x[6], x[7] + + for i := 0; i < 3; i++ { + h0, s0 := bits.Mul64(q0, ell160) + h1, s1 := bits.Mul64(q1, ell160) + h2, s2 := bits.Mul64(q2, ell160) + h3, s3 := bits.Mul64(q3, ell160) + + s1, c0 = bits.Add64(h0, s1, 0) + s2, c1 = bits.Add64(h1, s2, c0) + s3, c2 = bits.Add64(h2, s3, c1) + s4, _ := bits.Add64(h3, 0, c2) + + h0, l0 := bits.Mul64(q0, ell161) + h1, l1 := bits.Mul64(q1, ell161) + h2, l2 := bits.Mul64(q2, ell161) + h3, l3 := bits.Mul64(q3, ell161) + + l1, c0 = bits.Add64(h0, l1, 0) + l2, c1 = bits.Add64(h1, l2, c0) + l3, c2 = bits.Add64(h2, l3, c1) + l4, _ := bits.Add64(h3, 0, c2) + + s1, c0 = bits.Add64(s1, l0, 0) + s2, c1 = bits.Add64(s2, l1, c0) + s3, c2 = bits.Add64(s3, l2, c1) + s4, c3 = bits.Add64(s4, l3, c2) + s5, s6 := bits.Add64(l4, 0, c3) + + s2, c0 = bits.Add64(s2, q0, 0) + s3, c1 = bits.Add64(s3, q1, c0) + s4, c2 = bits.Add64(s4, q2, c1) + s5, c3 = bits.Add64(s5, q3, c2) + s6, s7 := bits.Add64(s6, 0, c3) + + q := q0 | q1 | q2 | q3 + m := -((q | -q) >> 63) // if q=0 then m=0...0 else m=1..1 + s0 &= m + s1 &= m + s2 &= m + s3 &= m + q0, q1, q2, q3 = s4, s5, s6, s7 + + if (i+1)%2 == 0 { + r0, c0 = bits.Add64(r0, s0, 0) + r1, c1 = bits.Add64(r1, s1, c0) + r2, c2 = bits.Add64(r2, s2, c1) + r3, c3 = bits.Add64(r3, s3, c2) + r4, _ = bits.Add64(r4, 0, c3) + } else { + r0, c0 = bits.Sub64(r0, s0, 0) + r1, c1 = bits.Sub64(r1, s1, c0) + r2, c2 = bits.Sub64(r2, s2, c1) + r3, c3 = bits.Sub64(r3, s3, c2) + r4, _ = bits.Sub64(r4, 0, c3) + } + } + + m := -(r4 >> 63) + r0, c0 = bits.Add64(r0, m&ell160, 0) + r1, c1 = bits.Add64(r1, m&ell161, c0) + r2, c2 = bits.Add64(r2, m&ell162, c1) + r3, c3 = bits.Add64(r3, 0, c2) + r4, _ = bits.Add64(r4, m&1, c3) + x[4], x[5], x[6], x[7] = 0, 0, 0, 0 + } + + q0 := (r4 << 4) | (r3 >> 60) + r3 &= (uint64(1) << 60) - 1 + + h0, s0 := bits.Mul64(ell0, q0) + h1, s1 := bits.Mul64(ell1, q0) + s1, c0 = bits.Add64(h0, s1, 0) + s2, _ := bits.Add64(h1, 0, c0) + + r0, c0 = bits.Sub64(r0, s0, 0) + r1, c1 = bits.Sub64(r1, s1, c0) + r2, c2 = bits.Sub64(r2, s2, c1) + r3, _ = bits.Sub64(r3, 0, c2) + + x[0], x[1], x[2], x[3] = r0, r1, r2, r3 +} + +// calculateS performs s = r+k*a mod Order of the curve. 
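// (In signAll this is step 5 of RFC-8032, S = (r + H(dom2 ‖ R ‖ A ‖ PH(M))·s) mod ℓ
// with H = SHA-512: a schoolbook 256×256-bit product accumulated into S, then
// reduced by red512.)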
+func calculateS(s, r, k, a []byte) { + K := [4]uint64{ + binary.LittleEndian.Uint64(k[0*8 : 1*8]), + binary.LittleEndian.Uint64(k[1*8 : 2*8]), + binary.LittleEndian.Uint64(k[2*8 : 3*8]), + binary.LittleEndian.Uint64(k[3*8 : 4*8]), + } + S := [8]uint64{ + binary.LittleEndian.Uint64(r[0*8 : 1*8]), + binary.LittleEndian.Uint64(r[1*8 : 2*8]), + binary.LittleEndian.Uint64(r[2*8 : 3*8]), + binary.LittleEndian.Uint64(r[3*8 : 4*8]), + } + var c3 uint64 + for i := range K { + ai := binary.LittleEndian.Uint64(a[i*8 : (i+1)*8]) + + h0, l0 := bits.Mul64(K[0], ai) + h1, l1 := bits.Mul64(K[1], ai) + h2, l2 := bits.Mul64(K[2], ai) + h3, l3 := bits.Mul64(K[3], ai) + + l1, c0 := bits.Add64(h0, l1, 0) + l2, c1 := bits.Add64(h1, l2, c0) + l3, c2 := bits.Add64(h2, l3, c1) + l4, _ := bits.Add64(h3, 0, c2) + + S[i+0], c0 = bits.Add64(S[i+0], l0, 0) + S[i+1], c1 = bits.Add64(S[i+1], l1, c0) + S[i+2], c2 = bits.Add64(S[i+2], l2, c1) + S[i+3], c3 = bits.Add64(S[i+3], l3, c2) + S[i+4], _ = bits.Add64(S[i+4], l4, c3) + } + red512(&S, true) + binary.LittleEndian.PutUint64(s[0*8:1*8], S[0]) + binary.LittleEndian.PutUint64(s[1*8:2*8], S[1]) + binary.LittleEndian.PutUint64(s[2*8:3*8], S[2]) + binary.LittleEndian.PutUint64(s[3*8:4*8], S[3]) +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/mult.go b/vendor/github.com/cloudflare/circl/sign/ed25519/mult.go new file mode 100644 index 00000000..3216aae3 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/mult.go @@ -0,0 +1,180 @@ +package ed25519 + +import ( + "crypto/subtle" + "encoding/binary" + "math/bits" + + "github.com/cloudflare/circl/internal/conv" + "github.com/cloudflare/circl/math" + fp "github.com/cloudflare/circl/math/fp25519" +) + +var paramD = fp.Elt{ + 0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75, + 0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00, + 0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c, + 0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52, +} + +// mLSBRecoding parameters. +const ( + fxT = 257 + fxV = 2 + fxW = 3 + fx2w1 = 1 << (uint(fxW) - 1) + numWords64 = (paramB * 8 / 64) +) + +// mLSBRecoding is the odd-only modified LSB-set. +// +// Reference: +// +// "Efficient and secure algorithms for GLV-based scalar multiplication and +// their implementation on GLV–GLS curves" by (Faz-Hernandez et al.) +// http://doi.org/10.1007/s13389-014-0085-7. +func mLSBRecoding(L []int8, k []byte) { + const ee = (fxT + fxW*fxV - 1) / (fxW * fxV) + const dd = ee * fxV + const ll = dd * fxW + if len(L) == (ll + 1) { + var m [numWords64 + 1]uint64 + for i := 0; i < numWords64; i++ { + m[i] = binary.LittleEndian.Uint64(k[8*i : 8*i+8]) + } + condAddOrderN(&m) + L[dd-1] = 1 + for i := 0; i < dd-1; i++ { + kip1 := (m[(i+1)/64] >> (uint(i+1) % 64)) & 0x1 + L[i] = int8(kip1<<1) - 1 + } + { // right-shift by d + right := uint(dd % 64) + left := uint(64) - right + lim := ((numWords64+1)*64 - dd) / 64 + j := dd / 64 + for i := 0; i < lim; i++ { + m[i] = (m[i+j] >> right) | (m[i+j+1] << left) + } + m[lim] = m[lim+j] >> right + } + for i := dd; i < ll; i++ { + L[i] = L[i%dd] * int8(m[0]&0x1) + div2subY(m[:], int64(L[i]>>1), numWords64) + } + L[ll] = int8(m[0]) + } +} + +// absolute returns always a positive value. +func absolute(x int32) int32 { + mask := x >> 31 + return (x + mask) ^ mask +} + +// condAddOrderN updates x = x+order if x is even, otherwise x remains unchanged. 
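// (Branch-free: the mask (x[0]&1)−1 is zero when x is odd and all-ones when x
// is even, so the masked order words add either 0 or ℓ. mLSBRecoding needs an
// odd scalar, and adding the odd group order ℓ flips the parity without
// changing the scalar multiple [k]P.)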
+func condAddOrderN(x *[numWords64 + 1]uint64) { + isOdd := (x[0] & 0x1) - 1 + c := uint64(0) + for i := 0; i < numWords64; i++ { + orderWord := binary.LittleEndian.Uint64(order[8*i : 8*i+8]) + o := isOdd & orderWord + x0, c0 := bits.Add64(x[i], o, c) + x[i] = x0 + c = c0 + } + x[numWords64], _ = bits.Add64(x[numWords64], 0, c) +} + +// div2subY update x = (x/2) - y. +func div2subY(x []uint64, y int64, l int) { + s := uint64(y >> 63) + for i := 0; i < l-1; i++ { + x[i] = (x[i] >> 1) | (x[i+1] << 63) + } + x[l-1] = (x[l-1] >> 1) + + b := uint64(0) + x0, b0 := bits.Sub64(x[0], uint64(y), b) + x[0] = x0 + b = b0 + for i := 1; i < l-1; i++ { + x0, b0 := bits.Sub64(x[i], s, b) + x[i] = x0 + b = b0 + } + x[l-1], _ = bits.Sub64(x[l-1], s, b) +} + +func (P *pointR1) fixedMult(scalar []byte) { + if len(scalar) != paramB { + panic("wrong scalar size") + } + const ee = (fxT + fxW*fxV - 1) / (fxW * fxV) + const dd = ee * fxV + const ll = dd * fxW + + L := make([]int8, ll+1) + mLSBRecoding(L[:], scalar) + S := &pointR3{} + P.SetIdentity() + for ii := ee - 1; ii >= 0; ii-- { + P.double() + for j := 0; j < fxV; j++ { + dig := L[fxW*dd-j*ee+ii-ee] + for i := (fxW-1)*dd - j*ee + ii - ee; i >= (2*dd - j*ee + ii - ee); i = i - dd { + dig = 2*dig + L[i] + } + idx := absolute(int32(dig)) + sig := L[dd-j*ee+ii-ee] + Tabj := &tabSign[fxV-j-1] + for k := 0; k < fx2w1; k++ { + S.cmov(&Tabj[k], subtle.ConstantTimeEq(int32(k), idx)) + } + S.cneg(subtle.ConstantTimeEq(int32(sig), -1)) + P.mixAdd(S) + } + } +} + +const ( + omegaFix = 7 + omegaVar = 5 +) + +// doubleMult returns P=mG+nQ. +func (P *pointR1) doubleMult(Q *pointR1, m, n []byte) { + nafFix := math.OmegaNAF(conv.BytesLe2BigInt(m), omegaFix) + nafVar := math.OmegaNAF(conv.BytesLe2BigInt(n), omegaVar) + + if len(nafFix) > len(nafVar) { + nafVar = append(nafVar, make([]int32, len(nafFix)-len(nafVar))...) + } else if len(nafFix) < len(nafVar) { + nafFix = append(nafFix, make([]int32, len(nafVar)-len(nafFix))...) 
+ } + + var TabQ [1 << (omegaVar - 2)]pointR2 + Q.oddMultiples(TabQ[:]) + P.SetIdentity() + for i := len(nafFix) - 1; i >= 0; i-- { + P.double() + // Generator point + if nafFix[i] != 0 { + idxM := absolute(nafFix[i]) >> 1 + R := tabVerif[idxM] + if nafFix[i] < 0 { + R.neg() + } + P.mixAdd(&R) + } + // Variable input point + if nafVar[i] != 0 { + idxN := absolute(nafVar[i]) >> 1 + S := TabQ[idxN] + if nafVar[i] < 0 { + S.neg() + } + P.add(&S) + } + } +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/point.go b/vendor/github.com/cloudflare/circl/sign/ed25519/point.go new file mode 100644 index 00000000..374a6950 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/point.go @@ -0,0 +1,195 @@ +package ed25519 + +import fp "github.com/cloudflare/circl/math/fp25519" + +type ( + pointR1 struct{ x, y, z, ta, tb fp.Elt } + pointR2 struct { + pointR3 + z2 fp.Elt + } +) +type pointR3 struct{ addYX, subYX, dt2 fp.Elt } + +func (P *pointR1) neg() { + fp.Neg(&P.x, &P.x) + fp.Neg(&P.ta, &P.ta) +} + +func (P *pointR1) SetIdentity() { + P.x = fp.Elt{} + fp.SetOne(&P.y) + fp.SetOne(&P.z) + P.ta = fp.Elt{} + P.tb = fp.Elt{} +} + +func (P *pointR1) toAffine() { + fp.Inv(&P.z, &P.z) + fp.Mul(&P.x, &P.x, &P.z) + fp.Mul(&P.y, &P.y, &P.z) + fp.Modp(&P.x) + fp.Modp(&P.y) + fp.SetOne(&P.z) + P.ta = P.x + P.tb = P.y +} + +func (P *pointR1) ToBytes(k []byte) error { + P.toAffine() + var x [fp.Size]byte + err := fp.ToBytes(k[:fp.Size], &P.y) + if err != nil { + return err + } + err = fp.ToBytes(x[:], &P.x) + if err != nil { + return err + } + b := x[0] & 1 + k[paramB-1] = k[paramB-1] | (b << 7) + return nil +} + +func (P *pointR1) FromBytes(k []byte) bool { + if len(k) != paramB { + panic("wrong size") + } + signX := k[paramB-1] >> 7 + copy(P.y[:], k[:fp.Size]) + P.y[fp.Size-1] &= 0x7F + p := fp.P() + if !isLessThan(P.y[:], p[:]) { + return false + } + + one, u, v := &fp.Elt{}, &fp.Elt{}, &fp.Elt{} + fp.SetOne(one) + fp.Sqr(u, &P.y) // u = y^2 + fp.Mul(v, u, ¶mD) // v = dy^2 + fp.Sub(u, u, one) // u = y^2-1 + fp.Add(v, v, one) // v = dy^2+1 + isQR := fp.InvSqrt(&P.x, u, v) // x = sqrt(u/v) + if !isQR { + return false + } + fp.Modp(&P.x) // x = x mod p + if fp.IsZero(&P.x) && signX == 1 { + return false + } + if signX != (P.x[0] & 1) { + fp.Neg(&P.x, &P.x) + } + P.ta = P.x + P.tb = P.y + fp.SetOne(&P.z) + return true +} + +// double calculates 2P for curves with A=-1. +func (P *pointR1) double() { + Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb + a, b, c, e, f, g, h := Px, Py, Pz, Pta, Px, Py, Ptb + fp.Add(e, Px, Py) // x+y + fp.Sqr(a, Px) // A = x^2 + fp.Sqr(b, Py) // B = y^2 + fp.Sqr(c, Pz) // z^2 + fp.Add(c, c, c) // C = 2*z^2 + fp.Add(h, a, b) // H = A+B + fp.Sqr(e, e) // (x+y)^2 + fp.Sub(e, e, h) // E = (x+y)^2-A-B + fp.Sub(g, b, a) // G = B-A + fp.Sub(f, c, g) // F = C-G + fp.Mul(Pz, f, g) // Z = F * G + fp.Mul(Px, e, f) // X = E * F + fp.Mul(Py, g, h) // Y = G * H, T = E * H +} + +func (P *pointR1) mixAdd(Q *pointR3) { + fp.Add(&P.z, &P.z, &P.z) // D = 2*z1 + P.coreAddition(Q) +} + +func (P *pointR1) add(Q *pointR2) { + fp.Mul(&P.z, &P.z, &Q.z2) // D = 2*z1*z2 + P.coreAddition(&Q.pointR3) +} + +// coreAddition calculates P=P+Q for curves with A=-1. 
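// (These are the extended-coordinate addition formulas for twisted Edwards
// curves with a = −1, evaluated against precomputed (y+x, y−x, 2dt) triples;
// cf. Hisil–Wong–Carter–Dawson, "Twisted Edwards Curves Revisited".)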
+func (P *pointR1) coreAddition(Q *pointR3) { + Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb + addYX2, subYX2, dt2 := &Q.addYX, &Q.subYX, &Q.dt2 + a, b, c, d, e, f, g, h := Px, Py, &fp.Elt{}, Pz, Pta, Px, Py, Ptb + fp.Mul(c, Pta, Ptb) // t1 = ta*tb + fp.Sub(h, Py, Px) // y1-x1 + fp.Add(b, Py, Px) // y1+x1 + fp.Mul(a, h, subYX2) // A = (y1-x1)*(y2-x2) + fp.Mul(b, b, addYX2) // B = (y1+x1)*(y2+x2) + fp.Mul(c, c, dt2) // C = 2*D*t1*t2 + fp.Sub(e, b, a) // E = B-A + fp.Add(h, b, a) // H = B+A + fp.Sub(f, d, c) // F = D-C + fp.Add(g, d, c) // G = D+C + fp.Mul(Pz, f, g) // Z = F * G + fp.Mul(Px, e, f) // X = E * F + fp.Mul(Py, g, h) // Y = G * H, T = E * H +} + +func (P *pointR1) oddMultiples(T []pointR2) { + var R pointR2 + n := len(T) + T[0].fromR1(P) + _2P := *P + _2P.double() + R.fromR1(&_2P) + for i := 1; i < n; i++ { + P.add(&R) + T[i].fromR1(P) + } +} + +func (P *pointR1) isEqual(Q *pointR1) bool { + l, r := &fp.Elt{}, &fp.Elt{} + fp.Mul(l, &P.x, &Q.z) + fp.Mul(r, &Q.x, &P.z) + fp.Sub(l, l, r) + b := fp.IsZero(l) + fp.Mul(l, &P.y, &Q.z) + fp.Mul(r, &Q.y, &P.z) + fp.Sub(l, l, r) + b = b && fp.IsZero(l) + fp.Mul(l, &P.ta, &P.tb) + fp.Mul(l, l, &Q.z) + fp.Mul(r, &Q.ta, &Q.tb) + fp.Mul(r, r, &P.z) + fp.Sub(l, l, r) + b = b && fp.IsZero(l) + return b +} + +func (P *pointR3) neg() { + P.addYX, P.subYX = P.subYX, P.addYX + fp.Neg(&P.dt2, &P.dt2) +} + +func (P *pointR2) fromR1(Q *pointR1) { + fp.Add(&P.addYX, &Q.y, &Q.x) + fp.Sub(&P.subYX, &Q.y, &Q.x) + fp.Mul(&P.dt2, &Q.ta, &Q.tb) + fp.Mul(&P.dt2, &P.dt2, ¶mD) + fp.Add(&P.dt2, &P.dt2, &P.dt2) + fp.Add(&P.z2, &Q.z, &Q.z) +} + +func (P *pointR3) cneg(b int) { + t := &fp.Elt{} + fp.Cswap(&P.addYX, &P.subYX, uint(b)) + fp.Neg(t, &P.dt2) + fp.Cmov(&P.dt2, t, uint(b)) +} + +func (P *pointR3) cmov(Q *pointR3, b int) { + fp.Cmov(&P.addYX, &Q.addYX, uint(b)) + fp.Cmov(&P.subYX, &Q.subYX, uint(b)) + fp.Cmov(&P.dt2, &Q.dt2, uint(b)) +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey.go b/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey.go new file mode 100644 index 00000000..c3505b67 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey.go @@ -0,0 +1,9 @@ +//go:build go1.13 +// +build go1.13 + +package ed25519 + +import cryptoEd25519 "crypto/ed25519" + +// PublicKey is the type of Ed25519 public keys. +type PublicKey cryptoEd25519.PublicKey diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey112.go b/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey112.go new file mode 100644 index 00000000..d57d86ef --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey112.go @@ -0,0 +1,7 @@ +//go:build !go1.13 +// +build !go1.13 + +package ed25519 + +// PublicKey is the type of Ed25519 public keys. +type PublicKey []byte diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/signapi.go b/vendor/github.com/cloudflare/circl/sign/ed25519/signapi.go new file mode 100644 index 00000000..e4520f52 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/signapi.go @@ -0,0 +1,87 @@ +package ed25519 + +import ( + "crypto/rand" + "encoding/asn1" + + "github.com/cloudflare/circl/sign" +) + +var sch sign.Scheme = &scheme{} + +// Scheme returns a signature interface. 
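Behind this entry point, Ed25519 can be used like any other circl signature scheme through the generic sign.Scheme interface. A usage sketch:

    package main

    import (
        "fmt"

        "github.com/cloudflare/circl/sign/ed25519"
    )

    func main() {
        scheme := ed25519.Scheme()

        pk, sk, err := scheme.GenerateKey()
        if err != nil {
            panic(err)
        }

        msg := []byte("generic API")
        sig := scheme.Sign(sk, msg, nil) // nil opts: plain Ed25519, no context
        fmt.Println(scheme.Verify(pk, msg, sig, nil)) // true
        fmt.Println(scheme.Name())                    // Ed25519
    }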
+func Scheme() sign.Scheme { return sch } + +type scheme struct{} + +func (*scheme) Name() string { return "Ed25519" } +func (*scheme) PublicKeySize() int { return PublicKeySize } +func (*scheme) PrivateKeySize() int { return PrivateKeySize } +func (*scheme) SignatureSize() int { return SignatureSize } +func (*scheme) SeedSize() int { return SeedSize } +func (*scheme) TLSIdentifier() uint { return 0x0807 } +func (*scheme) SupportsContext() bool { return false } +func (*scheme) Oid() asn1.ObjectIdentifier { + return asn1.ObjectIdentifier{1, 3, 101, 112} +} + +func (*scheme) GenerateKey() (sign.PublicKey, sign.PrivateKey, error) { + return GenerateKey(rand.Reader) +} + +func (*scheme) Sign( + sk sign.PrivateKey, + message []byte, + opts *sign.SignatureOpts, +) []byte { + priv, ok := sk.(PrivateKey) + if !ok { + panic(sign.ErrTypeMismatch) + } + if opts != nil && opts.Context != "" { + panic(sign.ErrContextNotSupported) + } + return Sign(priv, message) +} + +func (*scheme) Verify( + pk sign.PublicKey, + message, signature []byte, + opts *sign.SignatureOpts, +) bool { + pub, ok := pk.(PublicKey) + if !ok { + panic(sign.ErrTypeMismatch) + } + if opts != nil { + if opts.Context != "" { + panic(sign.ErrContextNotSupported) + } + } + return Verify(pub, message, signature) +} + +func (*scheme) DeriveKey(seed []byte) (sign.PublicKey, sign.PrivateKey) { + privateKey := NewKeyFromSeed(seed) + publicKey := make(PublicKey, PublicKeySize) + copy(publicKey, privateKey[SeedSize:]) + return publicKey, privateKey +} + +func (*scheme) UnmarshalBinaryPublicKey(buf []byte) (sign.PublicKey, error) { + if len(buf) < PublicKeySize { + return nil, sign.ErrPubKeySize + } + pub := make(PublicKey, PublicKeySize) + copy(pub, buf[:PublicKeySize]) + return pub, nil +} + +func (*scheme) UnmarshalBinaryPrivateKey(buf []byte) (sign.PrivateKey, error) { + if len(buf) < PrivateKeySize { + return nil, sign.ErrPrivKeySize + } + priv := make(PrivateKey, PrivateKeySize) + copy(priv, buf[:PrivateKeySize]) + return priv, nil +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/tables.go b/vendor/github.com/cloudflare/circl/sign/ed25519/tables.go new file mode 100644 index 00000000..8763b426 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/tables.go @@ -0,0 +1,213 @@ +package ed25519 + +import fp "github.com/cloudflare/circl/math/fp25519" + +var tabSign = [fxV][fx2w1]pointR3{ + { + pointR3{ + addYX: fp.Elt{0x85, 0x3b, 0x8c, 0xf5, 0xc6, 0x93, 0xbc, 0x2f, 0x19, 0x0e, 0x8c, 0xfb, 0xc6, 0x2d, 0x93, 0xcf, 0xc2, 0x42, 0x3d, 0x64, 0x98, 0x48, 0x0b, 0x27, 0x65, 0xba, 0xd4, 0x33, 0x3a, 0x9d, 0xcf, 0x07}, + subYX: fp.Elt{0x3e, 0x91, 0x40, 0xd7, 0x05, 0x39, 0x10, 0x9d, 0xb3, 0xbe, 0x40, 0xd1, 0x05, 0x9f, 0x39, 0xfd, 0x09, 0x8a, 0x8f, 0x68, 0x34, 0x84, 0xc1, 0xa5, 0x67, 0x12, 0xf8, 0x98, 0x92, 0x2f, 0xfd, 0x44}, + dt2: fp.Elt{0x68, 0xaa, 0x7a, 0x87, 0x05, 0x12, 0xc9, 0xab, 0x9e, 0xc4, 0xaa, 0xcc, 0x23, 0xe8, 0xd9, 0x26, 0x8c, 0x59, 0x43, 0xdd, 0xcb, 0x7d, 0x1b, 0x5a, 0xa8, 0x65, 0x0c, 0x9f, 0x68, 0x7b, 0x11, 0x6f}, + }, + { + addYX: fp.Elt{0x7c, 0xb0, 0x9e, 0xe6, 0xc5, 0xbf, 0xfa, 0x13, 0x8e, 0x0d, 0x22, 0xde, 0xc8, 0xd1, 0xce, 0x52, 0x02, 0xd5, 0x62, 0x31, 0x71, 0x0e, 0x8e, 0x9d, 0xb0, 0xd6, 0x00, 0xa5, 0x5a, 0x0e, 0xce, 0x72}, + subYX: fp.Elt{0x1a, 0x8e, 0x5c, 0xdc, 0xa4, 0xb3, 0x6c, 0x51, 0x18, 0xa0, 0x09, 0x80, 0x9a, 0x46, 0x33, 0xd5, 0xe0, 0x3c, 0x4d, 0x3b, 0xfc, 0x49, 0xa2, 0x43, 0x29, 0xe1, 0x29, 0xa9, 0x93, 0xea, 0x7c, 0x35}, + dt2: fp.Elt{0x08, 0x46, 0x6f, 0x68, 0x7f, 0x0b, 0x7c, 0x9e, 0xad, 0xba, 0x07, 
0x61, 0x74, 0x83, 0x2f, 0xfc, 0x26, 0xd6, 0x09, 0xb9, 0x00, 0x34, 0x36, 0x4f, 0x01, 0xf3, 0x48, 0xdb, 0x43, 0xba, 0x04, 0x44}, + }, + { + addYX: fp.Elt{0x4c, 0xda, 0x0d, 0x13, 0x66, 0xfd, 0x82, 0x84, 0x9f, 0x75, 0x5b, 0xa2, 0x17, 0xfe, 0x34, 0xbf, 0x1f, 0xcb, 0xba, 0x90, 0x55, 0x80, 0x83, 0xfd, 0x63, 0xb9, 0x18, 0xf8, 0x5b, 0x5d, 0x94, 0x1e}, + subYX: fp.Elt{0xb9, 0xdb, 0x6c, 0x04, 0x88, 0x22, 0xd8, 0x79, 0x83, 0x2f, 0x8d, 0x65, 0x6b, 0xd2, 0xab, 0x1b, 0xdd, 0x65, 0xe5, 0x93, 0x63, 0xf8, 0xa2, 0xd8, 0x3c, 0xf1, 0x4b, 0xc5, 0x99, 0xd1, 0xf2, 0x12}, + dt2: fp.Elt{0x05, 0x4c, 0xb8, 0x3b, 0xfe, 0xf5, 0x9f, 0x2e, 0xd1, 0xb2, 0xb8, 0xff, 0xfe, 0x6d, 0xd9, 0x37, 0xe0, 0xae, 0xb4, 0x5a, 0x51, 0x80, 0x7e, 0x9b, 0x1d, 0xd1, 0x8d, 0x8c, 0x56, 0xb1, 0x84, 0x35}, + }, + { + addYX: fp.Elt{0x39, 0x71, 0x43, 0x34, 0xe3, 0x42, 0x45, 0xa1, 0xf2, 0x68, 0x71, 0xa7, 0xe8, 0x23, 0xfd, 0x9f, 0x86, 0x48, 0xff, 0xe5, 0x96, 0x74, 0xcf, 0x05, 0x49, 0xe2, 0xb3, 0x6c, 0x17, 0x77, 0x2f, 0x6d}, + subYX: fp.Elt{0x73, 0x3f, 0xc1, 0xc7, 0x6a, 0x66, 0xa1, 0x20, 0xdd, 0x11, 0xfb, 0x7a, 0x6e, 0xa8, 0x51, 0xb8, 0x3f, 0x9d, 0xa2, 0x97, 0x84, 0xb5, 0xc7, 0x90, 0x7c, 0xab, 0x48, 0xd6, 0x84, 0xa3, 0xd5, 0x1a}, + dt2: fp.Elt{0x63, 0x27, 0x3c, 0x49, 0x4b, 0xfc, 0x22, 0xf2, 0x0b, 0x50, 0xc2, 0x0f, 0xb4, 0x1f, 0x31, 0x0c, 0x2f, 0x53, 0xab, 0xaa, 0x75, 0x6f, 0xe0, 0x69, 0x39, 0x56, 0xe0, 0x3b, 0xb7, 0xa8, 0xbf, 0x45}, + }, + }, + { + { + addYX: fp.Elt{0x00, 0x45, 0xd9, 0x0d, 0x58, 0x03, 0xfc, 0x29, 0x93, 0xec, 0xbb, 0x6f, 0xa4, 0x7a, 0xd2, 0xec, 0xf8, 0xa7, 0xe2, 0xc2, 0x5f, 0x15, 0x0a, 0x13, 0xd5, 0xa1, 0x06, 0xb7, 0x1a, 0x15, 0x6b, 0x41}, + subYX: fp.Elt{0x85, 0x8c, 0xb2, 0x17, 0xd6, 0x3b, 0x0a, 0xd3, 0xea, 0x3b, 0x77, 0x39, 0xb7, 0x77, 0xd3, 0xc5, 0xbf, 0x5c, 0x6a, 0x1e, 0x8c, 0xe7, 0xc6, 0xc6, 0xc4, 0xb7, 0x2a, 0x8b, 0xf7, 0xb8, 0x61, 0x0d}, + dt2: fp.Elt{0xb0, 0x36, 0xc1, 0xe9, 0xef, 0xd7, 0xa8, 0x56, 0x20, 0x4b, 0xe4, 0x58, 0xcd, 0xe5, 0x07, 0xbd, 0xab, 0xe0, 0x57, 0x1b, 0xda, 0x2f, 0xe6, 0xaf, 0xd2, 0xe8, 0x77, 0x42, 0xf7, 0x2a, 0x1a, 0x19}, + }, + { + addYX: fp.Elt{0x6a, 0x6d, 0x6d, 0xd1, 0xfa, 0xf5, 0x03, 0x30, 0xbd, 0x6d, 0xc2, 0xc8, 0xf5, 0x38, 0x80, 0x4f, 0xb2, 0xbe, 0xa1, 0x76, 0x50, 0x1a, 0x73, 0xf2, 0x78, 0x2b, 0x8e, 0x3a, 0x1e, 0x34, 0x47, 0x7b}, + subYX: fp.Elt{0xc3, 0x2c, 0x36, 0xdc, 0xc5, 0x45, 0xbc, 0xef, 0x1b, 0x64, 0xd6, 0x65, 0x28, 0xe9, 0xda, 0x84, 0x13, 0xbe, 0x27, 0x8e, 0x3f, 0x98, 0x2a, 0x37, 0xee, 0x78, 0x97, 0xd6, 0xc0, 0x6f, 0xb4, 0x53}, + dt2: fp.Elt{0x58, 0x5d, 0xa7, 0xa3, 0x68, 0xbb, 0x20, 0x30, 0x2e, 0x03, 0xe9, 0xb1, 0xd4, 0x90, 0x72, 0xe3, 0x71, 0xb2, 0x36, 0x3e, 0x73, 0xa0, 0x2e, 0x3d, 0xd1, 0x85, 0x33, 0x62, 0x4e, 0xa7, 0x7b, 0x31}, + }, + { + addYX: fp.Elt{0xbf, 0xc4, 0x38, 0x53, 0xfb, 0x68, 0xa9, 0x77, 0xce, 0x55, 0xf9, 0x05, 0xcb, 0xeb, 0xfb, 0x8c, 0x46, 0xc2, 0x32, 0x7c, 0xf0, 0xdb, 0xd7, 0x2c, 0x62, 0x8e, 0xdd, 0x54, 0x75, 0xcf, 0x3f, 0x33}, + subYX: fp.Elt{0x49, 0x50, 0x1f, 0x4e, 0x6e, 0x55, 0x55, 0xde, 0x8c, 0x4e, 0x77, 0x96, 0x38, 0x3b, 0xfe, 0xb6, 0x43, 0x3c, 0x86, 0x69, 0xc2, 0x72, 0x66, 0x1f, 0x6b, 0xf9, 0x87, 0xbc, 0x4f, 0x37, 0x3e, 0x3c}, + dt2: fp.Elt{0xd2, 0x2f, 0x06, 0x6b, 0x08, 0x07, 0x69, 0x77, 0xc0, 0x94, 0xcc, 0xae, 0x43, 0x00, 0x59, 0x6e, 0xa3, 0x63, 0xa8, 0xdd, 0xfa, 0x24, 0x18, 0xd0, 0x35, 0xc7, 0x78, 0xf7, 0x0d, 0xd4, 0x5a, 0x1e}, + }, + { + addYX: fp.Elt{0x45, 0xc1, 0x17, 0x51, 0xf8, 0xed, 0x7e, 0xc7, 0xa9, 0x1a, 0x11, 0x6e, 0x2d, 0xef, 0x0b, 0xd5, 0x3f, 0x98, 0xb0, 0xa3, 0x9d, 0x65, 0xf1, 0xcd, 0x53, 0x4a, 0x8a, 0x18, 0x70, 0x0a, 0x7f, 0x23}, + subYX: fp.Elt{0xdd, 0xef, 
0xbe, 0x3a, 0x31, 0xe0, 0xbc, 0xbe, 0x6d, 0x5d, 0x79, 0x87, 0xd6, 0xbe, 0x68, 0xe3, 0x59, 0x76, 0x8c, 0x86, 0x0e, 0x7a, 0x92, 0x13, 0x14, 0x8f, 0x67, 0xb3, 0xcb, 0x1a, 0x76, 0x76}, + dt2: fp.Elt{0x56, 0x7a, 0x1c, 0x9d, 0xca, 0x96, 0xf9, 0xf9, 0x03, 0x21, 0xd4, 0xe8, 0xb3, 0xd5, 0xe9, 0x52, 0xc8, 0x54, 0x1e, 0x1b, 0x13, 0xb6, 0xfd, 0x47, 0x7d, 0x02, 0x32, 0x33, 0x27, 0xe2, 0x1f, 0x19}, + }, + }, +} + +var tabVerif = [1 << (omegaFix - 2)]pointR3{ + { /* 1P */ + addYX: fp.Elt{0x85, 0x3b, 0x8c, 0xf5, 0xc6, 0x93, 0xbc, 0x2f, 0x19, 0x0e, 0x8c, 0xfb, 0xc6, 0x2d, 0x93, 0xcf, 0xc2, 0x42, 0x3d, 0x64, 0x98, 0x48, 0x0b, 0x27, 0x65, 0xba, 0xd4, 0x33, 0x3a, 0x9d, 0xcf, 0x07}, + subYX: fp.Elt{0x3e, 0x91, 0x40, 0xd7, 0x05, 0x39, 0x10, 0x9d, 0xb3, 0xbe, 0x40, 0xd1, 0x05, 0x9f, 0x39, 0xfd, 0x09, 0x8a, 0x8f, 0x68, 0x34, 0x84, 0xc1, 0xa5, 0x67, 0x12, 0xf8, 0x98, 0x92, 0x2f, 0xfd, 0x44}, + dt2: fp.Elt{0x68, 0xaa, 0x7a, 0x87, 0x05, 0x12, 0xc9, 0xab, 0x9e, 0xc4, 0xaa, 0xcc, 0x23, 0xe8, 0xd9, 0x26, 0x8c, 0x59, 0x43, 0xdd, 0xcb, 0x7d, 0x1b, 0x5a, 0xa8, 0x65, 0x0c, 0x9f, 0x68, 0x7b, 0x11, 0x6f}, + }, + { /* 3P */ + addYX: fp.Elt{0x30, 0x97, 0xee, 0x4c, 0xa8, 0xb0, 0x25, 0xaf, 0x8a, 0x4b, 0x86, 0xe8, 0x30, 0x84, 0x5a, 0x02, 0x32, 0x67, 0x01, 0x9f, 0x02, 0x50, 0x1b, 0xc1, 0xf4, 0xf8, 0x80, 0x9a, 0x1b, 0x4e, 0x16, 0x7a}, + subYX: fp.Elt{0x65, 0xd2, 0xfc, 0xa4, 0xe8, 0x1f, 0x61, 0x56, 0x7d, 0xba, 0xc1, 0xe5, 0xfd, 0x53, 0xd3, 0x3b, 0xbd, 0xd6, 0x4b, 0x21, 0x1a, 0xf3, 0x31, 0x81, 0x62, 0xda, 0x5b, 0x55, 0x87, 0x15, 0xb9, 0x2a}, + dt2: fp.Elt{0x89, 0xd8, 0xd0, 0x0d, 0x3f, 0x93, 0xae, 0x14, 0x62, 0xda, 0x35, 0x1c, 0x22, 0x23, 0x94, 0x58, 0x4c, 0xdb, 0xf2, 0x8c, 0x45, 0xe5, 0x70, 0xd1, 0xc6, 0xb4, 0xb9, 0x12, 0xaf, 0x26, 0x28, 0x5a}, + }, + { /* 5P */ + addYX: fp.Elt{0x33, 0xbb, 0xa5, 0x08, 0x44, 0xbc, 0x12, 0xa2, 0x02, 0xed, 0x5e, 0xc7, 0xc3, 0x48, 0x50, 0x8d, 0x44, 0xec, 0xbf, 0x5a, 0x0c, 0xeb, 0x1b, 0xdd, 0xeb, 0x06, 0xe2, 0x46, 0xf1, 0xcc, 0x45, 0x29}, + subYX: fp.Elt{0xba, 0xd6, 0x47, 0xa4, 0xc3, 0x82, 0x91, 0x7f, 0xb7, 0x29, 0x27, 0x4b, 0xd1, 0x14, 0x00, 0xd5, 0x87, 0xa0, 0x64, 0xb8, 0x1c, 0xf1, 0x3c, 0xe3, 0xf3, 0x55, 0x1b, 0xeb, 0x73, 0x7e, 0x4a, 0x15}, + dt2: fp.Elt{0x85, 0x82, 0x2a, 0x81, 0xf1, 0xdb, 0xbb, 0xbc, 0xfc, 0xd1, 0xbd, 0xd0, 0x07, 0x08, 0x0e, 0x27, 0x2d, 0xa7, 0xbd, 0x1b, 0x0b, 0x67, 0x1b, 0xb4, 0x9a, 0xb6, 0x3b, 0x6b, 0x69, 0xbe, 0xaa, 0x43}, + }, + { /* 7P */ + addYX: fp.Elt{0xbf, 0xa3, 0x4e, 0x94, 0xd0, 0x5c, 0x1a, 0x6b, 0xd2, 0xc0, 0x9d, 0xb3, 0x3a, 0x35, 0x70, 0x74, 0x49, 0x2e, 0x54, 0x28, 0x82, 0x52, 0xb2, 0x71, 0x7e, 0x92, 0x3c, 0x28, 0x69, 0xea, 0x1b, 0x46}, + subYX: fp.Elt{0xb1, 0x21, 0x32, 0xaa, 0x9a, 0x2c, 0x6f, 0xba, 0xa7, 0x23, 0xba, 0x3b, 0x53, 0x21, 0xa0, 0x6c, 0x3a, 0x2c, 0x19, 0x92, 0x4f, 0x76, 0xea, 0x9d, 0xe0, 0x17, 0x53, 0x2e, 0x5d, 0xdd, 0x6e, 0x1d}, + dt2: fp.Elt{0xa2, 0xb3, 0xb8, 0x01, 0xc8, 0x6d, 0x83, 0xf1, 0x9a, 0xa4, 0x3e, 0x05, 0x47, 0x5f, 0x03, 0xb3, 0xf3, 0xad, 0x77, 0x58, 0xba, 0x41, 0x9c, 0x52, 0xa7, 0x90, 0x0f, 0x6a, 0x1c, 0xbb, 0x9f, 0x7a}, + }, + { /* 9P */ + addYX: fp.Elt{0x2f, 0x63, 0xa8, 0xa6, 0x8a, 0x67, 0x2e, 0x9b, 0xc5, 0x46, 0xbc, 0x51, 0x6f, 0x9e, 0x50, 0xa6, 0xb5, 0xf5, 0x86, 0xc6, 0xc9, 0x33, 0xb2, 0xce, 0x59, 0x7f, 0xdd, 0x8a, 0x33, 0xed, 0xb9, 0x34}, + subYX: fp.Elt{0x64, 0x80, 0x9d, 0x03, 0x7e, 0x21, 0x6e, 0xf3, 0x9b, 0x41, 0x20, 0xf5, 0xb6, 0x81, 0xa0, 0x98, 0x44, 0xb0, 0x5e, 0xe7, 0x08, 0xc6, 0xcb, 0x96, 0x8f, 0x9c, 0xdc, 0xfa, 0x51, 0x5a, 0xc0, 0x49}, + dt2: fp.Elt{0x1b, 0xaf, 0x45, 0x90, 0xbf, 0xe8, 0xb4, 0x06, 0x2f, 0xd2, 0x19, 0xa7, 0xe8, 0x83, 
0xff, 0xe2, 0x16, 0xcf, 0xd4, 0x93, 0x29, 0xfc, 0xf6, 0xaa, 0x06, 0x8b, 0x00, 0x1b, 0x02, 0x72, 0xc1, 0x73}, + }, + { /* 11P */ + addYX: fp.Elt{0xde, 0x2a, 0x80, 0x8a, 0x84, 0x00, 0xbf, 0x2f, 0x27, 0x2e, 0x30, 0x02, 0xcf, 0xfe, 0xd9, 0xe5, 0x06, 0x34, 0x70, 0x17, 0x71, 0x84, 0x3e, 0x11, 0xaf, 0x8f, 0x6d, 0x54, 0xe2, 0xaa, 0x75, 0x42}, + subYX: fp.Elt{0x48, 0x43, 0x86, 0x49, 0x02, 0x5b, 0x5f, 0x31, 0x81, 0x83, 0x08, 0x77, 0x69, 0xb3, 0xd6, 0x3e, 0x95, 0xeb, 0x8d, 0x6a, 0x55, 0x75, 0xa0, 0xa3, 0x7f, 0xc7, 0xd5, 0x29, 0x80, 0x59, 0xab, 0x18}, + dt2: fp.Elt{0xe9, 0x89, 0x60, 0xfd, 0xc5, 0x2c, 0x2b, 0xd8, 0xa4, 0xe4, 0x82, 0x32, 0xa1, 0xb4, 0x1e, 0x03, 0x22, 0x86, 0x1a, 0xb5, 0x99, 0x11, 0x31, 0x44, 0x48, 0xf9, 0x3d, 0xb5, 0x22, 0x55, 0xc6, 0x3d}, + }, + { /* 13P */ + addYX: fp.Elt{0x6d, 0x7f, 0x00, 0xa2, 0x22, 0xc2, 0x70, 0xbf, 0xdb, 0xde, 0xbc, 0xb5, 0x9a, 0xb3, 0x84, 0xbf, 0x07, 0xba, 0x07, 0xfb, 0x12, 0x0e, 0x7a, 0x53, 0x41, 0xf2, 0x46, 0xc3, 0xee, 0xd7, 0x4f, 0x23}, + subYX: fp.Elt{0x93, 0xbf, 0x7f, 0x32, 0x3b, 0x01, 0x6f, 0x50, 0x6b, 0x6f, 0x77, 0x9b, 0xc9, 0xeb, 0xfc, 0xae, 0x68, 0x59, 0xad, 0xaa, 0x32, 0xb2, 0x12, 0x9d, 0xa7, 0x24, 0x60, 0x17, 0x2d, 0x88, 0x67, 0x02}, + dt2: fp.Elt{0x78, 0xa3, 0x2e, 0x73, 0x19, 0xa1, 0x60, 0x53, 0x71, 0xd4, 0x8d, 0xdf, 0xb1, 0xe6, 0x37, 0x24, 0x33, 0xe5, 0xa7, 0x91, 0xf8, 0x37, 0xef, 0xa2, 0x63, 0x78, 0x09, 0xaa, 0xfd, 0xa6, 0x7b, 0x49}, + }, + { /* 15P */ + addYX: fp.Elt{0xa0, 0xea, 0xcf, 0x13, 0x03, 0xcc, 0xce, 0x24, 0x6d, 0x24, 0x9c, 0x18, 0x8d, 0xc2, 0x48, 0x86, 0xd0, 0xd4, 0xf2, 0xc1, 0xfa, 0xbd, 0xbd, 0x2d, 0x2b, 0xe7, 0x2d, 0xf1, 0x17, 0x29, 0xe2, 0x61}, + subYX: fp.Elt{0x0b, 0xcf, 0x8c, 0x46, 0x86, 0xcd, 0x0b, 0x04, 0xd6, 0x10, 0x99, 0x2a, 0xa4, 0x9b, 0x82, 0xd3, 0x92, 0x51, 0xb2, 0x07, 0x08, 0x30, 0x08, 0x75, 0xbf, 0x5e, 0xd0, 0x18, 0x42, 0xcd, 0xb5, 0x43}, + dt2: fp.Elt{0x16, 0xb5, 0xd0, 0x9b, 0x2f, 0x76, 0x9a, 0x5d, 0xee, 0xde, 0x3f, 0x37, 0x4e, 0xaf, 0x38, 0xeb, 0x70, 0x42, 0xd6, 0x93, 0x7d, 0x5a, 0x2e, 0x03, 0x42, 0xd8, 0xe4, 0x0a, 0x21, 0x61, 0x1d, 0x51}, + }, + { /* 17P */ + addYX: fp.Elt{0x81, 0x9d, 0x0e, 0x95, 0xef, 0x76, 0xc6, 0x92, 0x4f, 0x04, 0xd7, 0xc0, 0xcd, 0x20, 0x46, 0xa5, 0x48, 0x12, 0x8f, 0x6f, 0x64, 0x36, 0x9b, 0xaa, 0xe3, 0x55, 0xb8, 0xdd, 0x24, 0x59, 0x32, 0x6d}, + subYX: fp.Elt{0x87, 0xde, 0x20, 0x44, 0x48, 0x86, 0x13, 0x08, 0xb4, 0xed, 0x92, 0xb5, 0x16, 0xf0, 0x1c, 0x8a, 0x25, 0x2d, 0x94, 0x29, 0x27, 0x4e, 0xfa, 0x39, 0x10, 0x28, 0x48, 0xe2, 0x6f, 0xfe, 0xa7, 0x71}, + dt2: fp.Elt{0x54, 0xc8, 0xc8, 0xa5, 0xb8, 0x82, 0x71, 0x6c, 0x03, 0x2a, 0x5f, 0xfe, 0x79, 0x14, 0xfd, 0x33, 0x0c, 0x8d, 0x77, 0x83, 0x18, 0x59, 0xcf, 0x72, 0xa9, 0xea, 0x9e, 0x55, 0xb6, 0xc4, 0x46, 0x47}, + }, + { /* 19P */ + addYX: fp.Elt{0x2b, 0x9a, 0xc6, 0x6d, 0x3c, 0x7b, 0x77, 0xd3, 0x17, 0xf6, 0x89, 0x6f, 0x27, 0xb2, 0xfa, 0xde, 0xb5, 0x16, 0x3a, 0xb5, 0xf7, 0x1c, 0x65, 0x45, 0xb7, 0x9f, 0xfe, 0x34, 0xde, 0x51, 0x9a, 0x5c}, + subYX: fp.Elt{0x47, 0x11, 0x74, 0x64, 0xc8, 0x46, 0x85, 0x34, 0x49, 0xc8, 0xfc, 0x0e, 0xdd, 0xae, 0x35, 0x7d, 0x32, 0xa3, 0x72, 0x06, 0x76, 0x9a, 0x93, 0xff, 0xd6, 0xe6, 0xb5, 0x7d, 0x49, 0x63, 0x96, 0x21}, + dt2: fp.Elt{0x67, 0x0e, 0xf1, 0x79, 0xcf, 0xf1, 0x10, 0xf5, 0x5b, 0x51, 0x58, 0xe6, 0xa1, 0xda, 0xdd, 0xff, 0x77, 0x22, 0x14, 0x10, 0x17, 0xa7, 0xc3, 0x09, 0xbb, 0x23, 0x82, 0x60, 0x3c, 0x50, 0x04, 0x48}, + }, + { /* 21P */ + addYX: fp.Elt{0xc7, 0x7f, 0xa3, 0x2c, 0xd0, 0x9e, 0x24, 0xc4, 0xab, 0xac, 0x15, 0xa6, 0xe3, 0xa0, 0x59, 0xa0, 0x23, 0x0e, 0x6e, 0xc9, 0xd7, 0x6e, 0xa9, 0x88, 0x6d, 0x69, 0x50, 0x16, 0xa5, 0x98, 0x33, 
0x55}, + subYX: fp.Elt{0x75, 0xd1, 0x36, 0x3a, 0xd2, 0x21, 0x68, 0x3b, 0x32, 0x9e, 0x9b, 0xe9, 0xa7, 0x0a, 0xb4, 0xbb, 0x47, 0x8a, 0x83, 0x20, 0xe4, 0x5c, 0x9e, 0x5d, 0x5e, 0x4c, 0xde, 0x58, 0x88, 0x09, 0x1e, 0x77}, + dt2: fp.Elt{0xdf, 0x1e, 0x45, 0x78, 0xd2, 0xf5, 0x12, 0x9a, 0xcb, 0x9c, 0x89, 0x85, 0x79, 0x5d, 0xda, 0x3a, 0x08, 0x95, 0xa5, 0x9f, 0x2d, 0x4a, 0x7f, 0x47, 0x11, 0xa6, 0xf5, 0x8f, 0xd6, 0xd1, 0x5e, 0x5a}, + }, + { /* 23P */ + addYX: fp.Elt{0x83, 0x0e, 0x15, 0xfe, 0x2a, 0x12, 0x95, 0x11, 0xd8, 0x35, 0x4b, 0x7e, 0x25, 0x9a, 0x20, 0xcf, 0x20, 0x1e, 0x71, 0x1e, 0x29, 0xf8, 0x87, 0x73, 0xf0, 0x92, 0xbf, 0xd8, 0x97, 0xb8, 0xac, 0x44}, + subYX: fp.Elt{0x59, 0x73, 0x52, 0x58, 0xc5, 0xe0, 0xe5, 0xba, 0x7e, 0x9d, 0xdb, 0xca, 0x19, 0x5c, 0x2e, 0x39, 0xe9, 0xab, 0x1c, 0xda, 0x1e, 0x3c, 0x65, 0x28, 0x44, 0xdc, 0xef, 0x5f, 0x13, 0x60, 0x9b, 0x01}, + dt2: fp.Elt{0x83, 0x4b, 0x13, 0x5e, 0x14, 0x68, 0x60, 0x1e, 0x16, 0x4c, 0x30, 0x24, 0x4f, 0xe6, 0xf5, 0xc4, 0xd7, 0x3e, 0x1a, 0xfc, 0xa8, 0x88, 0x6e, 0x50, 0x92, 0x2f, 0xad, 0xe6, 0xfd, 0x49, 0x0c, 0x15}, + }, + { /* 25P */ + addYX: fp.Elt{0x38, 0x11, 0x47, 0x09, 0x95, 0xf2, 0x7b, 0x8e, 0x51, 0xa6, 0x75, 0x4f, 0x39, 0xef, 0x6f, 0x5d, 0xad, 0x08, 0xa7, 0x25, 0xc4, 0x79, 0xaf, 0x10, 0x22, 0x99, 0xb9, 0x5b, 0x07, 0x5a, 0x2b, 0x6b}, + subYX: fp.Elt{0x68, 0xa8, 0xdc, 0x9c, 0x3c, 0x86, 0x49, 0xb8, 0xd0, 0x4a, 0x71, 0xb8, 0xdb, 0x44, 0x3f, 0xc8, 0x8d, 0x16, 0x36, 0x0c, 0x56, 0xe3, 0x3e, 0xfe, 0xc1, 0xfb, 0x05, 0x1e, 0x79, 0xd7, 0xa6, 0x78}, + dt2: fp.Elt{0x76, 0xb9, 0xa0, 0x47, 0x4b, 0x70, 0xbf, 0x58, 0xd5, 0x48, 0x17, 0x74, 0x55, 0xb3, 0x01, 0xa6, 0x90, 0xf5, 0x42, 0xd5, 0xb1, 0x1f, 0x2b, 0xaa, 0x00, 0x5d, 0xd5, 0x4a, 0xfc, 0x7f, 0x5c, 0x72}, + }, + { /* 27P */ + addYX: fp.Elt{0xb2, 0x99, 0xcf, 0xd1, 0x15, 0x67, 0x42, 0xe4, 0x34, 0x0d, 0xa2, 0x02, 0x11, 0xd5, 0x52, 0x73, 0x9f, 0x10, 0x12, 0x8b, 0x7b, 0x15, 0xd1, 0x23, 0xa3, 0xf3, 0xb1, 0x7c, 0x27, 0xc9, 0x4c, 0x79}, + subYX: fp.Elt{0xc0, 0x98, 0xd0, 0x1c, 0xf7, 0x2b, 0x80, 0x91, 0x66, 0x63, 0x5e, 0xed, 0xa4, 0x6c, 0x41, 0xfe, 0x4c, 0x99, 0x02, 0x49, 0x71, 0x5d, 0x58, 0xdf, 0xe7, 0xfa, 0x55, 0xf8, 0x25, 0x46, 0xd5, 0x4c}, + dt2: fp.Elt{0x53, 0x50, 0xac, 0xc2, 0x26, 0xc4, 0xf6, 0x4a, 0x58, 0x72, 0xf6, 0x32, 0xad, 0xed, 0x9a, 0xbc, 0x21, 0x10, 0x31, 0x0a, 0xf1, 0x32, 0xd0, 0x2a, 0x85, 0x8e, 0xcc, 0x6f, 0x7b, 0x35, 0x08, 0x70}, + }, + { /* 29P */ + addYX: fp.Elt{0x01, 0x3f, 0x77, 0x38, 0x27, 0x67, 0x88, 0x0b, 0xfb, 0xcc, 0xfb, 0x95, 0xfa, 0xc8, 0xcc, 0xb8, 0xb6, 0x29, 0xad, 0xb9, 0xa3, 0xd5, 0x2d, 0x8d, 0x6a, 0x0f, 0xad, 0x51, 0x98, 0x7e, 0xef, 0x06}, + subYX: fp.Elt{0x34, 0x4a, 0x58, 0x82, 0xbb, 0x9f, 0x1b, 0xd0, 0x2b, 0x79, 0xb4, 0xd2, 0x63, 0x64, 0xab, 0x47, 0x02, 0x62, 0x53, 0x48, 0x9c, 0x63, 0x31, 0xb6, 0x28, 0xd4, 0xd6, 0x69, 0x36, 0x2a, 0xa9, 0x13}, + dt2: fp.Elt{0xe5, 0x7d, 0x57, 0xc0, 0x1c, 0x77, 0x93, 0xca, 0x5c, 0xdc, 0x35, 0x50, 0x1e, 0xe4, 0x40, 0x75, 0x71, 0xe0, 0x02, 0xd8, 0x01, 0x0f, 0x68, 0x24, 0x6a, 0xf8, 0x2a, 0x8a, 0xdf, 0x6d, 0x29, 0x3c}, + }, + { /* 31P */ + addYX: fp.Elt{0x13, 0xa7, 0x14, 0xd9, 0xf9, 0x15, 0xad, 0xae, 0x12, 0xf9, 0x8f, 0x8c, 0xf9, 0x7b, 0x2f, 0xa9, 0x30, 0xd7, 0x53, 0x9f, 0x17, 0x23, 0xf8, 0xaf, 0xba, 0x77, 0x0c, 0x49, 0x93, 0xd3, 0x99, 0x7a}, + subYX: fp.Elt{0x41, 0x25, 0x1f, 0xbb, 0x2e, 0x4d, 0xeb, 0xfc, 0x1f, 0xb9, 0xad, 0x40, 0xc7, 0x10, 0x95, 0xb8, 0x05, 0xad, 0xa1, 0xd0, 0x7d, 0xa3, 0x71, 0xfc, 0x7b, 0x71, 0x47, 0x07, 0x70, 0x2c, 0x89, 0x0a}, + dt2: fp.Elt{0xe8, 0xa3, 0xbd, 0x36, 0x24, 0xed, 0x52, 0x8f, 0x94, 0x07, 0xe8, 0x57, 0x41, 0xc8, 0xa8, 0x77, 0xe0, 
0x9c, 0x2f, 0x26, 0x63, 0x65, 0xa9, 0xa5, 0xd2, 0xf7, 0x02, 0x83, 0xd2, 0x62, 0x67, 0x28}, + }, + { /* 33P */ + addYX: fp.Elt{0x25, 0x5b, 0xe3, 0x3c, 0x09, 0x36, 0x78, 0x4e, 0x97, 0xaa, 0x6b, 0xb2, 0x1d, 0x18, 0xe1, 0x82, 0x3f, 0xb8, 0xc7, 0xcb, 0xd3, 0x92, 0xc1, 0x0c, 0x3a, 0x9d, 0x9d, 0x6a, 0x04, 0xda, 0xf1, 0x32}, + subYX: fp.Elt{0xbd, 0xf5, 0x2e, 0xce, 0x2b, 0x8e, 0x55, 0x7c, 0x63, 0xbc, 0x47, 0x67, 0xb4, 0x6c, 0x98, 0xe4, 0xb8, 0x89, 0xbb, 0x3b, 0x9f, 0x17, 0x4a, 0x15, 0x7a, 0x76, 0xf1, 0xd6, 0xa3, 0xf2, 0x86, 0x76}, + dt2: fp.Elt{0x6a, 0x7c, 0x59, 0x6d, 0xa6, 0x12, 0x8d, 0xaa, 0x2b, 0x85, 0xd3, 0x04, 0x03, 0x93, 0x11, 0x8f, 0x22, 0xb0, 0x09, 0xc2, 0x73, 0xdc, 0x91, 0x3f, 0xa6, 0x28, 0xad, 0xa9, 0xf8, 0x05, 0x13, 0x56}, + }, + { /* 35P */ + addYX: fp.Elt{0xd1, 0xae, 0x92, 0xec, 0x8d, 0x97, 0x0c, 0x10, 0xe5, 0x73, 0x6d, 0x4d, 0x43, 0xd5, 0x43, 0xca, 0x48, 0xba, 0x47, 0xd8, 0x22, 0x1b, 0x13, 0x83, 0x2c, 0x4d, 0x5d, 0xe3, 0x53, 0xec, 0xaa}, + subYX: fp.Elt{0xd5, 0xc0, 0xb0, 0xe7, 0x28, 0xcc, 0x22, 0x67, 0x53, 0x5c, 0x07, 0xdb, 0xbb, 0xe9, 0x9d, 0x70, 0x61, 0x0a, 0x01, 0xd7, 0xa7, 0x8d, 0xf6, 0xca, 0x6c, 0xcc, 0x57, 0x2c, 0xef, 0x1a, 0x0a, 0x03}, + dt2: fp.Elt{0xaa, 0xd2, 0x3a, 0x00, 0x73, 0xf7, 0xb1, 0x7b, 0x08, 0x66, 0x21, 0x2b, 0x80, 0x29, 0x3f, 0x0b, 0x3e, 0xd2, 0x0e, 0x52, 0x86, 0xdc, 0x21, 0x78, 0x80, 0x54, 0x06, 0x24, 0x1c, 0x9c, 0xbe, 0x20}, + }, + { /* 37P */ + addYX: fp.Elt{0xa6, 0x73, 0x96, 0x24, 0xd8, 0x87, 0x53, 0xe1, 0x93, 0xe4, 0x46, 0xf5, 0x2d, 0xbc, 0x43, 0x59, 0xb5, 0x63, 0x6f, 0xc3, 0x81, 0x9a, 0x7f, 0x1c, 0xde, 0xc1, 0x0a, 0x1f, 0x36, 0xb3, 0x0a, 0x75}, + subYX: fp.Elt{0x60, 0x5e, 0x02, 0xe2, 0x4a, 0xe4, 0xe0, 0x20, 0x38, 0xb9, 0xdc, 0xcb, 0x2f, 0x3b, 0x3b, 0xb0, 0x1c, 0x0d, 0x5a, 0xf9, 0x9c, 0x63, 0x5d, 0x10, 0x11, 0xe3, 0x67, 0x50, 0x54, 0x4c, 0x76, 0x69}, + dt2: fp.Elt{0x37, 0x10, 0xf8, 0xa2, 0x83, 0x32, 0x8a, 0x1e, 0xf1, 0xcb, 0x7f, 0xbd, 0x23, 0xda, 0x2e, 0x6f, 0x63, 0x25, 0x2e, 0xac, 0x5b, 0xd1, 0x2f, 0xb7, 0x40, 0x50, 0x07, 0xb7, 0x3f, 0x6b, 0xf9, 0x54}, + }, + { /* 39P */ + addYX: fp.Elt{0x79, 0x92, 0x66, 0x29, 0x04, 0xf2, 0xad, 0x0f, 0x4a, 0x72, 0x7d, 0x7d, 0x04, 0xa2, 0xdd, 0x3a, 0xf1, 0x60, 0x57, 0x8c, 0x82, 0x94, 0x3d, 0x6f, 0x9e, 0x53, 0xb7, 0x2b, 0xc5, 0xe9, 0x7f, 0x3d}, + subYX: fp.Elt{0xcd, 0x1e, 0xb1, 0x16, 0xc6, 0xaf, 0x7d, 0x17, 0x79, 0x64, 0x57, 0xfa, 0x9c, 0x4b, 0x76, 0x89, 0x85, 0xe7, 0xec, 0xe6, 0x10, 0xa1, 0xa8, 0xb7, 0xf0, 0xdb, 0x85, 0xbe, 0x9f, 0x83, 0xe6, 0x78}, + dt2: fp.Elt{0x6b, 0x85, 0xb8, 0x37, 0xf7, 0x2d, 0x33, 0x70, 0x8a, 0x17, 0x1a, 0x04, 0x43, 0x5d, 0xd0, 0x75, 0x22, 0x9e, 0xe5, 0xa0, 0x4a, 0xf7, 0x0f, 0x32, 0x42, 0x82, 0x08, 0x50, 0xf3, 0x68, 0xf2, 0x70}, + }, + { /* 41P */ + addYX: fp.Elt{0x47, 0x5f, 0x80, 0xb1, 0x83, 0x45, 0x86, 0x66, 0x19, 0x7c, 0xdd, 0x60, 0xd1, 0xc5, 0x35, 0xf5, 0x06, 0xb0, 0x4c, 0x1e, 0xb7, 0x4e, 0x87, 0xe9, 0xd9, 0x89, 0xd8, 0xfa, 0x5c, 0x34, 0x0d, 0x7c}, + subYX: fp.Elt{0x55, 0xf3, 0xdc, 0x70, 0x20, 0x11, 0x24, 0x23, 0x17, 0xe1, 0xfc, 0xe7, 0x7e, 0xc9, 0x0c, 0x38, 0x98, 0xb6, 0x52, 0x35, 0xed, 0xde, 0x1d, 0xb3, 0xb9, 0xc4, 0xb8, 0x39, 0xc0, 0x56, 0x4e, 0x40}, + dt2: fp.Elt{0x8a, 0x33, 0x78, 0x8c, 0x4b, 0x1f, 0x1f, 0x59, 0xe1, 0xb5, 0xe0, 0x67, 0xb1, 0x6a, 0x36, 0xa0, 0x44, 0x3d, 0x5f, 0xb4, 0x52, 0x41, 0xbc, 0x5c, 0x77, 0xc7, 0xae, 0x2a, 0x76, 0x54, 0xd7, 0x20}, + }, + { /* 43P */ + addYX: fp.Elt{0x58, 0xb7, 0x3b, 0xc7, 0x6f, 0xc3, 0x8f, 0x5e, 0x9a, 0xbb, 0x3c, 0x36, 0xa5, 0x43, 0xe5, 0xac, 0x22, 0xc9, 0x3b, 0x90, 0x7d, 0x4a, 0x93, 0xa9, 0x62, 0xec, 0xce, 0xf3, 0x46, 0x1e, 0x8f, 0x2b}, + subYX: fp.Elt{0x43, 
0xf5, 0xb9, 0x35, 0xb1, 0xfe, 0x74, 0x9d, 0x6c, 0x95, 0x8c, 0xde, 0xf1, 0x7d, 0xb3, 0x84, 0xa9, 0x8b, 0x13, 0x57, 0x07, 0x2b, 0x32, 0xe9, 0xe1, 0x4c, 0x0b, 0x79, 0xa8, 0xad, 0xb8, 0x38}, + dt2: fp.Elt{0x5d, 0xf9, 0x51, 0xdf, 0x9c, 0x4a, 0xc0, 0xb5, 0xac, 0xde, 0x1f, 0xcb, 0xae, 0x52, 0x39, 0x2b, 0xda, 0x66, 0x8b, 0x32, 0x8b, 0x6d, 0x10, 0x1d, 0x53, 0x19, 0xba, 0xce, 0x32, 0xeb, 0x9a, 0x04}, + }, + { /* 45P */ + addYX: fp.Elt{0x31, 0x79, 0xfc, 0x75, 0x0b, 0x7d, 0x50, 0xaa, 0xd3, 0x25, 0x67, 0x7a, 0x4b, 0x92, 0xef, 0x0f, 0x30, 0x39, 0x6b, 0x39, 0x2b, 0x54, 0x82, 0x1d, 0xfc, 0x74, 0xf6, 0x30, 0x75, 0xe1, 0x5e, 0x79}, + subYX: fp.Elt{0x7e, 0xfe, 0xdc, 0x63, 0x3c, 0x7d, 0x76, 0xd7, 0x40, 0x6e, 0x85, 0x97, 0x48, 0x59, 0x9c, 0x20, 0x13, 0x7c, 0x4f, 0xe1, 0x61, 0x68, 0x67, 0xb6, 0xfc, 0x25, 0xd6, 0xc8, 0xe0, 0x65, 0xc6, 0x51}, + dt2: fp.Elt{0x81, 0xbd, 0xec, 0x52, 0x0a, 0x5b, 0x4a, 0x25, 0xe7, 0xaf, 0x34, 0xe0, 0x6e, 0x1f, 0x41, 0x5d, 0x31, 0x4a, 0xee, 0xca, 0x0d, 0x4d, 0xa2, 0xe6, 0x77, 0x44, 0xc5, 0x9d, 0xf4, 0x9b, 0xd1, 0x6c}, + }, + { /* 47P */ + addYX: fp.Elt{0x86, 0xc3, 0xaf, 0x65, 0x21, 0x61, 0xfe, 0x1f, 0x10, 0x1b, 0xd5, 0xb8, 0x88, 0x2a, 0x2a, 0x08, 0xaa, 0x0b, 0x99, 0x20, 0x7e, 0x62, 0xf6, 0x76, 0xe7, 0x43, 0x9e, 0x42, 0xa7, 0xb3, 0x01, 0x5e}, + subYX: fp.Elt{0xa3, 0x9c, 0x17, 0x52, 0x90, 0x61, 0x87, 0x7e, 0x85, 0x9f, 0x2c, 0x0b, 0x06, 0x0a, 0x1d, 0x57, 0x1e, 0x71, 0x99, 0x84, 0xa8, 0xba, 0xa2, 0x80, 0x38, 0xe6, 0xb2, 0x40, 0xdb, 0xf3, 0x20, 0x75}, + dt2: fp.Elt{0xa1, 0x57, 0x93, 0xd3, 0xe3, 0x0b, 0xb5, 0x3d, 0xa5, 0x94, 0x9e, 0x59, 0xdd, 0x6c, 0x7b, 0x96, 0x6e, 0x1e, 0x31, 0xdf, 0x64, 0x9a, 0x30, 0x1a, 0x86, 0xc9, 0xf3, 0xce, 0x9c, 0x2c, 0x09, 0x71}, + }, + { /* 49P */ + addYX: fp.Elt{0xcf, 0x1d, 0x05, 0x74, 0xac, 0xd8, 0x6b, 0x85, 0x1e, 0xaa, 0xb7, 0x55, 0x08, 0xa4, 0xf6, 0x03, 0xeb, 0x3c, 0x74, 0xc9, 0xcb, 0xe7, 0x4a, 0x3a, 0xde, 0xab, 0x37, 0x71, 0xbb, 0xa5, 0x73, 0x41}, + subYX: fp.Elt{0x8c, 0x91, 0x64, 0x03, 0x3f, 0x52, 0xd8, 0x53, 0x1c, 0x6b, 0xab, 0x3f, 0xf4, 0x04, 0xb4, 0xa2, 0xa4, 0xe5, 0x81, 0x66, 0x9e, 0x4a, 0x0b, 0x08, 0xa7, 0x7b, 0x25, 0xd0, 0x03, 0x5b, 0xa1, 0x0e}, + dt2: fp.Elt{0x8a, 0x21, 0xf9, 0xf0, 0x31, 0x6e, 0xc5, 0x17, 0x08, 0x47, 0xfc, 0x1a, 0x2b, 0x6e, 0x69, 0x5a, 0x76, 0xf1, 0xb2, 0xf4, 0x68, 0x16, 0x93, 0xf7, 0x67, 0x3a, 0x4e, 0x4a, 0x61, 0x65, 0xc5, 0x5f}, + }, + { /* 51P */ + addYX: fp.Elt{0x8e, 0x98, 0x90, 0x77, 0xe6, 0xe1, 0x92, 0x48, 0x22, 0xd7, 0x5c, 0x1c, 0x0f, 0x95, 0xd5, 0x01, 0xed, 0x3e, 0x92, 0xe5, 0x9a, 0x81, 0xb0, 0xe3, 0x1b, 0x65, 0x46, 0x9d, 0x40, 0xc7, 0x14, 0x32}, + subYX: fp.Elt{0xe5, 0x7a, 0x6d, 0xc4, 0x0d, 0x57, 0x6e, 0x13, 0x8f, 0xdc, 0xf8, 0x54, 0xcc, 0xaa, 0xd0, 0x0f, 0x86, 0xad, 0x0d, 0x31, 0x03, 0x9f, 0x54, 0x59, 0xa1, 0x4a, 0x45, 0x4c, 0x41, 0x1c, 0x71, 0x62}, + dt2: fp.Elt{0x70, 0x17, 0x65, 0x06, 0x74, 0x82, 0x29, 0x13, 0x36, 0x94, 0x27, 0x8a, 0x66, 0xa0, 0xa4, 0x3b, 0x3c, 0x22, 0x5d, 0x18, 0xec, 0xb8, 0xb6, 0xd9, 0x3c, 0x83, 0xcb, 0x3e, 0x07, 0x94, 0xea, 0x5b}, + }, + { /* 53P */ + addYX: fp.Elt{0xf8, 0xd2, 0x43, 0xf3, 0x63, 0xce, 0x70, 0xb4, 0xf1, 0xe8, 0x43, 0x05, 0x8f, 0xba, 0x67, 0x00, 0x6f, 0x7b, 0x11, 0xa2, 0xa1, 0x51, 0xda, 0x35, 0x2f, 0xbd, 0xf1, 0x44, 0x59, 0x78, 0xd0, 0x4a}, + subYX: fp.Elt{0xe4, 0x9b, 0xc8, 0x12, 0x09, 0xbf, 0x1d, 0x64, 0x9c, 0x57, 0x6e, 0x7d, 0x31, 0x8b, 0xf3, 0xac, 0x65, 0xb0, 0x97, 0xf6, 0x02, 0x9e, 0xfe, 0xab, 0xec, 0x1e, 0xf6, 0x48, 0xc1, 0xd5, 0xac, 0x3a}, + dt2: fp.Elt{0x01, 0x83, 0x31, 0xc3, 0x34, 0x3b, 0x8e, 0x85, 0x26, 0x68, 0x31, 0x07, 0x47, 0xc0, 0x99, 0xdc, 0x8c, 0xa8, 0x9d, 0xd3, 0x2e, 0x5b, 
0x08, 0x34, 0x3d, 0x85, 0x02, 0xd9, 0xb1, 0x0c, 0xff, 0x3a}, + }, + { /* 55P */ + addYX: fp.Elt{0x05, 0x35, 0xc5, 0xf4, 0x0b, 0x43, 0x26, 0x92, 0x83, 0x22, 0x1f, 0x26, 0x13, 0x9c, 0xe4, 0x68, 0xc6, 0x27, 0xd3, 0x8f, 0x78, 0x33, 0xef, 0x09, 0x7f, 0x9e, 0xd9, 0x2b, 0x73, 0x9f, 0xcf, 0x2c}, + subYX: fp.Elt{0x5e, 0x40, 0x20, 0x3a, 0xeb, 0xc7, 0xc5, 0x87, 0xc9, 0x56, 0xad, 0xed, 0xef, 0x11, 0xe3, 0x8e, 0xf9, 0xd5, 0x29, 0xad, 0x48, 0x2e, 0x25, 0x29, 0x1d, 0x25, 0xcd, 0xf4, 0x86, 0x7e, 0x0e, 0x11}, + dt2: fp.Elt{0xe4, 0xf5, 0x03, 0xd6, 0x9e, 0xd8, 0xc0, 0x57, 0x0c, 0x20, 0xb0, 0xf0, 0x28, 0x86, 0x88, 0x12, 0xb7, 0x3b, 0x2e, 0xa0, 0x09, 0x27, 0x17, 0x53, 0x37, 0x3a, 0x69, 0xb9, 0xe0, 0x57, 0xc5, 0x05}, + }, + { /* 57P */ + addYX: fp.Elt{0xb0, 0x0e, 0xc2, 0x89, 0xb0, 0xbb, 0x76, 0xf7, 0x5c, 0xd8, 0x0f, 0xfa, 0xf6, 0x5b, 0xf8, 0x61, 0xfb, 0x21, 0x44, 0x63, 0x4e, 0x3f, 0xb9, 0xb6, 0x05, 0x12, 0x86, 0x41, 0x08, 0xef, 0x9f, 0x28}, + subYX: fp.Elt{0x6f, 0x7e, 0xc9, 0x1f, 0x31, 0xce, 0xf9, 0xd8, 0xae, 0xfd, 0xf9, 0x11, 0x30, 0x26, 0x3f, 0x7a, 0xdd, 0x25, 0xed, 0x8b, 0xa0, 0x7e, 0x5b, 0xe1, 0x5a, 0x87, 0xe9, 0x8f, 0x17, 0x4c, 0x15, 0x6e}, + dt2: fp.Elt{0xbf, 0x9a, 0xd6, 0xfe, 0x36, 0x63, 0x61, 0xcf, 0x4f, 0xc9, 0x35, 0x83, 0xe7, 0xe4, 0x16, 0x9b, 0xe7, 0x7f, 0x3a, 0x75, 0x65, 0x97, 0x78, 0x13, 0x19, 0xa3, 0x5c, 0xa9, 0x42, 0xf6, 0xfb, 0x6a}, + }, + { /* 59P */ + addYX: fp.Elt{0xcc, 0xa8, 0x13, 0xf9, 0x70, 0x50, 0xe5, 0x5d, 0x61, 0xf5, 0x0c, 0x2b, 0x7b, 0x16, 0x1d, 0x7d, 0x89, 0xd4, 0xea, 0x90, 0xb6, 0x56, 0x29, 0xda, 0xd9, 0x1e, 0x80, 0xdb, 0xce, 0x93, 0xc0, 0x12}, + subYX: fp.Elt{0xc1, 0xd2, 0xf5, 0x62, 0x0c, 0xde, 0xa8, 0x7d, 0x9a, 0x7b, 0x0e, 0xb0, 0xa4, 0x3d, 0xfc, 0x98, 0xe0, 0x70, 0xad, 0x0d, 0xda, 0x6a, 0xeb, 0x7d, 0xc4, 0x38, 0x50, 0xb9, 0x51, 0xb8, 0xb4, 0x0d}, + dt2: fp.Elt{0x0f, 0x19, 0xb8, 0x08, 0x93, 0x7f, 0x14, 0xfc, 0x10, 0xe3, 0x1a, 0xa1, 0xa0, 0x9d, 0x96, 0x06, 0xfd, 0xd7, 0xc7, 0xda, 0x72, 0x55, 0xe7, 0xce, 0xe6, 0x5c, 0x63, 0xc6, 0x99, 0x87, 0xaa, 0x33}, + }, + { /* 61P */ + addYX: fp.Elt{0xb1, 0x6c, 0x15, 0xfc, 0x88, 0xf5, 0x48, 0x83, 0x27, 0x6d, 0x0a, 0x1a, 0x9b, 0xba, 0xa2, 0x6d, 0xb6, 0x5a, 0xca, 0x87, 0x5c, 0x2d, 0x26, 0xe2, 0xa6, 0x89, 0xd5, 0xc8, 0xc1, 0xd0, 0x2c, 0x21}, + subYX: fp.Elt{0xf2, 0x5c, 0x08, 0xbd, 0x1e, 0xf5, 0x0f, 0xaf, 0x1f, 0x3f, 0xd3, 0x67, 0x89, 0x1a, 0xf5, 0x78, 0x3c, 0x03, 0x60, 0x50, 0xe1, 0xbf, 0xc2, 0x6e, 0x86, 0x1a, 0xe2, 0xe8, 0x29, 0x6f, 0x3c, 0x23}, + dt2: fp.Elt{0x81, 0xc7, 0x18, 0x7f, 0x10, 0xd5, 0xf4, 0xd2, 0x28, 0x9d, 0x7e, 0x52, 0xf2, 0xcd, 0x2e, 0x12, 0x41, 0x33, 0x3d, 0x3d, 0x2a, 0x86, 0x0a, 0xa7, 0xe3, 0x4c, 0x91, 0x11, 0x89, 0x77, 0xb7, 0x1d}, + }, + { /* 63P */ + addYX: fp.Elt{0xb6, 0x1a, 0x70, 0xdd, 0x69, 0x47, 0x39, 0xb3, 0xa5, 0x8d, 0xcf, 0x19, 0xd4, 0xde, 0xb8, 0xe2, 0x52, 0xc8, 0x2a, 0xfd, 0x61, 0x41, 0xdf, 0x15, 0xbe, 0x24, 0x7d, 0x01, 0x8a, 0xca, 0xe2, 0x7a}, + subYX: fp.Elt{0x6f, 0xc2, 0x6b, 0x7c, 0x39, 0x52, 0xf3, 0xdd, 0x13, 0x01, 0xd5, 0x53, 0xcc, 0xe2, 0x97, 0x7a, 0x30, 0xa3, 0x79, 0xbf, 0x3a, 0xf4, 0x74, 0x7c, 0xfc, 0xad, 0xe2, 0x26, 0xad, 0x97, 0xad, 0x31}, + dt2: fp.Elt{0x62, 0xb9, 0x20, 0x09, 0xed, 0x17, 0xe8, 0xb7, 0x9d, 0xda, 0x19, 0x3f, 0xcc, 0x18, 0x85, 0x1e, 0x64, 0x0a, 0x56, 0x25, 0x4f, 0xc1, 0x91, 0xe4, 0x83, 0x2c, 0x62, 0xa6, 0x53, 0xfc, 0xd1, 0x1e}, + }, +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed448/BUILD.bazel b/vendor/github.com/cloudflare/circl/sign/ed448/BUILD.bazel new file mode 100644 index 00000000..74debe45 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed448/BUILD.bazel @@ -0,0 
+1,17 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "ed448",
+    srcs = [
+        "ed448.go",
+        "signapi.go",
+    ],
+    importmap = "peridot.resf.org/vendor/github.com/cloudflare/circl/sign/ed448",
+    importpath = "github.com/cloudflare/circl/sign/ed448",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//vendor/github.com/cloudflare/circl/ecc/goldilocks",
+        "//vendor/github.com/cloudflare/circl/internal/sha3",
+        "//vendor/github.com/cloudflare/circl/sign",
+    ],
+)
diff --git a/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go b/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go
new file mode 100644
index 00000000..324bd8f3
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go
@@ -0,0 +1,411 @@
+// Package ed448 implements the Ed448 signature scheme as described in RFC-8032.
+//
+// This package implements two signature variants.
+//
+// | Scheme Name | Sign Function     | Verification | Context           |
+// |-------------|-------------------|--------------|-------------------|
+// | Ed448       | Sign              | Verify       | Yes, can be empty |
+// | Ed448Ph     | SignPh            | VerifyPh     | Yes, can be empty |
+// | All above   | (PrivateKey).Sign | VerifyAny    | As above          |
+//
+// Specific functions for signing and verification are defined. A generic
+// signing function for all schemes is available through the crypto.Signer
+// interface, which is implemented by the PrivateKey type. A corresponding
+// all-in-one verification method is provided by the VerifyAny function.
+//
+// Both schemes require a context string for domain separation. This parameter
+// is passed using a SignerOptions struct defined in this package.
+//
+// References:
+//
+//   - RFC8032: https://rfc-editor.org/rfc/rfc8032.txt
+//   - EdDSA for more curves: https://eprint.iacr.org/2015/677
+//   - High-speed high-security signatures: https://doi.org/10.1007/s13389-012-0027-1
+package ed448
+
+import (
+    "bytes"
+    "crypto"
+    cryptoRand "crypto/rand"
+    "crypto/subtle"
+    "errors"
+    "fmt"
+    "io"
+    "strconv"
+
+    "github.com/cloudflare/circl/ecc/goldilocks"
+    "github.com/cloudflare/circl/internal/sha3"
+    "github.com/cloudflare/circl/sign"
+)
+
+const (
+    // ContextMaxSize is the maximum length (in bytes) allowed for context.
+    ContextMaxSize = 255
+    // PublicKeySize is the length in bytes of Ed448 public keys.
+    PublicKeySize = 57
+    // PrivateKeySize is the length in bytes of Ed448 private keys.
+    PrivateKeySize = 114
+    // SignatureSize is the length in bytes of signatures.
+    SignatureSize = 114
+    // SeedSize is the size, in bytes, of private key seeds. These are the
+    // private key representations used by RFC 8032.
+    SeedSize = 57
+)
+
+const (
+    paramB   = 456 / 8    // Size of keys in bytes.
+    hashSize = 2 * paramB // Size of the hash function's output.
+)
+
+// SignerOptions implements crypto.SignerOpts and augments it with parameters
+// that are specific to the Ed448 signature schemes.
+type SignerOptions struct {
+    // Hash must be crypto.Hash(0) for both Ed448 and Ed448Ph.
+    crypto.Hash
+
+    // Context is an optional domain separation string for signing.
+    // Its length must be at most 255 bytes.
+    Context string
+
+    // Scheme is an identifier for choosing a signature scheme.
+    Scheme SchemeID
+}
+
+// SchemeID is an identifier for each signature scheme.
+type SchemeID uint
+
+const (
+    ED448 SchemeID = iota
+    ED448Ph
+)
+
+// PublicKey is the type of Ed448 public keys.
+type PublicKey []byte
+
+// Equal reports whether pub and x have the same value.
+func (pub PublicKey) Equal(x crypto.PublicKey) bool {
+    xx, ok := x.(PublicKey)
+    return ok && bytes.Equal(pub, xx)
+}
+
+// PrivateKey is the type of Ed448 private keys. It implements crypto.Signer.
+type PrivateKey []byte
+
+// Equal reports whether priv and x have the same value.
+func (priv PrivateKey) Equal(x crypto.PrivateKey) bool {
+    xx, ok := x.(PrivateKey)
+    return ok && subtle.ConstantTimeCompare(priv, xx) == 1
+}
+
+// Public returns the PublicKey corresponding to priv.
+func (priv PrivateKey) Public() crypto.PublicKey {
+    publicKey := make([]byte, PublicKeySize)
+    copy(publicKey, priv[SeedSize:])
+    return PublicKey(publicKey)
+}
+
+// Seed returns the private key seed corresponding to priv. It is provided for
+// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
+// in this package.
+func (priv PrivateKey) Seed() []byte {
+    seed := make([]byte, SeedSize)
+    copy(seed, priv[:SeedSize])
+    return seed
+}
+
+func (priv PrivateKey) Scheme() sign.Scheme { return sch }
+
+func (pub PublicKey) Scheme() sign.Scheme { return sch }
+
+func (priv PrivateKey) MarshalBinary() (data []byte, err error) {
+    privateKey := make(PrivateKey, PrivateKeySize)
+    copy(privateKey, priv)
+    return privateKey, nil
+}
+
+func (pub PublicKey) MarshalBinary() (data []byte, err error) {
+    publicKey := make(PublicKey, PublicKeySize)
+    copy(publicKey, pub)
+    return publicKey, nil
+}
+
+// Sign creates a signature of a message given a key pair.
+// This function supports both signature variants defined in RFC-8032,
+// namely Ed448 (or pure EdDSA) and Ed448Ph.
+// opts.HashFunc() must return zero to select the Ed448 variant. This can
+// be achieved by passing crypto.Hash(0) as the value for opts.
+// Use a SignerOptions struct with Scheme set to ED448Ph to select the
+// Ed448Ph variant. The struct can also be used to pass an optional context
+// string for signing.
+func (priv PrivateKey) Sign(
+    rand io.Reader,
+    message []byte,
+    opts crypto.SignerOpts,
+) (signature []byte, err error) {
+    var ctx string
+    var scheme SchemeID
+
+    if o, ok := opts.(SignerOptions); ok {
+        ctx = o.Context
+        scheme = o.Scheme
+    }
+
+    switch {
+    case scheme == ED448 && opts.HashFunc() == crypto.Hash(0):
+        return Sign(priv, message, ctx), nil
+    case scheme == ED448Ph && opts.HashFunc() == crypto.Hash(0):
+        return SignPh(priv, message, ctx), nil
+    default:
+        return nil, errors.New("ed448: bad hash algorithm")
+    }
+}
+
+// GenerateKey generates a public/private key pair using entropy from rand.
+// If rand is nil, crypto/rand.Reader will be used.
+func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
+    if rand == nil {
+        rand = cryptoRand.Reader
+    }
+
+    seed := make(PrivateKey, SeedSize)
+    if _, err := io.ReadFull(rand, seed); err != nil {
+        return nil, nil, err
+    }
+
+    privateKey := NewKeyFromSeed(seed)
+    publicKey := make([]byte, PublicKeySize)
+    copy(publicKey, privateKey[SeedSize:])
+
+    return publicKey, privateKey, nil
+}
+
+// NewKeyFromSeed calculates a private key from a seed. It will panic if
+// len(seed) is not SeedSize. This function is provided for interoperability
+// with RFC 8032. RFC 8032's private keys correspond to seeds in this
+// package.
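+//
+// A minimal round-trip sketch (illustrative only, not part of the upstream
+// documentation; key derivation is deterministic, so a fixed seed always
+// yields the same key pair):
+//
+//	seed := make([]byte, SeedSize) // all-zero seed, for illustration only
+//	priv := NewKeyFromSeed(seed)
+//	pub := priv.Public().(PublicKey)
+//	sig := Sign(priv, []byte("msg"), "")
+//	ok := Verify(pub, []byte("msg"), sig, "") // ok == true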
+func NewKeyFromSeed(seed []byte) PrivateKey {
+    privateKey := make([]byte, PrivateKeySize)
+    newKeyFromSeed(privateKey, seed)
+    return privateKey
+}
+
+func newKeyFromSeed(privateKey, seed []byte) {
+    if l := len(seed); l != SeedSize {
+        panic("ed448: bad seed length: " + strconv.Itoa(l))
+    }
+
+    var h [hashSize]byte
+    H := sha3.NewShake256()
+    _, _ = H.Write(seed)
+    _, _ = H.Read(h[:])
+    s := &goldilocks.Scalar{}
+    deriveSecretScalar(s, h[:paramB])
+
+    copy(privateKey[:SeedSize], seed)
+    _ = goldilocks.Curve{}.ScalarBaseMult(s).ToBytes(privateKey[SeedSize:])
+}
+
+func signAll(signature []byte, privateKey PrivateKey, message, ctx []byte, preHash bool) {
+    if len(ctx) > ContextMaxSize {
+        panic(fmt.Errorf("ed448: bad context length: %d", len(ctx)))
+    }
+
+    H := sha3.NewShake256()
+    var PHM []byte
+
+    if preHash {
+        var h [64]byte
+        _, _ = H.Write(message)
+        _, _ = H.Read(h[:])
+        PHM = h[:]
+        H.Reset()
+    } else {
+        PHM = message
+    }
+
+    // 1. Hash the 57-byte private key using SHAKE256(x, 114).
+    var h [hashSize]byte
+    _, _ = H.Write(privateKey[:SeedSize])
+    _, _ = H.Read(h[:])
+    s := &goldilocks.Scalar{}
+    deriveSecretScalar(s, h[:paramB])
+    prefix := h[paramB:]
+
+    // 2. Compute SHAKE256(dom4(F, C) || prefix || PH(M), 114).
+    var rPM [hashSize]byte
+    H.Reset()
+
+    writeDom(&H, ctx, preHash)
+
+    _, _ = H.Write(prefix)
+    _, _ = H.Write(PHM)
+    _, _ = H.Read(rPM[:])
+
+    // 3. Compute the point [r]B.
+    r := &goldilocks.Scalar{}
+    r.FromBytes(rPM[:])
+    R := (&[paramB]byte{})[:]
+    if err := (goldilocks.Curve{}.ScalarBaseMult(r).ToBytes(R)); err != nil {
+        panic(err)
+    }
+    // 4. Compute SHAKE256(dom4(F, C) || R || A || PH(M), 114).
+    var hRAM [hashSize]byte
+    H.Reset()
+
+    writeDom(&H, ctx, preHash)
+
+    _, _ = H.Write(R)
+    _, _ = H.Write(privateKey[SeedSize:])
+    _, _ = H.Write(PHM)
+    _, _ = H.Read(hRAM[:])
+
+    // 5. Compute S = (r + k * s) mod order.
+    k := &goldilocks.Scalar{}
+    k.FromBytes(hRAM[:])
+    S := &goldilocks.Scalar{}
+    S.Mul(k, s)
+    S.Add(S, r)
+
+    // 6. The signature is the concatenation of R and S.
+    copy(signature[:paramB], R[:])
+    copy(signature[paramB:], S[:])
+}
+
+// Sign signs the message with priv and returns a signature.
+// This function supports the signature variant defined in RFC-8032: Ed448,
+// also known as the pure version of EdDSA.
+// It will panic if len(priv) is not PrivateKeySize.
+func Sign(priv PrivateKey, message []byte, ctx string) []byte {
+    signature := make([]byte, SignatureSize)
+    signAll(signature, priv, message, []byte(ctx), false)
+    return signature
+}
+
+// SignPh creates a signature of a message given a key pair.
+// This function supports the signature variant defined in RFC-8032: Ed448ph,
+// meaning it internally hashes the message using SHAKE-256.
+// A context string may be passed to this function; its length must be no
+// more than 255 bytes. It can be empty.
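+//
+// A short sketch (illustrative only; priv, pub, and bigMessage are
+// placeholders for a previously generated key pair and the data being
+// signed). The verifier must supply the same context string, otherwise
+// VerifyPh returns false:
+//
+//	sig := SignPh(priv, bigMessage, "file-signing-v1")
+//	ok := VerifyPh(pub, bigMessage, sig, "file-signing-v1")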
+func SignPh(priv PrivateKey, message []byte, ctx string) []byte {
+    signature := make([]byte, SignatureSize)
+    signAll(signature, priv, message, []byte(ctx), true)
+    return signature
+}
+
+func verify(public PublicKey, message, signature, ctx []byte, preHash bool) bool {
+    if len(public) != PublicKeySize ||
+        len(signature) != SignatureSize ||
+        len(ctx) > ContextMaxSize ||
+        !isLessThanOrder(signature[paramB:]) {
+        return false
+    }
+
+    P, err := goldilocks.FromBytes(public)
+    if err != nil {
+        return false
+    }
+
+    H := sha3.NewShake256()
+    var PHM []byte
+
+    if preHash {
+        var h [64]byte
+        _, _ = H.Write(message)
+        _, _ = H.Read(h[:])
+        PHM = h[:]
+        H.Reset()
+    } else {
+        PHM = message
+    }
+
+    var hRAM [hashSize]byte
+    R := signature[:paramB]
+
+    writeDom(&H, ctx, preHash)
+
+    _, _ = H.Write(R)
+    _, _ = H.Write(public)
+    _, _ = H.Write(PHM)
+    _, _ = H.Read(hRAM[:])
+
+    k := &goldilocks.Scalar{}
+    k.FromBytes(hRAM[:])
+    S := &goldilocks.Scalar{}
+    S.FromBytes(signature[paramB:])
+
+    encR := (&[paramB]byte{})[:]
+    P.Neg()
+    _ = goldilocks.Curve{}.CombinedMult(S, k, P).ToBytes(encR)
+    return bytes.Equal(R, encR)
+}
+
+// VerifyAny returns true if the signature is valid. Failure cases are an
+// invalid signature, or a public key that cannot be decoded.
+// This function supports both signature variants defined in RFC-8032,
+// namely Ed448 (or pure EdDSA) and Ed448Ph.
+// opts.HashFunc() must return zero; this can be achieved by passing
+// crypto.Hash(0) as the value for opts.
+// Use a SignerOptions struct to pass a context string for verification.
+func VerifyAny(public PublicKey, message, signature []byte, opts crypto.SignerOpts) bool {
+    var ctx string
+    var scheme SchemeID
+    if o, ok := opts.(SignerOptions); ok {
+        ctx = o.Context
+        scheme = o.Scheme
+    }
+
+    switch {
+    case scheme == ED448 && opts.HashFunc() == crypto.Hash(0):
+        return Verify(public, message, signature, ctx)
+    case scheme == ED448Ph && opts.HashFunc() == crypto.Hash(0):
+        return VerifyPh(public, message, signature, ctx)
+    default:
+        return false
+    }
+}
+
+// Verify returns true if the signature is valid. Failure cases are an
+// invalid signature, or a public key that cannot be decoded.
+// This function supports the signature variant defined in RFC-8032: Ed448,
+// also known as the pure version of EdDSA.
+func Verify(public PublicKey, message, signature []byte, ctx string) bool {
+    return verify(public, message, signature, []byte(ctx), false)
+}
+
+// VerifyPh returns true if the signature is valid. Failure cases are an
+// invalid signature, or a public key that cannot be decoded.
+// This function supports the signature variant defined in RFC-8032: Ed448ph,
+// meaning it internally hashes the message using SHAKE-256.
+// A context string may be passed to this function; its length must be no
+// more than 255 bytes. It can be empty.
+func VerifyPh(public PublicKey, message, signature []byte, ctx string) bool {
+    return verify(public, message, signature, []byte(ctx), true)
+}
+
+func deriveSecretScalar(s *goldilocks.Scalar, h []byte) {
+    h[0] &= 0xFC       // The two least significant bits of the first octet are cleared,
+    h[paramB-1] = 0x00 // all eight bits of the last octet are cleared, and
+    h[paramB-2] |= 0x80 // the highest bit of the second-to-last octet is set.
+    s.FromBytes(h[:paramB])
+}
+
+// isLessThanOrder returns true if 0 <= x < order and the last byte of x is zero.
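+// (Scalars here are encoded into 57 bytes, while the Goldilocks group order
+// is smaller than 2^446, so the final byte of any canonical encoding of S is
+// necessarily zero.)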
+func isLessThanOrder(x []byte) bool { + order := goldilocks.Curve{}.Order() + i := len(order) - 1 + for i > 0 && x[i] == order[i] { + i-- + } + return x[paramB-1] == 0 && x[i] < order[i] +} + +func writeDom(h io.Writer, ctx []byte, preHash bool) { + dom4 := "SigEd448" + _, _ = h.Write([]byte(dom4)) + + if preHash { + _, _ = h.Write([]byte{byte(0x01), byte(len(ctx))}) + } else { + _, _ = h.Write([]byte{byte(0x00), byte(len(ctx))}) + } + _, _ = h.Write(ctx) +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed448/signapi.go b/vendor/github.com/cloudflare/circl/sign/ed448/signapi.go new file mode 100644 index 00000000..22da8bc0 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed448/signapi.go @@ -0,0 +1,87 @@ +package ed448 + +import ( + "crypto/rand" + "encoding/asn1" + + "github.com/cloudflare/circl/sign" +) + +var sch sign.Scheme = &scheme{} + +// Scheme returns a signature interface. +func Scheme() sign.Scheme { return sch } + +type scheme struct{} + +func (*scheme) Name() string { return "Ed448" } +func (*scheme) PublicKeySize() int { return PublicKeySize } +func (*scheme) PrivateKeySize() int { return PrivateKeySize } +func (*scheme) SignatureSize() int { return SignatureSize } +func (*scheme) SeedSize() int { return SeedSize } +func (*scheme) TLSIdentifier() uint { return 0x0808 } +func (*scheme) SupportsContext() bool { return true } +func (*scheme) Oid() asn1.ObjectIdentifier { + return asn1.ObjectIdentifier{1, 3, 101, 113} +} + +func (*scheme) GenerateKey() (sign.PublicKey, sign.PrivateKey, error) { + return GenerateKey(rand.Reader) +} + +func (*scheme) Sign( + sk sign.PrivateKey, + message []byte, + opts *sign.SignatureOpts, +) []byte { + priv, ok := sk.(PrivateKey) + if !ok { + panic(sign.ErrTypeMismatch) + } + ctx := "" + if opts != nil { + ctx = opts.Context + } + return Sign(priv, message, ctx) +} + +func (*scheme) Verify( + pk sign.PublicKey, + message, signature []byte, + opts *sign.SignatureOpts, +) bool { + pub, ok := pk.(PublicKey) + if !ok { + panic(sign.ErrTypeMismatch) + } + ctx := "" + if opts != nil { + ctx = opts.Context + } + return Verify(pub, message, signature, ctx) +} + +func (*scheme) DeriveKey(seed []byte) (sign.PublicKey, sign.PrivateKey) { + privateKey := NewKeyFromSeed(seed) + publicKey := make(PublicKey, PublicKeySize) + copy(publicKey, privateKey[SeedSize:]) + return publicKey, privateKey +} + +func (*scheme) UnmarshalBinaryPublicKey(buf []byte) (sign.PublicKey, error) { + if len(buf) < PublicKeySize { + return nil, sign.ErrPubKeySize + } + pub := make(PublicKey, PublicKeySize) + copy(pub, buf[:PublicKeySize]) + return pub, nil +} + +func (*scheme) UnmarshalBinaryPrivateKey(buf []byte) (sign.PrivateKey, error) { + if len(buf) < PrivateKeySize { + return nil, sign.ErrPrivKeySize + } + priv := make(PrivateKey, PrivateKeySize) + copy(priv, buf[:PrivateKeySize]) + return priv, nil +} diff --git a/vendor/github.com/cloudflare/circl/sign/sign.go b/vendor/github.com/cloudflare/circl/sign/sign.go new file mode 100644 index 00000000..13b20fa4 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/sign.go @@ -0,0 +1,110 @@ +// Package sign provides unified interfaces for signature schemes. +// +// A register of schemes is available in the package +// +// github.com/cloudflare/circl/sign/schemes +package sign + +import ( + "crypto" + "encoding" + "errors" +) + +type SignatureOpts struct { + // If non-empty, includes the given context in the signature if supported + // and will cause an error during signing otherwise. 
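+    // For schemes that support contexts (for example, Ed448 in this
+    // package), a signature produced under one context will not verify
+    // under another. An illustrative value (the string is a made-up
+    // example):
+    //
+    //	opts := &sign.SignatureOpts{Context: "MyApp v1 signing"}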
+    Context string
+}
+
+// A public key is used to verify a signature created with the corresponding
+// private key.
+type PublicKey interface {
+    // Returns the signature scheme for this public key.
+    Scheme() Scheme
+    Equal(crypto.PublicKey) bool
+    encoding.BinaryMarshaler
+    crypto.PublicKey
+}
+
+// A private key allows one to create signatures.
+type PrivateKey interface {
+    // Returns the signature scheme for this private key.
+    Scheme() Scheme
+    Equal(crypto.PrivateKey) bool
+    // For compatibility with the Go standard library.
+    crypto.Signer
+    crypto.PrivateKey
+    encoding.BinaryMarshaler
+}
+
+// A Scheme represents a specific instance of a signature scheme.
+type Scheme interface {
+    // Name of the scheme.
+    Name() string
+
+    // GenerateKey creates a new key pair.
+    GenerateKey() (PublicKey, PrivateKey, error)
+
+    // Creates a signature using the PrivateKey on the given message and
+    // returns the signature. opts are additional options which can be nil.
+    //
+    // Panics if the key is nil or of the wrong type, or if a context is
+    // given in opts but not supported.
+    Sign(sk PrivateKey, message []byte, opts *SignatureOpts) []byte
+
+    // Checks whether the given signature is a valid signature created by
+    // the private key corresponding to the given public key on the
+    // given message. opts are additional options which can be nil.
+    //
+    // Panics if the key is nil or of the wrong type, or if a context is
+    // given in opts but not supported.
+    Verify(pk PublicKey, message []byte, signature []byte, opts *SignatureOpts) bool
+
+    // Deterministically derives a key pair from a seed. If you're unsure,
+    // you're better off using GenerateKey().
+    //
+    // Panics if seed is not of length SeedSize().
+    DeriveKey(seed []byte) (PublicKey, PrivateKey)
+
+    // Unmarshals a PublicKey from the provided buffer.
+    UnmarshalBinaryPublicKey([]byte) (PublicKey, error)
+
+    // Unmarshals a PrivateKey from the provided buffer.
+    UnmarshalBinaryPrivateKey([]byte) (PrivateKey, error)
+
+    // Size of binary-marshalled public keys.
+    PublicKeySize() int
+
+    // Size of binary-marshalled private keys.
+    PrivateKeySize() int
+
+    // Size of signatures.
+    SignatureSize() int
+
+    // Size of seeds.
+    SeedSize() int
+
+    // Returns whether contexts are supported.
+    SupportsContext() bool
+}
+
+var (
+    // ErrTypeMismatch is the error used if the types of, for instance,
+    // private and public keys don't match.
+    ErrTypeMismatch = errors.New("types mismatch")
+
+    // ErrSeedSize is the error used if the provided seed is of the wrong
+    // size.
+    ErrSeedSize = errors.New("wrong seed size")
+
+    // ErrPubKeySize is the error used if the provided public key is of
+    // the wrong size.
+    ErrPubKeySize = errors.New("wrong size for public key")
+
+    // ErrPrivKeySize is the error used if the provided private key is of
+    // the wrong size.
+    ErrPrivKeySize = errors.New("wrong size for private key")
+
+    // ErrContextNotSupported is the error used if a context is not
+    // supported.
+ ErrContextNotSupported = errors.New("context not supported") +) diff --git a/vendor/github.com/cyphar/filepath-securejoin/BUILD.bazel b/vendor/github.com/cyphar/filepath-securejoin/BUILD.bazel new file mode 100644 index 00000000..f681bfcf --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/BUILD.bazel @@ -0,0 +1,28 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "filepath-securejoin", + srcs = [ + "join.go", + "lookup_linux.go", + "mkdir_linux.go", + "open_linux.go", + "openat2_linux.go", + "openat_linux.go", + "procfs_linux.go", + "testing_mocks_linux.go", + "vfs.go", + ], + importmap = "peridot.resf.org/vendor/github.com/cyphar/filepath-securejoin", + importpath = "github.com/cyphar/filepath-securejoin", + visibility = ["//visibility:public"], + deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/golang.org/x/sys/unix", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/golang.org/x/sys/unix", + ], + "//conditions:default": [], + }), +) diff --git a/vendor/github.com/cyphar/filepath-securejoin/LICENSE b/vendor/github.com/cyphar/filepath-securejoin/LICENSE new file mode 100644 index 00000000..cb1ab88d --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/LICENSE @@ -0,0 +1,28 @@ +Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +Copyright (C) 2017-2024 SUSE LLC. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
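The `sign.Scheme` interface vendored above is what lets callers treat the
Ed25519 and Ed448 packages interchangeably. A minimal scheme-generic sketch
(illustrative only; written against the vendored packages added in this diff):

```go
package main

import (
	"fmt"

	"github.com/cloudflare/circl/sign"
	"github.com/cloudflare/circl/sign/ed448"
)

func main() {
	var scheme sign.Scheme = ed448.Scheme()

	pub, priv, err := scheme.GenerateKey()
	if err != nil {
		panic(err)
	}

	msg := []byte("hello")
	// Ed448 supports context strings; the ed25519 scheme would panic with
	// sign.ErrContextNotSupported on a non-empty context.
	opts := &sign.SignatureOpts{Context: "example"}

	sig := scheme.Sign(priv, msg, opts)
	fmt.Println(scheme.Verify(pub, msg, sig, opts)) // prints: true
}
```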
diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md new file mode 100644 index 00000000..253956f8 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/README.md @@ -0,0 +1,168 @@ +## `filepath-securejoin` ## + +[![Build Status](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml/badge.svg)](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml) + +### Old API ### + +This library was originally just an implementation of `SecureJoin` which was +[intended to be included in the Go standard library][go#20126] as a safer +`filepath.Join` that would restrict the path lookup to be inside a root +directory. + +The implementation was based on code that existed in several container +runtimes. Unfortunately, this API is **fundamentally unsafe** against attackers +that can modify path components after `SecureJoin` returns and before the +caller uses the path, allowing for some fairly trivial TOCTOU attacks. + +`SecureJoin` (and `SecureJoinVFS`) are still provided by this library to +support legacy users, but new users are strongly suggested to avoid using +`SecureJoin` and instead use the [new api](#new-api) or switch to +[libpathrs][libpathrs]. + +With the above limitations in mind, this library guarantees the following: + +* If no error is set, the resulting string **must** be a child path of + `root` and will not contain any symlink path components (they will all be + expanded). + +* When expanding symlinks, all symlink path components **must** be resolved + relative to the provided root. In particular, this can be considered a + userspace implementation of how `chroot(2)` operates on file paths. Note that + these symlinks will **not** be expanded lexically (`filepath.Clean` is not + called on the input before processing). + +* Non-existent path components are unaffected by `SecureJoin` (similar to + `filepath.EvalSymlinks`'s semantics). + +* The returned path will always be `filepath.Clean`ed and thus not contain any + `..` components. + +A (trivial) implementation of this function on GNU/Linux systems could be done +with the following (note that this requires root privileges and is far more +opaque than the implementation in this library, and also requires that +`readlink` is inside the `root` path and is trustworthy): + +```go +package securejoin + +import ( + "os/exec" + "path/filepath" +) + +func SecureJoin(root, unsafePath string) (string, error) { + unsafePath = string(filepath.Separator) + unsafePath + cmd := exec.Command("chroot", root, + "readlink", "--canonicalize-missing", "--no-newline", unsafePath) + output, err := cmd.CombinedOutput() + if err != nil { + return "", err + } + expanded := string(output) + return filepath.Join(root, expanded), nil +} +``` + +[libpathrs]: https://github.com/openSUSE/libpathrs +[go#20126]: https://github.com/golang/go/issues/20126 + +### New API ### + +While we recommend users switch to [libpathrs][libpathrs] as soon as it has a +stable release, some methods implemented by libpathrs have been ported to this +library to ease the transition. These APIs are only supported on Linux. + +These APIs are implemented such that `filepath-securejoin` will +opportunistically use certain newer kernel APIs that make these operations far +more secure. 
In particular: + +* All of the lookup operations will use [`openat2`][openat2.2] on new enough + kernels (Linux 5.6 or later) to restrict lookups through magic-links and + bind-mounts (for certain operations) and to make use of `RESOLVE_IN_ROOT` to + efficiently resolve symlinks within a rootfs. + +* The APIs provide hardening against a malicious `/proc` mount to either detect + or avoid being tricked by a `/proc` that is not legitimate. This is done + using [`openat2`][openat2.2] for all users, and privileged users will also be + further protected by using [`fsopen`][fsopen.2] and [`open_tree`][open_tree.2] + (Linux 4.18 or later). + +[openat2.2]: https://www.man7.org/linux/man-pages/man2/openat2.2.html +[fsopen.2]: https://github.com/brauner/man-pages-md/blob/main/fsopen.md +[open_tree.2]: https://github.com/brauner/man-pages-md/blob/main/open_tree.md + +#### `OpenInRoot` #### + +```go +func OpenInRoot(root, unsafePath string) (*os.File, error) +func OpenatInRoot(root *os.File, unsafePath string) (*os.File, error) +func Reopen(handle *os.File, flags int) (*os.File, error) +``` + +`OpenInRoot` is a much safer version of + +```go +path, err := securejoin.SecureJoin(root, unsafePath) +file, err := os.OpenFile(path, unix.O_PATH|unix.O_CLOEXEC) +``` + +that protects against various race attacks that could lead to serious security +issues, depending on the application. Note that the returned `*os.File` is an +`O_PATH` file descriptor, which is quite restricted. Callers will probably need +to use `Reopen` to get a more usable handle (this split is done to provide +useful features like PTY spawning and to avoid users accidentally opening bad +inodes that could cause a DoS). + +Callers need to be careful in how they use the returned `*os.File`. Usually it +is only safe to operate on the handle directly, and it is very easy to create a +security issue. [libpathrs][libpathrs] provides far more helpers to make using +these handles safer -- there is currently no plan to port them to +`filepath-securejoin`. + +`OpenatInRoot` is like `OpenInRoot` except that the root is provided using an +`*os.File`. This allows you to ensure that multiple `OpenatInRoot` (or +`MkdirAllHandle`) calls are operating on the same rootfs. + +> **NOTE**: Unlike `SecureJoin`, `OpenInRoot` will error out as soon as it hits +> a dangling symlink or non-existent path. This is in contrast to `SecureJoin` +> which treated non-existent components as though they were real directories, +> and would allow for partial resolution of dangling symlinks. These behaviours +> are at odds with how Linux treats non-existent paths and dangling symlinks, +> and so these are no longer allowed. + +#### `MkdirAll` #### + +```go +func MkdirAll(root, unsafePath string, mode int) error +func MkdirAllHandle(root *os.File, unsafePath string, mode int) (*os.File, error) +``` + +`MkdirAll` is a much safer version of + +```go +path, err := securejoin.SecureJoin(root, unsafePath) +err = os.MkdirAll(path, mode) +``` + +that protects against the same kinds of races that `OpenInRoot` protects +against. + +`MkdirAllHandle` is like `MkdirAll` except that the root is provided using an +`*os.File` (the reason for this is the same as with `OpenatInRoot`) and an +`*os.File` of the final created directory is returned (this directory is +guaranteed to be effectively identical to the directory created by +`MkdirAllHandle`, which is not possible to ensure by just using `OpenatInRoot` +after `MkdirAll`). 
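+
+As a rough sketch (the rootfs path, names, and modes here are illustrative),
+chaining these calls against a single root handle looks like this:
+
+```go
+root, err := os.OpenFile("/srv/rootfs", unix.O_PATH|unix.O_CLOEXEC, 0)
+// ... handle err ...
+defer root.Close()
+
+// Safely create /srv/rootfs/a/b/c and get a handle to the final directory.
+dir, err := securejoin.MkdirAllHandle(root, "a/b/c", 0o755)
+// ... handle err ...
+defer dir.Close()
+
+// Look the directory up again, scoped to the same root handle, and reopen
+// it as a usable (non-O_PATH) file descriptor.
+handle, err := securejoin.OpenatInRoot(root, "a/b/c")
+// ... handle err ...
+file, err := securejoin.Reopen(handle, unix.O_RDONLY|unix.O_DIRECTORY)
+// ... handle err ...
+```
+
+Because every operation is relative to the same `root` file descriptor, a
+concurrent attacker cannot swap the root (or a parent directory) out from
+under the sequence, which is exactly the race that `SecureJoin` cannot
+defend against.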
+ +> **NOTE**: Unlike `SecureJoin`, `MkdirAll` will error out as soon as it hits +> a dangling symlink or non-existent path. This is in contrast to `SecureJoin` +> which treated non-existent components as though they were real directories, +> and would allow for partial resolution of dangling symlinks. These behaviours +> are at odds with how Linux treats non-existent paths and dangling symlinks, +> and so these are no longer allowed. This means that `MkdirAll` will not +> create non-existent directories referenced by a dangling symlink. + +### License ### + +The license of this project is the same as Go, which is a BSD 3-clause license +available in the `LICENSE` file. diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION new file mode 100644 index 00000000..0d91a54c --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION @@ -0,0 +1 @@ +0.3.0 diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go new file mode 100644 index 00000000..bd86a48b --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/join.go @@ -0,0 +1,130 @@ +// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +// Copyright (C) 2017-2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package securejoin is an implementation of the hopefully-soon-to-be-included +// SecureJoin helper that is meant to be part of the "path/filepath" package. +// The purpose of this project is to provide a PoC implementation to make the +// SecureJoin proposal (https://github.com/golang/go/issues/20126) more +// tangible. +package securejoin + +import ( + "errors" + "os" + "path/filepath" + "strings" + "syscall" +) + +const maxSymlinkLimit = 255 + +// IsNotExist tells you if err is an error that implies that either the path +// accessed does not exist (or path components don't exist). This is +// effectively a more broad version of os.IsNotExist. +func IsNotExist(err error) bool { + // Check that it's not actually an ENOTDIR, which in some cases is a more + // convoluted case of ENOENT (usually involving weird paths). + return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.ENOENT) +} + +// SecureJoinVFS joins the two given path components (similar to Join) except +// that the returned path is guaranteed to be scoped inside the provided root +// path (when evaluated). Any symbolic links in the path are evaluated with the +// given root treated as the root of the filesystem, similar to a chroot. The +// filesystem state is evaluated through the given VFS interface (if nil, the +// standard os.* family of functions are used). +// +// Note that the guarantees provided by this function only apply if the path +// components in the returned string are not modified (in other words are not +// replaced with symlinks on the filesystem) after this function has returned. +// Such a symlink race is necessarily out-of-scope of SecureJoin. +// +// NOTE: Due to the above limitation, Linux users are strongly encouraged to +// use OpenInRoot instead, which does safely protect against these kinds of +// attacks. There is no way to solve this problem with SecureJoinVFS because +// the API is fundamentally wrong (you cannot return a "safe" path string and +// guarantee it won't be modified afterwards). 
+// +// Volume names in unsafePath are always discarded, regardless if they are +// provided via direct input or when evaluating symlinks. Therefore: +// +// "C:\Temp" + "D:\path\to\file.txt" results in "C:\Temp\path\to\file.txt" +func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { + // Use the os.* VFS implementation if none was specified. + if vfs == nil { + vfs = osVFS{} + } + + unsafePath = filepath.FromSlash(unsafePath) + var ( + currentPath string + remainingPath = unsafePath + linksWalked int + ) + for remainingPath != "" { + if v := filepath.VolumeName(remainingPath); v != "" { + remainingPath = remainingPath[len(v):] + } + + // Get the next path component. + var part string + if i := strings.IndexRune(remainingPath, filepath.Separator); i == -1 { + part, remainingPath = remainingPath, "" + } else { + part, remainingPath = remainingPath[:i], remainingPath[i+1:] + } + + // Apply the component lexically to the path we are building. + // currentPath does not contain any symlinks, and we are lexically + // dealing with a single component, so it's okay to do a filepath.Clean + // here. + nextPath := filepath.Join(string(filepath.Separator), currentPath, part) + if nextPath == string(filepath.Separator) { + currentPath = "" + continue + } + fullPath := root + string(filepath.Separator) + nextPath + + // Figure out whether the path is a symlink. + fi, err := vfs.Lstat(fullPath) + if err != nil && !IsNotExist(err) { + return "", err + } + // Treat non-existent path components the same as non-symlinks (we + // can't do any better here). + if IsNotExist(err) || fi.Mode()&os.ModeSymlink == 0 { + currentPath = nextPath + continue + } + + // It's a symlink, so get its contents and expand it by prepending it + // to the yet-unparsed path. + linksWalked++ + if linksWalked > maxSymlinkLimit { + return "", &os.PathError{Op: "SecureJoin", Path: root + string(filepath.Separator) + unsafePath, Err: syscall.ELOOP} + } + + dest, err := vfs.Readlink(fullPath) + if err != nil { + return "", err + } + remainingPath = dest + string(filepath.Separator) + remainingPath + // Absolute symlinks reset any work we've already done. + if filepath.IsAbs(dest) { + currentPath = "" + } + } + + // There should be no lexical components like ".." left in the path here, + // but for safety clean up the path before joining it to the root. + finalPath := filepath.Join(string(filepath.Separator), currentPath) + return filepath.Join(root, finalPath), nil +} + +// SecureJoin is a wrapper around SecureJoinVFS that just uses the os.* library +// of functions as the VFS. If in doubt, use this function over SecureJoinVFS. +func SecureJoin(root, unsafePath string) (string, error) { + return SecureJoinVFS(root, unsafePath, nil) +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go b/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go new file mode 100644 index 00000000..140ac18f --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go @@ -0,0 +1,380 @@ +//go:build linux + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "errors" + "fmt" + "os" + "path" + "path/filepath" + "slices" + "strings" + + "golang.org/x/sys/unix" +) + +type symlinkStackEntry struct { + // (dir, remainingPath) is what we would've returned if the link didn't + // exist. This matches what openat2(RESOLVE_IN_ROOT) would return in + // this case. 
+ dir *os.File + remainingPath string + // linkUnwalked is the remaining path components from the original + // Readlink which we have yet to walk. When this slice is empty, we + // drop the link from the stack. + linkUnwalked []string +} + +func (se symlinkStackEntry) String() string { + return fmt.Sprintf("<%s>/%s [->%s]", se.dir.Name(), se.remainingPath, strings.Join(se.linkUnwalked, "/")) +} + +func (se symlinkStackEntry) Close() { + _ = se.dir.Close() +} + +type symlinkStack []*symlinkStackEntry + +func (s symlinkStack) IsEmpty() bool { + return len(s) == 0 +} + +func (s *symlinkStack) Close() { + for _, link := range *s { + link.Close() + } + // TODO: Switch to clear once we switch to Go 1.21. + *s = nil +} + +var ( + errEmptyStack = errors.New("[internal] stack is empty") + errBrokenSymlinkStack = errors.New("[internal error] broken symlink stack") +) + +func (s *symlinkStack) popPart(part string) error { + if s.IsEmpty() { + // If there is nothing in the symlink stack, then the part was from the + // real path provided by the user, and this is a no-op. + return errEmptyStack + } + tailEntry := (*s)[len(*s)-1] + + // Double-check that we are popping the component we expect. + if len(tailEntry.linkUnwalked) == 0 { + return fmt.Errorf("%w: trying to pop component %q of empty stack entry %s", errBrokenSymlinkStack, part, tailEntry) + } + headPart := tailEntry.linkUnwalked[0] + if headPart != part { + return fmt.Errorf("%w: trying to pop component %q but the last stack entry is %s (%q)", errBrokenSymlinkStack, part, tailEntry, headPart) + } + + // Drop the component, but keep the entry around in case we are dealing + // with a "tail-chained" symlink. + tailEntry.linkUnwalked = tailEntry.linkUnwalked[1:] + return nil +} + +func (s *symlinkStack) PopPart(part string) error { + if err := s.popPart(part); err != nil { + if errors.Is(err, errEmptyStack) { + // Skip empty stacks. + err = nil + } + return err + } + + // Clean up any of the trailing stack entries that are empty. + for lastGood := len(*s) - 1; lastGood >= 0; lastGood-- { + entry := (*s)[lastGood] + if len(entry.linkUnwalked) > 0 { + break + } + entry.Close() + (*s) = (*s)[:lastGood] + } + return nil +} + +func (s *symlinkStack) push(dir *os.File, remainingPath, linkTarget string) error { + // Split the link target and clean up any "" parts. + linkTargetParts := slices.DeleteFunc( + strings.Split(linkTarget, "/"), + func(part string) bool { return part == "" }) + + // Don't add a no-op link to the stack. You can't create a symlink with an + // empty target, and if the target is just "/", partialLookupInRoot has + // already jumped to the root and so there's nothing more to do. + if len(linkTargetParts) == 0 { + return nil + } + + // Copy the directory so the caller doesn't close our copy. + dirCopy, err := dupFile(dir) + if err != nil { + return err + } + + // Add to the stack. + *s = append(*s, &symlinkStackEntry{ + dir: dirCopy, + remainingPath: remainingPath, + linkUnwalked: linkTargetParts, + }) + return nil +} + +func (s *symlinkStack) SwapLink(linkPart string, dir *os.File, remainingPath, linkTarget string) error { + // If we are currently inside a symlink resolution, remove the symlink + // component from the last symlink entry, but don't remove the entry even + // if it's empty. If we are a "tail-chained" symlink (a trailing symlink we + // hit during a symlink resolution) we need to keep the old symlink until + // we finish the resolution.
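+ // For instance (an illustrative walk, not upstream documentation): while
+ // resolving "a/link" where "link" is a symlink to "b/c", SwapLink("link", ...)
+ // pops the "link" part from the current stack entry (if any) and pushes a new
+ // entry whose linkUnwalked is ["b", "c"]; later PopPart("b") and PopPart("c")
+ // calls drop those parts as the real components are walked.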
+ if err := s.popPart(linkPart); err != nil { + if !errors.Is(err, errEmptyStack) { + return err + } + // Push the component regardless of whether the stack was empty. + } + return s.push(dir, remainingPath, linkTarget) +} + +func (s *symlinkStack) PopTopSymlink() (*os.File, string, bool) { + if s.IsEmpty() { + return nil, "", false + } + tailEntry := (*s)[0] + *s = (*s)[1:] + return tailEntry.dir, tailEntry.remainingPath, true +} + +// partialLookupInRoot tries to look up as much of the requested path as possible +// within the provided root (a-la RESOLVE_IN_ROOT) and opens the final existing +// component of the requested path, returning a file handle to the final +// existing component and a string containing the remaining path components. +func partialLookupInRoot(root *os.File, unsafePath string) (_ *os.File, _ string, Err error) { + unsafePath = filepath.ToSlash(unsafePath) // noop + + // This is very similar to SecureJoin, except that we operate on the + // components using file descriptors. We then return the last component we + // managed to open, along with the remaining path components not opened. + + // Try to use openat2 if possible. + if hasOpenat2() { + return partialLookupOpenat2(root, unsafePath) + } + + // Get the "actual" root path from /proc/self/fd. This is necessary if the + // root is some magic-link like /proc/$pid/root, in which case we want to + // make sure when we do checkProcSelfFdPath that we are using the correct + // root path. + logicalRootPath, err := procSelfFdReadlink(root) + if err != nil { + return nil, "", fmt.Errorf("get real root path: %w", err) + } + + currentDir, err := dupFile(root) + if err != nil { + return nil, "", fmt.Errorf("clone root fd: %w", err) + } + defer func() { + if Err != nil { + _ = currentDir.Close() + } + }() + + // symlinkStack is used to emulate how openat2(RESOLVE_IN_ROOT) treats + // dangling symlinks. If we hit a non-existent path while resolving a + // symlink, we need to return the (dir, remainingPath) that we had when we + // hit the symlink (treating the symlink as though it were a regular file). + // The set of (dir, remainingPath) pairs is stored within the symlinkStack + // and we add and remove parts when we hit symlink and non-symlink + // components respectively. We need a stack because of recursive symlinks + // (symlinks that contain symlink components in their target). + // + // Note that the stack is ONLY used for book-keeping. All of the actual + // path walking logic is still based on currentPath/remainingPath and + // currentDir (as in SecureJoin). + var symlinkStack symlinkStack + defer symlinkStack.Close() + + var ( + linksWalked int + currentPath string + remainingPath = unsafePath + ) + for remainingPath != "" { + // Save the current remaining path so that if the part is not real we + // can return the path including the component. + oldRemainingPath := remainingPath + + // Get the next path component. + var part string + if i := strings.IndexByte(remainingPath, '/'); i == -1 { + part, remainingPath = remainingPath, "" + } else { + part, remainingPath = remainingPath[:i], remainingPath[i+1:] + } + // Skip any "//" components. + if part == "" { + continue + } + + // Apply the component lexically to the path we are building. + // currentPath does not contain any symlinks, and we are lexically + // dealing with a single component, so it's okay to do a filepath.Clean + // here.
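+ // (For example, currentPath "/a/b" with part ".." yields nextPath "/a",
+ // and with part "c" yields nextPath "/a/b/c".)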
+ nextPath := path.Join("/", currentPath, part) + // If we logically hit the root, just clone the root rather than + // opening the part and doing all of the other checks. + if nextPath == "/" { + if err := symlinkStack.PopPart(part); err != nil { + return nil, "", fmt.Errorf("walking into root with part %q failed: %w", part, err) + } + // Jump to root. + rootClone, err := dupFile(root) + if err != nil { + return nil, "", fmt.Errorf("clone root fd: %w", err) + } + _ = currentDir.Close() + currentDir = rootClone + currentPath = nextPath + continue + } + + // Try to open the next component. + nextDir, err := openatFile(currentDir, part, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0) + switch { + case err == nil: + st, err := nextDir.Stat() + if err != nil { + _ = nextDir.Close() + return nil, "", fmt.Errorf("stat component %q: %w", part, err) + } + + switch st.Mode() & os.ModeType { + case os.ModeDir: + // If we are dealing with a directory, simply walk into it. + _ = currentDir.Close() + currentDir = nextDir + currentPath = nextPath + + // The part was real, so drop it from the symlink stack. + if err := symlinkStack.PopPart(part); err != nil { + return nil, "", fmt.Errorf("walking into directory %q failed: %w", part, err) + } + + // If we are operating on a .., make sure we haven't escaped. + // We only have to check for ".." here because walking down + // into a regular component component cannot cause you to + // escape. This mirrors the logic in RESOLVE_IN_ROOT, except we + // have to check every ".." rather than only checking after a + // rename or mount on the system. + if part == ".." { + // Make sure the root hasn't moved. + if err := checkProcSelfFdPath(logicalRootPath, root); err != nil { + return nil, "", fmt.Errorf("root path moved during lookup: %w", err) + } + // Make sure the path is what we expect. + fullPath := logicalRootPath + nextPath + if err := checkProcSelfFdPath(fullPath, currentDir); err != nil { + return nil, "", fmt.Errorf("walking into %q had unexpected result: %w", part, err) + } + } + + case os.ModeSymlink: + // We don't need the handle anymore. + _ = nextDir.Close() + + // Unfortunately, we cannot readlink through our handle and so + // we need to do a separate readlinkat (which could race to + // give us an error if the attacker swapped the symlink with a + // non-symlink). + linkDest, err := readlinkatFile(currentDir, part) + if err != nil { + if errors.Is(err, unix.EINVAL) { + // The part was not a symlink, so assume that it's a + // regular file. It is possible for it to be a + // directory (if an attacker is swapping a directory + // and non-directory at this subpath) but erroring out + // here is better anyway. + err = fmt.Errorf("%w: path component %q is invalid: %w", errPossibleAttack, part, unix.ENOTDIR) + } + return nil, "", err + } + + linksWalked++ + if linksWalked > maxSymlinkLimit { + return nil, "", &os.PathError{Op: "partialLookupInRoot", Path: logicalRootPath + "/" + unsafePath, Err: unix.ELOOP} + } + + // Swap out the symlink's component for the link entry itself. + if err := symlinkStack.SwapLink(part, currentDir, oldRemainingPath, linkDest); err != nil { + return nil, "", fmt.Errorf("walking into symlink %q failed: push symlink: %w", part, err) + } + + // Update our logical remaining path. + remainingPath = linkDest + "/" + remainingPath + // Absolute symlinks reset any work we've already done. + if path.IsAbs(linkDest) { + // Jump to root. 
+ rootClone, err := dupFile(root) + if err != nil { + return nil, "", fmt.Errorf("clone root fd: %w", err) + } + _ = currentDir.Close() + currentDir = rootClone + currentPath = "/" + } + default: + // For any other file type, we can't walk further and so we've + // hit the end of the lookup. The handling is very similar to + // ENOENT from openat(2), except that we return a handle to the + // component we just walked into (and we drop the component + // from the symlink stack). + _ = currentDir.Close() + + // The part existed, so drop it from the symlink stack. + if err := symlinkStack.PopPart(part); err != nil { + return nil, "", fmt.Errorf("walking into non-directory %q failed: %w", part, err) + } + + // If there are any remaining components in the symlink stack, + // we are still within a symlink resolution and thus we hit a + // dangling symlink. So pretend that the first symlink in the + // stack we hit was an ENOENT (to match openat2). + if oldDir, remainingPath, ok := symlinkStack.PopTopSymlink(); ok { + _ = nextDir.Close() + return oldDir, remainingPath, nil + } + + // The current component exists, so return it. + return nextDir, remainingPath, nil + } + + case errors.Is(err, os.ErrNotExist): + // If there are any remaining components in the symlink stack, we + // are still within a symlink resolution and thus we hit a dangling + // symlink. So pretend that the first symlink in the stack we hit + // was an ENOENT (to match openat2). + if oldDir, remainingPath, ok := symlinkStack.PopTopSymlink(); ok { + _ = currentDir.Close() + return oldDir, remainingPath, nil + } + // We have hit a final component that doesn't exist, so we have our + // partial open result. Note that we have to use the OLD remaining + // path, since the lookup failed. + return currentDir, oldRemainingPath, nil + + default: + return nil, "", err + } + } + // All of the components existed! + return currentDir, "", nil +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go b/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go new file mode 100644 index 00000000..05e0bde9 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go @@ -0,0 +1,228 @@ +//go:build linux + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "slices" + "strings" + + "golang.org/x/sys/unix" +) + +var ( + errInvalidMode = errors.New("invalid permission mode") + errPossibleAttack = errors.New("possible attack detected") +) + +// MkdirAllHandle is equivalent to MkdirAll, except that it is safer to use in +// two respects: +// +// - The caller provides the root directory as an *os.File (preferably O_PATH) +// handle. This means that the caller can be sure which root directory is +// being used. Note that this can be emulated by using /proc/self/fd/... as +// the root path with MkdirAll. +// +// - Once all of the directories have been created, an *os.File (O_PATH) handle +// to the directory at unsafePath is returned to the caller. This is done in +// an effectively-race-free way (an attacker would only be able to swap the +// final directory component), which is not possible to emulate with +// MkdirAll. +// +// In addition, the returned handle is obtained far more efficiently than doing +// a brand new lookup of unsafePath (such as with SecureJoin or openat2) after +// doing MkdirAll. 
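+// A minimal sketch of the intended calling pattern (the "/srv/root" path here
+// is illustrative, not part of the API):
+//
+//	root, err := os.OpenFile("/srv/root", unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
+//	// ... handle err ...
+//	dir, err := MkdirAllHandle(root, "a/b/c", 0o755)
+//	// dir is an O_PATH handle to the directory at "a/b/c" inside root.
+//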
If you intend to open the directory after creating it, you +// should use MkdirAllHandle. +func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err error) { + // Make sure there are no os.FileMode bits set. + if mode&^0o7777 != 0 { + return nil, fmt.Errorf("%w for mkdir 0o%.3o", errInvalidMode, mode) + } + + // Try to open as much of the path as possible. + currentDir, remainingPath, err := partialLookupInRoot(root, unsafePath) + if err != nil { + return nil, fmt.Errorf("find existing subpath of %q: %w", unsafePath, err) + } + defer func() { + if Err != nil { + _ = currentDir.Close() + } + }() + + // If there is an attacker deleting directories as we walk into them, + // detect this proactively. Note this is guaranteed to detect if the + // attacker deleted any part of the tree up to currentDir. + // + // Once we walk into a dead directory, partialLookupInRoot would not be + // able to walk further down the tree (directories must be empty before + // they are deleted), and if the attacker has removed the entire tree we + // can be sure that anything that was originally inside a dead directory + // must also be deleted and thus is a dead directory in its own right. + // + // This is mostly a quality-of-life check, because mkdir will simply fail + // later if the attacker deletes the tree after this check. + if err := isDeadInode(currentDir); err != nil { + return nil, fmt.Errorf("finding existing subpath of %q: %w", unsafePath, err) + } + + // Re-open the path to match the O_DIRECTORY reopen loop later (so that we + // always return a non-O_PATH handle). We also check that we actually got a + // directory. + if reopenDir, err := Reopen(currentDir, unix.O_DIRECTORY|unix.O_CLOEXEC); errors.Is(err, unix.ENOTDIR) { + return nil, fmt.Errorf("cannot create subdirectories in %q: %w", currentDir.Name(), unix.ENOTDIR) + } else if err != nil { + return nil, fmt.Errorf("re-opening handle to %q: %w", currentDir.Name(), err) + } else { + currentDir = reopenDir + } + + remainingParts := strings.Split(remainingPath, string(filepath.Separator)) + if slices.Contains(remainingParts, "..") { + // The path contained ".." components after the end of the "real" + // components. We could try to safely resolve ".." here but that would + // add a bunch of extra logic for something that it's not clear even + // needs to be supported. So just return an error. + // + // If we do filepath.Clean(remainingPath) then we end up with the + // problem that ".." can erase a trailing dangling symlink and produce + // a path that doesn't quite match what the user asked for. + return nil, fmt.Errorf("%w: yet-to-be-created path %q contains '..' components", unix.ENOENT, remainingPath) + } + + // Make sure the mode doesn't have any type bits. + mode &^= unix.S_IFMT + // What properties do we expect any newly created directories to have? + var ( + // While umask(2) is a per-thread property, and thus this value could + // vary between threads, a functioning Go program would use LockOSThread + // for any thread with a different umask, so we don't need to LockOSThread + // for this entire mkdirat loop (if we are on the locked thread with a + // different umask, we are already locked and there's nothing for us to + // do -- and if not, then it doesn't matter which thread we run on and + // there's nothing for us to do). + expectedMode = uint32(unix.S_IFDIR | (mode &^ getUmask())) + + // We would want to get the fs[ug]id here, but we can't access those + // from userspace.
In practice, nobody uses setfs[ug]id() anymore, so + // just use the effective [ug]id (which is equivalent to the fs[ug]id + // for programs that don't use setfs[ug]id). + expectedUid = uint32(unix.Geteuid()) + expectedGid = uint32(unix.Getegid()) + ) + + // Create the remaining components. + for _, part := range remainingParts { + switch part { + case "", ".": + // Skip over no-op paths. + continue + } + + // NOTE: mkdir(2) will not follow trailing symlinks, so we can safely + // create the final component without worrying about symlink-exchange + // attacks. + if err := unix.Mkdirat(int(currentDir.Fd()), part, uint32(mode)); err != nil { + err = &os.PathError{Op: "mkdirat", Path: currentDir.Name() + "/" + part, Err: err} + // Make the error a bit nicer if the directory is dead. + if err2 := isDeadInode(currentDir); err2 != nil { + err = fmt.Errorf("%w (%w)", err, err2) + } + return nil, err + } + + // Get a handle to the next component. O_DIRECTORY means we don't need + // to use O_PATH. + var nextDir *os.File + if hasOpenat2() { + nextDir, err = openat2File(currentDir, part, &unix.OpenHow{ + Flags: unix.O_NOFOLLOW | unix.O_DIRECTORY | unix.O_CLOEXEC, + Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_NO_XDEV, + }) + } else { + nextDir, err = openatFile(currentDir, part, unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) + } + if err != nil { + return nil, err + } + _ = currentDir.Close() + currentDir = nextDir + + // Make sure that the directory matches what we expect. An attacker + // could have swapped the directory between us making it and opening + // it. There's no way for us to be sure that the directory is + // _precisely_ the same as the directory we created, but if we are in + // an empty directory with the same owner and mode as the one we + // created then there is nothing the attacker could do with this new + // directory that they couldn't do with the old one. + if stat, err := fstat(currentDir); err != nil { + return nil, fmt.Errorf("check newly created directory: %w", err) + } else { + if stat.Mode != expectedMode { + return nil, fmt.Errorf("%w: newly created directory %q has incorrect mode 0o%.3o (expected 0o%.3o)", errPossibleAttack, currentDir.Name(), stat.Mode, expectedMode) + } + if stat.Uid != expectedUid || stat.Gid != expectedGid { + return nil, fmt.Errorf("%w: newly created directory %q has incorrect owner %d:%d (expected %d:%d)", errPossibleAttack, currentDir.Name(), stat.Uid, stat.Gid, expectedUid, expectedGid) + } + // Check that the directory is empty. We only need to check for + // a single entry, and we should get EOF if the directory is + // empty. + _, err := currentDir.Readdirnames(1) + if !errors.Is(err, io.EOF) { + if err == nil { + err = fmt.Errorf("%w: newly created directory %q is non-empty", errPossibleAttack, currentDir.Name()) + } + return nil, fmt.Errorf("check if newly created directory %q is empty: %w", currentDir.Name(), err) + } + // Reset the offset. + _, _ = currentDir.Seek(0, unix.SEEK_SET) + } + } + return currentDir, nil +} + +// MkdirAll is a race-safe alternative to the Go stdlib's os.MkdirAll function, +// where the new directory is guaranteed to be within the root directory (if an +// attacker can move directories from inside the root to outside the root, the +// created directory tree might be outside of the root but the key constraint +// is that at no point will we walk outside of the directory tree we are +// creating).
+// +// Effectively, MkdirAll(root, unsafePath, mode) is equivalent to +// +// path, _ := securejoin.SecureJoin(root, unsafePath) +// err := os.MkdirAll(path, mode) +// +// But is much safer. The above implementation is unsafe because if an attacker +// can modify the filesystem tree between SecureJoin and MkdirAll, it is +// possible for MkdirAll to resolve unsafe symlink components and create +// directories outside of the root. +// +// If you plan to open the directory after you have created it or want to use +// an open directory handle as the root, you should use MkdirAllHandle instead. +// This function is a wrapper around MkdirAllHandle. +// +// NOTE: The mode argument must be set to the unix mode bits (unix.S_I...), not +// the Go generic mode bits (os.Mode...). +func MkdirAll(root, unsafePath string, mode int) error { + rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) + if err != nil { + return err + } + defer rootDir.Close() + + f, err := MkdirAllHandle(rootDir, unsafePath, mode) + if err != nil { + return err + } + _ = f.Close() + return nil +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/open_linux.go b/vendor/github.com/cyphar/filepath-securejoin/open_linux.go new file mode 100644 index 00000000..21700612 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/open_linux.go @@ -0,0 +1,83 @@ +//go:build linux + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "fmt" + "os" + + "golang.org/x/sys/unix" +) + +// OpenatInRoot is equivalent to OpenInRoot, except that the root is provided +// using an *os.File handle, to ensure that the correct root directory is used. +func OpenatInRoot(root *os.File, unsafePath string) (*os.File, error) { + handle, remainingPath, err := partialLookupInRoot(root, unsafePath) + if err != nil { + return nil, err + } + if remainingPath != "" { + _ = handle.Close() + return nil, &os.PathError{Op: "securejoin.OpenInRoot", Path: unsafePath, Err: unix.ENOENT} + } + return handle, nil +} + +// OpenInRoot safely opens the provided unsafePath within the root. +// Effectively, OpenInRoot(root, unsafePath) is equivalent to +// +// path, _ := securejoin.SecureJoin(root, unsafePath) +// handle, err := os.OpenFile(path, unix.O_PATH|unix.O_CLOEXEC, 0) +// +// But is much safer. The above implementation is unsafe because if an attacker +// can modify the filesystem tree between SecureJoin and OpenFile, it is +// possible for the returned file to be outside of the root. +// +// Note that the returned handle is an O_PATH handle, meaning that only a very +// limited set of operations will work on the handle. This is done to avoid +// accidentally opening an untrusted file that could cause issues (such as a +// disconnected TTY that could cause a DoS, or some other issue). In order to +// use the returned handle, you can "upgrade" it to a proper handle using +// Reopen. +func OpenInRoot(root, unsafePath string) (*os.File, error) { + rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) + if err != nil { + return nil, err + } + defer rootDir.Close() + return OpenatInRoot(rootDir, unsafePath) +} + +// Reopen takes an *os.File handle and re-opens it through /proc/self/fd.
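+// A common pairing (an illustrative sketch; the paths are hypothetical) is to
+// upgrade an O_PATH handle obtained from OpenInRoot:
+//
+//	handle, err := OpenInRoot("/srv/root", "etc/passwd")
+//	// ... handle err ...
+//	file, err := Reopen(handle, unix.O_RDONLY)
+//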
+// Reopen(file, flags) is effectively equivalent to +// +// fdPath := fmt.Sprintf("/proc/self/fd/%d", file.Fd()) +// os.OpenFile(fdPath, flags|unix.O_CLOEXEC, 0) +// +// But with some extra hardening to ensure that we are not tricked by a +// maliciously-configured /proc mount. While this attack scenario is not +// common, in container runtimes it is possible for higher-level runtimes to be +// tricked into configuring an unsafe /proc that can be used to attack file +// operations. See CVE-2019-19921 for more details. +func Reopen(handle *os.File, flags int) (*os.File, error) { + procRoot, err := getProcRoot() + if err != nil { + return nil, err + } + + flags |= unix.O_CLOEXEC + fdPath := fmt.Sprintf("fd/%d", handle.Fd()) + return doProcSelfMagiclink(procRoot, fdPath, func(procDirHandle *os.File, base string) (*os.File, error) { + // Rather than just wrapping openatFile, open-code it so we can copy + // handle.Name(). + reopenFd, err := unix.Openat(int(procDirHandle.Fd()), base, flags, 0) + if err != nil { + return nil, fmt.Errorf("reopen fd %d: %w", handle.Fd(), err) + } + return os.NewFile(uintptr(reopenFd), handle.Name()), nil + }) +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go b/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go new file mode 100644 index 00000000..fc93db86 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go @@ -0,0 +1,128 @@ +//go:build linux + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + "testing" + + "golang.org/x/sys/unix" +) + +var ( + hasOpenat2Bool bool + hasOpenat2Once sync.Once + + testingForceHasOpenat2 *bool +) + +func hasOpenat2() bool { + if testing.Testing() && testingForceHasOpenat2 != nil { + return *testingForceHasOpenat2 + } + hasOpenat2Once.Do(func() { + fd, err := unix.Openat2(unix.AT_FDCWD, ".", &unix.OpenHow{ + Flags: unix.O_PATH | unix.O_CLOEXEC, + Resolve: unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_IN_ROOT, + }) + if err == nil { + hasOpenat2Bool = true + _ = unix.Close(fd) + } + }) + return hasOpenat2Bool +} + +func scopedLookupShouldRetry(how *unix.OpenHow, err error) bool { + // RESOLVE_IN_ROOT (and RESOLVE_BENEATH) can return -EAGAIN if we resolve + // ".." while a mount or rename occurs anywhere on the system. This could + // happen spuriously, or as the result of an attacker trying to mess with + // us during lookup. + // + // In addition, scoped lookups have a "safety check" at the end of + // complete_walk which will return -EXDEV if the final path is not in the + // root. + return how.Resolve&(unix.RESOLVE_IN_ROOT|unix.RESOLVE_BENEATH) != 0 && + (errors.Is(err, unix.EAGAIN) || errors.Is(err, unix.EXDEV)) +} + +const scopedLookupMaxRetries = 10 + +func openat2File(dir *os.File, path string, how *unix.OpenHow) (*os.File, error) { + fullPath := dir.Name() + "/" + path + // Make sure we always set O_CLOEXEC. + how.Flags |= unix.O_CLOEXEC + var tries int + for tries < scopedLookupMaxRetries { + fd, err := unix.Openat2(int(dir.Fd()), path, how) + if err != nil { + if scopedLookupShouldRetry(how, err) { + // We retry a couple of times to avoid the spurious errors, and + // if we are being attacked then returning -EAGAIN is the best + // we can do.
+ tries++ + continue + } + return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: err} + } + // If we are using RESOLVE_IN_ROOT, the name we generated may be wrong. + // NOTE: The procRoot code MUST NOT use RESOLVE_IN_ROOT, otherwise + // you'll get infinite recursion here. + if how.Resolve&unix.RESOLVE_IN_ROOT == unix.RESOLVE_IN_ROOT { + if actualPath, err := rawProcSelfFdReadlink(fd); err == nil { + fullPath = actualPath + } + } + return os.NewFile(uintptr(fd), fullPath), nil + } + return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: errPossibleAttack} +} + +// partialLookupOpenat2 is an alternative implementation of +// partialLookupInRoot, using openat2(RESOLVE_IN_ROOT) to more safely get a +// handle to the deepest existing child of the requested path within the root. +func partialLookupOpenat2(root *os.File, unsafePath string) (*os.File, string, error) { + // TODO: Implement this as a git-bisect-like binary search. + + unsafePath = filepath.ToSlash(unsafePath) // noop + endIdx := len(unsafePath) + for endIdx > 0 { + subpath := unsafePath[:endIdx] + + handle, err := openat2File(root, subpath, &unix.OpenHow{ + Flags: unix.O_PATH | unix.O_CLOEXEC, + Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS, + }) + if err == nil { + // Jump over the slash if we have a non-"" remainingPath. + if endIdx < len(unsafePath) { + endIdx++ + } + // We found a subpath! + return handle, unsafePath[endIdx:], nil + } + if errors.Is(err, unix.ENOENT) || errors.Is(err, unix.ENOTDIR) { + // That path doesn't exist, let's try the next directory up. + endIdx = strings.LastIndexByte(subpath, '/') + continue + } + return nil, "", fmt.Errorf("open subpath: %w", err) + } + // If we couldn't open anything, the whole subpath is missing. Return a + // copy of the root fd so that the caller doesn't close the original root + // by accident. + rootClone, err := dupFile(root) + if err != nil { + return nil, "", err + } + return rootClone, unsafePath, nil +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go b/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go new file mode 100644 index 00000000..949fb5f2 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go @@ -0,0 +1,59 @@ +//go:build linux + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "os" + "path/filepath" + + "golang.org/x/sys/unix" +) + +func dupFile(f *os.File) (*os.File, error) { + fd, err := unix.FcntlInt(f.Fd(), unix.F_DUPFD_CLOEXEC, 0) + if err != nil { + return nil, os.NewSyscallError("fcntl(F_DUPFD_CLOEXEC)", err) + } + return os.NewFile(uintptr(fd), f.Name()), nil +} + +func openatFile(dir *os.File, path string, flags int, mode int) (*os.File, error) { + // Make sure we always set O_CLOEXEC. + flags |= unix.O_CLOEXEC + fd, err := unix.Openat(int(dir.Fd()), path, flags, uint32(mode)) + if err != nil { + return nil, &os.PathError{Op: "openat", Path: dir.Name() + "/" + path, Err: err} + } + // All of the paths we use with openatFile are guaranteed to be + // lexically safe, so we can use filepath.Join here.
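+ // (For example, dir.Name() "/proc" and path "self/fd" join to
+ // "/proc/self/fd".)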
+ fullPath := filepath.Join(dir.Name(), path) + return os.NewFile(uintptr(fd), fullPath), nil +} + +func fstatatFile(dir *os.File, path string, flags int) (unix.Stat_t, error) { + var stat unix.Stat_t + if err := unix.Fstatat(int(dir.Fd()), path, &stat, flags); err != nil { + return stat, &os.PathError{Op: "fstatat", Path: dir.Name() + "/" + path, Err: err} + } + return stat, nil +} + +func readlinkatFile(dir *os.File, path string) (string, error) { + size := 4096 + for { + linkBuf := make([]byte, size) + n, err := unix.Readlinkat(int(dir.Fd()), path, linkBuf) + if err != nil { + return "", &os.PathError{Op: "readlinkat", Path: dir.Name() + "/" + path, Err: err} + } + if n != size { + return string(linkBuf[:n]), nil + } + // Possible truncation; resize the buffer. + size *= 2 + } +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go b/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go new file mode 100644 index 00000000..daac3f06 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go @@ -0,0 +1,481 @@ +//go:build linux + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "strconv" + "sync" + + "golang.org/x/sys/unix" +) + +func fstat(f *os.File) (unix.Stat_t, error) { + var stat unix.Stat_t + if err := unix.Fstat(int(f.Fd()), &stat); err != nil { + return stat, &os.PathError{Op: "fstat", Path: f.Name(), Err: err} + } + return stat, nil +} + +func fstatfs(f *os.File) (unix.Statfs_t, error) { + var statfs unix.Statfs_t + if err := unix.Fstatfs(int(f.Fd()), &statfs); err != nil { + return statfs, &os.PathError{Op: "fstatfs", Path: f.Name(), Err: err} + } + return statfs, nil +} + +// The kernel guarantees that the root inode of a procfs mount has an +// f_type of PROC_SUPER_MAGIC and st_ino of PROC_ROOT_INO. +const ( + procSuperMagic = 0x9fa0 // PROC_SUPER_MAGIC + procRootIno = 1 // PROC_ROOT_INO +) + +func verifyProcRoot(procRoot *os.File) error { + if statfs, err := fstatfs(procRoot); err != nil { + return err + } else if statfs.Type != procSuperMagic { + return fmt.Errorf("%w: incorrect procfs root filesystem type 0x%x", errUnsafeProcfs, statfs.Type) + } + if stat, err := fstat(procRoot); err != nil { + return err + } else if stat.Ino != procRootIno { + return fmt.Errorf("%w: incorrect procfs root inode number %d", errUnsafeProcfs, stat.Ino) + } + return nil +} + +var ( + hasNewMountApiBool bool + hasNewMountApiOnce sync.Once +) + +func hasNewMountApi() bool { + hasNewMountApiOnce.Do(func() { + // All of the pieces of the new mount API we use (fsopen, fsconfig, + // fsmount, open_tree) were added together in Linux 5.1[1,2], so we can + // just check for one of the syscalls and the others should also be + // available. + // + // Just try to use open_tree(2) to open a file without OPEN_TREE_CLONE. + // This is equivalent to openat(2), but tells us if open_tree is + // available (and thus all of the other basic new mount API syscalls). + // open_tree(2) is the most lightweight syscall to test here. + // + // [1]: merge commit 400913252d09 + // [2]: + fd, err := unix.OpenTree(-int(unix.EBADF), "/", unix.OPEN_TREE_CLOEXEC) + if err == nil { + hasNewMountApiBool = true + _ = unix.Close(fd) + } + }) + return hasNewMountApiBool +} + +func fsopen(fsName string, flags int) (*os.File, error) { + // Make sure we always set O_CLOEXEC.
+ flags |= unix.FSOPEN_CLOEXEC + fd, err := unix.Fsopen(fsName, flags) + if err != nil { + return nil, os.NewSyscallError("fsopen "+fsName, err) + } + return os.NewFile(uintptr(fd), "fscontext:"+fsName), nil +} + +func fsmount(ctx *os.File, flags, mountAttrs int) (*os.File, error) { + // Make sure we always set O_CLOEXEC. + flags |= unix.FSMOUNT_CLOEXEC + fd, err := unix.Fsmount(int(ctx.Fd()), flags, mountAttrs) + if err != nil { + return nil, os.NewSyscallError("fsmount "+ctx.Name(), err) + } + return os.NewFile(uintptr(fd), "fsmount:"+ctx.Name()), nil +} + +func newPrivateProcMount() (*os.File, error) { + procfsCtx, err := fsopen("proc", unix.FSOPEN_CLOEXEC) + if err != nil { + return nil, err + } + defer procfsCtx.Close() + + // Try to configure hidepid=ptraceable,subset=pid if possible, but ignore errors. + _ = unix.FsconfigSetString(int(procfsCtx.Fd()), "hidepid", "ptraceable") + _ = unix.FsconfigSetString(int(procfsCtx.Fd()), "subset", "pid") + + // Get an actual handle. + if err := unix.FsconfigCreate(int(procfsCtx.Fd())); err != nil { + return nil, os.NewSyscallError("fsconfig create procfs", err) + } + return fsmount(procfsCtx, unix.FSMOUNT_CLOEXEC, unix.MS_RDONLY|unix.MS_NODEV|unix.MS_NOEXEC|unix.MS_NOSUID) +} + +func openTree(dir *os.File, path string, flags uint) (*os.File, error) { + dirFd := -int(unix.EBADF) + dirName := "." + if dir != nil { + dirFd = int(dir.Fd()) + dirName = dir.Name() + } + // Make sure we always set O_CLOEXEC. + flags |= unix.OPEN_TREE_CLOEXEC + fd, err := unix.OpenTree(dirFd, path, flags) + if err != nil { + return nil, &os.PathError{Op: "open_tree", Path: path, Err: err} + } + return os.NewFile(uintptr(fd), dirName+"/"+path), nil +} + +func clonePrivateProcMount() (_ *os.File, Err error) { + // Try to make a clone without using AT_RECURSIVE if we can. If this works, + // we can be sure there are no over-mounts and so if the root is valid then + // we're golden. Otherwise, we have to deal with over-mounts. + procfsHandle, err := openTree(nil, "/proc", unix.OPEN_TREE_CLONE) + if err != nil || testingForcePrivateProcRootOpenTreeAtRecursive(procfsHandle) { + procfsHandle, err = openTree(nil, "/proc", unix.OPEN_TREE_CLONE|unix.AT_RECURSIVE) + } + if err != nil { + return nil, fmt.Errorf("creating a detached procfs clone: %w", err) + } + defer func() { + if Err != nil { + _ = procfsHandle.Close() + } + }() + if err := verifyProcRoot(procfsHandle); err != nil { + return nil, err + } + return procfsHandle, nil +} + +func privateProcRoot() (*os.File, error) { + if !hasNewMountApi() { + return nil, fmt.Errorf("new mount api: %w", unix.ENOTSUP) + } + // Try to create a new procfs mount from scratch if we can. This ensures we + // can get a procfs mount even if /proc is fake (for whatever reason). + procRoot, err := newPrivateProcMount() + if err != nil || testingForcePrivateProcRootOpenTree(procRoot) { + // Try to clone /proc then... 
+ procRoot, err = clonePrivateProcMount() + } + return procRoot, err +} + +var ( + procRootHandle *os.File + procRootError error + procRootOnce sync.Once + + errUnsafeProcfs = errors.New("unsafe procfs detected") +) + +func unsafeHostProcRoot() (_ *os.File, Err error) { + procRoot, err := os.OpenFile("/proc", unix.O_PATH|unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) + if err != nil { + return nil, err + } + defer func() { + if Err != nil { + _ = procRoot.Close() + } + }() + if err := verifyProcRoot(procRoot); err != nil { + return nil, err + } + return procRoot, nil +} + +func doGetProcRoot() (*os.File, error) { + procRoot, err := privateProcRoot() + if err != nil || testingForceGetProcRootUnsafe(procRoot) { + // Fall back to using a /proc handle if making a private mount failed. + // If we have openat2, at least we can avoid some kinds of over-mount + // attacks, but without openat2 there's not much we can do. + procRoot, err = unsafeHostProcRoot() + } + return procRoot, err +} + +func getProcRoot() (*os.File, error) { + procRootOnce.Do(func() { + procRootHandle, procRootError = doGetProcRoot() + }) + return procRootHandle, procRootError +} + +var ( + haveProcThreadSelf bool + haveProcThreadSelfOnce sync.Once +) + +type procThreadSelfCloser func() + +// procThreadSelf returns a handle to /proc/thread-self/<subpath> (or an +// equivalent handle on older kernels where /proc/thread-self doesn't exist). +// Once finished with the handle, you must call the returned closer function +// (runtime.UnlockOSThread). You must not pass the returned *os.File to other +// Go threads or use the handle after calling the closer. +// +// This is similar to ProcThreadSelf from runc, but with extra hardening +// applied and using *os.File. +func procThreadSelf(procRoot *os.File, subpath string) (_ *os.File, _ procThreadSelfCloser, Err error) { + haveProcThreadSelfOnce.Do(func() { + // If the kernel doesn't support thread-self, it doesn't matter which + // /proc handle we use. + _, err := fstatatFile(procRoot, "thread-self", unix.AT_SYMLINK_NOFOLLOW) + haveProcThreadSelf = (err == nil) + }) + + // We need to lock our thread until the caller is done with the handle + // because between getting the handle and using it we could get interrupted + // by the Go runtime and hit the case where the underlying thread is + // swapped out and the original thread is killed, resulting in + // pull-your-hair-out-hard-to-debug issues in the caller. + runtime.LockOSThread() + defer func() { + if Err != nil { + runtime.UnlockOSThread() + } + }() + + // Figure out what prefix we want to use. + threadSelf := "thread-self/" + if !haveProcThreadSelf || testingForceProcSelfTask() { + // Pre-3.17 kernels don't have /proc/thread-self, so do it manually. + threadSelf = "self/task/" + strconv.Itoa(unix.Gettid()) + "/" + if _, err := fstatatFile(procRoot, threadSelf, unix.AT_SYMLINK_NOFOLLOW); err != nil || testingForceProcSelf() { + // In this case, we are running in a pid namespace that doesn't match + // the /proc mount we have. This can happen inside runc. + // + // Unfortunately, there is no nice way to get the correct TID to + // use here because of the age of the kernel, so we have to just + // use /proc/self and hope that it works. + threadSelf = "self/" + } + } + + // Grab the handle.
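+ // (At this point threadSelf is, e.g., "thread-self/" or "self/task/<tid>/",
+ // so threadSelf+subpath names something like "thread-self/fd/3".)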
+ var ( + handle *os.File + err error + ) + if hasOpenat2() { + // We prefer being able to use RESOLVE_NO_XDEV if we can, to be + // absolutely sure we are operating on a clean /proc handle that + // doesn't have any cheeky overmounts that could trick us (including + // symlink mounts on top of /proc/thread-self). RESOLVE_BENEATH isn't + // strictly needed, but just use it since we have it. + // + // NOTE: /proc/self is technically a magic-link (the contents of the + // symlink are generated dynamically), but it doesn't use + // nd_jump_link() so RESOLVE_NO_MAGICLINKS allows it. + // + // NOTE: We MUST NOT use RESOLVE_IN_ROOT here, as openat2File uses + // procSelfFdReadlink to clean up the returned f.Name() if we use + // RESOLVE_IN_ROOT (which would lead to an infinite recursion). + handle, err = openat2File(procRoot, threadSelf+subpath, &unix.OpenHow{ + Flags: unix.O_PATH | unix.O_CLOEXEC, + Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_XDEV | unix.RESOLVE_NO_MAGICLINKS, + }) + if err != nil { + return nil, nil, fmt.Errorf("%w: %w", errUnsafeProcfs, err) + } + } else { + handle, err = openatFile(procRoot, threadSelf+subpath, unix.O_PATH|unix.O_CLOEXEC, 0) + if err != nil { + return nil, nil, fmt.Errorf("%w: %w", errUnsafeProcfs, err) + } + defer func() { + if Err != nil { + _ = handle.Close() + } + }() + // We can't detect bind-mounts of different parts of procfs on top of + // /proc (a-la RESOLVE_NO_XDEV), but we can at least be sure that we + // aren't on the wrong filesystem here. + if statfs, err := fstatfs(handle); err != nil { + return nil, nil, err + } else if statfs.Type != procSuperMagic { + return nil, nil, fmt.Errorf("%w: incorrect /proc/self/fd filesystem type 0x%x", errUnsafeProcfs, statfs.Type) + } + } + return handle, runtime.UnlockOSThread, nil +} + +var ( + hasStatxMountIdBool bool + hasStatxMountIdOnce sync.Once +) + +func hasStatxMountId() bool { + hasStatxMountIdOnce.Do(func() { + var ( + stx unix.Statx_t + // We don't care which mount ID we get. The kernel will give us the + // unique one if it is supported. + wantStxMask uint32 = unix.STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID + ) + err := unix.Statx(-int(unix.EBADF), "/", 0, int(wantStxMask), &stx) + hasStatxMountIdBool = (err == nil && (stx.Mask&wantStxMask != 0)) + }) + return hasStatxMountIdBool +} + +func checkSymlinkOvermount(dir *os.File, path string) error { + // If we don't have statx(STATX_MNT_ID*) support, we can't do anything. + if !hasStatxMountId() { + return nil + } + + var ( + stx unix.Statx_t + // We don't care which mount ID we get. The kernel will give us the + // unique one if it is supported. + wantStxMask uint32 = unix.STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID + ) + + // Get the mntId of our procfs handle. + err := unix.Statx(int(dir.Fd()), "", unix.AT_EMPTY_PATH, int(wantStxMask), &stx) + if err != nil { + return &os.PathError{Op: "statx", Path: dir.Name(), Err: err} + } + if stx.Mask&wantStxMask == 0 { + // It's not a kernel limitation; for some reason we couldn't get a + // mount ID. Assume it's some kind of attack. + return fmt.Errorf("%w: could not get mnt id of dir %s", errUnsafeProcfs, dir.Name()) + } + expectedMountId := stx.Mnt_id + + // Get the mntId of the target symlink.
+ stx = unix.Statx_t{} + err = unix.Statx(int(dir.Fd()), path, unix.AT_SYMLINK_NOFOLLOW, int(wantStxMask), &stx) + if err != nil { + return &os.PathError{Op: "statx", Path: dir.Name() + "/" + path, Err: err} + } + if stx.Mask&wantStxMask == 0 { + // It's not a kernel limitation; for some reason we couldn't get a + // mount ID. Assume it's some kind of attack. + return fmt.Errorf("%w: could not get mnt id of symlink %s", errUnsafeProcfs, path) + } + gotMountId := stx.Mnt_id + + // As long as the directory mount is alive, even with wrapping mount IDs, + // we would expect to see a different mount ID here. (Of course, if we're + // using unsafeHostProcRoot() then an attacker could change this after we + // did this check.) + if expectedMountId != gotMountId { + return fmt.Errorf("%w: symlink %s/%s has an overmount obscuring the real link (mount ids do not match %d != %d)", errUnsafeProcfs, dir.Name(), path, expectedMountId, gotMountId) + } + return nil +} + +func doProcSelfMagiclink[T any](procRoot *os.File, subPath string, fn func(procDirHandle *os.File, base string) (T, error)) (T, error) { + // We cannot operate on the magic-link directly with a handle; we need to + // create a handle to the parent of the magic-link and then do + // single-component operations on it. + dir, base := filepath.Dir(subPath), filepath.Base(subPath) + + procDirHandle, closer, err := procThreadSelf(procRoot, dir) + if err != nil { + return *new(T), fmt.Errorf("get safe /proc/thread-self/%s handle: %w", dir, err) + } + defer procDirHandle.Close() + defer closer() + + // Try to detect if there is a mount on top of the symlink we are about to + // read. If we are using unsafeHostProcRoot(), this could change after we + // check it (and there's nothing we can do about that) but for + // privateProcRoot() this should be guaranteed to be safe (at least since + // Linux 5.12[1], when anonymous mount namespaces were completely isolated + // from external mounts including mount propagation events). + // + // [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts + // onto targets that reside on shared mounts"). + if err := checkSymlinkOvermount(procDirHandle, base); err != nil { + return *new(T), fmt.Errorf("check safety of %s proc magiclink: %w", subPath, err) + } + return fn(procDirHandle, base) +} + +func doRawProcSelfFdReadlink(procRoot *os.File, fd int) (string, error) { + fdPath := fmt.Sprintf("fd/%d", fd) + return doProcSelfMagiclink(procRoot, fdPath, readlinkatFile) +} + +func rawProcSelfFdReadlink(fd int) (string, error) { + procRoot, err := getProcRoot() + if err != nil { + return "", err + } + return doRawProcSelfFdReadlink(procRoot, fd) +} + +func procSelfFdReadlink(f *os.File) (string, error) { + return rawProcSelfFdReadlink(int(f.Fd())) +} + +var ( + errPossibleBreakout = errors.New("possible breakout detected") + errInvalidDirectory = errors.New("wandered into deleted directory") + errDeletedInode = errors.New("cannot verify path of deleted inode") +) + +func isDeadInode(file *os.File) error { + // If the nlink of a file drops to 0, there is an attacker deleting + // directories during our walk, which could result in weird /proc values. + // It's better to error out in this case.
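+ // (A stat.Nlink of 0 means the inode has been unlinked from every
+ // directory and survives only because some handle is still open.)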
+ stat, err := fstat(file) + if err != nil { + return fmt.Errorf("check for dead inode: %w", err) + } + if stat.Nlink == 0 { + err := errDeletedInode + if stat.Mode&unix.S_IFMT == unix.S_IFDIR { + err = errInvalidDirectory + } + return fmt.Errorf("%w %q", err, file.Name()) + } + return nil +} + +func getUmask() int { + // umask is a per-thread property, but it is inherited by children, so we + // need to lock our OS thread to make sure that no other goroutine runs in + // this thread and no goroutines are spawned from this thread until we + // revert to the old umask. + // + // We could parse /proc/self/status to avoid this get-set problem, but + // /proc/thread-self requires LockOSThread anyway, so there's no real + // benefit over just using umask(2). + runtime.LockOSThread() + umask := unix.Umask(0) + unix.Umask(umask) + runtime.UnlockOSThread() + return umask +} + +func checkProcSelfFdPath(path string, file *os.File) error { + if err := isDeadInode(file); err != nil { + return err + } + actualPath, err := procSelfFdReadlink(file) + if err != nil { + return fmt.Errorf("get path of handle: %w", err) + } + if actualPath != path { + return fmt.Errorf("%w: handle path %q doesn't match expected path %q", errPossibleBreakout, actualPath, path) + } + return nil +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/testing_mocks_linux.go b/vendor/github.com/cyphar/filepath-securejoin/testing_mocks_linux.go new file mode 100644 index 00000000..2a25d08e --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/testing_mocks_linux.go @@ -0,0 +1,68 @@ +//go:build linux + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "os" + "testing" +) + +type forceGetProcRootLevel int + +const ( + forceGetProcRootDefault forceGetProcRootLevel = iota + forceGetProcRootOpenTree // force open_tree() + forceGetProcRootOpenTreeAtRecursive // force open_tree(AT_RECURSIVE) + forceGetProcRootUnsafe // force open() +) + +var testingForceGetProcRoot *forceGetProcRootLevel + +func testingCheckClose(check bool, f *os.File) bool { + if check { + if f != nil { + _ = f.Close() + } + return true + } + return false +} + +func testingForcePrivateProcRootOpenTree(f *os.File) bool { + return testing.Testing() && testingForceGetProcRoot != nil && + testingCheckClose(*testingForceGetProcRoot >= forceGetProcRootOpenTree, f) +} + +func testingForcePrivateProcRootOpenTreeAtRecursive(f *os.File) bool { + return testing.Testing() && testingForceGetProcRoot != nil && + testingCheckClose(*testingForceGetProcRoot >= forceGetProcRootOpenTreeAtRecursive, f) +} + +func testingForceGetProcRootUnsafe(f *os.File) bool { + return testing.Testing() && testingForceGetProcRoot != nil && + testingCheckClose(*testingForceGetProcRoot >= forceGetProcRootUnsafe, f) +} + +type forceProcThreadSelfLevel int + +const ( + forceProcThreadSelfDefault forceProcThreadSelfLevel = iota + forceProcSelfTask + forceProcSelf +) + +var testingForceProcThreadSelf *forceProcThreadSelfLevel + +func testingForceProcSelfTask() bool { + return testing.Testing() && testingForceProcThreadSelf != nil && + *testingForceProcThreadSelf >= forceProcSelfTask +} + +func testingForceProcSelf() bool { + return testing.Testing() && testingForceProcThreadSelf != nil && + *testingForceProcThreadSelf >= forceProcSelf +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/vfs.go 
b/vendor/github.com/cyphar/filepath-securejoin/vfs.go new file mode 100644 index 00000000..6e27c7dd --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/vfs.go @@ -0,0 +1,41 @@ +// Copyright (C) 2017-2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import "os" + +// In future this should be moved into a separate package, because now there +// are several projects (umoci and go-mtree) that are using this sort of +// interface. + +// VFS is the minimal interface necessary to use SecureJoinVFS. A nil VFS is +// equivalent to using the standard os.* family of functions. This is mainly +// used for the purposes of mock testing, but it can also be used to run +// SecureJoin against a VFS-like system. +type VFS interface { + // Lstat returns a FileInfo describing the named file. If the file is a + // symbolic link, the returned FileInfo describes the symbolic link. Lstat + // makes no attempt to follow the link. These semantics are identical to + // os.Lstat. + Lstat(name string) (os.FileInfo, error) + + // Readlink returns the destination of the named symbolic link. These + // semantics are identical to os.Readlink. + Readlink(name string) (string, error) +} + +// osVFS is the "nil" VFS, in that it just passes everything through to the os +// module. +type osVFS struct{} + +// Lstat returns a FileInfo describing the named file. If the file is a +// symbolic link, the returned FileInfo describes the symbolic link. Lstat +// makes no attempt to follow the link. These semantics are identical to +// os.Lstat. +func (o osVFS) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) } + +// Readlink returns the destination of the named symbolic link. These +// semantics are identical to os.Readlink. +func (o osVFS) Readlink(name string) (string, error) { return os.Readlink(name) } diff --git a/vendor/github.com/emirpasic/gods/containers/containers.go b/vendor/github.com/emirpasic/gods/containers/containers.go index c35ab36d..a512a3cb 100644 --- a/vendor/github.com/emirpasic/gods/containers/containers.go +++ b/vendor/github.com/emirpasic/gods/containers/containers.go @@ -21,10 +21,11 @@ type Container interface { Size() int Clear() Values() []interface{} + String() string } // GetSortedValues returns sorted container's elements with respect to the passed comparator. -// Does not effect the ordering of elements within the container. +// Does not affect the ordering of elements within the container. func GetSortedValues(container Container, comparator utils.Comparator) []interface{} { values := container.Values() if len(values) < 2 { diff --git a/vendor/github.com/emirpasic/gods/containers/enumerable.go b/vendor/github.com/emirpasic/gods/containers/enumerable.go index ac48b545..70660054 100644 --- a/vendor/github.com/emirpasic/gods/containers/enumerable.go +++ b/vendor/github.com/emirpasic/gods/containers/enumerable.go @@ -11,11 +11,9 @@ type EnumerableWithIndex interface { // Map invokes the given function once for each element and returns a // container containing the values returned by the given function. - // TODO would appreciate help on how to enforce this in containers (don't want to type assert when chaining) // Map(func(index int, value interface{}) interface{}) Container // Select returns a new container containing all elements for which the given function returns a true value.
- // TODO need help on how to enforce this in containers (don't want to type assert when chaining) // Select(func(index int, value interface{}) bool) Container // Any passes each element of the container to the given function and @@ -39,11 +37,9 @@ type EnumerableWithKey interface { // Map invokes the given function once for each element and returns a container // containing the values returned by the given function as key/value pairs. - // TODO need help on how to enforce this in containers (don't want to type assert when chaining) // Map(func(key interface{}, value interface{}) (interface{}, interface{})) Container // Select returns a new container containing all elements for which the given function returns a true value. - // TODO need help on how to enforce this in containers (don't want to type assert when chaining) // Select(func(key interface{}, value interface{}) bool) Container // Any passes each element of the container to the given function and diff --git a/vendor/github.com/emirpasic/gods/containers/iterator.go b/vendor/github.com/emirpasic/gods/containers/iterator.go index f1a52a36..73994ec8 100644 --- a/vendor/github.com/emirpasic/gods/containers/iterator.go +++ b/vendor/github.com/emirpasic/gods/containers/iterator.go @@ -28,6 +28,12 @@ type IteratorWithIndex interface { // If First() returns true, then first element's index and value can be retrieved by Index() and Value(). // Modifies the state of the iterator. First() bool + + // NextTo moves the iterator to the next element from the current position that satisfies the condition given by the + // passed function, and returns true if there was a next element in the container. + // If NextTo() returns true, then the next element's index and value can be retrieved by Index() and Value(). + // Modifies the state of the iterator. + NextTo(func(index int, value interface{}) bool) bool } // IteratorWithKey is a stateful iterator for ordered containers whose elements are key value pairs. @@ -54,6 +60,12 @@ type IteratorWithKey interface { // If First() returns true, then first element's key and value can be retrieved by Key() and Value(). // Modifies the state of the iterator. First() bool + + // NextTo moves the iterator to the next element from the current position that satisfies the condition given by the + // passed function, and returns true if there was a next element in the container. + // If NextTo() returns true, then the next element's key and value can be retrieved by Key() and Value(). + // Modifies the state of the iterator. + NextTo(func(key interface{}, value interface{}) bool) bool } // ReverseIteratorWithIndex is a stateful iterator for ordered containers whose values can be fetched by an index. @@ -80,6 +92,12 @@ type ReverseIteratorWithIndex interface { // Modifies the state of the iterator. Last() bool + // PrevTo moves the iterator to the previous element from the current position that satisfies the condition given by the + // passed function, and returns true if there was a previous element in the container. + // If PrevTo() returns true, then the previous element's index and value can be retrieved by Index() and Value(). + // Modifies the state of the iterator. + PrevTo(func(index int, value interface{}) bool) bool + IteratorWithIndex } @@ -105,5 +123,11 @@ type ReverseIteratorWithKey interface { // Modifies the state of the iterator.
Last() bool + // PrevTo moves the iterator to the previous element from current position that satisfies the condition given by the + // passed function, and returns true if there was a next element in the container. + // If PrevTo() returns true, then next element's key and value can be retrieved by Key() and Value(). + // Modifies the state of the iterator. + PrevTo(func(key interface{}, value interface{}) bool) bool + IteratorWithKey } diff --git a/vendor/github.com/emirpasic/gods/containers/serialization.go b/vendor/github.com/emirpasic/gods/containers/serialization.go index d7c90c83..fd9cbe23 100644 --- a/vendor/github.com/emirpasic/gods/containers/serialization.go +++ b/vendor/github.com/emirpasic/gods/containers/serialization.go @@ -8,10 +8,14 @@ package containers type JSONSerializer interface { // ToJSON outputs the JSON representation of containers's elements. ToJSON() ([]byte, error) + // MarshalJSON @implements json.Marshaler + MarshalJSON() ([]byte, error) } // JSONDeserializer provides JSON deserialization type JSONDeserializer interface { // FromJSON populates containers's elements from the input JSON representation. FromJSON([]byte) error + // UnmarshalJSON @implements json.Unmarshaler + UnmarshalJSON([]byte) error } diff --git a/vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go b/vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go index bfedac9e..60ce4583 100644 --- a/vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go +++ b/vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go @@ -17,9 +17,8 @@ import ( "github.com/emirpasic/gods/utils" ) -func assertListImplementation() { - var _ lists.List = (*List)(nil) -} +// Assert List implementation +var _ lists.List = (*List)(nil) // List holds the elements in a slice type List struct { @@ -83,8 +82,8 @@ func (list *List) Contains(values ...interface{}) bool { for _, searchValue := range values { found := false - for _, element := range list.elements { - if element == searchValue { + for index := 0; index < list.size; index++ { + if list.elements[index] == searchValue { found = true break } diff --git a/vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go b/vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go index b3a87388..8bd60b0a 100644 --- a/vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go +++ b/vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go @@ -6,9 +6,8 @@ package arraylist import "github.com/emirpasic/gods/containers" -func assertEnumerableImplementation() { - var _ containers.EnumerableWithIndex = (*List)(nil) -} +// Assert Enumerable implementation +var _ containers.EnumerableWithIndex = (*List)(nil) // Each calls the given function once for each element, passing that element's index and value. 
func (list *List) Each(f func(index int, value interface{})) { diff --git a/vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go b/vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go index 38a93f3a..f9efe20c 100644 --- a/vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go +++ b/vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go @@ -6,9 +6,8 @@ package arraylist import "github.com/emirpasic/gods/containers" -func assertIteratorImplementation() { - var _ containers.ReverseIteratorWithIndex = (*Iterator)(nil) -} +// Assert Iterator implementation +var _ containers.ReverseIteratorWithIndex = (*Iterator)(nil) // Iterator holding the iterator's state type Iterator struct { @@ -81,3 +80,31 @@ func (iterator *Iterator) Last() bool { iterator.End() return iterator.Prev() } + +// NextTo moves the iterator to the next element from current position that satisfies the condition given by the +// passed function, and returns true if there was a next element in the container. +// If NextTo() returns true, then next element's index and value can be retrieved by Index() and Value(). +// Modifies the state of the iterator. +func (iterator *Iterator) NextTo(f func(index int, value interface{}) bool) bool { + for iterator.Next() { + index, value := iterator.Index(), iterator.Value() + if f(index, value) { + return true + } + } + return false +} + +// PrevTo moves the iterator to the previous element from current position that satisfies the condition given by the +// passed function, and returns true if there was a next element in the container. +// If PrevTo() returns true, then next element's index and value can be retrieved by Index() and Value(). +// Modifies the state of the iterator. +func (iterator *Iterator) PrevTo(f func(index int, value interface{}) bool) bool { + for iterator.Prev() { + index, value := iterator.Index(), iterator.Value() + if f(index, value) { + return true + } + } + return false +} diff --git a/vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go b/vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go index 2f283fb9..5e86fe96 100644 --- a/vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go +++ b/vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go @@ -9,10 +9,9 @@ import ( "github.com/emirpasic/gods/containers" ) -func assertSerializationImplementation() { - var _ containers.JSONSerializer = (*List)(nil) - var _ containers.JSONDeserializer = (*List)(nil) -} +// Assert Serialization implementation +var _ containers.JSONSerializer = (*List)(nil) +var _ containers.JSONDeserializer = (*List)(nil) // ToJSON outputs the JSON representation of list's elements. 
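The NextTo/PrevTo implementations just added to the arraylist iterator scan like repeated Next()/Prev() calls until a predicate matches. A short usage sketch with arbitrary sample data:

```go
package main

import (
	"fmt"

	"github.com/emirpasic/gods/lists/arraylist"
)

func main() {
	list := arraylist.New()
	list.Add("a", "bb", "ccc", "dd")

	// NextTo advances from the current position until the predicate holds,
	// mutating the iterator state just like Next() would.
	it := list.Iterator()
	if it.NextTo(func(index int, value interface{}) bool {
		return len(value.(string)) > 1
	}) {
		fmt.Println(it.Index(), it.Value()) // 1 bb
	}

	// PrevTo does the same scanning backwards; here from the end.
	it.End()
	if it.PrevTo(func(index int, value interface{}) bool {
		return len(value.(string)) == 2
	}) {
		fmt.Println(it.Index(), it.Value()) // 3 dd
	}
}
```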
func (list *List) ToJSON() ([]byte, error) { @@ -27,3 +26,13 @@ func (list *List) FromJSON(data []byte) error { } return err } + +// UnmarshalJSON @implements json.Unmarshaler +func (list *List) UnmarshalJSON(bytes []byte) error { + return list.FromJSON(bytes) +} + +// MarshalJSON @implements json.Marshaler +func (list *List) MarshalJSON() ([]byte, error) { + return list.ToJSON() +} diff --git a/vendor/github.com/emirpasic/gods/lists/lists.go b/vendor/github.com/emirpasic/gods/lists/lists.go index 1f6bb08e..55bd619e 100644 --- a/vendor/github.com/emirpasic/gods/lists/lists.go +++ b/vendor/github.com/emirpasic/gods/lists/lists.go @@ -30,4 +30,5 @@ type List interface { // Size() int // Clear() // Values() []interface{} + // String() string } diff --git a/vendor/github.com/emirpasic/gods/trees/binaryheap/binaryheap.go b/vendor/github.com/emirpasic/gods/trees/binaryheap/binaryheap.go index 70b28cf5..e658f257 100644 --- a/vendor/github.com/emirpasic/gods/trees/binaryheap/binaryheap.go +++ b/vendor/github.com/emirpasic/gods/trees/binaryheap/binaryheap.go @@ -19,9 +19,8 @@ import ( "strings" ) -func assertTreeImplementation() { - var _ trees.Tree = (*Heap)(nil) -} +// Assert Tree implementation +var _ trees.Tree = (*Heap)(nil) // Heap holds elements in an array-list type Heap struct { @@ -98,15 +97,19 @@ func (heap *Heap) Clear() { // Values returns all elements in the heap. func (heap *Heap) Values() []interface{} { - return heap.list.Values() + values := make([]interface{}, heap.list.Size(), heap.list.Size()) + for it := heap.Iterator(); it.Next(); { + values[it.Index()] = it.Value() + } + return values } // String returns a string representation of container func (heap *Heap) String() string { str := "BinaryHeap\n" values := []string{} - for _, value := range heap.list.Values() { - values = append(values, fmt.Sprintf("%v", value)) + for it := heap.Iterator(); it.Next(); { + values = append(values, fmt.Sprintf("%v", it.Value())) } str += strings.Join(values, ", ") return str diff --git a/vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go b/vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go index beeb8d70..f2179633 100644 --- a/vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go +++ b/vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go @@ -4,11 +4,12 @@ package binaryheap -import "github.com/emirpasic/gods/containers" +import ( + "github.com/emirpasic/gods/containers" +) -func assertIteratorImplementation() { - var _ containers.ReverseIteratorWithIndex = (*Iterator)(nil) -} +// Assert Iterator implementation +var _ containers.ReverseIteratorWithIndex = (*Iterator)(nil) // Iterator returns a stateful iterator whose values can be fetched by an index. type Iterator struct { @@ -45,7 +46,19 @@ func (iterator *Iterator) Prev() bool { // Value returns the current element's value. // Does not modify the state of the iterator. 
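With MarshalJSON/UnmarshalJSON now implemented (the serialization.go hunks above), gods containers satisfy json.Marshaler/json.Unmarshaler and plug directly into encoding/json instead of requiring explicit ToJSON/FromJSON calls. A sketch:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/emirpasic/gods/lists/arraylist"
)

func main() {
	list := arraylist.New()
	list.Add("a", "b", "c")

	// Because *List implements json.Marshaler, it can sit inside ordinary
	// structs and be serialized by json.Marshal directly.
	payload := struct {
		Items *arraylist.List `json:"items"`
	}{Items: list}
	b, _ := json.Marshal(payload)
	fmt.Println(string(b)) // {"items":["a","b","c"]}

	// json.Unmarshaler makes the reverse direction work as well.
	restored := arraylist.New()
	_ = json.Unmarshal([]byte(`["x","y"]`), restored)
	fmt.Println(restored.Values()) // [x y]
}
```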
func (iterator *Iterator) Value() interface{} { - value, _ := iterator.heap.list.Get(iterator.index) + start, end := evaluateRange(iterator.index) + if end > iterator.heap.Size() { + end = iterator.heap.Size() + } + tmpHeap := NewWith(iterator.heap.Comparator) + for n := start; n < end; n++ { + value, _ := iterator.heap.list.Get(n) + tmpHeap.Push(value) + } + for n := 0; n < iterator.index-start; n++ { + tmpHeap.Pop() + } + value, _ := tmpHeap.Pop() + return value } @@ -82,3 +95,49 @@ func (iterator *Iterator) Last() bool { iterator.End() return iterator.Prev() } + +// NextTo moves the iterator to the next element from current position that satisfies the condition given by the +// passed function, and returns true if there was a next element in the container. +// If NextTo() returns true, then next element's index and value can be retrieved by Index() and Value(). +// Modifies the state of the iterator. +func (iterator *Iterator) NextTo(f func(index int, value interface{}) bool) bool { + for iterator.Next() { + index, value := iterator.Index(), iterator.Value() + if f(index, value) { + return true + } + } + return false +} + +// PrevTo moves the iterator to the previous element from current position that satisfies the condition given by the +// passed function, and returns true if there was a next element in the container. +// If PrevTo() returns true, then next element's index and value can be retrieved by Index() and Value(). +// Modifies the state of the iterator. +func (iterator *Iterator) PrevTo(f func(index int, value interface{}) bool) bool { + for iterator.Prev() { + index, value := iterator.Index(), iterator.Value() + if f(index, value) { + return true + } + } + return false +} + +// numOfBits counts the number of bits of an int +func numOfBits(n int) uint { + var count uint + for n != 0 { + count++ + n >>= 1 + } + return count +} + +// evaluateRange evaluates the index range [start,end) of same level nodes in the heap as the index +func evaluateRange(index int) (start int, end int) { + bits := numOfBits(index+1) - 1 + start = 1<<bits - 1 + end = start + 1<<bits + return +} diff --git a/vendor/github.com/fsnotify/fsnotify/.mailmap b/vendor/github.com/fsnotify/fsnotify/.mailmap new file mode 100644 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.mailmap @@ -0,0 +1,2 @@ +Chris Howey <howeyc@gmail.com> <chris@howey.me> +Nathan Youngman <4566+nathany@users.noreply.github.com> diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml deleted file mode 100644 index a9c30165..00000000 --- a/vendor/github.com/fsnotify/fsnotify/.travis.yml +++ /dev/null @@ -1,36 +0,0 @@ -sudo: false -language: go - -go: - - "stable" - - "1.11.x" - - "1.10.x" - - "1.9.x" - -matrix: - include: - - go: "stable" - env: GOLINT=true - allow_failures: - - go: tip - fast_finish: true - - -before_install: - - if [ ! -z "${GOLINT}" ]; then go get -u golang.org/x/lint/golint; fi - -script: - - go test --race ./... - -after_script: - - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" - - if [ ! -z "${GOLINT}" ]; then echo running golint; golint --set_exit_status ./...; else echo skipping golint; fi - - go vet ./... - -os: - - linux - - osx - - windows - -notifications: - email: false diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS deleted file mode 100644 index 5ab5d41c..00000000 --- a/vendor/github.com/fsnotify/fsnotify/AUTHORS +++ /dev/null @@ -1,52 +0,0 @@ -# Names should be added to this file as -# Name or Organization <email address> -# The email address is not required for organizations. - -# You can update this list using the following command: - -# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' - -# Please keep the list sorted.
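Returning to the binaryheap change above: routing Values() and String() through the iterator means each heap level is reported in comparator order (evaluateRange finds the [start,end) slice of same-level nodes, and the temporary heap pops them in order) rather than in raw backing-slice order. A quick way to see the effect:

```go
package main

import (
	"fmt"

	"github.com/emirpasic/gods/trees/binaryheap"
)

func main() {
	heap := binaryheap.NewWithIntComparator()
	heap.Push(3)
	heap.Push(2)
	heap.Push(1)

	// Internally the backing slice is [1 3 2]; the iterator-based Values()
	// sorts each level, so level 0 yields 1 and level 1 yields 2, 3.
	fmt.Println(heap.Values()) // [1 2 3]

	v, _ := heap.Peek()
	fmt.Println(v) // 1 (the min-heap root is unaffected)
}
```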
- -Aaron L -Adrien Bustany -Amit Krishnan -Anmol Sethi -Bjørn Erik Pedersen -Bruno Bigras -Caleb Spare -Case Nelson -Chris Howey -Christoffer Buchholz -Daniel Wagner-Hall -Dave Cheney -Evan Phoenix -Francisco Souza -Hari haran -John C Barstow -Kelvin Fo -Ken-ichirou MATSUZAWA -Matt Layher -Nathan Youngman -Nickolai Zeldovich -Patrick -Paul Hammond -Pawel Knap -Pieter Droogendijk -Pursuit92 -Riku Voipio -Rob Figueiredo -Rodrigo Chiossi -Slawek Ligus -Soge Zhang -Tiffany Jernigan -Tilak Sharma -Tom Payne -Travis Cline -Tudor Golubenco -Vahe Khachikyan -Yukang -bronze1man -debrando -henrikedwards -铁哥 diff --git a/vendor/github.com/fsnotify/fsnotify/BUILD.bazel b/vendor/github.com/fsnotify/fsnotify/BUILD.bazel index 3798edf6..9bf32cdc 100644 --- a/vendor/github.com/fsnotify/fsnotify/BUILD.bazel +++ b/vendor/github.com/fsnotify/fsnotify/BUILD.bazel @@ -3,14 +3,14 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "fsnotify", srcs = [ - "fen.go", + "backend_fen.go", + "backend_inotify.go", + "backend_kqueue.go", + "backend_other.go", + "backend_windows.go", "fsnotify.go", - "inotify.go", - "inotify_poller.go", - "kqueue.go", - "open_mode_bsd.go", - "open_mode_darwin.go", - "windows.go", + "system_bsd.go", + "system_darwin.go", ], importmap = "peridot.resf.org/vendor/github.com/fsnotify/fsnotify", importpath = "github.com/fsnotify/fsnotify", @@ -40,6 +40,12 @@ go_library( "@io_bazel_rules_go//go/platform:openbsd": [ "//vendor/golang.org/x/sys/unix", ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/golang.org/x/sys/unix", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/golang.org/x/sys/windows", + ], "//conditions:default": [], }), ) diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index be4d7ea2..e0e57575 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,6 +1,230 @@ # Changelog -## v1.4.7 / 2018-01-09 +Unreleased +---------- +Nothing yet. + +1.7.0 - 2023-10-22 +------------------ +This version of fsnotify needs Go 1.17. + +### Additions + +- illumos: add FEN backend to support illumos and Solaris. ([#371]) + +- all: add `NewBufferedWatcher()` to use a buffered channel, which can be useful + in cases where you can't control the kernel buffer and receive a large number + of events in bursts. ([#550], [#572]) + +- all: add `AddWith()`, which is identical to `Add()` but allows passing + options. ([#521]) + +- windows: allow setting the ReadDirectoryChangesW() buffer size with + `fsnotify.WithBufferSize()`; the default of 64K is the highest value that + works on all platforms and is enough for most purposes, but in some cases a + highest buffer is needed. ([#521]) + +### Changes and fixes + +- inotify: remove watcher if a watched path is renamed ([#518]) + + After a rename the reported name wasn't updated, or even an empty string. + Inotify doesn't provide any good facilities to update it, so just remove the + watcher. This is already how it worked on kqueue and FEN. + + On Windows this does work, and remains working. + +- windows: don't listen for file attribute changes ([#520]) + + File attribute changes are sent as `FILE_ACTION_MODIFIED` by the Windows API, + with no way to see if they're a file write or attribute change, so would show + up as a fsnotify.Write event. This is never useful, and could result in many + spurious Write events. 
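A sketch of the 1.7.0 additions listed above (NewBufferedWatcher, AddWith, WithBufferSize); the buffer sizes and the /tmp path are arbitrary example values:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// A buffered Events channel, for bursty workloads where the kernel-side
	// buffer can't be raised.
	w, err := fsnotify.NewBufferedWatcher(4096)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// AddWith is Add plus options; WithBufferSize only affects the Windows
	// ReadDirectoryChangesW backend and is a no-op elsewhere.
	if err := w.AddWith("/tmp", fsnotify.WithBufferSize(128*1024)); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case e, ok := <-w.Events:
			if !ok {
				return
			}
			log.Println(e)
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```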
+ +- windows: return `ErrEventOverflow` if the buffer is full ([#525]) + + Before it would merely return "short read", making it hard to detect this + error. + +- kqueue: make sure events for all files are delivered properly when removing a + watched directory ([#526]) + + Previously they would get sent with `""` (empty string) or `"."` as the path + name. + +- kqueue: don't emit spurious Create events for symbolic links ([#524]) + + The link would get resolved but kqueue would "forget" it already saw the link + itself, resulting on a Create for every Write event for the directory. + +- all: return `ErrClosed` on `Add()` when the watcher is closed ([#516]) + +- other: add `Watcher.Errors` and `Watcher.Events` to the no-op `Watcher` in + `backend_other.go`, making it easier to use on unsupported platforms such as + WASM, AIX, etc. ([#528]) + +- other: use the `backend_other.go` no-op if the `appengine` build tag is set; + Google AppEngine forbids usage of the unsafe package so the inotify backend + won't compile there. + +[#371]: https://github.com/fsnotify/fsnotify/pull/371 +[#516]: https://github.com/fsnotify/fsnotify/pull/516 +[#518]: https://github.com/fsnotify/fsnotify/pull/518 +[#520]: https://github.com/fsnotify/fsnotify/pull/520 +[#521]: https://github.com/fsnotify/fsnotify/pull/521 +[#524]: https://github.com/fsnotify/fsnotify/pull/524 +[#525]: https://github.com/fsnotify/fsnotify/pull/525 +[#526]: https://github.com/fsnotify/fsnotify/pull/526 +[#528]: https://github.com/fsnotify/fsnotify/pull/528 +[#537]: https://github.com/fsnotify/fsnotify/pull/537 +[#550]: https://github.com/fsnotify/fsnotify/pull/550 +[#572]: https://github.com/fsnotify/fsnotify/pull/572 + +1.6.0 - 2022-10-13 +------------------ +This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1, +but not documented). It also increases the minimum Linux version to 2.6.32. + +### Additions + +- all: add `Event.Has()` and `Op.Has()` ([#477]) + + This makes checking events a lot easier; for example: + + if event.Op&Write == Write && !(event.Op&Remove == Remove) { + } + + Becomes: + + if event.Has(Write) && !event.Has(Remove) { + } + +- all: add cmd/fsnotify ([#463]) + + A command-line utility for testing and some examples. + +### Changes and fixes + +- inotify: don't ignore events for files that don't exist ([#260], [#470]) + + Previously the inotify watcher would call `os.Lstat()` to check if a file + still exists before emitting events. + + This was inconsistent with other platforms and resulted in inconsistent event + reporting (e.g. when a file is quickly removed and re-created), and generally + a source of confusion. It was added in 2013 to fix a memory leak that no + longer exists. + +- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's + not watched ([#460]) + +- inotify: replace epoll() with non-blocking inotify ([#434]) + + Non-blocking inotify was not generally available at the time this library was + written in 2014, but now it is. As a result, the minimum Linux version is + bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster. + +- kqueue: don't check for events every 100ms ([#480]) + + The watcher would wake up every 100ms, even when there was nothing to do. Now + it waits until there is something to do. + +- macos: retry opening files on EINTR ([#475]) + +- kqueue: skip unreadable files ([#479]) + + kqueue requires a file descriptor for every file in a directory; this would + fail if a file was unreadable by the current user. 
Now these files are simply + skipped. + +- windows: fix renaming a watched directory if the parent is also watched ([#370]) + +- windows: increase buffer size from 4K to 64K ([#485]) + +- windows: close file handle on Remove() ([#288]) + +- kqueue: put pathname in the error if watching a file fails ([#471]) + +- inotify, windows: calling Close() more than once could race ([#465]) + +- kqueue: improve Close() performance ([#233]) + +- all: various documentation additions and clarifications. + +[#233]: https://github.com/fsnotify/fsnotify/pull/233 +[#260]: https://github.com/fsnotify/fsnotify/pull/260 +[#288]: https://github.com/fsnotify/fsnotify/pull/288 +[#370]: https://github.com/fsnotify/fsnotify/pull/370 +[#434]: https://github.com/fsnotify/fsnotify/pull/434 +[#460]: https://github.com/fsnotify/fsnotify/pull/460 +[#463]: https://github.com/fsnotify/fsnotify/pull/463 +[#465]: https://github.com/fsnotify/fsnotify/pull/465 +[#470]: https://github.com/fsnotify/fsnotify/pull/470 +[#471]: https://github.com/fsnotify/fsnotify/pull/471 +[#475]: https://github.com/fsnotify/fsnotify/pull/475 +[#477]: https://github.com/fsnotify/fsnotify/pull/477 +[#479]: https://github.com/fsnotify/fsnotify/pull/479 +[#480]: https://github.com/fsnotify/fsnotify/pull/480 +[#485]: https://github.com/fsnotify/fsnotify/pull/485 + +## [1.5.4] - 2022-04-25 + +* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447) +* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444) +* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443) + +## [1.5.3] - 2022-04-22 + +* This version is retracted. An incorrect branch is published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445) + +## [1.5.2] - 2022-04-21 + +* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374) +* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361) +* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424) +* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406) +* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416) + +## [1.5.1] - 2021-08-24 + +* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394) + +## [1.5.0] - 2021-08-20 + +* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381) +* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298) +* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289) +* CI: Use GitHub Actions for CI and cover go 1.12-1.17 + [#378](https://github.com/fsnotify/fsnotify/pull/378) + [#381](https://github.com/fsnotify/fsnotify/pull/381) + [#385](https://github.com/fsnotify/fsnotify/pull/385) +* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325) + +## [1.4.9] - 2020-03-11 + +* Move example usage to the readme #329. This may resolve #328. 
+ +## [1.4.8] - 2020-03-10 + +* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216) +* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265) +* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266) +* CI: Less verbosity (@nathany #267) +* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267) +* Tests: Check if channels are closed in the example (@alexeykazakov #244) +* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284) +* CI: Add windows to travis matrix (@cpuguy83 #284) +* Docs: Remover appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93) +* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219) +* Linux: open files with close-on-exec (@linxiulei #273) +* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 ) +* Project: Add go.mod (@nathany #309) +* Project: Revise editor config (@nathany #309) +* Project: Update copyright for 2019 (@nathany #309) +* CI: Drop go1.8 from CI matrix (@nathany #309) +* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e ) + +## [1.4.7] - 2018-01-09 * BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) * Tests: Fix missing verb on format string (thanks @rchiossi) @@ -10,62 +234,62 @@ * Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) * Docs: replace references to OS X with macOS -## v1.4.2 / 2016-10-10 +## [1.4.2] - 2016-10-10 * Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) -## v1.4.1 / 2016-10-04 +## [1.4.1] - 2016-10-04 * Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) -## v1.4.0 / 2016-10-01 +## [1.4.0] - 2016-10-01 * add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) -## v1.3.1 / 2016-06-28 +## [1.3.1] - 2016-06-28 * Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) -## v1.3.0 / 2016-04-19 +## [1.3.0] - 2016-04-19 * Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) -## v1.2.10 / 2016-03-02 +## [1.2.10] - 2016-03-02 * Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) -## v1.2.9 / 2016-01-13 +## [1.2.9] - 2016-01-13 kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) -## v1.2.8 / 2015-12-17 +## [1.2.8] - 2015-12-17 * kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) * inotify: fix race in test * enable race detection for continuous integration (Linux, Mac, Windows) -## v1.2.5 / 2015-10-17 +## [1.2.5] - 2015-10-17 * inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) * inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) * kqueue: watch for rename events on 
subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) * kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) -## v1.2.1 / 2015-10-14 +## [1.2.1] - 2015-10-14 * kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) -## v1.2.0 / 2015-02-08 +## [1.2.0] - 2015-02-08 * inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) * inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) * kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) -## v1.1.1 / 2015-02-05 +## [1.1.1] - 2015-02-05 * inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) -## v1.1.0 / 2014-12-12 +## [1.1.0] - 2014-12-12 * kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) * add low-level functions @@ -77,22 +301,22 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn * kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) * kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) -## v1.0.4 / 2014-09-07 +## [1.0.4] - 2014-09-07 * kqueue: add dragonfly to the build tags. * Rename source code files, rearrange code so exported APIs are at the top. * Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) -## v1.0.3 / 2014-08-19 +## [1.0.3] - 2014-08-19 * [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) -## v1.0.2 / 2014-08-17 +## [1.0.2] - 2014-08-17 * [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) * [Fix] Make ./path and path equivalent. (thanks @zhsso) -## v1.0.0 / 2014-08-15 +## [1.0.0] - 2014-08-15 * [API] Remove AddWatch on Windows, use Add. * Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) @@ -146,51 +370,51 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn * no tests for the current implementation * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) -## v0.9.3 / 2014-12-31 +## [0.9.3] - 2014-12-31 * kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) -## v0.9.2 / 2014-08-17 +## [0.9.2] - 2014-08-17 * [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) -## v0.9.1 / 2014-06-12 +## [0.9.1] - 2014-06-12 * Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) -## v0.9.0 / 2014-01-17 +## [0.9.0] - 2014-01-17 * IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) * [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) * [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
-## v0.8.12 / 2013-11-13 +## [0.8.12] - 2013-11-13 * [API] Remove FD_SET and friends from Linux adapter -## v0.8.11 / 2013-11-02 +## [0.8.11] - 2013-11-02 * [Doc] Add Changelog [#72][] (thanks @nathany) * [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) -## v0.8.10 / 2013-10-19 +## [0.8.10] - 2013-10-19 * [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) * [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) * [Doc] specify OS-specific limits in README (thanks @debrando) -## v0.8.9 / 2013-09-08 +## [0.8.9] - 2013-09-08 * [Doc] Contributing (thanks @nathany) * [Doc] update package path in example code [#63][] (thanks @paulhammond) * [Doc] GoCI badge in README (Linux only) [#60][] * [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) -## v0.8.8 / 2013-06-17 +## [0.8.8] - 2013-06-17 * [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) -## v0.8.7 / 2013-06-03 +## [0.8.7] - 2013-06-03 * [API] Make syscall flags internal * [Fix] inotify: ignore event changes @@ -198,74 +422,74 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn * [Fix] tests on Windows * lower case error messages -## v0.8.6 / 2013-05-23 +## [0.8.6] - 2013-05-23 * kqueue: Use EVT_ONLY flag on Darwin * [Doc] Update README with full example -## v0.8.5 / 2013-05-09 +## [0.8.5] - 2013-05-09 * [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) -## v0.8.4 / 2013-04-07 +## [0.8.4] - 2013-04-07 * [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) -## v0.8.3 / 2013-03-13 +## [0.8.3] - 2013-03-13 * [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) * [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) -## v0.8.2 / 2013-02-07 +## [0.8.2] - 2013-02-07 * [Doc] add Authors * [Fix] fix data races for map access [#29][] (thanks @fsouza) -## v0.8.1 / 2013-01-09 +## [0.8.1] - 2013-01-09 * [Fix] Windows path separators * [Doc] BSD License -## v0.8.0 / 2012-11-09 +## [0.8.0] - 2012-11-09 * kqueue: directory watching improvements (thanks @vmirage) * inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) * [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) -## v0.7.4 / 2012-10-09 +## [0.7.4] - 2012-10-09 * [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) * [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) * [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) * [Fix] kqueue: modify after recreation of file -## v0.7.3 / 2012-09-27 +## [0.7.3] - 2012-09-27 * [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) * [Fix] kqueue: no longer get duplicate CREATE events -## v0.7.2 / 2012-09-01 +## [0.7.2] - 2012-09-01 * kqueue: events for created directories -## v0.7.1 / 2012-07-14 +## [0.7.1] - 2012-07-14 * [Fix] for renaming files -## v0.7.0 / 2012-07-02 +## [0.7.0] - 2012-07-02 * [Feature] FSNotify flags * [Fix] inotify: Added file name back to event path -## v0.6.0 / 2012-06-06 +## [0.6.0] - 2012-06-06 * kqueue: watch files after directory created (thanks @tmc) -## v0.5.1 / 2012-05-22 +## [0.5.1] - 2012-05-22 * [Fix] inotify: remove all watches before Close() -## v0.5.0 / 2012-05-03 +## [0.5.0] - 2012-05-03 * [API] kqueue: return errors during watch instead of sending over channel * kqueue: match symlink behavior on Linux @@ -273,22 
+497,22 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn * [Fix] kqueue: handle EINTR (reported by @robfig) * [Doc] Godoc example [#1][] (thanks @davecheney) -## v0.4.0 / 2012-03-30 +## [0.4.0] - 2012-03-30 * Go 1 released: build with go tool * [Feature] Windows support using winfsnotify * Windows does not have attribute change notifications * Roll attribute notifications into IsModify -## v0.3.0 / 2012-02-19 +## [0.3.0] - 2012-02-19 * kqueue: add files when watch directory -## v0.2.0 / 2011-12-30 +## [0.2.0] - 2011-12-30 * update to latest Go weekly code -## v0.1.0 / 2011-10-19 +## [0.1.0] - 2011-10-19 * kqueue: add watch on file creation to match inotify * kqueue: create file event diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index 828a60b2..ea379759 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -1,77 +1,26 @@ -# Contributing +Thank you for your interest in contributing to fsnotify! We try to review and +merge PRs in a reasonable timeframe, but please be aware that: -## Issues +- To avoid "wasted" work, please discus changes on the issue tracker first. You + can just send PRs, but they may end up being rejected for one reason or the + other. -* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). -* Please indicate the platform you are using fsnotify on. -* A code example to reproduce the problem is appreciated. +- fsnotify is a cross-platform library, and changes must work reasonably well on + all supported platforms. -## Pull Requests +- Changes will need to be compatible; old code should still compile, and the + runtime behaviour can't change in ways that are likely to lead to problems for + users. -### Contributor License Agreement +Testing +------- +Just `go test ./...` runs all the tests; the CI runs this on all supported +platforms. Testing different platforms locally can be done with something like +[goon] or [Vagrant], but this isn't super-easy to set up at the moment. -fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). +Use the `-short` flag to make the "stress test" run faster. -Please indicate that you have signed the CLA in your pull request. -### How fsnotify is Developed - -* Development is done on feature branches. -* Tests are run on BSD, Linux, macOS and Windows. -* Pull requests are reviewed and [applied to master][am] using [hub][]. - * Maintainers may modify or squash commits rather than asking contributors to. -* To issue a new release, the maintainers will: - * Update the CHANGELOG - * Tag a version, which will become available through gopkg.in. - -### How to Fork - -For smooth sailing, always use the original import path. Installing with `go get` makes this easy. - -1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) -2. Create your feature branch (`git checkout -b my-new-feature`) -3. 
Ensure everything works and the tests pass (see below) -4. Commit your changes (`git commit -am 'Add some feature'`) - -Contribute upstream: - -1. Fork fsnotify on GitHub -2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) -3. Push to the branch (`git push fork my-new-feature`) -4. Create a new Pull Request on GitHub - -This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). - -### Testing - -fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. - -Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. - -To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. - -* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) -* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder. -* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password) -* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`. -* When you're done, you will want to halt or destroy the Vagrant boxes. - -Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. - -Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). - -### Maintainers - -Help maintaining fsnotify is welcome. To be a maintainer: - -* Submit a pull request and sign the CLA as above. -* You must be able to run the test suite on Mac, Windows, Linux and BSD. - -To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][]. - -All code changes should be internal pull requests. - -Releases are tagged using [Semantic Versioning](http://semver.org/). - -[hub]: https://github.com/github/hub -[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs +[goon]: https://github.com/arp242/goon +[Vagrant]: https://www.vagrantup.com/ +[integration_test.go]: /integration_test.go diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE index e180c8fb..fb03ade7 100644 --- a/vendor/github.com/fsnotify/fsnotify/LICENSE +++ b/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -1,28 +1,25 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. +Copyright © 2012 The Go Authors. All rights reserved. +Copyright © fsnotify Authors. All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. +* Neither the name of Google Inc. nor the names of its contributors may be used + to endorse or promote products derived from this software without specific + prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md index b2629e52..e480733d 100644 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -1,130 +1,184 @@ -# File system notifications for Go +fsnotify is a Go library to provide cross-platform filesystem notifications on +Windows, Linux, macOS, BSD, and illumos. -[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) +Go 1.17 or newer is required; the full documentation is at +https://pkg.go.dev/github.com/fsnotify/fsnotify -fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running: +--- -```console -go get -u golang.org/x/sys/... -``` +Platform support: -Cross platform: Windows, Linux, BSD and macOS. 
+| Backend | OS | Status | +| :-------------------- | :--------- | :------------------------------------------------------------------------ | +| inotify | Linux | Supported | +| kqueue | BSD, macOS | Supported | +| ReadDirectoryChangesW | Windows | Supported | +| FEN | illumos | Supported | +| fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) | +| AHAFS | AIX | [aix branch]; experimental due to lack of maintainer and test environment | +| FSEvents | macOS | [Needs support in x/sys/unix][fsevents] | +| USN Journals | Windows | [Needs support in x/sys/windows][usn] | +| Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) | -| Adapter | OS | Status | -| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | -| inotify | Linux 2.6.27 or later, Android\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | -| kqueue | BSD, macOS, iOS\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | -| ReadDirectoryChangesW | Windows | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | -| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | -| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) | -| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) | -| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | -| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | +Linux and illumos should include Android and Solaris, but these are currently +untested. -\* Android and iOS are untested. +[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120 +[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847 +[aix branch]: https://github.com/fsnotify/fsnotify/issues/353#issuecomment-1284590129 -Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. - -## API stability - -fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). - -All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number. - -Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`. - -## Usage +Usage +----- +A basic example: ```go package main import ( - "log" + "log" - "github.com/fsnotify/fsnotify" + "github.com/fsnotify/fsnotify" ) func main() { - watcher, err := fsnotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - defer watcher.Close() + // Create new watcher. 
+ watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() - done := make(chan bool) - go func() { - for { - select { - case event, ok := <-watcher.Events: - if !ok { - return - } - log.Println("event:", event) - if event.Op&fsnotify.Write == fsnotify.Write { - log.Println("modified file:", event.Name) - } - case err, ok := <-watcher.Errors: - if !ok { - return - } - log.Println("error:", err) - } - } - }() + // Start listening for events. + go func() { + for { + select { + case event, ok := <-watcher.Events: + if !ok { + return + } + log.Println("event:", event) + if event.Has(fsnotify.Write) { + log.Println("modified file:", event.Name) + } + case err, ok := <-watcher.Errors: + if !ok { + return + } + log.Println("error:", err) + } + } + }() - err = watcher.Add("/tmp/foo") - if err != nil { - log.Fatal(err) - } - <-done + // Add a path. + err = watcher.Add("/tmp") + if err != nil { + log.Fatal(err) + } + + // Block main goroutine forever. + <-make(chan struct{}) } ``` -## Contributing +Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be +run with: -Please refer to [CONTRIBUTING][] before opening an issue or pull request. + % go run ./cmd/fsnotify -## Example +Further detailed documentation can be found in godoc: +https://pkg.go.dev/github.com/fsnotify/fsnotify -See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go). +FAQ +--- +### Will a file still be watched when it's moved to another directory? +No, not unless you are watching the location it was moved to. -## FAQ +### Are subdirectories watched? +No, you must add watches for any directory you want to watch (a recursive +watcher is on the roadmap: [#18]). -**When a file is moved to another directory is it still being watched?** - -No (it shouldn't be, unless you are watching where it was moved to). - -**When I watch a directory, are all subdirectories watched as well?** - -No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). - -**Do I have to watch the Error and Event channels in a separate goroutine?** - -As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) - -**Why am I receiving multiple events for the same file on OS X?** - -Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). - -**How many files can be watched at once?** - -There are OS-specific limits as to how many watches can be created: -* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. -* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. - -**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?** - -fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications. - -[#62]: https://github.com/howeyc/fsnotify/issues/62 [#18]: https://github.com/fsnotify/fsnotify/issues/18 + +### Do I have to watch the Error and Event channels in a goroutine? +Yes. You can read both channels in the same goroutine using `select` (you don't +need a separate goroutine for both channels; see the example). 
+ +### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys? +fsnotify requires support from underlying OS to work. The current NFS and SMB +protocols does not provide network level support for file notifications, and +neither do the /proc and /sys virtual filesystems. + +This could be fixed with a polling watcher ([#9]), but it's not yet implemented. + +[#9]: https://github.com/fsnotify/fsnotify/issues/9 + +### Why do I get many Chmod events? +Some programs may generate a lot of attribute changes; for example Spotlight on +macOS, anti-virus programs, backup applications, and some others are known to do +this. As a rule, it's typically best to ignore Chmod events. They're often not +useful, and tend to cause problems. + +Spotlight indexing on macOS can result in multiple events (see [#15]). A +temporary workaround is to add your folder(s) to the *Spotlight Privacy +settings* until we have a native FSEvents implementation (see [#11]). + [#11]: https://github.com/fsnotify/fsnotify/issues/11 -[#7]: https://github.com/howeyc/fsnotify/issues/7 +[#15]: https://github.com/fsnotify/fsnotify/issues/15 -[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md +### Watching a file doesn't work well +Watching individual files (rather than directories) is generally not recommended +as many programs (especially editors) update files atomically: it will write to +a temporary file which is then moved to to destination, overwriting the original +(or some variant thereof). The watcher on the original file is now lost, as that +no longer exists. -## Related Projects +The upshot of this is that a power failure or crash won't leave a half-written +file. -* [notify](https://github.com/rjeczalik/notify) -* [fsevents](https://github.com/fsnotify/fsevents) +Watch the parent directory and use `Event.Name` to filter out files you're not +interested in. There is an example of this in `cmd/fsnotify/file.go`. +Platform-specific notes +----------------------- +### Linux +When a file is removed a REMOVE event won't be emitted until all file +descriptors are closed; it will emit a CHMOD instead: + + fp := os.Open("file") + os.Remove("file") // CHMOD + fp.Close() // REMOVE + +This is the event that inotify sends, so not much can be changed about this. + +The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for +the number of watches per user, and `fs.inotify.max_user_instances` specifies +the maximum number of inotify instances per user. Every Watcher you create is an +"instance", and every path you add is a "watch". + +These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and +`/proc/sys/fs/inotify/max_user_instances` + +To increase them you can use `sysctl` or write the value to proc file: + + # The default values on Linux 5.18 + sysctl fs.inotify.max_user_watches=124983 + sysctl fs.inotify.max_user_instances=128 + +To make the changes persist on reboot edit `/etc/sysctl.conf` or +`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your +distro's documentation): + + fs.inotify.max_user_watches=124983 + fs.inotify.max_user_instances=128 + +Reaching the limit will result in a "no space left on device" or "too many open +files" error. + +### kqueue (macOS, all BSD systems) +kqueue requires opening a file descriptor for every file that's being watched; +so if you're watching a directory with five files then that's six file +descriptors. You will run in to your system's "max open files" limit faster on +these platforms. 
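The FAQ advice above (watch the parent directory, filter on Event.Name, ignore Chmod) in code; the /etc/hosts target is an arbitrary example:

```go
package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Watch the directory, not the file itself, so atomic saves don't
	// silently drop the watch.
	if err := w.Add("/etc"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case e, ok := <-w.Events:
			if !ok {
				return
			}
			// Keep only the file we care about, and drop the noisy Chmod
			// events the FAQ recommends ignoring.
			if filepath.Base(e.Name) != "hosts" || e.Has(fsnotify.Chmod) {
				continue
			}
			log.Println("hosts changed:", e)
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```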
+ +The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to +control the maximum number of open files. diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go new file mode 100644 index 00000000..28497f1d --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -0,0 +1,640 @@ +//go:build solaris +// +build solaris + +// Note: the documentation on the Watcher type and methods is generated from +// mkdoc.zsh + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "sync" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # Windows notes +// +// Paths can be added as "C:\path\to\dir", but forward slashes +// ("C:/path/to/dir") will also work. +// +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all times, sometimes it will send no +// events, and often only for some files. +// +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. +type Watcher struct { + // Events sends the filesystem change events. 
+ // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // when a file is truncated. On Windows it's never + // sent. + Events chan Event + + // Errors sends any errors. + // + // ErrEventOverflow is used to indicate there are too many events: + // + // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. + Errors chan error + + mu sync.Mutex + port *unix.EventPort + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + dirs map[string]struct{} // Explicitly watched directories + watches map[string]struct{} // Explicitly watched non-directories +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + return NewBufferedWatcher(0) +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. +// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { + w := &Watcher{ + Events: make(chan Event, sz), + Errors: make(chan error), + dirs: make(map[string]struct{}), + watches: make(map[string]struct{}), + done: make(chan struct{}), + } + + var err error + w.port, err = unix.NewEventPort() + if err != nil { + return nil, fmt.Errorf("fsnotify.NewWatcher: %w", err) + } + + go w.readEvents() + return w, nil +} + +// sendEvent attempts to send an event to the user, returning true if the event +// was put in the channel successfully and false if the watcher has been closed. 
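+//
+// (Like sendError below, this selects against w.done so that a pending send
+// can't block forever after the watcher is closed.)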
+func (w *Watcher) sendEvent(name string, op Op) (sent bool) { + select { + case w.Events <- Event{Name: name, Op: op}: + return true + case <-w.done: + return false + } +} + +// sendError attempts to send an error to the user, returning true if the error +// was put in the channel successfully and false if the watcher has been closed. +func (w *Watcher) sendError(err error) (sent bool) { + select { + case w.Errors <- err: + return true + case <-w.done: + return false + } +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the Events channel. +func (w *Watcher) Close() error { + // Take the lock used by associateFile to prevent lingering events from + // being processed after the close + w.mu.Lock() + defer w.mu.Unlock() + if w.isClosed() { + return nil + } + close(w.done) + return w.port.Close() +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. +// +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(name string) error { return w.AddWith(name) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(name string, opts ...addOpt) error { + if w.isClosed() { + return ErrClosed + } + if w.port.PathIsWatched(name) { + return nil + } + + _ = getOptions(opts...) + + // Currently we resolve symlinks that were explicitly requested to be + // watched. Otherwise we would use LStat here. + stat, err := os.Stat(name) + if err != nil { + return err + } + + // Associate all files in the directory. 
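+	// (handleDirectory walks the directory entries once and registers each of
+	// them with the event port via associateFile.)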
+ if stat.IsDir() { + err := w.handleDirectory(name, stat, true, w.associateFile) + if err != nil { + return err + } + + w.mu.Lock() + w.dirs[name] = struct{}{} + w.mu.Unlock() + return nil + } + + err = w.associateFile(name, stat, true) + if err != nil { + return err + } + + w.mu.Lock() + w.watches[name] = struct{}{} + w.mu.Unlock() + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) Remove(name string) error { + if w.isClosed() { + return nil + } + if !w.port.PathIsWatched(name) { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + + // The user has expressed an intent. Immediately remove this name from + // whichever watch list it might be in. If it's not in there the delete + // doesn't cause harm. + w.mu.Lock() + delete(w.watches, name) + delete(w.dirs, name) + w.mu.Unlock() + + stat, err := os.Stat(name) + if err != nil { + return err + } + + // Remove associations for every file in the directory. + if stat.IsDir() { + err := w.handleDirectory(name, stat, false, w.dissociateFile) + if err != nil { + return err + } + return nil + } + + err = w.port.DissociatePath(name) + if err != nil { + return err + } + + return nil +} + +// readEvents contains the main loop that runs in a goroutine watching for events. +func (w *Watcher) readEvents() { + // If this function returns, the watcher has been closed and we can close + // these channels + defer func() { + close(w.Errors) + close(w.Events) + }() + + pevents := make([]unix.PortEvent, 8) + for { + count, err := w.port.Get(pevents, 1, nil) + if err != nil && err != unix.ETIME { + // Interrupted system call (count should be 0) ignore and continue + if errors.Is(err, unix.EINTR) && count == 0 { + continue + } + // Get failed because we called w.Close() + if errors.Is(err, unix.EBADF) && w.isClosed() { + return + } + // There was an error not caused by calling w.Close() + if !w.sendError(err) { + return + } + } + + p := pevents[:count] + for _, pevent := range p { + if pevent.Source != unix.PORT_SOURCE_FILE { + // Event from unexpected source received; should never happen. + if !w.sendError(errors.New("Event from unexpected source received")) { + return + } + continue + } + + err = w.handleEvent(&pevent) + if err != nil { + if !w.sendError(err) { + return + } + } + } + } +} + +func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { + files, err := os.ReadDir(path) + if err != nil { + return err + } + + // Handle all children of the directory. + for _, entry := range files { + finfo, err := entry.Info() + if err != nil { + return err + } + err = handler(filepath.Join(path, finfo.Name()), finfo, false) + if err != nil { + return err + } + } + + // And finally handle the directory itself. + return handler(path, stat, follow) +} + +// handleEvent might need to emit more than one fsnotify event if the events +// bitmap matches more than one event type (e.g. 
the file was both modified and
+// had the attributes changed between when the association was created and
+// when the event was returned).
+func (w *Watcher) handleEvent(event *unix.PortEvent) error {
+	var (
+		events     = event.Events
+		path       = event.Path
+		fmode      = event.Cookie.(os.FileMode)
+		reRegister = true
+	)
+
+	w.mu.Lock()
+	_, watchedDir := w.dirs[path]
+	_, watchedPath := w.watches[path]
+	w.mu.Unlock()
+	isWatched := watchedDir || watchedPath
+
+	if events&unix.FILE_DELETE != 0 {
+		if !w.sendEvent(path, Remove) {
+			return nil
+		}
+		reRegister = false
+	}
+	if events&unix.FILE_RENAME_FROM != 0 {
+		if !w.sendEvent(path, Rename) {
+			return nil
+		}
+		// Don't keep watching the new file name
+		reRegister = false
+	}
+	if events&unix.FILE_RENAME_TO != 0 {
+		// We don't report a Rename event for this case, because Rename events
+		// are interpreted as referring to the _old_ name of the file, and in
+		// this case the event would refer to the new name of the file. This
+		// type of rename event is not supported by fsnotify.
+
+		// inotify reports a Remove event in this case, so we simulate this
+		// here.
+		if !w.sendEvent(path, Remove) {
+			return nil
+		}
+		// Don't keep watching the file that was removed
+		reRegister = false
+	}
+
+	// The file is gone, nothing left to do.
+	if !reRegister {
+		if watchedDir {
+			w.mu.Lock()
+			delete(w.dirs, path)
+			w.mu.Unlock()
+		}
+		if watchedPath {
+			w.mu.Lock()
+			delete(w.watches, path)
+			w.mu.Unlock()
+		}
+		return nil
+	}
+
+	// If we didn't get a deletion the file still exists and we're going to
+	// have to watch it again. Let's Stat it now so that we can compare
+	// permissions and have what we need to continue watching the file.
+
+	stat, err := os.Lstat(path)
+	if err != nil {
+		// This is unexpected, but we should still emit an event. This happens
+		// most often on "rm -r" of a subdirectory inside a watched directory.
+		// We get a modify event for something happening inside, but by the
+		// time we get here, the subdirectory is already gone. Clearly we were
+		// watching this path but now it is gone. Let's tell the user that it
+		// was removed.
+		if !w.sendEvent(path, Remove) {
+			return nil
+		}
+		// Suppress extra write events on removed directories; they are not
+		// informative and can be confusing.
+		return nil
+	}
+
+	// Resolve symlinks that were explicitly watched, as we would have at
+	// Add() time. This helps suppress spurious Chmod events on watched
+	// symlinks.
+	if isWatched {
+		stat, err = os.Stat(path)
+		if err != nil {
+			// The symlink still exists, but the target is gone. Report the
+			// Remove similar to above.
+			if !w.sendEvent(path, Remove) {
+				return nil
+			}
+			// Don't return the error
+		}
+	}
+
+	if events&unix.FILE_MODIFIED != 0 {
+		if fmode.IsDir() {
+			if watchedDir {
+				if err := w.updateDirectory(path); err != nil {
+					return err
+				}
+			} else {
+				if !w.sendEvent(path, Write) {
+					return nil
+				}
+			}
+		} else {
+			if !w.sendEvent(path, Write) {
+				return nil
+			}
+		}
+	}
+	if events&unix.FILE_ATTRIB != 0 && stat != nil {
+		// Only send Chmod if perms changed
+		if stat.Mode().Perm() != fmode.Perm() {
+			if !w.sendEvent(path, Chmod) {
+				return nil
+			}
+		}
+	}
+
+	if stat != nil {
+		// If we get here, it means we've hit an event above that requires us
+		// to continue watching the file or directory
+		return w.associateFile(path, stat, isWatched)
+	}
+	return nil
+}
+
+func (w *Watcher) updateDirectory(path string) error {
+	// The directory was modified, so we must find unwatched entities and watch
+	// them.
If something was removed from the directory, nothing will happen, + // as everything else should still be watched. + files, err := os.ReadDir(path) + if err != nil { + return err + } + + for _, entry := range files { + path := filepath.Join(path, entry.Name()) + if w.port.PathIsWatched(path) { + continue + } + + finfo, err := entry.Info() + if err != nil { + return err + } + err = w.associateFile(path, finfo, false) + if err != nil { + if !w.sendError(err) { + return nil + } + } + if !w.sendEvent(path, Create) { + return nil + } + } + return nil +} + +func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error { + if w.isClosed() { + return ErrClosed + } + // This is primarily protecting the call to AssociatePath but it is + // important and intentional that the call to PathIsWatched is also + // protected by this mutex. Without this mutex, AssociatePath has been seen + // to error out that the path is already associated. + w.mu.Lock() + defer w.mu.Unlock() + + if w.port.PathIsWatched(path) { + // Remove the old association in favor of this one If we get ENOENT, + // then while the x/sys/unix wrapper still thought that this path was + // associated, the underlying event port did not. This call will have + // cleared up that discrepancy. The most likely cause is that the event + // has fired but we haven't processed it yet. + err := w.port.DissociatePath(path) + if err != nil && err != unix.ENOENT { + return err + } + } + // FILE_NOFOLLOW means we watch symlinks themselves rather than their + // targets. + events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW + if follow { + // We *DO* follow symlinks for explicitly watched entries. + events = unix.FILE_MODIFIED | unix.FILE_ATTRIB + } + return w.port.AssociatePath(path, stat, + events, + stat.Mode()) +} + +func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error { + if !w.port.PathIsWatched(path) { + return nil + } + return w.port.DissociatePath(path) +} + +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) WatchList() []string { + if w.isClosed() { + return nil + } + + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)+len(w.dirs)) + for pathname := range w.dirs { + entries = append(entries, pathname) + } + for pathname := range w.watches { + entries = append(entries, pathname) + } + + return entries +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go new file mode 100644 index 00000000..921c1c1e --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -0,0 +1,594 @@ +//go:build linux && !appengine +// +build linux,!appengine + +// Note: the documentation on the Watcher type and methods is generated from +// mkdoc.zsh + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. 
For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # Windows notes +// +// Paths can be added as "C:\path\to\dir", but forward slashes +// ("C:/path/to/dir") will also work. +// +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all times, sometimes it will send no +// events, and often only for some files. +// +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. 
For example when compiling a large Go program + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // when a file is truncated. On Windows it's never + // sent. + Events chan Event + + // Errors sends any errors. + // + // ErrEventOverflow is used to indicate there are too many events: + // + // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. + Errors chan error + + // Store fd here as os.File.Read() will no longer return on close after + // calling Fd(). See: https://github.com/golang/go/issues/26439 + fd int + inotifyFile *os.File + watches *watches + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + closeMu sync.Mutex + doneResp chan struct{} // Channel to respond to Close +} + +type ( + watches struct { + mu sync.RWMutex + wd map[uint32]*watch // wd → watch + path map[string]uint32 // pathname → wd + } + watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) + path string // Watch path. + } +) + +func newWatches() *watches { + return &watches{ + wd: make(map[uint32]*watch), + path: make(map[string]uint32), + } +} + +func (w *watches) len() int { + w.mu.RLock() + defer w.mu.RUnlock() + return len(w.wd) +} + +func (w *watches) add(ww *watch) { + w.mu.Lock() + defer w.mu.Unlock() + w.wd[ww.wd] = ww + w.path[ww.path] = ww.wd +} + +func (w *watches) remove(wd uint32) { + w.mu.Lock() + defer w.mu.Unlock() + delete(w.path, w.wd[wd].path) + delete(w.wd, wd) +} + +func (w *watches) removePath(path string) (uint32, bool) { + w.mu.Lock() + defer w.mu.Unlock() + + wd, ok := w.path[path] + if !ok { + return 0, false + } + + delete(w.path, path) + delete(w.wd, wd) + + return wd, true +} + +func (w *watches) byPath(path string) *watch { + w.mu.RLock() + defer w.mu.RUnlock() + return w.wd[w.path[path]] +} + +func (w *watches) byWd(wd uint32) *watch { + w.mu.RLock() + defer w.mu.RUnlock() + return w.wd[wd] +} + +func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error { + w.mu.Lock() + defer w.mu.Unlock() + + var existing *watch + wd, ok := w.path[path] + if ok { + existing = w.wd[wd] + } + + upd, err := f(existing) + if err != nil { + return err + } + if upd != nil { + w.wd[upd.wd] = upd + w.path[upd.path] = upd.wd + + if upd.wd != wd { + delete(w.wd, wd) + } + } + + return nil +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + return NewBufferedWatcher(0) +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. +// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. 
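+//
+// An illustrative use (buffer of 4096 events; error handling elided):
+//
+//	w, _ := fsnotify.NewBufferedWatcher(4096)
+//	defer w.Close()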
+func NewBufferedWatcher(sz uint) (*Watcher, error) { + // Need to set nonblocking mode for SetDeadline to work, otherwise blocking + // I/O operations won't terminate on close. + fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) + if fd == -1 { + return nil, errno + } + + w := &Watcher{ + fd: fd, + inotifyFile: os.NewFile(uintptr(fd), ""), + watches: newWatches(), + Events: make(chan Event, sz), + Errors: make(chan error), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// Returns true if the event was sent, or false if watcher is closed. +func (w *Watcher) sendEvent(e Event) bool { + select { + case w.Events <- e: + return true + case <-w.done: + return false + } +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.done: + return false + } +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the Events channel. +func (w *Watcher) Close() error { + w.closeMu.Lock() + if w.isClosed() { + w.closeMu.Unlock() + return nil + } + close(w.done) + w.closeMu.Unlock() + + // Causes any blocking reads to return with an error, provided the file + // still supports deadline operations. + err := w.inotifyFile.Close() + if err != nil { + return err + } + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. +// +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(name string) error { return w.AddWith(name) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). 
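+//
+// An illustrative call (hypothetical path; per the above, WithBufferSize only
+// has an effect on the Windows backend):
+//
+//	err := w.AddWith("/tmp/dir", fsnotify.WithBufferSize(128*1024))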
+func (w *Watcher) AddWith(name string, opts ...addOpt) error { + if w.isClosed() { + return ErrClosed + } + + name = filepath.Clean(name) + _ = getOptions(opts...) + + var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | + unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | + unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + + return w.watches.updatePath(name, func(existing *watch) (*watch, error) { + if existing != nil { + flags |= existing.flags | unix.IN_MASK_ADD + } + + wd, err := unix.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return nil, err + } + + if existing == nil { + return &watch{ + wd: uint32(wd), + path: name, + flags: flags, + }, nil + } + + existing.wd = uint32(wd) + existing.flags = flags + return existing, nil + }) +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) Remove(name string) error { + if w.isClosed() { + return nil + } + return w.remove(filepath.Clean(name)) +} + +func (w *Watcher) remove(name string) error { + wd, ok := w.watches.removePath(name) + if !ok { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + + success, errno := unix.InotifyRmWatch(w.fd, wd) + if success == -1 { + // TODO: Perhaps it's not helpful to return an error here in every case; + // The only two possible errors are: + // + // - EBADF, which happens when w.fd is not a valid file descriptor + // of any kind. + // - EINVAL, which is when fd is not an inotify descriptor or wd + // is not a valid watch descriptor. Watch descriptors are + // invalidated when they are removed explicitly or implicitly; + // explicitly by inotify_rm_watch, implicitly when the file they + // are watching is deleted. + return errno + } + return nil +} + +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) WatchList() []string { + if w.isClosed() { + return nil + } + + entries := make([]string, 0, w.watches.len()) + w.watches.mu.RLock() + for pathname := range w.watches.path { + entries = append(entries, pathname) + } + w.watches.mu.RUnlock() + + return entries +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Events channel +func (w *Watcher) readEvents() { + defer func() { + close(w.doneResp) + close(w.Errors) + close(w.Events) + }() + + var ( + buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events + errno error // Syscall errno + ) + for { + // See if we have been closed. + if w.isClosed() { + return + } + + n, err := w.inotifyFile.Read(buf[:]) + switch { + case errors.Unwrap(err) == os.ErrClosed: + return + case err != nil: + if !w.sendError(err) { + return + } + continue + } + + if n < unix.SizeofInotifyEvent { + var err error + if n == 0 { + err = io.EOF // If EOF is received. This should really never happen. + } else if n < 0 { + err = errno // If an error occurred while reading. + } else { + err = errors.New("notify: short read in readEvents()") // Read was too short. 
+			}
+			if !w.sendError(err) {
+				return
+			}
+			continue
+		}
+
+		var offset uint32
+		// We don't know how many events we just read into the buffer
+		// While the offset points to at least one whole event...
+		for offset <= uint32(n-unix.SizeofInotifyEvent) {
+			var (
+				// Point "raw" to the event in the buffer
+				raw     = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
+				mask    = uint32(raw.Mask)
+				nameLen = uint32(raw.Len)
+			)
+
+			if mask&unix.IN_Q_OVERFLOW != 0 {
+				if !w.sendError(ErrEventOverflow) {
+					return
+				}
+			}
+
+			// If the event happened to the watched directory or the watched
+			// file, the kernel doesn't append the filename to the event, but
+			// we would like to always fill the "Name" field with a valid
+			// filename. We retrieve the path of the watch from the "paths"
+			// map.
+			watch := w.watches.byWd(uint32(raw.Wd))
+
+			// inotify will automatically remove the watch on deletes; just need
+			// to clean our state here.
+			if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+				w.watches.remove(watch.wd)
+			}
+			// We can't really update the state when a watched path is moved;
+			// only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove
+			// the watch.
+			if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
+				err := w.remove(watch.path)
+				if err != nil && !errors.Is(err, ErrNonExistentWatch) {
+					if !w.sendError(err) {
+						return
+					}
+				}
+			}
+
+			var name string
+			if watch != nil {
+				name = watch.path
+			}
+			if nameLen > 0 {
+				// Point "bytes" at the first byte of the filename
+				bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
+				// The filename is padded with NULL bytes. TrimRight() gets rid of those.
+				name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+			}
+
+			event := w.newEvent(name, mask)
+
+			// Send the events that are not ignored on the events channel
+			if mask&unix.IN_IGNORED == 0 {
+				if !w.sendEvent(event) {
+					return
+				}
+			}
+
+			// Move to the next event in the buffer
+			offset += unix.SizeofInotifyEvent + nameLen
+		}
+	}
+}
+
+// newEvent returns a platform-independent Event based on an inotify mask.
+func (w *Watcher) newEvent(name string, mask uint32) Event {
+	e := Event{Name: name}
+	if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
+		e.Op |= Create
+	}
+	if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
+		e.Op |= Remove
+	}
+	if mask&unix.IN_MODIFY == unix.IN_MODIFY {
+		e.Op |= Write
+	}
+	if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
+		e.Op |= Rename
+	}
+	if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
+		e.Op |= Chmod
+	}
+	return e
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
new file mode 100644
index 00000000..063a0915
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
@@ -0,0 +1,782 @@
+//go:build freebsd || openbsd || netbsd || dragonfly || darwin
+// +build freebsd openbsd netbsd dragonfly darwin
+
+// Note: the documentation on the Watcher type and methods is generated from
+// mkdoc.zsh
+
+package fsnotify
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of paths, delivering events on a channel.
+//
+// A watcher should not be copied (e.g. pass it by pointer, rather than by
+// value).
+// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # Windows notes +// +// Paths can be added as "C:\path\to\dir", but forward slashes +// ("C:/path/to/dir") will also work. +// +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all times, sometimes it will send no +// events, and often only for some files. +// +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. 
A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // when a file is truncated. On Windows it's never + // sent. + Events chan Event + + // Errors sends any errors. + // + // ErrEventOverflow is used to indicate there are too many events: + // + // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. + Errors chan error + + done chan struct{} + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing. + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Watched file descriptors (key: path). + watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). + userWatches map[string]struct{} // Watches added with Watcher.Add() + dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. + paths map[int]pathInfo // File descriptors to path names for processing kqueue events. + fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + return NewBufferedWatcher(0) +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. +// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { + kq, closepipe, err := newKqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + closepipe: closepipe, + watches: make(map[string]int), + watchesByDir: make(map[string]map[int]struct{}), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]struct{}), + userWatches: make(map[string]struct{}), + Events: make(chan Event, sz), + Errors: make(chan error), + done: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// newKqueue creates a new kernel event queue and returns a descriptor. +// +// This registers a new event on closepipe, which will trigger an event when +// it's closed. This way we can use kevent() without timeout/polling; without +// the closepipe, it would block forever and we wouldn't be able to stop it at +// all. +func newKqueue() (kq int, closepipe [2]int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, closepipe, err + } + + // Register the close pipe. 
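+	// (The pipe itself is created here; the kevent registration that listens
+	// on its read end follows below.)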
+ err = unix.Pipe(closepipe[:]) + if err != nil { + unix.Close(kq) + return kq, closepipe, err + } + + // Register changes to listen on the closepipe. + changes := make([]unix.Kevent_t, 1) + // SetKevent converts int to the platform-specific types. + unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ, + unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT) + + ok, err := unix.Kevent(kq, changes, nil, nil) + if ok == -1 { + unix.Close(kq) + unix.Close(closepipe[0]) + unix.Close(closepipe[1]) + return kq, closepipe, err + } + return kq, closepipe, nil +} + +// Returns true if the event was sent, or false if watcher is closed. +func (w *Watcher) sendEvent(e Event) bool { + select { + case w.Events <- e: + return true + case <-w.done: + return false + } +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.done: + return false + } +} + +// Close removes all watches and closes the Events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + + // copy paths to remove while locked + pathsToRemove := make([]string, 0, len(w.watches)) + for name := range w.watches { + pathsToRemove = append(pathsToRemove, name) + } + w.mu.Unlock() // Unlock before calling Remove, which also locks + for _, name := range pathsToRemove { + w.Remove(name) + } + + // Send "quit" message to the reader goroutine. + unix.Close(w.closepipe[1]) + close(w.done) + + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. +// +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(name string) error { return w.AddWith(name) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(name string, opts ...addOpt) error { + _ = getOptions(opts...) 
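+
+	// (The parsed options are currently unused on this backend; per the AddWith
+	// documentation, WithBufferSize only affects the Windows backend.)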
+ + w.mu.Lock() + w.userWatches[name] = struct{}{} + w.mu.Unlock() + _, err := w.addWatch(name, noteAllEvents) + return err +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) Remove(name string) error { + return w.remove(name, true) +} + +func (w *Watcher) remove(name string, unwatchFiles bool) error { + name = filepath.Clean(name) + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + watchfd, ok := w.watches[name] + w.mu.Unlock() + if !ok { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + + err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + if err != nil { + return err + } + + unix.Close(watchfd) + + w.mu.Lock() + isDir := w.paths[watchfd].isDir + delete(w.watches, name) + delete(w.userWatches, name) + + parentName := filepath.Dir(name) + delete(w.watchesByDir[parentName], watchfd) + + if len(w.watchesByDir[parentName]) == 0 { + delete(w.watchesByDir, parentName) + } + + delete(w.paths, watchfd) + delete(w.dirFlags, name) + delete(w.fileExists, name) + w.mu.Unlock() + + // Find all watched paths that are in this directory that are not external. + if unwatchFiles && isDir { + var pathsToRemove []string + w.mu.Lock() + for fd := range w.watchesByDir[name] { + path := w.paths[fd] + if _, ok := w.userWatches[path.name]; !ok { + pathsToRemove = append(pathsToRemove, path.name) + } + } + w.mu.Unlock() + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating error to + // the user, as that will just confuse them with an error about a + // path they did not explicitly watch themselves. + w.Remove(name) + } + } + return nil +} + +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + if w.isClosed { + return nil + } + + entries := make([]string, 0, len(w.userWatches)) + for pathname := range w.userWatches { + entries = append(entries, pathname) + } + + return entries +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// addWatch adds name to the watched file set; the flags are interpreted as +// described in kevent(2). +// +// Returns the real path to the file which was added, with symlinks resolved. +func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", ErrClosed + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets or named pipes + if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { + return "", nil + } + + // Follow Symlinks. 
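+		// (An explicitly watched symlink is resolved and the watch is placed
+		// on its target; unresolvable links are silently skipped, matching
+		// Linux behaviour.)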
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + link, err := os.Readlink(name) + if err != nil { + // Return nil because Linux can add unresolvable symlinks to the + // watch list without problems, so maintain consistency with + // that. There will be no file events for broken symlinks. + // TODO: more specific check; returns os.PathError; ENOENT? + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[link] + w.mu.Unlock() + + if alreadyWatching { + // Add to watches so we don't get spurious Create events later + // on when we diff the directories. + w.watches[name] = 0 + w.fileExists[name] = struct{}{} + return link, nil + } + + name = link + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + // Retry on EINTR; open() can return EINTR in practice on macOS. + // See #354, and Go issues 11180 and 39237. + for { + watchfd, err = unix.Open(name, openMode, 0) + if err == nil { + break + } + if errors.Is(err, unix.EINTR) { + continue + } + + return "", err + } + + isDir = fi.IsDir() + } + + err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + if err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + parentName := filepath.Dir(name) + w.watches[name] = watchfd + + watchesByDir, ok := w.watchesByDir[parentName] + if !ok { + watchesByDir = make(map[int]struct{}, 1) + w.watchesByDir[parentName] = watchesByDir + } + watchesByDir[watchfd] = struct{}{} + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, or if it was + // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + defer func() { + close(w.Events) + close(w.Errors) + _ = unix.Close(w.kq) + unix.Close(w.closepipe[0]) + }() + + eventBuffer := make([]unix.Kevent_t, 10) + for closed := false; !closed; { + kevents, err := w.read(eventBuffer) + // EINTR is okay, the syscall was interrupted before timeout expired. + if err != nil && err != unix.EINTR { + if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { + closed = true + } + continue + } + + // Flush the events we received to the Events channel + for _, kevent := range kevents { + var ( + watchfd = int(kevent.Ident) + mask = uint32(kevent.Fflags) + ) + + // Shut down the loop when the pipe is closed, but only after all + // other events have been processed. 
+ if watchfd == w.closepipe[0] { + closed = true + continue + } + + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + + event := w.newEvent(path.name, mask) + + if event.Has(Rename) || event.Has(Remove) { + w.remove(event.Name, false) + w.mu.Lock() + delete(w.fileExists, event.Name) + w.mu.Unlock() + } + + if path.isDir && event.Has(Write) && !event.Has(Remove) { + w.sendDirectoryChangeEvents(event.Name) + } else { + if !w.sendEvent(event) { + closed = true + continue + } + } + + if event.Has(Remove) { + // Look for a file that may have overwritten this; for example, + // mv f1 f2 will delete f2, then create f2. + if path.isDir { + fileDir := filepath.Clean(event.Name) + w.mu.Lock() + _, found := w.watches[fileDir] + w.mu.Unlock() + if found { + err := w.sendDirectoryChangeEvents(fileDir) + if err != nil { + if !w.sendError(err) { + closed = true + } + } + } + } else { + filePath := filepath.Clean(event.Name) + if fi, err := os.Lstat(filePath); err == nil { + err := w.sendFileCreatedEventIfNew(filePath, fi) + if err != nil { + if !w.sendError(err) { + closed = true + } + } + } + } + } + } + } +} + +// newEvent returns an platform-independent Event based on kqueue Fflags. +func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { + e.Op |= Remove + } + if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { + e.Op |= Write + } + if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { + e.Op |= Rename + } + if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { + e.Op |= Chmod + } + // No point sending a write and delete event at the same time: if it's gone, + // then it's gone. + if e.Op.Has(Write) && e.Op.Has(Remove) { + e.Op &^= Write + } + return e +} + +// watchDirectoryFiles to mimic inotify when adding a watch on a directory +func (w *Watcher) watchDirectoryFiles(dirPath string) error { + // Get all files + files, err := os.ReadDir(dirPath) + if err != nil { + return err + } + + for _, f := range files { + path := filepath.Join(dirPath, f.Name()) + + fi, err := f.Info() + if err != nil { + return fmt.Errorf("%q: %w", path, err) + } + + cleanPath, err := w.internalWatch(path, fi) + if err != nil { + // No permission to read the file; that's not a problem: just skip. + // But do add it to w.fileExists to prevent it from being picked up + // as a "new" file later (it still shows up in the directory + // listing). + switch { + case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM): + cleanPath = filepath.Clean(path) + default: + return fmt.Errorf("%q: %w", path, err) + } + } + + w.mu.Lock() + w.fileExists[cleanPath] = struct{}{} + w.mu.Unlock() + } + + return nil +} + +// Search the directory for new files and send an event for them. +// +// This functionality is to have the BSD watcher match the inotify, which sends +// a create event for files created in a watched directory. +func (w *Watcher) sendDirectoryChangeEvents(dir string) error { + files, err := os.ReadDir(dir) + if err != nil { + // Directory no longer exists: we can ignore this safely. kqueue will + // still give us the correct events. + if errors.Is(err, os.ErrNotExist) { + return nil + } + return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + } + + for _, f := range files { + fi, err := f.Info() + if err != nil { + return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + } + + err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + if err != nil { + // Don't need to send an error if this file isn't readable. 
+ if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { + return nil + } + return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + } + } + return nil +} + +// sendFileCreatedEvent sends a create event if the file isn't already being tracked. +func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) { + w.mu.Lock() + _, doesExist := w.fileExists[filePath] + w.mu.Unlock() + if !doesExist { + if !w.sendEvent(Event{Name: filePath, Op: Create}) { + return + } + } + + // like watchDirectoryFiles (but without doing another ReadDir) + filePath, err = w.internalWatch(filePath, fi) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = struct{}{} + w.mu.Unlock() + + return nil +} + +func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { + if fi.IsDir() { + // mimic Linux providing delete events for subdirectories, but preserve + // the flags used if currently watching subdirectory + w.mu.Lock() + flags := w.dirFlags[name] + w.mu.Unlock() + + flags |= unix.NOTE_DELETE | unix.NOTE_RENAME + return w.addWatch(name, flags) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// Register events with the queue. +func (w *Watcher) register(fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + for i, fd := range fds { + // SetKevent converts int to the platform-specific types. + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // Register the events. + success, err := unix.Kevent(w.kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(w.kq, nil, events, nil) + if err != nil { + return nil, err + } + return events[0:n], nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go new file mode 100644 index 00000000..d34a23c0 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -0,0 +1,205 @@ +//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) +// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows + +// Note: the documentation on the Watcher type and methods is generated from +// mkdoc.zsh + +package fsnotify + +import "errors" + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". 
+//
+// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
+// /proc/sys/fs/inotify/max_user_instances.
+//
+// To increase them you can use sysctl or write the value to the /proc file:
+//
+//	# Default values on Linux 5.18
+//	sysctl fs.inotify.max_user_watches=124983
+//	sysctl fs.inotify.max_user_instances=128
+//
+// To make the changes persist on reboot edit /etc/sysctl.conf or
+// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
+// your distro's documentation):
+//
+//	fs.inotify.max_user_watches=124983
+//	fs.inotify.max_user_instances=128
+//
+// Reaching the limit will result in a "no space left on device" or "too many open
+// files" error.
+//
+// # kqueue notes (macOS, BSD)
+//
+// kqueue requires opening a file descriptor for every file that's being watched;
+// so if you're watching a directory with five files then that's six file
+// descriptors. You will run into your system's "max open files" limit faster on
+// these platforms.
+//
+// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
+// control the maximum number of open files, as well as /etc/login.conf on BSD
+// systems.
+//
+// # Windows notes
+//
+// Paths can be added as "C:\path\to\dir", but forward slashes
+// ("C:/path/to/dir") will also work.
+//
+// When a watched directory is removed it will always send an event for the
+// directory itself, but may not send events for all files in that directory.
+// Sometimes it will send events for all files, sometimes it will send no
+// events, and often only for some files.
+//
+// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
+// value that is guaranteed to work with SMB filesystems. If you have many
+// events in quick succession this may not be enough, and you will have to use
+// [WithBufferSize] to increase the value.
+type Watcher struct {
+	// Events sends the filesystem change events.
+	//
+	// fsnotify can send the following events; a "path" here can refer to a
+	// file, directory, symbolic link, or special file like a FIFO.
+	//
+	//   fsnotify.Create    A new path was created; this may be followed by one
+	//                      or more Write events if data also gets written to a
+	//                      file.
+	//
+	//   fsnotify.Remove    A path was removed.
+	//
+	//   fsnotify.Rename    A path was renamed. A rename is always sent with the
+	//                      old path as Event.Name, and a Create event will be
+	//                      sent with the new name. Renames are only sent for
+	//                      paths that are currently watched; e.g. moving an
+	//                      unmonitored file into a monitored directory will
+	//                      show up as just a Create. Similarly, renaming a file
+	//                      to outside a monitored directory will show up as
+	//                      only a Rename.
+	//
+	//   fsnotify.Write     A file or named pipe was written to. A Truncate will
+	//                      also trigger a Write. A single "write action"
+	//                      initiated by the user may show up as one or multiple
+	//                      writes, depending on when the system syncs things to
+	//                      disk. For example when compiling a large Go program
+	//                      you may get hundreds of Write events, and you may
+	//                      want to wait until you've stopped receiving them
+	//                      (see the dedup example in cmd/fsnotify).
+	//
+	//                      Some systems may send Write event for directories
+	//                      when the directory content changes.
+	//
+	//   fsnotify.Chmod     Attributes were changed. On Linux this is also sent
+	//                      when a file is removed (or more accurately, when a
+	//                      link to an inode is removed). On kqueue it's sent
+	//                      when a file is truncated. On Windows it's never
+	//                      sent.
+	Events chan Event
+
+	// Errors sends any errors.
+	//
+	// ErrEventOverflow is used to indicate there are too many events:
+	//
+	//  - inotify:      There are too many queued events (fs.inotify.max_queued_events sysctl)
+	//  - windows:      The buffer size is too small; WithBufferSize() can be used to increase it.
+	//  - kqueue, fen:  Not used.
+	Errors chan error
+}
+
+// NewWatcher creates a new Watcher.
+func NewWatcher() (*Watcher, error) {
+	return nil, errors.New("fsnotify not supported on the current platform")
+}
+
+// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
+// channel.
+//
+// The main use case for this is situations with a very large number of events
+// where the kernel buffer size can't be increased (e.g. due to lack of
+// permissions). An unbuffered Watcher will perform better for almost all use
+// cases, and whenever possible you will be better off increasing the kernel
+// buffers instead of adding a large userspace buffer.
+func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() }
+
+// Close removes all watches and closes the Events channel.
+func (w *Watcher) Close() error { return nil }
+
+// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
+// yet removed).
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) WatchList() []string { return nil }
+
+// Add starts monitoring the path for changes.
+//
+// A path can only be watched once; watching it more than once is a no-op and will
+// not return an error. Paths that do not yet exist on the filesystem cannot be
+// watched.
+//
+// A watch will be automatically removed if the watched path is deleted or
+// renamed. The exception is the Windows backend, which doesn't remove the
+// watcher on renames.
+//
+// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
+// filesystems (/proc, /sys, etc.) generally don't work.
+//
+// Returns [ErrClosed] if [Watcher.Close] was called.
+//
+// See [Watcher.AddWith] for a version that allows adding options.
+//
+// # Watching directories
+//
+// All files in a directory are monitored, including new files that are created
+// after the watcher is started. Subdirectories are not watched (i.e. it's
+// non-recursive).
+//
+// # Watching files
+//
+// Watching individual files (rather than directories) is generally not
+// recommended as many programs (especially editors) update files atomically: it
+// will write to a temporary file which is then moved to the destination,
+// overwriting the original (or some variant thereof). The watcher on the
+// original file is now lost, as that no longer exists.
+//
+// The upshot of this is that a power failure or crash won't leave a
+// half-written file.
+//
+// Watch the parent directory and use Event.Name to filter out files you're not
+// interested in. There is an example of this in cmd/fsnotify/file.go.
+func (w *Watcher) Add(name string) error { return nil }
+
+// AddWith is like [Watcher.Add], but allows adding options. When using Add()
+// the defaults described below are used.
+//
+// Possible options are:
+//
+//   - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
+//     other platforms. The default is 64K (65536 bytes).
+func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil }
+
+// Remove stops monitoring the path for changes.
+//
+// Directories are always removed non-recursively. For example, if you added
+// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
+//
+// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) Remove(name string) error { return nil }
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go
new file mode 100644
index 00000000..9bc91e5d
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go
@@ -0,0 +1,827 @@
+//go:build windows
+// +build windows
+
+// Windows backend based on ReadDirectoryChangesW()
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
+//
+// Note: the documentation on the Watcher type and methods is generated from
+// mkdoc.zsh
+
+package fsnotify
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+// Watcher watches a set of paths, delivering events on a channel.
+//
+// A watcher should not be copied (e.g. pass it by pointer, rather than by
+// value).
+//
+// # Linux notes
+//
+// When a file is removed a Remove event won't be emitted until all file
+// descriptors are closed, and deletes will always emit a Chmod. For example:
+//
+//	fp := os.Open("file")
+//	os.Remove("file")        // Triggers Chmod
+//	fp.Close()               // Triggers Remove
+//
+// This is the event that inotify sends, so not much can be changed about this.
+//
+// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
+// for the number of watches per user, and fs.inotify.max_user_instances
+// specifies the maximum number of inotify instances per user. Every Watcher you
+// create is an "instance", and every path you add is a "watch".
+//
+// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
+// /proc/sys/fs/inotify/max_user_instances.
+//
+// To increase them you can use sysctl or write the value to the /proc file:
+//
+//	# Default values on Linux 5.18
+//	sysctl fs.inotify.max_user_watches=124983
+//	sysctl fs.inotify.max_user_instances=128
+//
+// To make the changes persist on reboot edit /etc/sysctl.conf or
+// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
+// your distro's documentation):
+//
+//	fs.inotify.max_user_watches=124983
+//	fs.inotify.max_user_instances=128
+//
+// Reaching the limit will result in a "no space left on device" or "too many open
+// files" error.
+//
+// # kqueue notes (macOS, BSD)
+//
+// kqueue requires opening a file descriptor for every file that's being watched;
+// so if you're watching a directory with five files then that's six file
+// descriptors. You will run into your system's "max open files" limit faster on
+// these platforms.
+//
+// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
+// control the maximum number of open files, as well as /etc/login.conf on BSD
+// systems.
+//
+// # Windows notes
+//
+// Paths can be added as "C:\path\to\dir", but forward slashes
+// ("C:/path/to/dir") will also work.
+//
+// When a watched directory is removed it will always send an event for the
+// directory itself, but may not send events for all files in that directory.
+// Sometimes it will send events for all files, sometimes it will send no
+// events, and often only for some files.
+//
+// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
+// value that is guaranteed to work with SMB filesystems.
If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // when a file is truncated. On Windows it's never + // sent. + Events chan Event + + // Errors sends any errors. + // + // ErrEventOverflow is used to indicate there are too many events: + // + // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. + Errors chan error + + port windows.Handle // Handle to completion port + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error + + mu sync.Mutex // Protects access to watches, closed + watches watchMap // Map of watches (key: i-number) + closed bool // Set to true when Close() is first called +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + return NewBufferedWatcher(50) +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. +// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. 
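+//
+// As a minimal sketch of consuming either watcher (error handling elided;
+// "/some/dir" is a placeholder path):
+//
+//	w, _ := fsnotify.NewBufferedWatcher(4096)
+//	defer w.Close()
+//	_ = w.Add("/some/dir")
+//	for {
+//		select {
+//		case ev, ok := <-w.Events:
+//			if !ok {
+//				return
+//			}
+//			if ev.Has(fsnotify.Create) {
+//				fmt.Println("created:", ev.Name)
+//			}
+//		case err, ok := <-w.Errors:
+//			if !ok {
+//				return
+//			}
+//			fmt.Println("error:", err)
+//		}
+//	}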
+func NewBufferedWatcher(sz uint) (*Watcher, error) {
+	port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
+	if err != nil {
+		return nil, os.NewSyscallError("CreateIoCompletionPort", err)
+	}
+	w := &Watcher{
+		port:    port,
+		watches: make(watchMap),
+		input:   make(chan *input, 1),
+		Events:  make(chan Event, sz),
+		Errors:  make(chan error),
+		quit:    make(chan chan<- error, 1),
+	}
+	go w.readEvents()
+	return w, nil
+}
+
+func (w *Watcher) isClosed() bool {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	return w.closed
+}
+
+func (w *Watcher) sendEvent(name string, mask uint64) bool {
+	if mask == 0 {
+		return false
+	}
+
+	event := w.newEvent(name, uint32(mask))
+	select {
+	case ch := <-w.quit:
+		w.quit <- ch
+	case w.Events <- event:
+	}
+	return true
+}
+
+// Returns true if the error was sent, or false if watcher is closed.
+func (w *Watcher) sendError(err error) bool {
+	select {
+	case w.Errors <- err:
+		return true
+	case <-w.quit:
+	}
+	return false
+}
+
+// Close removes all watches and closes the Events channel.
+func (w *Watcher) Close() error {
+	if w.isClosed() {
+		return nil
+	}
+
+	w.mu.Lock()
+	w.closed = true
+	w.mu.Unlock()
+
+	// Send "quit" message to the reader goroutine
+	ch := make(chan error)
+	w.quit <- ch
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-ch
+}
+
+// Add starts monitoring the path for changes.
+//
+// A path can only be watched once; watching it more than once is a no-op and will
+// not return an error. Paths that do not yet exist on the filesystem cannot be
+// watched.
+//
+// A watch will be automatically removed if the watched path is deleted or
+// renamed. The exception is the Windows backend, which doesn't remove the
+// watcher on renames.
+//
+// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
+// filesystems (/proc, /sys, etc.) generally don't work.
+//
+// Returns [ErrClosed] if [Watcher.Close] was called.
+//
+// See [Watcher.AddWith] for a version that allows adding options.
+//
+// # Watching directories
+//
+// All files in a directory are monitored, including new files that are created
+// after the watcher is started. Subdirectories are not watched (i.e. it's
+// non-recursive).
+//
+// # Watching files
+//
+// Watching individual files (rather than directories) is generally not
+// recommended as many programs (especially editors) update files atomically: it
+// will write to a temporary file which is then moved to the destination,
+// overwriting the original (or some variant thereof). The watcher on the
+// original file is now lost, as that no longer exists.
+//
+// The upshot of this is that a power failure or crash won't leave a
+// half-written file.
+//
+// Watch the parent directory and use Event.Name to filter out files you're not
+// interested in. There is an example of this in cmd/fsnotify/file.go.
+func (w *Watcher) Add(name string) error { return w.AddWith(name) }
+
+// AddWith is like [Watcher.Add], but allows adding options. When using Add()
+// the defaults described below are used.
+//
+// Possible options are:
+//
+//   - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
+//     other platforms. The default is 64K (65536 bytes).
+func (w *Watcher) AddWith(name string, opts ...addOpt) error {
+	if w.isClosed() {
+		return ErrClosed
+	}
+
+	with := getOptions(opts...)
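+	// getOptions applies the 64K default buffer size, so only an explicit
+	// (and too small) WithBufferSize value can trip the check below.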
+ if with.bufsize < 4096 { + return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") + } + + in := &input{ + op: opAddWatch, + path: filepath.Clean(name), + flags: sysFSALLEVENTS, + reply: make(chan error), + bufsize: with.bufsize, + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) Remove(name string) error { + if w.isClosed() { + return nil + } + + in := &input{ + op: opRemoveWatch, + path: filepath.Clean(name), + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) WatchList() []string { + if w.isClosed() { + return nil + } + + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)) + for _, entry := range w.watches { + for _, watchEntry := range entry { + entries = append(entries, watchEntry.path) + } + } + + return entries +} + +// These options are from the old golang.org/x/exp/winfsnotify, where you could +// add various options to the watch. This has long since been removed. +// +// The "sys" in the name is misleading as they're not part of any "system". +// +// This should all be removed at some point, and just use windows.FILE_NOTIFY_* +const ( + sysFSALLEVENTS = 0xfff + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + sysFSIGNORED = 0x8000 +) + +func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + bufsize int + reply chan error +} + +type inode struct { + handle windows.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov windows.Overlapped + ino *inode // i-number + recurse bool // Recursive watch? 
+ path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf []byte // buffer, allocated later +} + +type ( + indexMap map[uint64]*watch + watchMap map[uint32]indexMap +) + +func (w *Watcher) wakeupReader() error { + err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if err != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", err) + } + return nil +} + +func (w *Watcher) getDir(pathname string) (dir string, err error) { + attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) + if err != nil { + return "", os.NewSyscallError("GetFileAttributes", err) + } + if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func (w *Watcher) getIno(path string) (ino *inode, err error) { + h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), + windows.FILE_LIST_DIRECTORY, + windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, + nil, windows.OPEN_EXISTING, + windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0) + if err != nil { + return nil, os.NewSyscallError("CreateFile", err) + } + + var fi windows.ByHandleFileInformation + err = windows.GetFileInformationByHandle(h, &fi) + if err != nil { + windows.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", err) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. +func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. +func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { + //pathname, recurse := recursivePath(pathname) + recurse := false + + dir, err := w.getDir(pathname) + if err != nil { + return err + } + + ino, err := w.getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + _, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0) + if err != nil { + windows.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", err) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + recurse: recurse, + buf: make([]byte, bufsize), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + windows.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + + err = w.startRead(watchEntry) + if err != nil { + return err + } + + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. 
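+//
+// addWatch and remWatch are only invoked from readEvents (via the
+// opAddWatch/opRemoveWatch dispatch), which runs locked to a single OS
+// thread; w.mu still guards the watches map for readers such as WatchList.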
+func (w *Watcher) remWatch(pathname string) error {
+	pathname, recurse := recursivePath(pathname)
+
+	dir, err := w.getDir(pathname)
+	if err != nil {
+		return err
+	}
+	ino, err := w.getIno(dir)
+	if err != nil {
+		return err
+	}
+
+	w.mu.Lock()
+	watch := w.watches.get(ino)
+	w.mu.Unlock()
+
+	// Check watch for nil before dereferencing it in the recursion check
+	// below; removing a never-added path should return ErrNonExistentWatch,
+	// not panic.
+	if watch == nil {
+		windows.CloseHandle(ino.handle)
+		return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname)
+	}
+
+	if recurse && !watch.recurse {
+		return fmt.Errorf("can't use \\... with non-recursive watch %q", pathname)
+	}
+
+	err = windows.CloseHandle(ino.handle)
+	if err != nil {
+		w.sendError(os.NewSyscallError("CloseHandle", err))
+	}
+	if pathname == dir {
+		w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+		watch.mask = 0
+	} else {
+		name := filepath.Base(pathname)
+		w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
+		delete(watch.names, name)
+	}
+
+	return w.startRead(watch)
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) deleteWatch(watch *watch) {
+	for name, mask := range watch.names {
+		if mask&provisional == 0 {
+			w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
+		}
+		delete(watch.names, name)
+	}
+	if watch.mask != 0 {
+		if watch.mask&provisional == 0 {
+			w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+		}
+		watch.mask = 0
+	}
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) startRead(watch *watch) error {
+	err := windows.CancelIo(watch.ino.handle)
+	if err != nil {
+		w.sendError(os.NewSyscallError("CancelIo", err))
+		w.deleteWatch(watch)
+	}
+	mask := w.toWindowsFlags(watch.mask)
+	for _, m := range watch.names {
+		mask |= w.toWindowsFlags(m)
+	}
+	if mask == 0 {
+		err := windows.CloseHandle(watch.ino.handle)
+		if err != nil {
+			w.sendError(os.NewSyscallError("CloseHandle", err))
+		}
+		w.mu.Lock()
+		delete(w.watches[watch.ino.volume], watch.ino.index)
+		w.mu.Unlock()
+		return nil
+	}
+
+	// We need to pass the array, rather than the slice.
+	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&watch.buf))
+	rdErr := windows.ReadDirectoryChanges(watch.ino.handle,
+		(*byte)(unsafe.Pointer(hdr.Data)), uint32(hdr.Len),
+		watch.recurse, mask, nil, &watch.ov, 0)
+	if rdErr != nil {
+		err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
+		if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
+			// Watched directory was probably removed
+			w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+			err = nil
+		}
+		w.deleteWatch(watch)
+		w.startRead(watch)
+		return err
+	}
+	return nil
+}
+
+// readEvents reads from the I/O completion port, converts the
+// received events into Event objects and sends them via the Events channel.
+// Entry point to the I/O thread.
+func (w *Watcher) readEvents() {
+	var (
+		n   uint32
+		key uintptr
+		ov  *windows.Overlapped
+	)
+	runtime.LockOSThread()
+
+	for {
+		// This error is handled after the watch == nil check below.
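+		// A nil ov means the port was woken by PostQueuedCompletionStatus
+		// (see wakeupReader) rather than by I/O; otherwise ov points at
+		// watch.ov, the first field of the watch struct, so the *Overlapped
+		// can be cast back to its owning *watch below.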
+ qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE) + + watch := (*watch)(unsafe.Pointer(ov)) + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + + err := windows.CloseHandle(w.port) + if err != nil { + err = os.NewSyscallError("CloseHandle", err) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags), in.bufsize) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch qErr { + case nil: + // No error + case windows.ERROR_MORE_DATA: + if watch == nil { + w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")) + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. + n = uint32(unsafe.Sizeof(watch.buf)) + } + case windows.ERROR_ACCESS_DENIED: + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.deleteWatch(watch) + w.startRead(watch) + continue + case windows.ERROR_OPERATION_ABORTED: + // CancelIo was called on this handle + continue + default: + w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr)) + continue + } + + var offset uint32 + for { + if n == 0 { + w.sendError(ErrEventOverflow) + break + } + + // Point "raw" to the event in the buffer + raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) + + // Create a buf that is the size of the path name + size := int(raw.FileNameLength / 2) + var buf []uint16 + // TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973 + sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + sh.Data = uintptr(unsafe.Pointer(&raw.FileName)) + sh.Len = size + sh.Cap = size + name := windows.UTF16ToString(buf) + fullname := filepath.Join(watch.path, name) + + var mask uint64 + switch raw.Action { + case windows.FILE_ACTION_REMOVED: + mask = sysFSDELETESELF + case windows.FILE_ACTION_MODIFIED: + mask = sysFSMODIFY + case windows.FILE_ACTION_RENAMED_OLD_NAME: + watch.rename = name + case windows.FILE_ACTION_RENAMED_NEW_NAME: + // Update saved path of all sub-watches. 
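+				// watch.rename was stashed by an earlier
+				// FILE_ACTION_RENAMED_OLD_NAME record for this pair.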
+ old := filepath.Join(watch.path, watch.rename) + w.mu.Lock() + for _, watchMap := range w.watches { + for _, ww := range watchMap { + if strings.HasPrefix(ww.path, old) { + ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old)) + } + } + } + w.mu.Unlock() + + if watch.names[watch.rename] != 0 { + watch.names[name] |= watch.names[watch.rename] + delete(watch.names, watch.rename) + mask = sysFSMOVESELF + } + } + + sendNameEvent := func() { + w.sendEvent(fullname, watch.names[name]&mask) + } + if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { + sendNameEvent() + } + if raw.Action == windows.FILE_ACTION_REMOVED { + w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + + w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + fullname = filepath.Join(watch.path, watch.rename) + sendNameEvent() + } + + // Move to the next event in the buffer + if raw.NextEntryOffset == 0 { + break + } + offset += raw.NextEntryOffset + + // Error! + if offset >= n { + //lint:ignore ST1005 Windows should be capitalized + w.sendError(errors.New( + "Windows system assumed buffer larger than it is, events have likely been missed")) + break + } + } + + if err := w.startRead(watch); err != nil { + w.sendError(err) + } + } +} + +func (w *Watcher) toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSMODIFY != 0 { + m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { + switch action { + case windows.FILE_ACTION_ADDED: + return sysFSCREATE + case windows.FILE_ACTION_REMOVED: + return sysFSDELETE + case windows.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case windows.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case windows.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go deleted file mode 100644 index ced39cb8..00000000 --- a/vendor/github.com/fsnotify/fsnotify/fen.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build solaris - -package fsnotify - -import ( - "errors" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - return nil -} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 89cab046..24c99cc4 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -1,68 +1,146 @@ -// Copyright 2012 The Go Authors. 
All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !plan9
-
-// Package fsnotify provides a platform-independent interface for file system notifications.
+// Package fsnotify provides a cross-platform interface for file system
+// notifications.
+//
+// Currently supported systems:
+//
+//	Linux 2.6.32+    via inotify
+//	BSD, macOS       via kqueue
+//	Windows          via ReadDirectoryChangesW
+//	illumos          via FEN
 package fsnotify
 
 import (
-	"bytes"
 	"errors"
 	"fmt"
+	"path/filepath"
+	"strings"
 )
 
-// Event represents a single file system notification.
+// Event represents a file system notification.
 type Event struct {
-	Name string // Relative path to the file or directory.
-	Op   Op     // File operation that triggered the event.
+	// Path to the file or directory.
+	//
+	// Paths are relative to the input; for example with Add("dir") the Name
+	// will be set to "dir/file" if you create that file, but if you use
+	// Add("/path/to/dir") it will be "/path/to/dir/file".
+	Name string
+
+	// File operation that triggered the event.
+	//
+	// This is a bitmask and some systems may send multiple operations at once.
+	// Use the Event.Has() method instead of comparing with ==.
+	Op Op
 }
 
 // Op describes a set of file operations.
 type Op uint32
 
-// These are the generalized file operations that can trigger a notification.
+// The operations fsnotify can trigger; see the documentation on [Watcher] for a
+// full description, and check them with [Event.Has].
 const (
+	// A new pathname was created.
 	Create Op = 1 << iota
+
+	// The pathname was written to; this does *not* mean the write has finished,
+	// and a write can be followed by more writes.
 	Write
+
+	// The path was removed; any watches on it will be removed. Some "remove"
+	// operations may trigger a Rename if the file is actually moved (for
+	// example "remove to trash" is often a rename).
 	Remove
+
+	// The path was renamed to something else; any watches on it will be
+	// removed.
 	Rename
+
+	// File attributes were changed.
+	//
+	// It's generally not recommended to take action on this event, as it may
+	// get triggered very frequently by some software. For example, Spotlight
+	// indexing on macOS, anti-virus software, backup software, etc.
 	Chmod
 )
 
-func (op Op) String() string {
-	// Use a buffer for efficient string concatenation
-	var buffer bytes.Buffer
-
-	if op&Create == Create {
-		buffer.WriteString("|CREATE")
-	}
-	if op&Remove == Remove {
-		buffer.WriteString("|REMOVE")
-	}
-	if op&Write == Write {
-		buffer.WriteString("|WRITE")
-	}
-	if op&Rename == Rename {
-		buffer.WriteString("|RENAME")
-	}
-	if op&Chmod == Chmod {
-		buffer.WriteString("|CHMOD")
-	}
-	if buffer.Len() == 0 {
-		return ""
-	}
-	return buffer.String()[1:] // Strip leading pipe
-}
-
-// String returns a string representation of the event in the form
-// "file: REMOVE|WRITE|..."
-func (e Event) String() string {
-	return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
-}
-
-// Common errors that can be reported by a watcher
+// Common errors that can be reported.
 var (
-	ErrEventOverflow = errors.New("fsnotify queue overflow")
+	ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch")
+	ErrEventOverflow    = errors.New("fsnotify: queue or buffer overflow")
+	ErrClosed           = errors.New("fsnotify: watcher already closed")
 )
+
+func (o Op) String() string {
+	var b strings.Builder
+	if o.Has(Create) {
+		b.WriteString("|CREATE")
+	}
+	if o.Has(Remove) {
+		b.WriteString("|REMOVE")
+	}
+	if o.Has(Write) {
+		b.WriteString("|WRITE")
+	}
+	if o.Has(Rename) {
+		b.WriteString("|RENAME")
+	}
+	if o.Has(Chmod) {
+		b.WriteString("|CHMOD")
+	}
+	if b.Len() == 0 {
+		return "[no events]"
+	}
+	return b.String()[1:]
+}
+
+// Has reports if this operation has the given operation.
+func (o Op) Has(h Op) bool { return o&h != 0 }
+
+// Has reports if this event has the given operation.
+func (e Event) Has(op Op) bool { return e.Op.Has(op) }
+
+// String returns a string representation of the event with its path.
+func (e Event) String() string {
+	return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name)
+}
+
+type (
+	addOpt   func(opt *withOpts)
+	withOpts struct {
+		bufsize int
+	}
+)
+
+var defaultOpts = withOpts{
+	bufsize: 65536, // 64K
+}
+
+func getOptions(opts ...addOpt) withOpts {
+	with := defaultOpts
+	for _, o := range opts {
+		o(&with)
+	}
+	return with
+}
+
+// WithBufferSize sets the [ReadDirectoryChangesW] buffer size.
+//
+// This only has effect on Windows systems, and is a no-op for other backends.
+//
+// The default value is 64K (65536 bytes) which is the highest value that works
+// on all filesystems and should be enough for most applications, but if you
+// have a large burst of events it may not be enough. You can increase it if
+// you're hitting "queue or buffer overflow" errors ([ErrEventOverflow]).
+//
+// [ReadDirectoryChangesW]: https://learn.microsoft.com/en-gb/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
+func WithBufferSize(bytes int) addOpt {
+	return func(opt *withOpts) { opt.bufsize = bytes }
+}
+
+// Check if this path is recursive (ends with "/..." or "\..."), and return the
+// path with the /... stripped.
+func recursivePath(path string) (string, bool) {
+	if filepath.Base(path) == "..." {
+		return filepath.Dir(path), true
+	}
+	return path, false
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go
deleted file mode 100644
index d9fd1b88..00000000
--- a/vendor/github.com/fsnotify/fsnotify/inotify.go
+++ /dev/null
@@ -1,337 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux
-
-package fsnotify
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"os"
-	"path/filepath"
-	"strings"
-	"sync"
-	"unsafe"
-
-	"golang.org/x/sys/unix"
-)
-
-// Watcher watches a set of files, delivering events to a channel.
-type Watcher struct {
-	Events   chan Event
-	Errors   chan error
-	mu       sync.Mutex // Map access
-	fd       int
-	poller   *fdPoller
-	watches  map[string]*watch // Map of inotify watches (key: path)
-	paths    map[int]string    // Map of watched paths (key: watch descriptor)
-	done     chan struct{}     // Channel for sending a "quit message" to the reader goroutine
-	doneResp chan struct{}     // Channel to respond to Close
-}
-
-// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
-func NewWatcher() (*Watcher, error) { - // Create inotify fd - fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) - if fd == -1 { - return nil, errno - } - // Create epoll - poller, err := newFdPoller(fd) - if err != nil { - unix.Close(fd) - return nil, err - } - w := &Watcher{ - fd: fd, - poller: poller, - watches: make(map[string]*watch), - paths: make(map[int]string), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - doneResp: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed() { - return nil - } - - // Send 'close' signal to goroutine, and set the Watcher to closed. - close(w.done) - - // Wake up goroutine - w.poller.wake() - - // Wait for goroutine to close - <-w.doneResp - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - name = filepath.Clean(name) - if w.isClosed() { - return errors.New("inotify instance already closed") - } - - const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF - - var flags uint32 = agnosticEvents - - w.mu.Lock() - defer w.mu.Unlock() - watchEntry := w.watches[name] - if watchEntry != nil { - flags |= watchEntry.flags | unix.IN_MASK_ADD - } - wd, errno := unix.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return errno - } - - if watchEntry == nil { - w.watches[name] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = name - } else { - watchEntry.wd = uint32(wd) - watchEntry.flags = flags - } - - return nil -} - -// Remove stops watching the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - - // Fetch the watch. - w.mu.Lock() - defer w.mu.Unlock() - watch, ok := w.watches[name] - - // Remove it from inotify. - if !ok { - return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) - } - - // We successfully removed the watch if InotifyRmWatch doesn't return an - // error, we need to clean up our internal state to ensure it matches - // inotify's kernel state. - delete(w.paths, int(watch.wd)) - delete(w.watches, name) - - // inotify_rm_watch will return EINVAL if the file has been deleted; - // the inotify will already have been removed. - // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously - // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE - // so that EINVAL means that the wd is being rm_watch()ed or its file removed - // by another thread and we have not received IN_IGNORE event. - success, errno := unix.InotifyRmWatch(w.fd, watch.wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case. - // the only two possible errors are: - // EBADF, which happens when w.fd is not a valid file descriptor of any kind. - // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. - // Watch descriptors are invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. 
- return errno - } - - return nil -} - -type watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - n int // Number of bytes read with read() - errno error // Syscall errno - ok bool // For poller.wait - ) - - defer close(w.doneResp) - defer close(w.Errors) - defer close(w.Events) - defer unix.Close(w.fd) - defer w.poller.close() - - for { - // See if we have been closed. - if w.isClosed() { - return - } - - ok, errno = w.poller.wait() - if errno != nil { - select { - case w.Errors <- errno: - case <-w.done: - return - } - continue - } - - if !ok { - continue - } - - n, errno = unix.Read(w.fd, buf[:]) - // If a signal interrupted execution, see if we've been asked to close, and try again. - // http://man7.org/linux/man-pages/man7/signal.7.html : - // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" - if errno == unix.EINTR { - continue - } - - // unix.Read might have been woken up by Close. If so, we're done. - if w.isClosed() { - return - } - - if n < unix.SizeofInotifyEvent { - var err error - if n == 0 { - // If EOF is received. This should really never happen. - err = io.EOF - } else if n < 0 { - // If an error occurred while reading. - err = errno - } else { - // Read was too short. - err = errors.New("notify: short read in readEvents()") - } - select { - case w.Errors <- err: - case <-w.done: - return - } - continue - } - - var offset uint32 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... - for offset <= uint32(n-unix.SizeofInotifyEvent) { - // Point "raw" to the event in the buffer - raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - - mask := uint32(raw.Mask) - nameLen := uint32(raw.Len) - - if mask&unix.IN_Q_OVERFLOW != 0 { - select { - case w.Errors <- ErrEventOverflow: - case <-w.done: - return - } - } - - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - w.mu.Lock() - name, ok := w.paths[int(raw.Wd)] - // IN_DELETE_SELF occurs when the file/directory being watched is removed. - // This is a sign to clean up the maps, otherwise we are no longer in sync - // with the inotify kernel state which has already deleted the watch - // automatically. - if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - delete(w.paths, int(raw.Wd)) - delete(w.watches, name) - } - w.mu.Unlock() - - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent])) - // The filename is padded with NULL bytes. TrimRight() gets rid of those. 
- name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - - event := newEvent(name, mask) - - // Send the events that are not ignored on the events channel - if !event.ignoreLinux(mask) { - select { - case w.Events <- event: - case <-w.done: - return - } - } - - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen - } - } -} - -// Certain types of events can be "ignored" and not sent over the Events -// channel. Such as events marked ignore by the kernel, or MODIFY events -// against files that do not exist. -func (e *Event) ignoreLinux(mask uint32) bool { - // Ignore anything the inotify API says to ignore - if mask&unix.IN_IGNORED == unix.IN_IGNORED { - return true - } - - // If the event is not a DELETE or RENAME, the file must exist. - // Otherwise the event is ignored. - // *Note*: this was put in place because it was seen that a MODIFY - // event was sent after the DELETE. This ignores that MODIFY and - // assumes a DELETE will come or has come if the file doesn't exist. - if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { - _, statErr := os.Lstat(e.Name) - return os.IsNotExist(statErr) - } - return false -} - -// newEvent returns an platform-independent Event based on an inotify mask. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { - e.Op |= Create - } - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { - e.Op |= Remove - } - if mask&unix.IN_MODIFY == unix.IN_MODIFY { - e.Op |= Write - } - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { - e.Op |= Rename - } - if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { - e.Op |= Chmod - } - return e -} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go deleted file mode 100644 index b33f2b4d..00000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package fsnotify - -import ( - "errors" - - "golang.org/x/sys/unix" -) - -type fdPoller struct { - fd int // File descriptor (as returned by the inotify_init() syscall) - epfd int // Epoll file descriptor - pipe [2]int // Pipe for waking up -} - -func emptyPoller(fd int) *fdPoller { - poller := new(fdPoller) - poller.fd = fd - poller.epfd = -1 - poller.pipe[0] = -1 - poller.pipe[1] = -1 - return poller -} - -// Create a new inotify poller. -// This creates an inotify handler, and an epoll handler. -func newFdPoller(fd int) (*fdPoller, error) { - var errno error - poller := emptyPoller(fd) - defer func() { - if errno != nil { - poller.close() - } - }() - poller.fd = fd - - // Create epoll fd - poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC) - if poller.epfd == -1 { - return nil, errno - } - // Create pipe; pipe[0] is the read end, pipe[1] the write end. 
- errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC) - if errno != nil { - return nil, errno - } - - // Register inotify fd with epoll - event := unix.EpollEvent{ - Fd: int32(poller.fd), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) - if errno != nil { - return nil, errno - } - - // Register pipe fd with epoll - event = unix.EpollEvent{ - Fd: int32(poller.pipe[0]), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) - if errno != nil { - return nil, errno - } - - return poller, nil -} - -// Wait using epoll. -// Returns true if something is ready to be read, -// false if there is not. -func (poller *fdPoller) wait() (bool, error) { - // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. - // I don't know whether epoll_wait returns the number of events returned, - // or the total number of events ready. - // I decided to catch both by making the buffer one larger than the maximum. - events := make([]unix.EpollEvent, 7) - for { - n, errno := unix.EpollWait(poller.epfd, events, -1) - if n == -1 { - if errno == unix.EINTR { - continue - } - return false, errno - } - if n == 0 { - // If there are no events, try again. - continue - } - if n > 6 { - // This should never happen. More events were returned than should be possible. - return false, errors.New("epoll_wait returned more events than I know what to do with") - } - ready := events[:n] - epollhup := false - epollerr := false - epollin := false - for _, event := range ready { - if event.Fd == int32(poller.fd) { - if event.Events&unix.EPOLLHUP != 0 { - // This should not happen, but if it does, treat it as a wakeup. - epollhup = true - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the file descriptor, we should pretend - // something is ready to read, and let unix.Read pick up the error. - epollerr = true - } - if event.Events&unix.EPOLLIN != 0 { - // There is data to read. - epollin = true - } - } - if event.Fd == int32(poller.pipe[0]) { - if event.Events&unix.EPOLLHUP != 0 { - // Write pipe descriptor was closed, by us. This means we're closing down the - // watcher, and we should wake up. - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the pipe file descriptor. - // This is an absolute mystery, and should never ever happen. - return false, errors.New("Error on the pipe descriptor.") - } - if event.Events&unix.EPOLLIN != 0 { - // This is a regular wakeup, so we have to clear the buffer. - err := poller.clearWake() - if err != nil { - return false, err - } - } - } - } - - if epollhup || epollerr || epollin { - return true, nil - } - return false, nil - } -} - -// Close the write end of the poller. -func (poller *fdPoller) wake() error { - buf := make([]byte, 1) - n, errno := unix.Write(poller.pipe[1], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is full, poller will wake. - return nil - } - return errno - } - return nil -} - -func (poller *fdPoller) clearWake() error { - // You have to be woken up a LOT in order to get to 100! - buf := make([]byte, 100) - n, errno := unix.Read(poller.pipe[0], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is empty, someone else cleared our wake. - return nil - } - return errno - } - return nil -} - -// Close all poller file descriptors, but not the one passed to it. 
-func (poller *fdPoller) close() { - if poller.pipe[1] != -1 { - unix.Close(poller.pipe[1]) - } - if poller.pipe[0] != -1 { - unix.Close(poller.pipe[0]) - } - if poller.epfd != -1 { - unix.Close(poller.epfd) - } -} diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go deleted file mode 100644 index 86e76a3d..00000000 --- a/vendor/github.com/fsnotify/fsnotify/kqueue.go +++ /dev/null @@ -1,521 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build freebsd openbsd netbsd dragonfly darwin - -package fsnotify - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - "time" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - - kq int // File descriptor (as returned by the kqueue() syscall). - - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Map of watched file descriptors (key: path). - externalWatches map[string]bool // Map of watches added by user of the library. - dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. - paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. - fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called -} - -type pathInfo struct { - name string - isDir bool -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - kq, err := kqueue() - if err != nil { - return nil, err - } - - w := &Watcher{ - kq: kq, - watches: make(map[string]int), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]bool), - externalWatches: make(map[string]bool), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - w.isClosed = true - - // copy paths to remove while locked - var pathsToRemove = make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() - // unlock before calling Remove, which also locks - - for _, name := range pathsToRemove { - w.Remove(name) - } - - // send a "quit" message to the reader goroutine - close(w.done) - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - w.mu.Lock() - w.externalWatches[name] = true - w.mu.Unlock() - _, err := w.addWatch(name, noteAllEvents) - return err -} - -// Remove stops watching the the named file or directory (non-recursively). 
-func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - w.mu.Lock() - watchfd, ok := w.watches[name] - w.mu.Unlock() - if !ok { - return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) - } - - const registerRemove = unix.EV_DELETE - if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { - return err - } - - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - w.mu.Unlock() - - // Find all watched paths that are in this directory that are not external. - if isDir { - var pathsToRemove []string - w.mu.Lock() - for _, path := range w.paths { - wdir, _ := filepath.Split(path.name) - if filepath.Clean(wdir) == name { - if !w.externalWatches[path.name] { - pathsToRemove = append(pathsToRemove, path.name) - } - } - } - w.mu.Unlock() - for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error - // to the user, as that will just confuse them with an error about - // a path they did not explicitly watch themselves. - w.Remove(name) - } - } - - return nil -} - -// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) -const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME - -// keventWaitTime to block on each read from kevent -var keventWaitTime = durationToTimespec(100 * time.Millisecond) - -// addWatch adds name to the watched file set. -// The flags are interpreted as described in kevent(2). -// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. -func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - // Make ./name and name equivalent - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return "", errors.New("kevent instance already closed") - } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() - - if !alreadyWatching { - fi, err := os.Lstat(name) - if err != nil { - return "", err - } - - // Don't watch sockets. - if fi.Mode()&os.ModeSocket == os.ModeSocket { - return "", nil - } - - // Don't watch named pipes. - if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { - return "", nil - } - - // Follow Symlinks - // Unfortunately, Linux can add bogus symlinks to watch list without - // issue, and Windows can't do symlinks period (AFAIK). To maintain - // consistency, we will act like everything is fine. There will simply - // be no file events for broken symlinks. - // Hence the returns of nil on errors. 
- if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - name, err = filepath.EvalSymlinks(name) - if err != nil { - return "", nil - } - - w.mu.Lock() - _, alreadyWatching = w.watches[name] - w.mu.Unlock() - - if alreadyWatching { - return name, nil - } - - fi, err = os.Lstat(name) - if err != nil { - return "", nil - } - } - - watchfd, err = unix.Open(name, openMode, 0700) - if watchfd == -1 { - return "", err - } - - isDir = fi.IsDir() - } - - const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE - if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { - unix.Close(watchfd) - return "", err - } - - if !alreadyWatching { - w.mu.Lock() - w.watches[name] = watchfd - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() - } - - if isDir { - // Watch the directory if it has not been watched before, - // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - - watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() - - if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { - return "", err - } - } - } - return name, nil -} - -// readEvents reads from kqueue and converts the received kevents into -// Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { - eventBuffer := make([]unix.Kevent_t, 10) - -loop: - for { - // See if there is a message on the "done" channel - select { - case <-w.done: - break loop - default: - } - - // Get new events - kevents, err := read(w.kq, eventBuffer, &keventWaitTime) - // EINTR is okay, the syscall was interrupted before timeout expired. - if err != nil && err != unix.EINTR { - select { - case w.Errors <- err: - case <-w.done: - break loop - } - continue - } - - // Flush the events we received to the Events channel - for len(kevents) > 0 { - kevent := &kevents[0] - watchfd := int(kevent.Ident) - mask := uint32(kevent.Fflags) - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() - event := newEvent(path.name, mask) - - if path.isDir && !(event.Op&Remove == Remove) { - // Double check to make sure the directory exists. This can happen when - // we do a rm -fr on a recursively watched folders and we receive a - // modification event first but the folder has been deleted and later - // receive the delete event - if _, err := os.Lstat(event.Name); os.IsNotExist(err) { - // mark is as delete event - event.Op |= Remove - } - } - - if event.Op&Rename == Rename || event.Op&Remove == Remove { - w.Remove(event.Name) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() - } - - if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - // Send the event on the Events channel. - select { - case w.Events <- event: - case <-w.done: - break loop - } - } - - if event.Op&Remove == Remove { - // Look for a file that may have overwritten this. - // For example, mv f1 f2 will delete f2, then create f2. - if path.isDir { - fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() - if found { - // make sure the directory exists before we watch for changes. 
When we - // do a recursive watch and perform rm -fr, the parent directory might - // have gone missing, ignore the missing directory and let the - // upcoming delete event remove the watch from the parent directory. - if _, err := os.Lstat(fileDir); err == nil { - w.sendDirectoryChangeEvents(fileDir) - } - } - } else { - filePath := filepath.Clean(event.Name) - if fileInfo, err := os.Lstat(filePath); err == nil { - w.sendFileCreatedEventIfNew(filePath, fileInfo) - } - } - } - - // Move to next event - kevents = kevents[1:] - } - } - - // cleanup - err := unix.Close(w.kq) - if err != nil { - // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. - select { - case w.Errors <- err: - default: - } - } - close(w.Events) - close(w.Errors) -} - -// newEvent returns an platform-independent Event based on kqueue Fflags. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { - e.Op |= Remove - } - if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { - e.Op |= Write - } - if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { - e.Op |= Rename - } - if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { - e.Op |= Chmod - } - return e -} - -func newCreateEvent(name string) Event { - return Event{Name: name, Op: Create} -} - -// watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - return err - } - - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - } - - return nil -} - -// sendDirectoryEvents searches the directory for newly created files -// and sends them over the event channel. This functionality is to have -// the BSD version of fsnotify match Linux inotify which provides a -// create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - select { - case w.Errors <- err: - case <-w.done: - return - } - } - - // Search for new files - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - err := w.sendFileCreatedEventIfNew(filePath, fileInfo) - - if err != nil { - return - } - } -} - -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - // Send create event - select { - case w.Events <- newCreateEvent(filePath): - case <-w.done: - return - } - } - - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - - return nil -} - -func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { - if fileInfo.IsDir() { - // mimic Linux providing delete events for subdirectories - // but preserve the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) - } - - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) -} - -// kqueue creates a new kernel event queue and returns a descriptor. -func kqueue() (kq int, err error) { - kq, err = unix.Kqueue() - if kq == -1 { - return kq, err - } - return kq, nil -} - -// register events with the queue -func register(kq int, fds []int, flags int, fflags uint32) error { - changes := make([]unix.Kevent_t, len(fds)) - - for i, fd := range fds { - // SetKevent converts int to the platform-specific types: - unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) - changes[i].Fflags = fflags - } - - // register the events - success, err := unix.Kevent(kq, changes, nil, nil) - if success == -1 { - return err - } - return nil -} - -// read retrieves pending events, or waits until an event occurs. -// A timeout of nil blocks indefinitely, while 0 polls the queue. -func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { - n, err := unix.Kevent(kq, nil, events, timeout) - if err != nil { - return nil, err - } - return events[0:n], nil -} - -// durationToTimespec prepares a timeout value -func durationToTimespec(d time.Duration) unix.Timespec { - return unix.NsecToTimespec(d.Nanoseconds()) -} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh new file mode 100644 index 00000000..99012ae6 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh @@ -0,0 +1,259 @@ +#!/usr/bin/env zsh +[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 +setopt err_exit no_unset pipefail extended_glob + +# Simple script to update the godoc comments on all watchers so you don't need +# to update the same comment 5 times. 
+ +watcher=$(</tmp/x + print -r -- $cmt >>/tmp/x + tail -n+$(( end + 1 )) $file >>/tmp/x + mv /tmp/x $file + done +} + +set-cmt '^type Watcher struct ' $watcher +set-cmt '^func NewWatcher(' $new +set-cmt '^func NewBufferedWatcher(' $newbuffered +set-cmt '^func (w \*Watcher) Add(' $add +set-cmt '^func (w \*Watcher) AddWith(' $addwith +set-cmt '^func (w \*Watcher) Remove(' $remove +set-cmt '^func (w \*Watcher) Close(' $close +set-cmt '^func (w \*Watcher) WatchList(' $watchlist +set-cmt '^[[:space:]]*Events *chan Event$' $events +set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go similarity index 50% rename from vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go rename to vendor/github.com/fsnotify/fsnotify/system_bsd.go index 2306c462..4322b0b8 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -1,7 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - +//go:build freebsd || openbsd || netbsd || dragonfly // +build freebsd openbsd netbsd dragonfly package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go similarity index 50% rename from vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go rename to vendor/github.com/fsnotify/fsnotify/system_darwin.go index 870c4d6d..5da5ffa7 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -1,7 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - +//go:build darwin // +build darwin package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go deleted file mode 100644 index 09436f31..00000000 --- a/vendor/github.com/fsnotify/fsnotify/windows.go +++ /dev/null @@ -1,561 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "runtime" - "sync" - "syscall" - "unsafe" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - isClosed bool // Set to true when Close() is first called - mu sync.Mutex // Map access - port syscall.Handle // Handle to completion port - watches watchMap // Map of watches (key: i-number) - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) - if e != nil { - return nil, os.NewSyscallError("CreateIoCompletionPort", e) - } - w := &Watcher{ - port: port, - watches: make(watchMap), - input: make(chan *input, 1), - Events: make(chan Event, 50), - Errors: make(chan error), - quit: make(chan chan<- error, 1), - } - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. 
-func (w *Watcher) Close() error { - if w.isClosed { - return nil - } - w.isClosed = true - - // Send "quit" message to the reader goroutine - ch := make(chan error) - w.quit <- ch - if err := w.wakeupReader(); err != nil { - return err - } - return <-ch -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - if w.isClosed { - return errors.New("watcher already closed") - } - in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sysFSALLEVENTS, - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - in := &input{ - op: opRemoveWatch, - path: filepath.Clean(name), - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -const ( - // Options for AddWatch - sysFSONESHOT = 0x80000000 - sysFSONLYDIR = 0x1000000 - - // Events - sysFSACCESS = 0x1 - sysFSALLEVENTS = 0xfff - sysFSATTRIB = 0x4 - sysFSCLOSE = 0x18 - sysFSCREATE = 0x100 - sysFSDELETE = 0x200 - sysFSDELETESELF = 0x400 - sysFSMODIFY = 0x2 - sysFSMOVE = 0xc0 - sysFSMOVEDFROM = 0x40 - sysFSMOVEDTO = 0x80 - sysFSMOVESELF = 0x800 - - // Special events - sysFSIGNORED = 0x8000 - sysFSQOVERFLOW = 0x4000 -) - -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { - e.Op |= Create - } - if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { - e.Op |= Remove - } - if mask&sysFSMODIFY == sysFSMODIFY { - e.Op |= Write - } - if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { - e.Op |= Rename - } - if mask&sysFSATTRIB == sysFSATTRIB { - e.Op |= Chmod - } - return e -} - -const ( - opAddWatch = iota - opRemoveWatch -) - -const ( - provisional uint64 = 1 << (32 + iota) -) - -type input struct { - op int - path string - flags uint32 - reply chan error -} - -type inode struct { - handle syscall.Handle - volume uint32 - index uint64 -} - -type watch struct { - ov syscall.Overlapped - ino *inode // i-number - path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf [4096]byte -} - -type indexMap map[uint64]*watch -type watchMap map[uint32]indexMap - -func (w *Watcher) wakeupReader() error { - e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) - if e != nil { - return os.NewSyscallError("PostQueuedCompletionStatus", e) - } - return nil -} - -func getDir(pathname string) (dir string, err error) { - attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) - if e != nil { - return "", os.NewSyscallError("GetFileAttributes", e) - } - if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - dir = pathname - } else { - dir, _ = filepath.Split(pathname) - dir = filepath.Clean(dir) - } - return -} - -func getIno(path string) (ino *inode, err error) { - h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), - syscall.FILE_LIST_DIRECTORY, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - nil, syscall.OPEN_EXISTING, - syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) - if e 
!= nil { - return nil, os.NewSyscallError("CreateFile", e) - } - var fi syscall.ByHandleFileInformation - if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { - syscall.CloseHandle(h) - return nil, os.NewSyscallError("GetFileInformationByHandle", e) - } - ino = &inode{ - handle: h, - volume: fi.VolumeSerialNumber, - index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), - } - return ino, nil -} - -// Must run within the I/O thread. -func (m watchMap) get(ino *inode) *watch { - if i := m[ino.volume]; i != nil { - return i[ino.index] - } - return nil -} - -// Must run within the I/O thread. -func (m watchMap) set(ino *inode, watch *watch) { - i := m[ino.volume] - if i == nil { - i = make(indexMap) - m[ino.volume] = i - } - i[ino.index] = watch -} - -// Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - if flags&sysFSONLYDIR != 0 && pathname != dir { - return nil - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watchEntry := w.watches.get(ino) - w.mu.Unlock() - if watchEntry == nil { - if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { - syscall.CloseHandle(ino.handle) - return os.NewSyscallError("CreateIoCompletionPort", e) - } - watchEntry = &watch{ - ino: ino, - path: dir, - names: make(map[string]uint64), - } - w.mu.Lock() - w.watches.set(ino, watchEntry) - w.mu.Unlock() - flags |= provisional - } else { - syscall.CloseHandle(ino.handle) - } - if pathname == dir { - watchEntry.mask |= flags - } else { - watchEntry.names[filepath.Base(pathname)] |= flags - } - if err = w.startRead(watchEntry); err != nil { - return err - } - if pathname == dir { - watchEntry.mask &= ^provisional - } else { - watchEntry.names[filepath.Base(pathname)] &= ^provisional - } - return nil -} - -// Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watch := w.watches.get(ino) - w.mu.Unlock() - if watch == nil { - return fmt.Errorf("can't remove non-existent watch for: %s", pathname) - } - if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - watch.mask = 0 - } else { - name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - return w.startRead(watch) -} - -// Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { - for name, mask := range watch.names { - if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) - } - delete(watch.names, name) - } - if watch.mask != 0 { - if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - } - watch.mask = 0 - } -} - -// Must run within the I/O thread. 
-func (w *Watcher) startRead(watch *watch) error { - if e := syscall.CancelIo(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CancelIo", e) - w.deleteWatch(watch) - } - mask := toWindowsFlags(watch.mask) - for _, m := range watch.names { - mask |= toWindowsFlags(m) - } - if mask == 0 { - if e := syscall.CloseHandle(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CloseHandle", e) - } - w.mu.Lock() - delete(w.watches[watch.ino.volume], watch.ino.index) - w.mu.Unlock() - return nil - } - e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], - uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) - if e != nil { - err := os.NewSyscallError("ReadDirectoryChanges", e) - if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { - // Watched directory was probably removed - if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - err = nil - } - w.deleteWatch(watch) - w.startRead(watch) - return err - } - return nil -} - -// readEvents reads from the I/O completion port, converts the -// received events into Event objects and sends them via the Events channel. -// Entry point to the I/O thread. -func (w *Watcher) readEvents() { - var ( - n, key uint32 - ov *syscall.Overlapped - ) - runtime.LockOSThread() - - for { - e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) - watch := (*watch)(unsafe.Pointer(ov)) - - if watch == nil { - select { - case ch := <-w.quit: - w.mu.Lock() - var indexes []indexMap - for _, index := range w.watches { - indexes = append(indexes, index) - } - w.mu.Unlock() - for _, index := range indexes { - for _, watch := range index { - w.deleteWatch(watch) - w.startRead(watch) - } - } - var err error - if e := syscall.CloseHandle(w.port); e != nil { - err = os.NewSyscallError("CloseHandle", e) - } - close(w.Events) - close(w.Errors) - ch <- err - return - case in := <-w.input: - switch in.op { - case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags)) - case opRemoveWatch: - in.reply <- w.remWatch(in.path) - } - default: - } - continue - } - - switch e { - case syscall.ERROR_MORE_DATA: - if watch == nil { - w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") - } else { - // The i/o succeeded but the buffer is full. - // In theory we should be building up a full packet. - // In practice we can get away with just carrying on. 
- n = uint32(unsafe.Sizeof(watch.buf)) - } - case syscall.ERROR_ACCESS_DENIED: - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) - w.deleteWatch(watch) - w.startRead(watch) - continue - case syscall.ERROR_OPERATION_ABORTED: - // CancelIo was called on this handle - continue - default: - w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) - continue - case nil: - } - - var offset uint32 - for { - if n == 0 { - w.Events <- newEvent("", sysFSQOVERFLOW) - w.Errors <- errors.New("short read in readEvents()") - break - } - - // Point "raw" to the event in the buffer - raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) - buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName)) - name := syscall.UTF16ToString(buf[:raw.FileNameLength/2]) - fullname := filepath.Join(watch.path, name) - - var mask uint64 - switch raw.Action { - case syscall.FILE_ACTION_REMOVED: - mask = sysFSDELETESELF - case syscall.FILE_ACTION_MODIFIED: - mask = sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - watch.rename = name - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - if watch.names[watch.rename] != 0 { - watch.names[name] |= watch.names[watch.rename] - delete(watch.names, watch.rename) - mask = sysFSMOVESELF - } - } - - sendNameEvent := func() { - if w.sendEvent(fullname, watch.names[name]&mask) { - if watch.names[name]&sysFSONESHOT != 0 { - delete(watch.names, name) - } - } - } - if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() - } - if raw.Action == syscall.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() - } - - // Move to the next event in the buffer - if raw.NextEntryOffset == 0 { - break - } - offset += raw.NextEntryOffset - - // Error! 
- if offset >= n { - w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") - break - } - } - - if err := w.startRead(watch); err != nil { - w.Errors <- err - } - } -} - -func (w *Watcher) sendEvent(name string, mask uint64) bool { - if mask == 0 { - return false - } - event := newEvent(name, uint32(mask)) - select { - case ch := <-w.quit: - w.quit <- ch - case w.Events <- event: - } - return true -} - -func toWindowsFlags(mask uint64) uint32 { - var m uint32 - if mask&sysFSACCESS != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS - } - if mask&sysFSMODIFY != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE - } - if mask&sysFSATTRIB != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES - } - if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME - } - return m -} - -func toFSnotifyFlags(action uint32) uint64 { - switch action { - case syscall.FILE_ACTION_ADDED: - return sysFSCREATE - case syscall.FILE_ACTION_REMOVED: - return sysFSDELETE - case syscall.FILE_ACTION_MODIFIED: - return sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - return sysFSMOVEDFROM - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - return sysFSMOVEDTO - } - return 0 -} diff --git a/vendor/github.com/go-git/gcfg/.gitignore b/vendor/github.com/go-git/gcfg/.gitignore new file mode 100644 index 00000000..2d830686 --- /dev/null +++ b/vendor/github.com/go-git/gcfg/.gitignore @@ -0,0 +1 @@ +coverage.out diff --git a/vendor/github.com/go-git/gcfg/BUILD.bazel b/vendor/github.com/go-git/gcfg/BUILD.bazel index b3626700..867e117d 100644 --- a/vendor/github.com/go-git/gcfg/BUILD.bazel +++ b/vendor/github.com/go-git/gcfg/BUILD.bazel @@ -5,8 +5,6 @@ go_library( srcs = [ "doc.go", "errors.go", - "go1_0.go", - "go1_2.go", "read.go", "set.go", ], diff --git a/vendor/github.com/go-git/gcfg/Makefile b/vendor/github.com/go-git/gcfg/Makefile new file mode 100644 index 00000000..73604da6 --- /dev/null +++ b/vendor/github.com/go-git/gcfg/Makefile @@ -0,0 +1,17 @@ +# General +WORKDIR = $(PWD) + +# Go parameters +GOCMD = go +GOTEST = $(GOCMD) test + +# Coverage +COVERAGE_REPORT = coverage.out +COVERAGE_MODE = count + +test: + $(GOTEST) ./... + +test-coverage: + echo "" > $(COVERAGE_REPORT); \ + $(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./... 
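Stepping back to the fsnotify backends deleted above: kqueue.go and windows.go were two per-platform implementations of the same public watcher contract, and that contract is what consumers of this vendored module keep using after the update. A minimal sketch of driving it — `NewWatcher`, `Add`, and the `Events`/`Errors` channels — with `/tmp/demo` as a hypothetical watched path:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// NewWatcher starts the platform-specific reader goroutine
	// (kqueue, inotify, or ReadDirectoryChangesW under the hood).
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Watch a directory non-recursively; /tmp/demo is a placeholder.
	if err := w.Add("/tmp/demo"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok { // Events is closed once Close() runs
				return
			}
			if ev.Op&fsnotify.Write != 0 {
				log.Println("modified:", ev.Name)
			}
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("watch error:", err)
		}
	}
}
```

The per-OS files above differ only in how they feed these two channels; the event loop on the consumer side is identical across platforms.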
diff --git a/vendor/github.com/go-git/gcfg/go1_0.go b/vendor/github.com/go-git/gcfg/go1_0.go deleted file mode 100644 index 66702107..00000000 --- a/vendor/github.com/go-git/gcfg/go1_0.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !go1.2 - -package gcfg - -type textUnmarshaler interface { - UnmarshalText(text []byte) error -} diff --git a/vendor/github.com/go-git/gcfg/go1_2.go b/vendor/github.com/go-git/gcfg/go1_2.go deleted file mode 100644 index 6f5843bc..00000000 --- a/vendor/github.com/go-git/gcfg/go1_2.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build go1.2 - -package gcfg - -import ( - "encoding" -) - -type textUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/github.com/go-git/gcfg/read.go b/vendor/github.com/go-git/gcfg/read.go index 4dfdc5cf..ea5d2edd 100644 --- a/vendor/github.com/go-git/gcfg/read.go +++ b/vendor/github.com/go-git/gcfg/read.go @@ -3,16 +3,16 @@ package gcfg import ( "fmt" "io" - "io/ioutil" "os" "strings" + "gopkg.in/warnings.v0" + "github.com/go-git/gcfg/scanner" "github.com/go-git/gcfg/token" - "gopkg.in/warnings.v0" ) -var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t', 'b': '\b'} +var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t', 'b': '\b', '\n': '\n'} // no error: invalid literals should be caught by scanner func unquote(s string) string { @@ -224,7 +224,7 @@ func readInto(config interface{}, fset *token.FileSet, file *token.File, // // If callback returns an error, ReadWithCallback terminates with an error too. func ReadWithCallback(reader io.Reader, callback func(string, string, string, string, bool) error) error { - src, err := ioutil.ReadAll(reader) + src, err := io.ReadAll(reader) if err != nil { return err } @@ -239,7 +239,7 @@ func ReadWithCallback(reader io.Reader, callback func(string, string, string, st // ReadInto reads gcfg formatted data from reader and sets the values into the // corresponding fields in config. func ReadInto(config interface{}, reader io.Reader) error { - src, err := ioutil.ReadAll(reader) + src, err := io.ReadAll(reader) if err != nil { return err } @@ -263,7 +263,7 @@ func ReadFileInto(config interface{}, filename string) error { return err } defer f.Close() - src, err := ioutil.ReadAll(f) + src, err := io.ReadAll(f) if err != nil { return err } diff --git a/vendor/github.com/go-git/gcfg/scanner/scanner.go b/vendor/github.com/go-git/gcfg/scanner/scanner.go index 41aafec7..b3da03d0 100644 --- a/vendor/github.com/go-git/gcfg/scanner/scanner.go +++ b/vendor/github.com/go-git/gcfg/scanner/scanner.go @@ -8,7 +8,6 @@ // // Note that the API for the scanner package may change to accommodate new // features or implementation changes in gcfg. -// package scanner import ( @@ -16,9 +15,7 @@ import ( "path/filepath" "unicode" "unicode/utf8" -) -import ( "github.com/go-git/gcfg/token" ) @@ -26,13 +23,11 @@ import ( // encountered and a handler was installed, the handler is called with a // position and an error message. The position points to the beginning of // the offending token. -// type ErrorHandler func(pos token.Position, msg string) // A Scanner holds the scanner's internal state while processing // a given text. It can be allocated as part of another data // structure but must be initialized via Init before use. -// type Scanner struct { // immutable state file *token.File // source file handle @@ -54,7 +49,6 @@ type Scanner struct { // Read the next Unicode char into s.ch. // s.ch < 0 means end-of-file. 
-// func (s *Scanner) next() { if s.rdOffset < len(s.src) { s.offset = s.rdOffset @@ -87,7 +81,6 @@ func (s *Scanner) next() { // A mode value is a set of flags (or 0). // They control scanner behavior. -// type Mode uint const ( @@ -108,7 +101,6 @@ const ( // // Note that Init may call err if there is an error in the first character // of the file. -// func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) { // Explicitly initialize all fields since a scanner may be reused. if file.Size() != len(src) { @@ -163,12 +155,13 @@ func (s *Scanner) scanIdentifier() string { return string(s.src[offs:s.offset]) } +// val indicate if we are scanning a value (vs a header) func (s *Scanner) scanEscape(val bool) { offs := s.offset ch := s.ch s.next() // always make progress switch ch { - case '\\', '"': + case '\\', '"', '\n': // ok case 'n', 't', 'b': if val { @@ -289,7 +282,6 @@ func (s *Scanner) skipWhitespace() { // Scan adds line information to the file added to the file // set with Init. Token positions are relative to that file // and thus relative to the file set. -// func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) { scanAgain: s.skipWhitespace() diff --git a/vendor/github.com/go-git/gcfg/set.go b/vendor/github.com/go-git/gcfg/set.go index e2d92780..dc9795db 100644 --- a/vendor/github.com/go-git/gcfg/set.go +++ b/vendor/github.com/go-git/gcfg/set.go @@ -2,6 +2,7 @@ package gcfg import ( "bytes" + "encoding" "encoding/gob" "fmt" "math/big" @@ -10,8 +11,9 @@ import ( "unicode" "unicode/utf8" - "github.com/go-git/gcfg/types" "gopkg.in/warnings.v0" + + "github.com/go-git/gcfg/types" ) type tag struct { @@ -65,7 +67,7 @@ var setters = []setter{ } func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error { - dtu, ok := d.(textUnmarshaler) + dtu, ok := d.(encoding.TextUnmarshaler) if !ok { return errUnsupportedType } diff --git a/vendor/github.com/go-git/go-billy/v5/Makefile b/vendor/github.com/go-git/go-billy/v5/Makefile new file mode 100644 index 00000000..74dad8b4 --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/Makefile @@ -0,0 +1,11 @@ +# Go parameters +GOCMD = go +GOTEST = $(GOCMD) test + +.PHONY: test +test: + $(GOTEST) -race ./... + +test-coverage: + echo "" > $(COVERAGE_REPORT); \ + $(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./... 
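On the gcfg side above: with the go1_0.go/go1_2.go build-tag shims removed, set.go now asserts values directly against the standard `encoding.TextUnmarshaler` interface. An illustrative sketch (not from the vendored code) of the kind of value such a setter can populate — a hypothetical `Level` type that decodes its own config text:

```go
package main

import (
	"fmt"
	"strings"
)

// Level implements encoding.TextUnmarshaler, so a gcfg-style
// textUnmarshalerSetter can fill it straight from a config token.
type Level int

const (
	Info Level = iota
	Debug
)

func (l *Level) UnmarshalText(text []byte) error {
	switch strings.ToLower(string(text)) {
	case "info":
		*l = Info
	case "debug":
		*l = Debug
	default:
		return fmt.Errorf("unknown level %q", text)
	}
	return nil
}

func main() {
	var l Level
	if err := l.UnmarshalText([]byte("debug")); err != nil {
		panic(err)
	}
	fmt.Println(l == Debug) // true
}
```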
diff --git a/vendor/github.com/go-git/go-billy/v5/memfs/memory.go b/vendor/github.com/go-git/go-billy/v5/memfs/memory.go index f217693e..dab73968 100644 --- a/vendor/github.com/go-git/go-billy/v5/memfs/memory.go +++ b/vendor/github.com/go-git/go-billy/v5/memfs/memory.go @@ -310,14 +310,14 @@ func (f *file) Duplicate(filename string, mode os.FileMode, flag int) billy.File flag: flag, } - if isAppend(flag) { - new.position = int64(new.content.Len()) - } - if isTruncate(flag) { new.content.Truncate() } + if isAppend(flag) { + new.position = int64(new.content.Len()) + } + return new } diff --git a/vendor/github.com/go-git/go-billy/v5/memfs/storage.go b/vendor/github.com/go-git/go-billy/v5/memfs/storage.go index d3ff5a25..e3c4e38b 100644 --- a/vendor/github.com/go-git/go-billy/v5/memfs/storage.go +++ b/vendor/github.com/go-git/go-billy/v5/memfs/storage.go @@ -6,6 +6,7 @@ import ( "io" "os" "path/filepath" + "sync" ) type storage struct { @@ -174,6 +175,8 @@ func clean(path string) string { type content struct { name string bytes []byte + + m sync.RWMutex } func (c *content) WriteAt(p []byte, off int64) (int, error) { @@ -185,6 +188,7 @@ func (c *content) WriteAt(p []byte, off int64) (int, error) { } } + c.m.Lock() prev := len(c.bytes) diff := int(off) - prev @@ -196,6 +200,7 @@ func (c *content) WriteAt(p []byte, off int64) (int, error) { if len(c.bytes) < prev { c.bytes = c.bytes[:prev] } + c.m.Unlock() return len(p), nil } @@ -209,8 +214,10 @@ func (c *content) ReadAt(b []byte, off int64) (n int, err error) { } } + c.m.RLock() size := int64(len(c.bytes)) if off >= size { + c.m.RUnlock() return 0, io.EOF } @@ -220,10 +227,12 @@ func (c *content) ReadAt(b []byte, off int64) (n int, err error) { } btr := c.bytes[off : off+l] + n = copy(b, btr) + if len(btr) < len(b) { err = io.EOF } - n = copy(b, btr) + c.m.RUnlock() return } diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/BUILD.bazel b/vendor/github.com/go-git/go-billy/v5/osfs/BUILD.bazel index b47c90db..3ae45016 100644 --- a/vendor/github.com/go-git/go-billy/v5/osfs/BUILD.bazel +++ b/vendor/github.com/go-git/go-billy/v5/osfs/BUILD.bazel @@ -4,7 +4,10 @@ go_library( name = "osfs", srcs = [ "os.go", + "os_bound.go", + "os_chroot.go", "os_js.go", + "os_options.go", "os_plan9.go", "os_posix.go", "os_windows.go", @@ -14,36 +17,43 @@ go_library( visibility = ["//visibility:public"], deps = select({ "@io_bazel_rules_go//go/platform:aix": [ + "//vendor/github.com/cyphar/filepath-securejoin", "//vendor/github.com/go-git/go-billy/v5:go-billy", "//vendor/github.com/go-git/go-billy/v5/helper/chroot", "//vendor/golang.org/x/sys/unix", ], "@io_bazel_rules_go//go/platform:android": [ + "//vendor/github.com/cyphar/filepath-securejoin", "//vendor/github.com/go-git/go-billy/v5:go-billy", "//vendor/github.com/go-git/go-billy/v5/helper/chroot", "//vendor/golang.org/x/sys/unix", ], "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/github.com/cyphar/filepath-securejoin", "//vendor/github.com/go-git/go-billy/v5:go-billy", "//vendor/github.com/go-git/go-billy/v5/helper/chroot", "//vendor/golang.org/x/sys/unix", ], "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/github.com/cyphar/filepath-securejoin", "//vendor/github.com/go-git/go-billy/v5:go-billy", "//vendor/github.com/go-git/go-billy/v5/helper/chroot", "//vendor/golang.org/x/sys/unix", ], "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/github.com/cyphar/filepath-securejoin", "//vendor/github.com/go-git/go-billy/v5:go-billy", "//vendor/github.com/go-git/go-billy/v5/helper/chroot", 
"//vendor/golang.org/x/sys/unix", ], "@io_bazel_rules_go//go/platform:illumos": [ + "//vendor/github.com/cyphar/filepath-securejoin", "//vendor/github.com/go-git/go-billy/v5:go-billy", "//vendor/github.com/go-git/go-billy/v5/helper/chroot", "//vendor/golang.org/x/sys/unix", ], "@io_bazel_rules_go//go/platform:ios": [ + "//vendor/github.com/cyphar/filepath-securejoin", "//vendor/github.com/go-git/go-billy/v5:go-billy", "//vendor/github.com/go-git/go-billy/v5/helper/chroot", "//vendor/golang.org/x/sys/unix", @@ -54,30 +64,36 @@ go_library( "//vendor/github.com/go-git/go-billy/v5/memfs", ], "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/github.com/cyphar/filepath-securejoin", "//vendor/github.com/go-git/go-billy/v5:go-billy", "//vendor/github.com/go-git/go-billy/v5/helper/chroot", "//vendor/golang.org/x/sys/unix", ], "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/github.com/cyphar/filepath-securejoin", "//vendor/github.com/go-git/go-billy/v5:go-billy", "//vendor/github.com/go-git/go-billy/v5/helper/chroot", "//vendor/golang.org/x/sys/unix", ], "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/github.com/cyphar/filepath-securejoin", "//vendor/github.com/go-git/go-billy/v5:go-billy", "//vendor/github.com/go-git/go-billy/v5/helper/chroot", "//vendor/golang.org/x/sys/unix", ], "@io_bazel_rules_go//go/platform:plan9": [ + "//vendor/github.com/cyphar/filepath-securejoin", "//vendor/github.com/go-git/go-billy/v5:go-billy", "//vendor/github.com/go-git/go-billy/v5/helper/chroot", ], "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/github.com/cyphar/filepath-securejoin", "//vendor/github.com/go-git/go-billy/v5:go-billy", "//vendor/github.com/go-git/go-billy/v5/helper/chroot", "//vendor/golang.org/x/sys/unix", ], "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/github.com/cyphar/filepath-securejoin", "//vendor/github.com/go-git/go-billy/v5:go-billy", "//vendor/github.com/go-git/go-billy/v5/helper/chroot", "//vendor/golang.org/x/sys/windows", diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os.go b/vendor/github.com/go-git/go-billy/v5/osfs/os.go index 9665d275..a7fe79f2 100644 --- a/vendor/github.com/go-git/go-billy/v5/osfs/os.go +++ b/vendor/github.com/go-git/go-billy/v5/osfs/os.go @@ -1,140 +1,123 @@ +//go:build !js // +build !js // Package osfs provides a billy filesystem for the OS. -package osfs // import "github.com/go-git/go-billy/v5/osfs" +package osfs import ( - "io/ioutil" + "fmt" + "io/fs" "os" - "path/filepath" "sync" "github.com/go-git/go-billy/v5" - "github.com/go-git/go-billy/v5/helper/chroot" ) const ( - defaultDirectoryMode = 0755 - defaultCreateMode = 0666 + defaultDirectoryMode = 0o755 + defaultCreateMode = 0o666 ) // Default Filesystem representing the root of the os filesystem. -var Default = &OS{} - -// OS is a filesystem based on the os filesystem. -type OS struct{} +var Default = &ChrootOS{} // New returns a new OS filesystem. -func New(baseDir string) billy.Filesystem { - return chroot.New(Default, baseDir) +// By default paths are deduplicated, but still enforced +// under baseDir. For more info refer to WithDeduplicatePath. 
+func New(baseDir string, opts ...Option) billy.Filesystem { + o := &options{ + deduplicatePath: true, + } + for _, opt := range opts { + opt(o) + } + + if o.Type == BoundOSFS { + return newBoundOS(baseDir, o.deduplicatePath) + } + + return newChrootOS(baseDir) } -func (fs *OS) Create(filename string) (billy.File, error) { - return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, defaultCreateMode) +// WithBoundOS returns the option of using a Bound filesystem OS. +func WithBoundOS() Option { + return func(o *options) { + o.Type = BoundOSFS + } } -func (fs *OS) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) { - if flag&os.O_CREATE != 0 { - if err := fs.createDir(filename); err != nil { +// WithChrootOS returns the option of using a Chroot filesystem OS. +func WithChrootOS() Option { + return func(o *options) { + o.Type = ChrootOSFS + } +} + +// WithDeduplicatePath toggles the deduplication of the base dir in the path. +// This occurs when absolute links are being used. +// Assuming base dir /base/dir and an absolute symlink /base/dir/target: +// +// With DeduplicatePath (default): /base/dir/target +// Without DeduplicatePath: /base/dir/base/dir/target +// +// This option is only used by the BoundOS OS type. +func WithDeduplicatePath(enabled bool) Option { + return func(o *options) { + o.deduplicatePath = enabled + } +} + +type options struct { + Type + deduplicatePath bool +} + +type Type int + +const ( + ChrootOSFS Type = iota + BoundOSFS +) + +func readDir(dir string) ([]os.FileInfo, error) { + entries, err := os.ReadDir(dir) + if err != nil { + return nil, err + } + infos := make([]fs.FileInfo, 0, len(entries)) + for _, entry := range entries { + fi, err := entry.Info() + if err != nil { return nil, err } + infos = append(infos, fi) } - - f, err := os.OpenFile(filename, flag, perm) - if err != nil { - return nil, err - } - return &file{File: f}, err + return infos, nil } -func (fs *OS) createDir(fullpath string) error { - dir := filepath.Dir(fullpath) - if dir != "." { - if err := os.MkdirAll(dir, defaultDirectoryMode); err != nil { - return err - } - } - - return nil -} - -func (fs *OS) ReadDir(path string) ([]os.FileInfo, error) { - l, err := ioutil.ReadDir(path) - if err != nil { - return nil, err - } - - var s = make([]os.FileInfo, len(l)) - for i, f := range l { - s[i] = f - } - - return s, nil -} - -func (fs *OS) Rename(from, to string) error { - if err := fs.createDir(to); err != nil { - return err - } - - return rename(from, to) -} - -func (fs *OS) MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, defaultDirectoryMode) -} - -func (fs *OS) Open(filename string) (billy.File, error) { - return fs.OpenFile(filename, os.O_RDONLY, 0) -} - -func (fs *OS) Stat(filename string) (os.FileInfo, error) { - return os.Stat(filename) -} - -func (fs *OS) Remove(filename string) error { - return os.Remove(filename) -} - -func (fs *OS) TempFile(dir, prefix string) (billy.File, error) { - if err := fs.createDir(dir + string(os.PathSeparator)); err != nil { - return nil, err - } - - f, err := ioutil.TempFile(dir, prefix) +func tempFile(dir, prefix string) (billy.File, error) { + f, err := os.CreateTemp(dir, prefix) if err != nil { return nil, err } return &file{File: f}, nil } -func (fs *OS) Join(elem ...string) string { - return filepath.Join(elem...) 
-} - -func (fs *OS) RemoveAll(path string) error { - return os.RemoveAll(filepath.Clean(path)) -} - -func (fs *OS) Lstat(filename string) (os.FileInfo, error) { - return os.Lstat(filepath.Clean(filename)) -} - -func (fs *OS) Symlink(target, link string) error { - if err := fs.createDir(link); err != nil { - return err +func openFile(fn string, flag int, perm os.FileMode, createDir func(string) error) (billy.File, error) { + if flag&os.O_CREATE != 0 { + if createDir == nil { + return nil, fmt.Errorf("createDir func cannot be nil if file needs to be opened in create mode") + } + if err := createDir(fn); err != nil { + return nil, err + } } - return os.Symlink(target, link) -} - -func (fs *OS) Readlink(link string) (string, error) { - return os.Readlink(link) -} - -// Capabilities implements the Capable interface. -func (fs *OS) Capabilities() billy.Capability { - return billy.DefaultCapabilities + f, err := os.OpenFile(fn, flag, perm) + if err != nil { + return nil, err + } + return &file{File: f}, err } // file is a wrapper for an os.File which adds support for file locking. diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_bound.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_bound.go new file mode 100644 index 00000000..b4b6dbc0 --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/osfs/os_bound.go @@ -0,0 +1,261 @@ +//go:build !js +// +build !js + +/* + Copyright 2022 The Flux authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package osfs + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + securejoin "github.com/cyphar/filepath-securejoin" + "github.com/go-git/go-billy/v5" +) + +// BoundOS is a fs implementation based on the OS filesystem which is bound to +// a base dir. +// Prefer this fs implementation over ChrootOS. +// +// Behaviours of note: +// 1. Read and write operations can only be directed to files which descends +// from the base dir. +// 2. Symlinks don't have their targets modified, and therefore can point +// to locations outside the base dir or to non-existent paths. +// 3. Readlink and Lstat ensures that the link file is located within the base +// dir, evaluating any symlinks that file or base dir may contain. 
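The BoundOS behaviours documented above pair with the option plumbing added to os.go: the backend is now chosen explicitly at construction time. A usage sketch, with `/srv/data` as a hypothetical base dir, ahead of the implementation that follows:

```go
package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/osfs"
)

func main() {
	// Default remains ChrootOS; WithBoundOS opts into the bound backend.
	fs := osfs.New("/srv/data", osfs.WithBoundOS())

	// Relative paths resolve under the base dir, so this creates
	// /srv/data/objects/pack/tmp.idx (parent dirs made on demand).
	f, err := fs.Create("objects/pack/tmp.idx")
	if err != nil {
		fmt.Println("create:", err)
		return
	}
	defer f.Close()

	fmt.Println("root:", fs.Root()) // /srv/data
}
```

Since `ChrootOSFS` is the zero value of `Type`, calling `osfs.New(baseDir)` with no options — or with `WithChrootOS()` — keeps the legacy chroot behaviour.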
+type BoundOS struct { + baseDir string + deduplicatePath bool +} + +func newBoundOS(d string, deduplicatePath bool) billy.Filesystem { + return &BoundOS{baseDir: d, deduplicatePath: deduplicatePath} +} + +func (fs *BoundOS) Create(filename string) (billy.File, error) { + return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, defaultCreateMode) +} + +func (fs *BoundOS) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) { + fn, err := fs.abs(filename) + if err != nil { + return nil, err + } + return openFile(fn, flag, perm, fs.createDir) +} + +func (fs *BoundOS) ReadDir(path string) ([]os.FileInfo, error) { + dir, err := fs.abs(path) + if err != nil { + return nil, err + } + + return readDir(dir) +} + +func (fs *BoundOS) Rename(from, to string) error { + f, err := fs.abs(from) + if err != nil { + return err + } + t, err := fs.abs(to) + if err != nil { + return err + } + + // MkdirAll for target name. + if err := fs.createDir(t); err != nil { + return err + } + + return os.Rename(f, t) +} + +func (fs *BoundOS) MkdirAll(path string, perm os.FileMode) error { + dir, err := fs.abs(path) + if err != nil { + return err + } + return os.MkdirAll(dir, perm) +} + +func (fs *BoundOS) Open(filename string) (billy.File, error) { + return fs.OpenFile(filename, os.O_RDONLY, 0) +} + +func (fs *BoundOS) Stat(filename string) (os.FileInfo, error) { + filename, err := fs.abs(filename) + if err != nil { + return nil, err + } + return os.Stat(filename) +} + +func (fs *BoundOS) Remove(filename string) error { + fn, err := fs.abs(filename) + if err != nil { + return err + } + return os.Remove(fn) +} + +// TempFile creates a temporary file. If dir is empty, the file +// will be created within the OS Temporary dir. If dir is provided +// it must descend from the current base dir. +func (fs *BoundOS) TempFile(dir, prefix string) (billy.File, error) { + if dir != "" { + var err error + dir, err = fs.abs(dir) + if err != nil { + return nil, err + } + } + + return tempFile(dir, prefix) +} + +func (fs *BoundOS) Join(elem ...string) string { + return filepath.Join(elem...) +} + +func (fs *BoundOS) RemoveAll(path string) error { + dir, err := fs.abs(path) + if err != nil { + return err + } + return os.RemoveAll(dir) +} + +func (fs *BoundOS) Symlink(target, link string) error { + ln, err := fs.abs(link) + if err != nil { + return err + } + // MkdirAll for containing dir. + if err := fs.createDir(ln); err != nil { + return err + } + return os.Symlink(target, ln) +} + +func (fs *BoundOS) Lstat(filename string) (os.FileInfo, error) { + filename = filepath.Clean(filename) + if !filepath.IsAbs(filename) { + filename = filepath.Join(fs.baseDir, filename) + } + if ok, err := fs.insideBaseDirEval(filename); !ok { + return nil, err + } + return os.Lstat(filename) +} + +func (fs *BoundOS) Readlink(link string) (string, error) { + if !filepath.IsAbs(link) { + link = filepath.Clean(filepath.Join(fs.baseDir, link)) + } + if ok, err := fs.insideBaseDirEval(link); !ok { + return "", err + } + return os.Readlink(link) +} + +// Chroot returns a new OS filesystem, with the base dir set to the +// result of joining the provided path with the underlying base dir. +func (fs *BoundOS) Chroot(path string) (billy.Filesystem, error) { + joined, err := securejoin.SecureJoin(fs.baseDir, path) + if err != nil { + return nil, err + } + return New(joined), nil +} + +// Root returns the current base dir of the billy.Filesystem. 
+// This is required in order for this implementation to be a drop-in +// replacement for other upstream implementations (e.g. memory and osfs). +func (fs *BoundOS) Root() string { + return fs.baseDir +} + +func (fs *BoundOS) createDir(fullpath string) error { + dir := filepath.Dir(fullpath) + if dir != "." { + if err := os.MkdirAll(dir, defaultDirectoryMode); err != nil { + return err + } + } + + return nil +} + +// abs transforms filename to an absolute path, taking into account the base dir. +// Relative paths won't be allowed to ascend the base dir, so `../file` will become +// `/working-dir/file`. +// +// Note that if filename is a symlink, the returned address will be the target of the +// symlink. +func (fs *BoundOS) abs(filename string) (string, error) { + if filename == fs.baseDir { + filename = string(filepath.Separator) + } + + path, err := securejoin.SecureJoin(fs.baseDir, filename) + if err != nil { + return "", nil + } + + if fs.deduplicatePath { + vol := filepath.VolumeName(fs.baseDir) + dup := filepath.Join(fs.baseDir, fs.baseDir[len(vol):]) + if strings.HasPrefix(path, dup+string(filepath.Separator)) { + return fs.abs(path[len(dup):]) + } + } + return path, nil +} + +// insideBaseDir checks whether filename is located within +// the fs.baseDir. +func (fs *BoundOS) insideBaseDir(filename string) (bool, error) { + if filename == fs.baseDir { + return true, nil + } + if !strings.HasPrefix(filename, fs.baseDir+string(filepath.Separator)) { + return false, fmt.Errorf("path outside base dir") + } + return true, nil +} + +// insideBaseDirEval checks whether filename is contained within +// a dir that is within the fs.baseDir, by first evaluating any symlinks +// that either filename or fs.baseDir may contain. +func (fs *BoundOS) insideBaseDirEval(filename string) (bool, error) { + dir, err := filepath.EvalSymlinks(filepath.Dir(filename)) + if dir == "" || os.IsNotExist(err) { + dir = filepath.Dir(filename) + } + wd, err := filepath.EvalSymlinks(fs.baseDir) + if wd == "" || os.IsNotExist(err) { + wd = fs.baseDir + } + if filename != wd && dir != wd && !strings.HasPrefix(dir, wd+string(filepath.Separator)) { + return false, fmt.Errorf("path outside base dir") + } + return true, nil +} diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_chroot.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_chroot.go new file mode 100644 index 00000000..fd65e773 --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/osfs/os_chroot.go @@ -0,0 +1,112 @@ +//go:build !js +// +build !js + +package osfs + +import ( + "os" + "path/filepath" + + "github.com/go-git/go-billy/v5" + "github.com/go-git/go-billy/v5/helper/chroot" +) + +// ChrootOS is a legacy filesystem based on a "soft chroot" of the os filesystem. +// Although this is still the default os filesystem, consider using BoundOS instead. +// +// Behaviours of note: +// 1. A "soft chroot" translates the base dir to "/" for the purposes of the +// fs abstraction. +// 2. Symlinks targets may be modified to be kept within the chroot bounds. +// 3. Some file modes does not pass-through the fs abstraction. +// 4. The combination of 1 and 2 may cause go-git to think that a Git repository +// is dirty, when in fact it isn't. 
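Before the ChrootOS implementation below: the `BoundOS.abs` and `insideBaseDir` helpers above delegate lexical containment to `github.com/cyphar/filepath-securejoin`. A small sketch of the property BoundOS relies on (paths hypothetical):

```go
package main

import (
	"fmt"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	// SecureJoin resolves the unsafe path relative to the root and does
	// not let ".." climb out of it, which is what keeps every BoundOS
	// read/write operation under the base dir.
	p, err := securejoin.SecureJoin("/srv/data", "../../etc/passwd")
	if err != nil {
		fmt.Println("join:", err)
		return
	}
	fmt.Println(p) // /srv/data/etc/passwd
}
```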
+type ChrootOS struct{} + +func newChrootOS(baseDir string) billy.Filesystem { + return chroot.New(&ChrootOS{}, baseDir) +} + +func (fs *ChrootOS) Create(filename string) (billy.File, error) { + return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, defaultCreateMode) +} + +func (fs *ChrootOS) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) { + return openFile(filename, flag, perm, fs.createDir) +} + +func (fs *ChrootOS) createDir(fullpath string) error { + dir := filepath.Dir(fullpath) + if dir != "." { + if err := os.MkdirAll(dir, defaultDirectoryMode); err != nil { + return err + } + } + + return nil +} + +func (fs *ChrootOS) ReadDir(dir string) ([]os.FileInfo, error) { + return readDir(dir) +} + +func (fs *ChrootOS) Rename(from, to string) error { + if err := fs.createDir(to); err != nil { + return err + } + + return rename(from, to) +} + +func (fs *ChrootOS) MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, defaultDirectoryMode) +} + +func (fs *ChrootOS) Open(filename string) (billy.File, error) { + return fs.OpenFile(filename, os.O_RDONLY, 0) +} + +func (fs *ChrootOS) Stat(filename string) (os.FileInfo, error) { + return os.Stat(filename) +} + +func (fs *ChrootOS) Remove(filename string) error { + return os.Remove(filename) +} + +func (fs *ChrootOS) TempFile(dir, prefix string) (billy.File, error) { + if err := fs.createDir(dir + string(os.PathSeparator)); err != nil { + return nil, err + } + + return tempFile(dir, prefix) +} + +func (fs *ChrootOS) Join(elem ...string) string { + return filepath.Join(elem...) +} + +func (fs *ChrootOS) RemoveAll(path string) error { + return os.RemoveAll(filepath.Clean(path)) +} + +func (fs *ChrootOS) Lstat(filename string) (os.FileInfo, error) { + return os.Lstat(filepath.Clean(filename)) +} + +func (fs *ChrootOS) Symlink(target, link string) error { + if err := fs.createDir(link); err != nil { + return err + } + + return os.Symlink(target, link) +} + +func (fs *ChrootOS) Readlink(link string) (string, error) { + return os.Readlink(link) +} + +// Capabilities implements the Capable interface. +func (fs *ChrootOS) Capabilities() billy.Capability { + return billy.DefaultCapabilities +} diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_js.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_js.go index 8ae68fed..2e58aa5c 100644 --- a/vendor/github.com/go-git/go-billy/v5/osfs/os_js.go +++ b/vendor/github.com/go-git/go-billy/v5/osfs/os_js.go @@ -1,3 +1,4 @@ +//go:build js // +build js package osfs @@ -16,6 +17,9 @@ var globalMemFs = memfs.New() var Default = memfs.New() // New returns a new OS filesystem. 
-func New(baseDir string) billy.Filesystem { +func New(baseDir string, _ ...Option) billy.Filesystem { return chroot.New(Default, Default.Join("/", baseDir)) } + +type options struct { +} diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_options.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_options.go new file mode 100644 index 00000000..2f235c6d --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/osfs/os_options.go @@ -0,0 +1,3 @@ +package osfs + +type Option func(*options) diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go index e8f519ff..84020b52 100644 --- a/vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go +++ b/vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go @@ -1,3 +1,4 @@ +//go:build plan9 // +build plan9 package osfs @@ -83,3 +84,8 @@ func dirwstat(name string, d *syscall.Dir) error { } return nil } + +func umask(new int) func() { + return func() { + } +} diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go index c74d60ee..d834a114 100644 --- a/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go +++ b/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go @@ -1,9 +1,11 @@ +//go:build !plan9 && !windows && !js // +build !plan9,!windows,!js package osfs import ( "os" + "syscall" "golang.org/x/sys/unix" ) @@ -25,3 +27,12 @@ func (f *file) Unlock() error { func rename(from, to string) error { return os.Rename(from, to) } + +// umask sets umask to a new value, and returns a func which allows the +// caller to reset it back to what it was originally. +func umask(new int) func() { + old := syscall.Umask(new) + return func() { + syscall.Umask(old) + } +} diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_windows.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_windows.go index 8f5caeb0..e54df748 100644 --- a/vendor/github.com/go-git/go-billy/v5/osfs/os_windows.go +++ b/vendor/github.com/go-git/go-billy/v5/osfs/os_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package osfs @@ -10,15 +11,6 @@ import ( "golang.org/x/sys/windows" ) -type fileInfo struct { - os.FileInfo - name string -} - -func (fi *fileInfo) Name() string { - return fi.name -} - var ( kernel32DLL = windows.NewLazySystemDLL("kernel32.dll") lockFileExProc = kernel32DLL.NewProc("LockFileEx") @@ -59,3 +51,8 @@ func (f *file) Unlock() error { func rename(from, to string) error { return os.Rename(from, to) } + +func umask(new int) func() { + return func() { + } +} diff --git a/vendor/github.com/go-git/go-billy/v5/util/BUILD.bazel b/vendor/github.com/go-git/go-billy/v5/util/BUILD.bazel index fdb59a30..8112157b 100644 --- a/vendor/github.com/go-git/go-billy/v5/util/BUILD.bazel +++ b/vendor/github.com/go-git/go-billy/v5/util/BUILD.bazel @@ -5,6 +5,7 @@ go_library( srcs = [ "glob.go", "util.go", + "walk.go", ], importmap = "peridot.resf.org/vendor/github.com/go-git/go-billy/v5/util", importpath = "github.com/go-git/go-billy/v5/util", diff --git a/vendor/github.com/go-git/go-billy/v5/util/walk.go b/vendor/github.com/go-git/go-billy/v5/util/walk.go new file mode 100644 index 00000000..1531bcaa --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/util/walk.go @@ -0,0 +1,72 @@ +package util + +import ( + "os" + "path/filepath" + + "github.com/go-git/go-billy/v5" +) + +// walk recursively descends path, calling walkFn +// adapted from https://golang.org/src/path/filepath/path.go +func walk(fs billy.Filesystem, path string, info os.FileInfo, 
walkFn filepath.WalkFunc) error { + if !info.IsDir() { + return walkFn(path, info, nil) + } + + names, err := readdirnames(fs, path) + err1 := walkFn(path, info, err) + // If err != nil, walk can't walk into this directory. + // err1 != nil means walkFn want walk to skip this directory or stop walking. + // Therefore, if one of err and err1 isn't nil, walk will return. + if err != nil || err1 != nil { + // The caller's behavior is controlled by the return value, which is decided + // by walkFn. walkFn may ignore err and return nil. + // If walkFn returns SkipDir, it will be handled by the caller. + // So walk should return whatever walkFn returns. + return err1 + } + + for _, name := range names { + filename := filepath.Join(path, name) + fileInfo, err := fs.Lstat(filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = walk(fs, filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} + +// Walk walks the file tree rooted at root, calling fn for each file or +// directory in the tree, including root. All errors that arise visiting files +// and directories are filtered by fn: see the WalkFunc documentation for +// details. +// +// The files are walked in lexical order, which makes the output deterministic +// but requires Walk to read an entire directory into memory before proceeding +// to walk that directory. Walk does not follow symbolic links. +// +// Function adapted from https://github.com/golang/go/blob/3b770f2ccb1fa6fecc22ea822a19447b10b70c5c/src/path/filepath/path.go#L500 +func Walk(fs billy.Filesystem, root string, walkFn filepath.WalkFunc) error { + info, err := fs.Lstat(root) + if err != nil { + err = walkFn(root, nil, err) + } else { + err = walk(fs, root, info, walkFn) + } + + if err == filepath.SkipDir { + return nil + } + + return err +} diff --git a/vendor/github.com/go-git/go-git/v5/.gitignore b/vendor/github.com/go-git/go-git/v5/.gitignore index 038dd9f1..b7f2c580 100644 --- a/vendor/github.com/go-git/go-git/v5/.gitignore +++ b/vendor/github.com/go-git/go-git/v5/.gitignore @@ -2,3 +2,6 @@ coverage.out *~ coverage.txt profile.out +.tmp/ +.git-dist/ +.vscode diff --git a/vendor/github.com/go-git/go-git/v5/BUILD.bazel b/vendor/github.com/go-git/go-git/v5/BUILD.bazel index e09848ed..386c463e 100644 --- a/vendor/github.com/go-git/go-git/v5/BUILD.bazel +++ b/vendor/github.com/go-git/go-git/v5/BUILD.bazel @@ -9,14 +9,15 @@ go_library( "object_walker.go", "options.go", "prune.go", - "references.go", "remote.go", "repository.go", + "signer.go", "status.go", "submodule.go", "worktree.go", "worktree_bsd.go", "worktree_commit.go", + "worktree_js.go", "worktree_linux.go", "worktree_plan9.go", "worktree_status.go", @@ -27,17 +28,22 @@ go_library( importpath = "github.com/go-git/go-git/v5", visibility = ["//visibility:public"], deps = [ + "//vendor/dario.cat/mergo", "//vendor/github.com/go-git/go-billy/v5:go-billy", "//vendor/github.com/go-git/go-billy/v5/osfs", "//vendor/github.com/go-git/go-billy/v5/util", "//vendor/github.com/go-git/go-git/v5/config", + "//vendor/github.com/go-git/go-git/v5/internal/path_util", "//vendor/github.com/go-git/go-git/v5/internal/revision", + "//vendor/github.com/go-git/go-git/v5/internal/url", "//vendor/github.com/go-git/go-git/v5/plumbing", "//vendor/github.com/go-git/go-git/v5/plumbing/cache", "//vendor/github.com/go-git/go-git/v5/plumbing/filemode", + 
"//vendor/github.com/go-git/go-git/v5/plumbing/format/config", "//vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore", "//vendor/github.com/go-git/go-git/v5/plumbing/format/index", "//vendor/github.com/go-git/go-git/v5/plumbing/format/packfile", + "//vendor/github.com/go-git/go-git/v5/plumbing/hash", "//vendor/github.com/go-git/go-git/v5/plumbing/object", "//vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp", "//vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability", @@ -56,8 +62,9 @@ go_library( "//vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem", "//vendor/github.com/go-git/go-git/v5/utils/merkletrie/index", "//vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder", - "//vendor/github.com/imdario/mergo", + "//vendor/github.com/go-git/go-git/v5/utils/sync", "//vendor/github.com/sergi/go-diff/diffmatchpatch", - "//vendor/golang.org/x/crypto/openpgp", + "@com_github_protonmail_go_crypto//openpgp", + "@com_github_protonmail_go_crypto//openpgp/packet", ], ) diff --git a/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md b/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md index 2a72b501..ff0c22c8 100644 --- a/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md +++ b/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md @@ -1,111 +1,233 @@ -Supported Capabilities -====================== +# Supported Features -Here is a non-comprehensive table of git commands and features whose equivalent -is supported by go-git. +Here is a non-comprehensive table of git commands and features and their +compatibility status with go-git. -| Feature | Status | Notes | -|---------------------------------------|--------|-------| -| **config** | -| config | ✔ | Reading and modifying per-repository configuration (`.git/config`) is supported. Global configuration (`$HOME/.gitconfig`) is not. | -| **getting and creating repositories** | -| init | ✔ | Plain init and `--bare` are supported. Flags `--template`, `--separate-git-dir` and `--shared` are not. | -| clone | ✔ | Plain clone and equivalents to `--progress`, `--single-branch`, `--depth`, `--origin`, `--recurse-submodules` are supported. Others are not. | -| **basic snapshotting** | -| add | ✔ | Plain add is supported. Any other flags aren't supported | -| status | ✔ | -| commit | ✔ | -| reset | ✔ | -| rm | ✔ | -| mv | ✔ | -| **branching and merging** | -| branch | ✔ | -| checkout | ✔ | Basic usages of checkout are supported. | -| merge | ✖ | -| mergetool | ✖ | -| stash | ✖ | -| tag | ✔ | -| **sharing and updating projects** | -| fetch | ✔ | -| pull | ✔ | Only supports merges where the merge can be resolved as a fast-forward. 
| -| push | ✔ | -| remote | ✔ | -| submodule | ✔ | -| **inspection and comparison** | -| show | ✔ | -| log | ✔ | -| shortlog | (see log) | -| describe | | -| **patching** | -| apply | ✖ | -| cherry-pick | ✖ | -| diff | ✔ | Patch object with UnifiedDiff output representation | -| rebase | ✖ | -| revert | ✖ | -| **debugging** | -| bisect | ✖ | -| blame | ✔ | -| grep | ✔ | -| **email** || -| am | ✖ | -| apply | ✖ | -| format-patch | ✖ | -| send-email | ✖ | -| request-pull | ✖ | -| **external systems** | -| svn | ✖ | -| fast-import | ✖ | -| **administration** | -| clean | ✔ | -| gc | ✖ | -| fsck | ✖ | -| reflog | ✖ | -| filter-branch | ✖ | -| instaweb | ✖ | -| archive | ✖ | -| bundle | ✖ | -| prune | ✖ | -| repack | ✖ | -| **server admin** | -| daemon | | -| update-server-info | | -| **advanced** | -| notes | ✖ | -| replace | ✖ | -| worktree | ✖ | -| annotate | (see blame) | -| **gpg** | -| git-verify-commit | ✔ | -| git-verify-tag | ✔ | -| **plumbing commands** | -| cat-file | ✔ | -| check-ignore | | -| commit-tree | | -| count-objects | | -| diff-index | | -| for-each-ref | ✔ | -| hash-object | ✔ | -| ls-files | ✔ | -| merge-base | ✔ | Calculates the merge-base only between two commits, and supports `--independent` and `--is-ancestor` modifiers; Does not support `--fork-point` nor `--octopus` modifiers. | -| read-tree | | -| rev-list | ✔ | -| rev-parse | | -| show-ref | ✔ | -| symbolic-ref | ✔ | -| update-index | | -| update-ref | | -| verify-pack | | -| write-tree | | -| **protocols** | -| http(s):// (dumb) | ✖ | -| http(s):// (smart) | ✔ | -| git:// | ✔ | -| ssh:// | ✔ | -| file:// | partial | Warning: this is not pure Golang. This shells out to the `git` binary. | -| custom | ✔ | -| **other features** | -| gitignore | ✔ | -| gitattributes | ✖ | -| index version | | -| packfile version | | -| push-certs | ✖ | +## Getting and creating repositories + +| Feature | Sub-feature | Status | Notes | Examples | +| ------- | ------------------------------------------------------------------------------------------------------------------ | ------ | ----- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `init` | | ✅ | | | +| `init` | `--bare` | ✅ | | | +| `init` | `--template`
`--separate-git-dir`
`--shared` | ❌ | | | +| `clone` | | ✅ | | - [PlainClone](_examples/clone/main.go) | +| `clone` | Authentication:
- none
- access token
- username + password
- ssh | ✅ | | - [clone ssh](_examples/clone/auth/ssh/main.go)
- [clone access token](_examples/clone/auth/basic/access_token/main.go)
- [clone user + password](_examples/clone/auth/basic/username_password/main.go) | +| `clone` | `--progress`
`--single-branch`
`--depth`
`--origin`
`--recurse-submodules`
`--shared` | ✅ | | - [recurse submodules](_examples/clone/main.go)
- [progress](_examples/progress/main.go) | + +## Basic snapshotting + +| Feature | Sub-feature | Status | Notes | Examples | +| -------- | ----------- | ------ | -------------------------------------------------------- | ------------------------------------ | +| `add` | | ✅ | Plain add is supported. Any other flags aren't supported | | +| `status` | | ✅ | | | +| `commit` | | ✅ | | - [commit](_examples/commit/main.go) | +| `reset` | | ✅ | | | +| `rm` | | ✅ | | | +| `mv` | | ✅ | | | + +## Branching and merging + +| Feature | Sub-feature | Status | Notes | Examples | +| ----------- | ----------- | ------------ | --------------------------------------- | ----------------------------------------------------------------------------------------------- | +| `branch` | | ✅ | | - [branch](_examples/branch/main.go) | +| `checkout` | | ✅ | Basic usages of checkout are supported. | - [checkout](_examples/checkout/main.go) | +| `merge` | | ⚠️ (partial) | Fast-forward only | | +| `mergetool` | | ❌ | | | +| `stash` | | ❌ | | | +| `tag` | | ✅ | | - [tag](_examples/tag/main.go)
- [tag create and push](_examples/tag-create-push/main.go) | + +## Sharing and updating projects + +| Feature | Sub-feature | Status | Notes | Examples | +| ----------- | ----------- | ------ | ----------------------------------------------------------------------- | ------------------------------------------ | +| `fetch` | | ✅ | | | +| `pull` | | ✅ | Only supports merges where the merge can be resolved as a fast-forward. | - [pull](_examples/pull/main.go) | +| `push` | | ✅ | | - [push](_examples/push/main.go) | +| `remote` | | ✅ | | - [remotes](_examples/remotes/main.go) | +| `submodule` | | ✅ | | - [submodule](_examples/submodule/main.go) | +| `submodule` | deinit | ❌ | | | + +## Inspection and comparison + +| Feature | Sub-feature | Status | Notes | Examples | +| ---------- | ----------- | --------- | ----- | ------------------------------ | +| `show` | | ✅ | | | +| `log` | | ✅ | | - [log](_examples/log/main.go) | +| `shortlog` | | (see log) | | | +| `describe` | | ❌ | | | + +## Patching + +| Feature | Sub-feature | Status | Notes | Examples | +| ------------- | ----------- | ------ | ---------------------------------------------------- | -------- | +| `apply` | | ❌ | | | +| `cherry-pick` | | ❌ | | | +| `diff` | | ✅ | Patch object with UnifiedDiff output representation. | | +| `rebase` | | ❌ | | | +| `revert` | | ❌ | | | + +## Debugging + +| Feature | Sub-feature | Status | Notes | Examples | +| -------- | ----------- | ------ | ----- | ---------------------------------- | +| `bisect` | | ❌ | | | +| `blame` | | ✅ | | - [blame](_examples/blame/main.go) | +| `grep` | | ✅ | | | + +## Email + +| Feature | Sub-feature | Status | Notes | Examples | +| -------------- | ----------- | ------ | ----- | -------- | +| `am` | | ❌ | | | +| `apply` | | ❌ | | | +| `format-patch` | | ❌ | | | +| `send-email` | | ❌ | | | +| `request-pull` | | ❌ | | | + +## External systems + +| Feature | Sub-feature | Status | Notes | Examples | +| ------------- | ----------- | ------ | ----- | -------- | +| `svn` | | ❌ | | | +| `fast-import` | | ❌ | | | +| `lfs` | | ❌ | | | + +## Administration + +| Feature | Sub-feature | Status | Notes | Examples | +| --------------- | ----------- | ------ | ----- | -------- | +| `clean` | | ✅ | | | +| `gc` | | ❌ | | | +| `fsck` | | ❌ | | | +| `reflog` | | ❌ | | | +| `filter-branch` | | ❌ | | | +| `instaweb` | | ❌ | | | +| `archive` | | ❌ | | | +| `bundle` | | ❌ | | | +| `prune` | | ❌ | | | +| `repack` | | ❌ | | | + +## Server admin + +| Feature | Sub-feature | Status | Notes | Examples | +| -------------------- | ----------- | ------ | ----- | ----------------------------------------- | +| `daemon` | | ❌ | | | +| `update-server-info` | | ✅ | | [cli](./cli/go-git/update_server_info.go) | + +## Advanced + +| Feature | Sub-feature | Status | Notes | Examples | +| ---------- | ----------- | ----------- | ----- | -------- | +| `notes` | | ❌ | | | +| `replace` | | ❌ | | | +| `worktree` | | ❌ | | | +| `annotate` | | (see blame) | | | + +## GPG + +| Feature | Sub-feature | Status | Notes | Examples | +| ------------------- | ----------- | ------ | ----- | -------- | +| `git-verify-commit` | | ✅ | | | +| `git-verify-tag` | | ✅ | | | + +## Plumbing commands + +| Feature | Sub-feature | Status | Notes | Examples | +| --------------- | ------------------------------------- | ------------ | --------------------------------------------------- | -------------------------------------------- | +| `cat-file` | | ✅ | | | +| `check-ignore` | | ❌ | | | +| `commit-tree` | | ❌ | | | +| `count-objects` | | ❌ 
| | | +| `diff-index` | | ❌ | | | +| `for-each-ref` | | ✅ | | | +| `hash-object` | | ✅ | | | +| `ls-files` | | ✅ | | | +| `ls-remote` | | ✅ | | - [ls-remote](_examples/ls-remote/main.go) | +| `merge-base` | `--independent`
`--is-ancestor` | ⚠️ (partial) | Calculates the merge-base only between two commits. | - [merge-base](_examples/merge_base/main.go) | +| `merge-base` | `--fork-point`
`--octopus` | ❌ | | | +| `read-tree` | | ❌ | | | +| `rev-list` | | ✅ | | | +| `rev-parse` | | ❌ | | | +| `show-ref` | | ✅ | | | +| `symbolic-ref` | | ✅ | | | +| `update-index` | | ❌ | | | +| `update-ref` | | ❌ | | | +| `verify-pack` | | ❌ | | | +| `write-tree` | | ❌ | | | + +## Indexes and Git Protocols + +| Feature | Version | Status | Notes | +| -------------------- | ------------------------------------------------------------------------------- | ------ | ----- | +| index | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ❌ | | +| index | [v2](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ✅ | | +| index | [v3](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ❌ | | +| pack-protocol | [v1](https://github.com/git/git/blob/master/Documentation/gitprotocol-pack.txt) | ✅ | | +| pack-protocol | [v2](https://github.com/git/git/blob/master/Documentation/gitprotocol-v2.txt) | ❌ | | +| multi-pack-index | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | | +| pack-\*.rev files | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | | +| pack-\*.mtimes files | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | | +| cruft packs | | ❌ | | + +## Capabilities + +| Feature | Status | Notes | +| ------------------------------ | ------------ | ----- | +| `multi_ack` | ❌ | | +| `multi_ack_detailed` | ❌ | | +| `no-done` | ❌ | | +| `thin-pack` | ❌ | | +| `side-band` | ⚠️ (partial) | | +| `side-band-64k` | ⚠️ (partial) | | +| `ofs-delta` | ✅ | | +| `agent` | ✅ | | +| `object-format` | ❌ | | +| `symref` | ✅ | | +| `shallow` | ✅ | | +| `deepen-since` | ✅ | | +| `deepen-not` | ❌ | | +| `deepen-relative` | ❌ | | +| `no-progress` | ✅ | | +| `include-tag` | ✅ | | +| `report-status` | ✅ | | +| `report-status-v2` | ❌ | | +| `delete-refs` | ✅ | | +| `quiet` | ❌ | | +| `atomic` | ✅ | | +| `push-options` | ✅ | | +| `allow-tip-sha1-in-want` | ✅ | | +| `allow-reachable-sha1-in-want` | ❌ | | +| `push-cert=` | ❌ | | +| `filter` | ❌ | | +| `session-id=` | ❌ | | + +## Transport Schemes + +| Scheme | Status | Notes | Examples | +| -------------------- | ------------ | ---------------------------------------------------------------------- | ---------------------------------------------- | +| `http(s)://` (dumb) | ❌ | | | +| `http(s)://` (smart) | ✅ | | | +| `git://` | ✅ | | | +| `ssh://` | ✅ | | | +| `file://` | ⚠️ (partial) | Warning: this is not pure Golang. This shells out to the `git` binary. | | +| Custom | ✅ | All existing schemes can be replaced by custom implementations. | - [custom_http](_examples/custom_http/main.go) | + +## SHA256 + +| Feature | Sub-feature | Status | Notes | Examples | +| -------- | ----------- | ------ | ---------------------------------- | ------------------------------------ | +| `init` | | ✅ | Requires building with tag sha256. | - [init](_examples/sha256/main.go) | +| `commit` | | ✅ | Requires building with tag sha256. | - [commit](_examples/sha256/main.go) | +| `pull` | | ❌ | | | +| `fetch` | | ❌ | | | +| `push` | | ❌ | | | + +## Other features + +| Feature | Sub-feature | Status | Notes | Examples | +| --------------- | --------------------------- | ------ | ---------------------------------------------- | -------- | +| `config` | `--local` | ✅ | Read and write per-repository (`.git/config`). | | +| `config` | `--global`
`--system` | ✅ | Read-only. | | +| `gitignore` | | ✅ | | | +| `gitattributes` | | ✅ | | | +| `git-worktree` | | ❌ | Multiple worktrees are not supported. | | diff --git a/vendor/github.com/go-git/go-git/v5/EXTENDING.md b/vendor/github.com/go-git/go-git/v5/EXTENDING.md new file mode 100644 index 00000000..a2778e34 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/EXTENDING.md @@ -0,0 +1,78 @@ +# Extending go-git + +`go-git` was built in a highly extensible manner, which enables some of its functionality to be changed or extended without changing its codebase. Here are the key extensibility features: + +## Dot Git Storers + +Dot git storers are the components responsible for storing the Git internal files, including objects and references. + +The built-in storer implementations include [memory](storage/memory) and [filesystem](storage/filesystem). The `memory` storer stores all the data in memory, and its use looks like this: + +```go + r, err := git.Init(memory.NewStorage(), nil) +``` + +The `filesystem` storer stores the data in the OS filesystem, and can be used as follows: + +```go + r, err := git.Init(filesystem.NewStorage(osfs.New("/tmp/foo"), cache.NewObjectLRUDefault()), nil) +``` + +New implementations can be created by implementing the [storage.Storer interface](storage/storer.go#L16). + +## Filesystem + +Git repository worktrees are managed using a filesystem abstraction based on [go-billy](https://github.com/go-git/go-billy). The Git operations will take place against the specific filesystem implementation. Initialising a repository in memory can be done as follows: + +```go + fs := memfs.New() + r, err := git.Init(memory.NewStorage(), fs) +``` + +The same operation can be done against the OS filesystem: + +```go + fs := osfs.New("/tmp/foo") + r, err := git.Init(memory.NewStorage(), fs) +``` + +New filesystems (e.g. cloud-based storage) could be created by implementing `go-billy`'s [Filesystem interface](https://github.com/go-git/go-billy/blob/326c59f064021b821a55371d57794fbfb86d4cb3/fs.go#L52). + +## Transport Schemes + +Git supports various transport schemes, including `http`, `https`, `ssh`, `git` and `file`. `go-git` defines the [transport.Transport interface](plumbing/transport/common.go#L48) to represent them. + +The built-in implementations can be replaced by calling `client.InstallProtocol`. + +An example of changing the built-in `https` implementation to skip TLS verification could look like this: + +```go + customClient := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }, + } + + client.InstallProtocol("https", githttp.NewClient(customClient)) +``` + +Some internal implementations enable code reuse among the different transport implementations. Some of these may be made public in the future (e.g. `plumbing/transport/internal/common`). + +## Cache + +Several different operations across `go-git` lean on caching of objects in order to achieve optimal performance. The caching functionality is defined by the [cache.Object interface](plumbing/cache/common.go#L17). + +Two built-in implementations are `cache.ObjectLRU` and `cache.BufferLRU`. However, the caching functionality can be customized by implementing the `cache.Object` interface. + +## Hash + +`go-git` uses the `crypto.Hash` type to represent hash functions. The built-in implementations are `github.com/pjbgf/sha1cd` for SHA1 and Go's `crypto/sha256` for SHA256. + +The default hash functions can be changed by calling `hash.RegisterHash`.
+```go + func init() { + hash.RegisterHash(crypto.SHA1, sha1.New) + } +``` + +New `SHA1` or `SHA256` hash functions that implement Go's standard `hash.Hash` interface can be registered by calling `hash.RegisterHash`. diff --git a/vendor/github.com/go-git/go-git/v5/Makefile b/vendor/github.com/go-git/go-git/v5/Makefile index d10922fb..3d5b54f7 100644 --- a/vendor/github.com/go-git/go-git/v5/Makefile +++ b/vendor/github.com/go-git/go-git/v5/Makefile @@ -27,7 +27,14 @@ build-git: test: @echo "running against `git version`"; \ - $(GOTEST) ./... + $(GOTEST) -race ./... + $(GOTEST) -v _examples/common_test.go _examples/common.go --examples + +TEMP_REPO := $(shell mktemp) +test-sha256: + $(GOCMD) run -tags sha256 _examples/sha256/main.go $(TEMP_REPO) + cd $(TEMP_REPO) && git fsck + rm -rf $(TEMP_REPO) test-coverage: @echo "running against `git version`"; \ @@ -35,4 +42,13 @@ test-coverage: $(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./... clean: - rm -rf $(GIT_DIST_PATH) \ No newline at end of file + rm -rf $(GIT_DIST_PATH) + +fuzz: + @go test -fuzz=FuzzParser $(PWD)/internal/revision + @go test -fuzz=FuzzDecoder $(PWD)/plumbing/format/config + @go test -fuzz=FuzzPatchDelta $(PWD)/plumbing/format/packfile + @go test -fuzz=FuzzParseSignedBytes $(PWD)/plumbing/object + @go test -fuzz=FuzzDecode $(PWD)/plumbing/object + @go test -fuzz=FuzzDecoder $(PWD)/plumbing/protocol/packp + @go test -fuzz=FuzzNewEndpoint $(PWD)/plumbing/transport diff --git a/vendor/github.com/go-git/go-git/v5/SECURITY.md b/vendor/github.com/go-git/go-git/v5/SECURITY.md new file mode 100644 index 00000000..0d2f8d03 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/SECURITY.md @@ -0,0 +1,38 @@ +# go-git Security Policy + +The purpose of this security policy is to outline `go-git`'s process +for reporting, handling and disclosing security-sensitive information. + +## Supported Versions + +The project follows a version support policy where only the latest minor +release is actively supported. Therefore, only issues that impact the latest +minor release will be fixed. Users are encouraged to upgrade to the latest +minor/patch release to benefit from the most up-to-date features, bug fixes, +and security enhancements. + +The supported versions policy applies to both the `go-git` library and its +associated repositories within the `go-git` org. + +## Reporting Security Issues + +Please report any security vulnerabilities or potential weaknesses in `go-git` +privately via go-git-security@googlegroups.com. Do not publicly disclose the +details of the vulnerability until a fix has been implemented and released. + +During the process, the project maintainers will investigate the report, so please +provide detailed information, including steps to reproduce, affected versions, and any mitigations if known. + +The project maintainers will acknowledge receipt of the report and work with +the reporter to validate and address the issue. + +Please note that `go-git` does not have any bounty programs, and therefore does +not provide financial compensation for disclosures. + +## Security Disclosure Process + +The project maintainers will make every effort to promptly address security issues. + +Once a security vulnerability is fixed, a security advisory will be published to notify users and provide appropriate mitigation measures. + +All `go-git` advisories can be found at https://github.com/go-git/go-git/security/advisories.
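The blame.go rewrite below replaces the old revision-graph algorithm with a priority-queue walk over commit parents. The public entry point is unchanged; a minimal, hypothetical usage sketch (the repository path and file name are illustrative assumptions, not part of this change):

```go
package main

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
)

func main() {
	// Open an existing repository; "/tmp/repo" is a placeholder path.
	repo, err := git.PlainOpen("/tmp/repo")
	if err != nil {
		panic(err)
	}

	// Resolve HEAD to the commit the blame should start from.
	ref, err := repo.Head()
	if err != nil {
		panic(err)
	}
	commit, err := repo.CommitObject(ref.Hash())
	if err != nil {
		panic(err)
	}

	// Blame resolves, for every line of the file, the commit that
	// last modified it, walking parents via the new priority queue.
	result, err := git.Blame(commit, "README.md")
	if err != nil {
		panic(err)
	}

	// BlameResult.String (added in this change) renders git-blame-style
	// output, using the new per-line AuthorName field.
	fmt.Print(result.String())
}
```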
diff --git a/vendor/github.com/go-git/go-git/v5/blame.go b/vendor/github.com/go-git/go-git/v5/blame.go index 43634b32..2a877dcd 100644 --- a/vendor/github.com/go-git/go-git/v5/blame.go +++ b/vendor/github.com/go-git/go-git/v5/blame.go @@ -2,16 +2,18 @@ package git import ( "bytes" + "container/heap" "errors" "fmt" + "io" "strconv" - "strings" "time" "unicode/utf8" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/object" "github.com/go-git/go-git/v5/utils/diff" + "github.com/sergi/go-diff/diffmatchpatch" ) // BlameResult represents the result of a Blame operation. @@ -29,53 +31,26 @@ type BlameResult struct { func Blame(c *object.Commit, path string) (*BlameResult, error) { // The file to blame is identified by the input arguments: // commit and path. commit is a Commit object obtained from a Repository. Path - // represents a path to a specific file contained into the repository. + // represents a path to a specific file contained in the repository. // - // Blaming a file is a two step process: + // Blaming a file is done by walking the tree in reverse order trying to find where each line was last modified. // - // 1. Create a linear history of the commits affecting a file. We use - // revlist.New for that. + // When a diff is found, it cannot immediately be assumed to have come from that commit, as it may have come from one of its + // parents. It first tries to resolve those diffs in the parents; if the change cannot be found in the + // parents, then it is assigned to the commit itself. // - // 2. Then build a graph with a node for every line in every file in - // the history of the file. + // When encountering two parents that have made the same change to a file, it will choose the parent that was merged + // into the current branch first (this is determined by the order of the parents inside the commit). // - // Each node is assigned a commit: Start by the nodes in the first - // commit. Assign that commit as the creator of all its lines. - // - // Then jump to the nodes in the next commit, and calculate the diff - // between the two files. Newly created lines get - // assigned the new commit as its origin. Modified lines also get - // this new commit. Untouched lines retain the old commit. - // - // All this work is done in the assignOrigin function which holds all - // the internal relevant data in a "blame" struct, that is not - // exported. - // - // TODO: ways to improve the efficiency of this function: - // 1. Improve revlist - // 2. Improve how to traverse the history (example a backward traversal will - // be much more efficient) - // - // TODO: ways to improve the function in general: - // 1. Add memoization between revlist and assign. - // 2. It is using much more memory than needed, see the TODOs below. + // This currently works on a line-by-line basis; if performance becomes an issue, it could be changed to work with + // hunks rather than lines, splitting hunks where necessary. b := new(blame) b.fRev = c b.path = path + b.q = new(priorityQueue) - // get all the file revisions - if err := b.fillRevs(); err != nil { - return nil, err - } - - // calculate the line tracking graph and fill in - // file contents in data.
- if err := b.fillGraphAndData(); err != nil { - return nil, err - } - - file, err := b.fRev.File(b.path) + file, err := b.fRev.File(path) if err != nil { return nil, err } @@ -83,13 +58,59 @@ func Blame(c *object.Commit, path string) (*BlameResult, error) { if err != nil { return nil, err } + finalLength := len(finalLines) - // Each node (line) holds the commit where it was introduced or - // last modified. To achieve that we use the FORWARD algorithm - // described in Zimmermann, et al. "Mining Version Archives for - // Co-changed Lines", in proceedings of the Mining Software - // Repositories workshop, Shanghai, May 22-23, 2006. - lines, err := newLines(finalLines, b.sliceGraph(len(b.graph)-1)) + needsMap := make([]lineMap, finalLength) + for i := range needsMap { + needsMap[i] = lineMap{i, i, nil, -1} + } + contents, err := file.Contents() + if err != nil { + return nil, err + } + b.q.Push(&queueItem{ + nil, + nil, + c, + path, + contents, + needsMap, + 0, + false, + 0, + }) + items := make([]*queueItem, 0) + for { + items = items[:0] + for { + if b.q.Len() == 0 { + return nil, errors.New("invalid state: no items left on the blame queue") + } + item := b.q.Pop() + items = append(items, item) + next := b.q.Peek() + if next == nil || next.Hash != item.Commit.Hash { + break + } + } + finished, err := b.addBlames(items) + if err != nil { + return nil, err + } + if finished == true { + break + } + } + if err != nil { + return nil, err + } + + b.lineToCommit = make([]*object.Commit, finalLength) + for i := range needsMap { + b.lineToCommit[i] = needsMap[i].Commit + } + + lines, err := newLines(finalLines, b.lineToCommit) if err != nil { return nil, err } @@ -105,6 +126,8 @@ func Blame(c *object.Commit, path string) (*BlameResult, error) { type Line struct { // Author is the email address of the last author that modified the line. Author string + // AuthorName is the name of the last author that modified the line. + AuthorName string // Text is the original text of the line. 
Text string // Date is when the original text of the line was introduced @@ -113,31 +136,21 @@ type Line struct { Hash plumbing.Hash } -func newLine(author, text string, date time.Time, hash plumbing.Hash) *Line { +func newLine(author, authorName, text string, date time.Time, hash plumbing.Hash) *Line { return &Line{ - Author: author, - Text: text, - Hash: hash, - Date: date, + Author: author, + AuthorName: authorName, + Text: text, + Hash: hash, + Date: date, } } func newLines(contents []string, commits []*object.Commit) ([]*Line, error) { - lcontents := len(contents) - lcommits := len(commits) - - if lcontents != lcommits { - if lcontents == lcommits-1 && contents[lcontents-1] != "\n" { - contents = append(contents, "\n") - } else { - return nil, errors.New("contents and commits have different length") - } - } - - result := make([]*Line, 0, lcontents) + result := make([]*Line, 0, len(contents)) for i := range contents { result = append(result, newLine( - commits[i].Author.Email, contents[i], + commits[i].Author.Email, commits[i].Author.Name, contents[i], commits[i].Author.When, commits[i].Hash, )) } @@ -152,151 +165,426 @@ type blame struct { path string // the commit of the final revision of the file to blame fRev *object.Commit - // the chain of revisions affecting the the file to blame - revs []*object.Commit - // the contents of the file across all its revisions - data []string - // the graph of the lines in the file across all the revisions - graph [][]*object.Commit + // resolved lines + lineToCommit []*object.Commit + // queue of commits that need resolving + q *priorityQueue } -// calculate the history of a file "path", starting from commit "from", sorted by commit date. -func (b *blame) fillRevs() error { - var err error - - b.revs, err = references(b.fRev, b.path) - return err +type lineMap struct { + Orig, Cur int + Commit *object.Commit + FromParentNo int } -// build graph of a file from its revision history -func (b *blame) fillGraphAndData() error { - //TODO: not all commits are needed, only the current rev and the prev - b.graph = make([][]*object.Commit, len(b.revs)) - b.data = make([]string, len(b.revs)) // file contents in all the revisions - // for every revision of the file, starting with the first - // one... - for i, rev := range b.revs { +func (b *blame) addBlames(curItems []*queueItem) (bool, error) { + curItem := curItems[0] + + // Simple optimisation to merge paths, there is potential to go a bit further here and check for any duplicates + // not only if they are all the same. 
+ if len(curItems) == 1 { + curItems = nil + } else if curItem.IdenticalToChild { + allSame := true + lenCurItems := len(curItems) + lowestParentNo := curItem.ParentNo + for i := 1; i < lenCurItems; i++ { + if !curItems[i].IdenticalToChild || curItem.Child != curItems[i].Child { + allSame = false + break + } + lowestParentNo = min(lowestParentNo, curItems[i].ParentNo) + } + if allSame { + curItem.Child.numParentsNeedResolving = curItem.Child.numParentsNeedResolving - lenCurItems + 1 + curItems = nil // free the memory + curItem.ParentNo = lowestParentNo + + // Now check if we can remove the parent completely + for curItem.Child.IdenticalToChild && curItem.Child.MergedChildren == nil && curItem.Child.numParentsNeedResolving == 1 { + oldChild := curItem.Child + curItem.Child = oldChild.Child + curItem.ParentNo = oldChild.ParentNo + } + } + } + + // if we have more than 1 item for this commit, create a single needsMap + if len(curItems) > 1 { + curItem.MergedChildren = make([]childToNeedsMap, len(curItems)) + for i, c := range curItems { + curItem.MergedChildren[i] = childToNeedsMap{c.Child, c.NeedsMap, c.IdenticalToChild, c.ParentNo} + } + newNeedsMap := make([]lineMap, 0, len(curItem.NeedsMap)) + newNeedsMap = append(newNeedsMap, curItems[0].NeedsMap...) + + for i := 1; i < len(curItems); i++ { + cur := curItems[i].NeedsMap + n := 0 // position in newNeedsMap + c := 0 // position in current list + for c < len(cur) { + if n == len(newNeedsMap) { + newNeedsMap = append(newNeedsMap, cur[c:]...) + break + } else if newNeedsMap[n].Cur == cur[c].Cur { + n++ + c++ + } else if newNeedsMap[n].Cur < cur[c].Cur { + n++ + } else { + newNeedsMap = append(newNeedsMap, cur[c]) + newPos := len(newNeedsMap) - 1 + for newPos > n { + newNeedsMap[newPos-1], newNeedsMap[newPos] = newNeedsMap[newPos], newNeedsMap[newPos-1] + newPos-- + } + } + } + } + curItem.NeedsMap = newNeedsMap + curItem.IdenticalToChild = false + curItem.Child = nil + curItems = nil // free the memory + } + + parents, err := parentsContainingPath(curItem.path, curItem.Commit) + if err != nil { + return false, err + } + + anyPushed := false + for parnetNo, prev := range parents { + currentHash, err := blobHash(curItem.path, curItem.Commit) + if err != nil { + return false, err + } + prevHash, err := blobHash(prev.Path, prev.Commit) + if err != nil { + return false, err + } + if currentHash == prevHash { + if len(parents) == 1 && curItem.MergedChildren == nil && curItem.IdenticalToChild { + // commit that has 1 parent and 1 child and is the same as both, bypass it completely + b.q.Push(&queueItem{ + Child: curItem.Child, + Commit: prev.Commit, + path: prev.Path, + Contents: curItem.Contents, + NeedsMap: curItem.NeedsMap, // reuse the NeedsMap as we are throwing away this item + IdenticalToChild: true, + ParentNo: curItem.ParentNo, + }) + } else { + b.q.Push(&queueItem{ + Child: curItem, + Commit: prev.Commit, + path: prev.Path, + Contents: curItem.Contents, + NeedsMap: append([]lineMap(nil), curItem.NeedsMap...), // create new slice and copy + IdenticalToChild: true, + ParentNo: parnetNo, + }) + curItem.numParentsNeedResolving++ + } + anyPushed = true + continue + } + // get the contents of the file - file, err := rev.File(b.path) + file, err := prev.Commit.File(prev.Path) if err != nil { - return nil + return false, err } - b.data[i], err = file.Contents() + prevContents, err := file.Contents() if err != nil { - return err + return false, err } - nLines := countLines(b.data[i]) - // create a node for each line - b.graph[i] = 
make([]*object.Commit, nLines) - // assign a commit to each node - // if this is the first revision, then the node is assigned to - // this first commit. - if i == 0 { - for j := 0; j < nLines; j++ { - b.graph[i][j] = b.revs[i] + + hunks := diff.Do(prevContents, curItem.Contents) + prevl := -1 + curl := -1 + need := 0 + getFromParent := make([]lineMap, 0) + out: + for h := range hunks { + hLines := countLines(hunks[h].Text) + for hl := 0; hl < hLines; hl++ { + switch { + case hunks[h].Type == diffmatchpatch.DiffEqual: + prevl++ + curl++ + if curl == curItem.NeedsMap[need].Cur { + // add to needs + getFromParent = append(getFromParent, lineMap{curl, prevl, nil, -1}) + // move to next need + need++ + if need >= len(curItem.NeedsMap) { + break out + } + } + case hunks[h].Type == diffmatchpatch.DiffInsert: + curl++ + if curl == curItem.NeedsMap[need].Cur { + // the line we want is added, it may have been added here (or by another parent), skip it for now + need++ + if need >= len(curItem.NeedsMap) { + break out + } + } + case hunks[h].Type == diffmatchpatch.DiffDelete: + prevl += hLines + continue out + default: + return false, errors.New("invalid state: invalid hunk Type") + } } - } else { - // if this is not the first commit, then assign to the old - // commit or to the new one, depending on what the diff - // says. - b.assignOrigin(i, i-1) + } + + if len(getFromParent) > 0 { + b.q.Push(&queueItem{ + curItem, + nil, + prev.Commit, + prev.Path, + prevContents, + getFromParent, + 0, + false, + parnetNo, + }) + curItem.numParentsNeedResolving++ + anyPushed = true } } - return nil -} -// sliceGraph returns a slice of commits (one per line) for a particular -// revision of a file (0=first revision). -func (b *blame) sliceGraph(i int) []*object.Commit { - fVs := b.graph[i] - result := make([]*object.Commit, 0, len(fVs)) - for _, v := range fVs { - c := *v - result = append(result, &c) + curItem.Contents = "" // no longer need, free the memory + + if !anyPushed { + return finishNeeds(curItem) } - return result + + return false, nil } -// Assigns origin to vertexes in current (c) rev from data in its previous (p) -// revision -func (b *blame) assignOrigin(c, p int) { - // assign origin based on diff info - hunks := diff.Do(b.data[p], b.data[c]) - sl := -1 // source line - dl := -1 // destination line - for h := range hunks { - hLines := countLines(hunks[h].Text) - for hl := 0; hl < hLines; hl++ { - switch { - case hunks[h].Type == 0: - sl++ - dl++ - b.graph[c][dl] = b.graph[p][sl] - case hunks[h].Type == 1: - dl++ - b.graph[c][dl] = b.revs[c] - case hunks[h].Type == -1: - sl++ - default: - panic("unreachable") +func finishNeeds(curItem *queueItem) (bool, error) { + // any needs left in the needsMap must have come from this revision + for i := range curItem.NeedsMap { + if curItem.NeedsMap[i].Commit == nil { + curItem.NeedsMap[i].Commit = curItem.Commit + curItem.NeedsMap[i].FromParentNo = -1 + } + } + + if curItem.Child == nil && curItem.MergedChildren == nil { + return true, nil + } + + if curItem.MergedChildren == nil { + return applyNeeds(curItem.Child, curItem.NeedsMap, curItem.IdenticalToChild, curItem.ParentNo) + } + + for _, ctn := range curItem.MergedChildren { + m := 0 // position in merged needs map + p := 0 // position in parent needs map + for p < len(ctn.NeedsMap) { + if ctn.NeedsMap[p].Cur == curItem.NeedsMap[m].Cur { + ctn.NeedsMap[p].Commit = curItem.NeedsMap[m].Commit + m++ + p++ + } else if ctn.NeedsMap[p].Cur < curItem.NeedsMap[m].Cur { + p++ + } else { + m++ + } + } + finished, 
err := applyNeeds(ctn.Child, ctn.NeedsMap, ctn.IdenticalToChild, ctn.ParentNo) + if finished || err != nil { + return finished, err + } + } + + return false, nil +} + +func applyNeeds(child *queueItem, needsMap []lineMap, identicalToChild bool, parentNo int) (bool, error) { + if identicalToChild { + for i := range child.NeedsMap { + l := &child.NeedsMap[i] + if l.Cur != needsMap[i].Cur || l.Orig != needsMap[i].Orig { + return false, errors.New("needsMap isn't the same? Why not??") + } + if l.Commit == nil || parentNo < l.FromParentNo { + l.Commit = needsMap[i].Commit + l.FromParentNo = parentNo + } + } + } else { + i := 0 + out: + for j := range child.NeedsMap { + l := &child.NeedsMap[j] + for needsMap[i].Orig < l.Cur { + i++ + if i == len(needsMap) { + break out + } + } + if l.Cur == needsMap[i].Orig { + if l.Commit == nil || parentNo < l.FromParentNo { + l.Commit = needsMap[i].Commit + l.FromParentNo = parentNo + } } } } + child.numParentsNeedResolving-- + if child.numParentsNeedResolving == 0 { + finished, err := finishNeeds(child) + if finished || err != nil { + return finished, err + } + } + + return false, nil } -// GoString prints the results of a Blame using git-blame's style. -func (b *blame) GoString() string { +// String prints the results of a Blame using git-blame's style. +func (b BlameResult) String() string { var buf bytes.Buffer - file, err := b.fRev.File(b.path) - if err != nil { - panic("PrettyPrint: internal error in repo.Data") - } - contents, err := file.Contents() - if err != nil { - panic("PrettyPrint: internal error in repo.Data") - } - - lines := strings.Split(contents, "\n") // max line number length - mlnl := len(strconv.Itoa(len(lines))) + mlnl := len(strconv.Itoa(len(b.Lines))) // max author length mal := b.maxAuthorLength() - format := fmt.Sprintf("%%s (%%-%ds %%%dd) %%s\n", - mal, mlnl) + format := fmt.Sprintf("%%s (%%-%ds %%s %%%dd) %%s\n", mal, mlnl) - fVs := b.graph[len(b.graph)-1] - for ln, v := range fVs { - fmt.Fprintf(&buf, format, v.Hash.String()[:8], - prettyPrintAuthor(fVs[ln]), ln+1, lines[ln]) + for ln := range b.Lines { + _, _ = fmt.Fprintf(&buf, format, b.Lines[ln].Hash.String()[:8], + b.Lines[ln].AuthorName, b.Lines[ln].Date.Format("2006-01-02 15:04:05 -0700"), ln+1, b.Lines[ln].Text) } return buf.String() } -// utility function to pretty print the author. -func prettyPrintAuthor(c *object.Commit) string { - return fmt.Sprintf("%s %s", c.Author.Name, c.Author.When.Format("2006-01-02")) -} - // utility function to calculate the number of runes needed // to print the longest author name in the blame of a file. 
-func (b *blame) maxAuthorLength() int { - memo := make(map[plumbing.Hash]struct{}, len(b.graph)-1) - fVs := b.graph[len(b.graph)-1] +func (b BlameResult) maxAuthorLength() int { m := 0 - for ln := range fVs { - if _, ok := memo[fVs[ln].Hash]; ok { - continue - } - memo[fVs[ln].Hash] = struct{}{} - m = max(m, utf8.RuneCountInString(prettyPrintAuthor(fVs[ln]))) + for ln := range b.Lines { + m = max(m, utf8.RuneCountInString(b.Lines[ln].AuthorName)) } return m } +func min(a, b int) int { + if a < b { + return a + } + return b +} + func max(a, b int) int { if a > b { return a } return b } + +type childToNeedsMap struct { + Child *queueItem + NeedsMap []lineMap + IdenticalToChild bool + ParentNo int +} + +type queueItem struct { + Child *queueItem + MergedChildren []childToNeedsMap + Commit *object.Commit + path string + Contents string + NeedsMap []lineMap + numParentsNeedResolving int + IdenticalToChild bool + ParentNo int +} + +type priorityQueueImp []*queueItem + +func (pq *priorityQueueImp) Len() int { return len(*pq) } +func (pq *priorityQueueImp) Less(i, j int) bool { + return !(*pq)[i].Commit.Less((*pq)[j].Commit) +} +func (pq *priorityQueueImp) Swap(i, j int) { (*pq)[i], (*pq)[j] = (*pq)[j], (*pq)[i] } +func (pq *priorityQueueImp) Push(x any) { *pq = append(*pq, x.(*queueItem)) } +func (pq *priorityQueueImp) Pop() any { + n := len(*pq) + ret := (*pq)[n-1] + (*pq)[n-1] = nil // avoid memory leak + *pq = (*pq)[0 : n-1] + + return ret +} +func (pq *priorityQueueImp) Peek() *object.Commit { + if len(*pq) == 0 { + return nil + } + return (*pq)[0].Commit +} + +type priorityQueue priorityQueueImp + +func (pq *priorityQueue) Init() { heap.Init((*priorityQueueImp)(pq)) } +func (pq *priorityQueue) Len() int { return (*priorityQueueImp)(pq).Len() } +func (pq *priorityQueue) Push(c *queueItem) { + heap.Push((*priorityQueueImp)(pq), c) +} +func (pq *priorityQueue) Pop() *queueItem { + return heap.Pop((*priorityQueueImp)(pq)).(*queueItem) +} +func (pq *priorityQueue) Peek() *object.Commit { return (*priorityQueueImp)(pq).Peek() } + +type parentCommit struct { + Commit *object.Commit + Path string +} + +func parentsContainingPath(path string, c *object.Commit) ([]parentCommit, error) { + // TODO: benchmark this method making git.object.Commit.parent public instead of using + // an iterator + var result []parentCommit + iter := c.Parents() + for { + parent, err := iter.Next() + if err == io.EOF { + return result, nil + } + if err != nil { + return nil, err + } + if _, err := parent.File(path); err == nil { + result = append(result, parentCommit{parent, path}) + } else { + // look for renames + patch, err := parent.Patch(c) + if err != nil { + return nil, err + } else if patch != nil { + for _, fp := range patch.FilePatches() { + from, to := fp.Files() + if from != nil && to != nil && to.Path() == path { + result = append(result, parentCommit{parent, from.Path()}) + break + } + } + } + } + } +} + +func blobHash(path string, commit *object.Commit) (plumbing.Hash, error) { + file, err := commit.File(path) + if err != nil { + return plumbing.ZeroHash, err + } + return file.Hash, nil +} diff --git a/vendor/github.com/go-git/go-git/v5/common.go b/vendor/github.com/go-git/go-git/v5/common.go index f837a265..6174339a 100644 --- a/vendor/github.com/go-git/go-git/v5/common.go +++ b/vendor/github.com/go-git/go-git/v5/common.go @@ -2,8 +2,6 @@ package git import "strings" -const defaultDotGitPath = ".git" - // countLines returns the number of lines in a string à la git. The newline character is
assumed to be '\n'. The empty string // contains 0 lines. If the last line of the string doesn't end with a diff --git a/vendor/github.com/go-git/go-git/v5/config/BUILD.bazel b/vendor/github.com/go-git/go-git/v5/config/BUILD.bazel index 6c1d2fd1..776fb08c 100644 --- a/vendor/github.com/go-git/go-git/v5/config/BUILD.bazel +++ b/vendor/github.com/go-git/go-git/v5/config/BUILD.bazel @@ -7,14 +7,15 @@ go_library( "config.go", "modules.go", "refspec.go", + "url.go", ], importmap = "peridot.resf.org/vendor/github.com/go-git/go-git/v5/config", importpath = "github.com/go-git/go-git/v5/config", visibility = ["//visibility:public"], deps = [ + "//vendor/github.com/go-git/go-billy/v5/osfs", "//vendor/github.com/go-git/go-git/v5/internal/url", "//vendor/github.com/go-git/go-git/v5/plumbing", "//vendor/github.com/go-git/go-git/v5/plumbing/format/config", - "//vendor/github.com/mitchellh/go-homedir", ], ) diff --git a/vendor/github.com/go-git/go-git/v5/config/branch.go b/vendor/github.com/go-git/go-git/v5/config/branch.go index fe86cf54..db2cb499 100644 --- a/vendor/github.com/go-git/go-git/v5/config/branch.go +++ b/vendor/github.com/go-git/go-git/v5/config/branch.go @@ -2,6 +2,7 @@ package config import ( "errors" + "strings" "github.com/go-git/go-git/v5/plumbing" format "github.com/go-git/go-git/v5/plumbing/format/config" @@ -26,6 +27,12 @@ type Branch struct { // "true" and "interactive". "false" is undocumented and // typically represented by the non-existence of this field Rebase string + // Description explains what the branch is for. + // Multi-line explanations may be used. + // + // Original git command to edit: + // git branch --edit-description + Description string raw *format.Subsection } @@ -47,7 +54,7 @@ func (b *Branch) Validate() error { return errBranchInvalidRebase } - return nil + return plumbing.NewBranchReferenceName(b.Name).Validate() } func (b *Branch) marshal() *format.Subsection { @@ -75,9 +82,27 @@ func (b *Branch) marshal() *format.Subsection { b.raw.SetOption(rebaseKey, b.Rebase) } + if b.Description == "" { + b.raw.RemoveOption(descriptionKey) + } else { + desc := quoteDescription(b.Description) + b.raw.SetOption(descriptionKey, desc) + } + return b.raw } +// hack to trigger conditional quoting in the +// plumbing/format/config/Encoder.encodeOptions +// +// Current Encoder implementation uses Go %q format if value contains a backslash character, +// which is not consistent with reference git implementation. +// git just replaces newline characters with \n, while Encoder prints them directly. +// Until value quoting fix, we should escape description value by replacing newline characters with \n. +func quoteDescription(desc string) string { + return strings.ReplaceAll(desc, "\n", `\n`) +} + func (b *Branch) unmarshal(s *format.Subsection) error { b.raw = s @@ -85,6 +110,14 @@ func (b *Branch) unmarshal(s *format.Subsection) error { b.Remote = b.raw.Options.Get(remoteSection) b.Merge = plumbing.ReferenceName(b.raw.Options.Get(mergeKey)) b.Rebase = b.raw.Options.Get(rebaseKey) + b.Description = unquoteDescription(b.raw.Options.Get(descriptionKey)) return b.Validate() } + +// hack to enable conditional quoting in the +// plumbing/format/config/Encoder.encodeOptions +// goto quoteDescription for details. 
+func unquoteDescription(desc string) string { + return strings.ReplaceAll(desc, `\n`, "\n") +} diff --git a/vendor/github.com/go-git/go-git/v5/config/config.go b/vendor/github.com/go-git/go-git/v5/config/config.go index 7d6ab588..6d41c15d 100644 --- a/vendor/github.com/go-git/go-git/v5/config/config.go +++ b/vendor/github.com/go-git/go-git/v5/config/config.go @@ -6,15 +6,15 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "path/filepath" "sort" "strconv" + "github.com/go-git/go-billy/v5/osfs" "github.com/go-git/go-git/v5/internal/url" + "github.com/go-git/go-git/v5/plumbing" format "github.com/go-git/go-git/v5/plumbing/format/config" - "github.com/mitchellh/go-homedir" ) const ( @@ -59,12 +59,14 @@ type Config struct { // CommentChar is the character indicating the start of a // comment for commands like commit and tag CommentChar string + // RepositoryFormatVersion identifies the repository format and layout version. + RepositoryFormatVersion format.RepositoryFormatVersion } User struct { - // Name is the personal name of the author and the commiter of a commit. + // Name is the personal name of the author and the committer of a commit. Name string - // Email is the email of the author and the commiter of a commit. + // Email is the email of the author and the committer of a commit. Email string } @@ -76,9 +78,9 @@ type Config struct { } Committer struct { - // Name is the personal name of the commiter of a commit. + // Name is the personal name of the committer of a commit. Name string - // Email is the email of the the commiter of a commit. + // Email is the email of the committer of a commit. Email string } @@ -89,6 +91,24 @@ type Config struct { Window uint } + Init struct { + // DefaultBranch Allows overriding the default branch name + // e.g. when initializing a new repository or when cloning + // an empty repository. + DefaultBranch string + } + + Extensions struct { + // ObjectFormat specifies the hash algorithm to use. The + // acceptable values are sha1 and sha256. If not specified, + // sha1 is assumed. It is an error to specify this key unless + // core.repositoryFormatVersion is 1. + // + // This setting must not be changed after repository initialization + // (e.g. clone or init). + ObjectFormat format.ObjectFormat + } + // Remotes list of repository remotes, the key of the map is the name // of the remote, should equal to RemoteConfig.Name. Remotes map[string]*RemoteConfig @@ -98,6 +118,9 @@ type Config struct { // Branches list of branches, the key is the branch name and should // equal Branch.Name Branches map[string]*Branch + // URLs list of url rewrite rules, if repo url starts with URL.InsteadOf value, it will be replaced with the + // key instead. + URLs map[string]*URL // Raw contains the raw information of a config file. The main goal is // preserve the parsed information from the original format, to avoid // dropping unsupported fields. @@ -110,6 +133,7 @@ func NewConfig() *Config { Remotes: make(map[string]*RemoteConfig), Submodules: make(map[string]*Submodule), Branches: make(map[string]*Branch), + URLs: make(map[string]*URL), Raw: format.New(), } @@ -120,7 +144,7 @@ func NewConfig() *Config { // ReadConfig reads a config file from a io.Reader. func ReadConfig(r io.Reader) (*Config, error) { - b, err := ioutil.ReadAll(r) + b, err := io.ReadAll(r) if err != nil { return nil, err } @@ -134,11 +158,11 @@ func ReadConfig(r io.Reader) (*Config, error) { } // LoadConfig loads a config file from a given scope. 
The returned Config, -// contains exclusively information fom the given scope. If couldn't find a -// config file to the given scope, a empty one is returned. +// contains exclusively information from the given scope. If it couldn't find a +// config file to the given scope, an empty one is returned. func LoadConfig(scope Scope) (*Config, error) { if scope == LocalScope { - return nil, fmt.Errorf("LocalScope should be read from the a ConfigStorer.") + return nil, fmt.Errorf("LocalScope should be read from the a ConfigStorer") } files, err := Paths(scope) @@ -147,7 +171,7 @@ func LoadConfig(scope Scope) (*Config, error) { } for _, file := range files { - f, err := os.Open(file) + f, err := osfs.Default.Open(file) if err != nil { if os.IsNotExist(err) { continue @@ -173,7 +197,7 @@ func Paths(scope Scope) ([]string, error) { files = append(files, filepath.Join(xdg, "git/config")) } - home, err := homedir.Dir() + home, err := os.UserHomeDir() if err != nil { return nil, err } @@ -215,24 +239,32 @@ func (c *Config) Validate() error { } const ( - remoteSection = "remote" - submoduleSection = "submodule" - branchSection = "branch" - coreSection = "core" - packSection = "pack" - userSection = "user" - authorSection = "author" - committerSection = "committer" - fetchKey = "fetch" - urlKey = "url" - bareKey = "bare" - worktreeKey = "worktree" - commentCharKey = "commentChar" - windowKey = "window" - mergeKey = "merge" - rebaseKey = "rebase" - nameKey = "name" - emailKey = "email" + remoteSection = "remote" + submoduleSection = "submodule" + branchSection = "branch" + coreSection = "core" + packSection = "pack" + userSection = "user" + authorSection = "author" + committerSection = "committer" + initSection = "init" + urlSection = "url" + extensionsSection = "extensions" + fetchKey = "fetch" + urlKey = "url" + bareKey = "bare" + worktreeKey = "worktree" + commentCharKey = "commentChar" + windowKey = "window" + mergeKey = "merge" + rebaseKey = "rebase" + nameKey = "name" + emailKey = "email" + descriptionKey = "description" + defaultBranchKey = "defaultBranch" + repositoryFormatVersionKey = "repositoryformatversion" + objectFormat = "objectformat" + mirrorKey = "mirror" // DefaultPackWindow holds the number of previous objects used to // generate deltas. The value 10 is the same used by git command. @@ -251,6 +283,7 @@ func (c *Config) Unmarshal(b []byte) error { c.unmarshalCore() c.unmarshalUser() + c.unmarshalInit() if err := c.unmarshalPack(); err != nil { return err } @@ -260,6 +293,10 @@ func (c *Config) Unmarshal(b []byte) error { return err } + if err := c.unmarshalURLs(); err != nil { + return err + } + return c.unmarshalRemotes() } @@ -313,6 +350,25 @@ func (c *Config) unmarshalRemotes() error { c.Remotes[r.Name] = r } + // Apply insteadOf url rules + for _, r := range c.Remotes { + r.applyURLRules(c.URLs) + } + + return nil +} + +func (c *Config) unmarshalURLs() error { + s := c.Raw.Section(urlSection) + for _, sub := range s.Subsections { + r := &URL{} + if err := r.unmarshal(sub); err != nil { + return err + } + + c.URLs[r.Name] = r + } + return nil } @@ -344,14 +400,22 @@ func (c *Config) unmarshalBranches() error { return nil } +func (c *Config) unmarshalInit() { + s := c.Raw.Section(initSection) + c.Init.DefaultBranch = s.Options.Get(defaultBranchKey) +} + // Marshal returns Config encoded as a git-config file. 
func (c *Config) Marshal() ([]byte, error) { c.marshalCore() + c.marshalExtensions() c.marshalUser() c.marshalPack() c.marshalRemotes() c.marshalSubmodules() c.marshalBranches() + c.marshalURLs() + c.marshalInit() buf := bytes.NewBuffer(nil) if err := format.NewEncoder(buf).Encode(c.Raw); err != nil { @@ -364,12 +428,24 @@ func (c *Config) marshalCore() { s := c.Raw.Section(coreSection) s.SetOption(bareKey, fmt.Sprintf("%t", c.Core.IsBare)) + if string(c.Core.RepositoryFormatVersion) != "" { + s.SetOption(repositoryFormatVersionKey, string(c.Core.RepositoryFormatVersion)) + } if c.Core.Worktree != "" { s.SetOption(worktreeKey, c.Core.Worktree) } } +func (c *Config) marshalExtensions() { + // Extensions are only supported on Version 1, therefore + // ignore them otherwise. + if c.Core.RepositoryFormatVersion == format.Version_1 { + s := c.Raw.Section(extensionsSection) + s.SetOption(objectFormat, string(c.Extensions.ObjectFormat)) + } +} + func (c *Config) marshalUser() { s := c.Raw.Section(userSection) if c.User.Name != "" { @@ -475,6 +551,27 @@ func (c *Config) marshalBranches() { s.Subsections = newSubsections } +func (c *Config) marshalURLs() { + s := c.Raw.Section(urlSection) + s.Subsections = make(format.Subsections, len(c.URLs)) + + var i int + for _, r := range c.URLs { + section := r.marshal() + // write each url rule back as a subsection; only the insteadOf + // option is persisted to the config file. + s.Subsections[i] = section + i++ + } +} + +func (c *Config) marshalInit() { + s := c.Raw.Section(initSection) + if c.Init.DefaultBranch != "" { + s.SetOption(defaultBranchKey, c.Init.DefaultBranch) + } +} + // RemoteConfig contains the configuration for a given remote repository. type RemoteConfig struct { // Name of the remote Name string // URLs the URLs of a remote repository. It must be non-empty. Fetch will // always use the first URL, while push will use all of them. URLs []string + // Mirror indicates that the repository is a mirror of the remote. + Mirror bool + + // insteadOfRulesApplied indicates whether the urls have been modified by insteadOf rules + insteadOfRulesApplied bool + // originalURLs are the urls before applying insteadOf rules + originalURLs []string + // Fetch the default set of "refspec" for fetch operation Fetch []RefSpec @@ -510,7 +615,7 @@ func (c *RemoteConfig) Validate() error { c.Fetch = []RefSpec{RefSpec(fmt.Sprintf(DefaultFetchRefSpec, c.Name))} } - return nil + return plumbing.NewRemoteHEADReferenceName(c.Name).Validate() } func (c *RemoteConfig) unmarshal(s *format.Subsection) error { @@ -529,6 +634,7 @@ func (c *RemoteConfig) unmarshal(s *format.Subsection) error { c.Name = c.raw.Name c.URLs = append([]string(nil), c.raw.Options.GetAll(urlKey)...) c.Fetch = fetch + c.Mirror = c.raw.Options.Get(mirrorKey) == "true" return nil } @@ -542,7 +648,12 @@ func (c *RemoteConfig) marshal() *format.Subsection { if len(c.URLs) == 0 { c.raw.RemoveOption(urlKey) } else { - c.raw.SetOption(urlKey, c.URLs...) + urls := c.URLs + if c.insteadOfRulesApplied { + urls = c.originalURLs + } + + c.raw.SetOption(urlKey, urls...) } if len(c.Fetch) == 0 { @@ -556,9 +667,30 @@ c.raw.SetOption(fetchKey, values...)
} + if c.Mirror { + c.raw.SetOption(mirrorKey, strconv.FormatBool(c.Mirror)) + } + return c.raw } func (c *RemoteConfig) IsFirstURLLocal() bool { return url.IsLocalEndpoint(c.URLs[0]) } + +func (c *RemoteConfig) applyURLRules(urlRules map[string]*URL) { + // save original urls + originalURLs := make([]string, len(c.URLs)) + copy(originalURLs, c.URLs) + + for i, url := range c.URLs { + if matchingURLRule := findLongestInsteadOfMatch(url, urlRules); matchingURLRule != nil { + c.URLs[i] = matchingURLRule.ApplyInsteadOf(c.URLs[i]) + c.insteadOfRulesApplied = true + } + } + + if c.insteadOfRulesApplied { + c.originalURLs = originalURLs + } +} diff --git a/vendor/github.com/go-git/go-git/v5/config/refspec.go b/vendor/github.com/go-git/go-git/v5/config/refspec.go index 4bfaa37b..e2cf8c97 100644 --- a/vendor/github.com/go-git/go-git/v5/config/refspec.go +++ b/vendor/github.com/go-git/go-git/v5/config/refspec.go @@ -64,7 +64,7 @@ func (s RefSpec) IsExactSHA1() bool { return plumbing.IsHash(s.Src()) } -// Src return the src side. +// Src returns the src side. func (s RefSpec) Src() string { spec := string(s) diff --git a/vendor/github.com/go-git/go-git/v5/config/url.go b/vendor/github.com/go-git/go-git/v5/config/url.go new file mode 100644 index 00000000..114d6b26 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/config/url.go @@ -0,0 +1,81 @@ +package config + +import ( + "errors" + "strings" + + format "github.com/go-git/go-git/v5/plumbing/format/config" +) + +var ( + errURLEmptyInsteadOf = errors.New("url config: empty insteadOf") +) + +// URL defines URL rewrite rules +type URL struct { + // Name is the new base url + Name string + // Any URL that starts with this value will be rewritten to start, instead, with <Name>. + // When more than one insteadOf string matches a given URL, the longest match is used. + InsteadOf string + + // raw representation of the subsection, filled when marshal or unmarshal + // are called.
+ raw *format.Subsection +} + +// Validate validates the fields of a URL rule +func (b *URL) Validate() error { + if b.InsteadOf == "" { + return errURLEmptyInsteadOf + } + + return nil +} + +const ( + insteadOfKey = "insteadOf" +) + +func (u *URL) unmarshal(s *format.Subsection) error { + u.raw = s + + u.Name = s.Name + u.InsteadOf = u.raw.Option(insteadOfKey) + return nil +} + +func (u *URL) marshal() *format.Subsection { + if u.raw == nil { + u.raw = &format.Subsection{} + } + + u.raw.Name = u.Name + u.raw.SetOption(insteadOfKey, u.InsteadOf) + + return u.raw +} + +func findLongestInsteadOfMatch(remoteURL string, urls map[string]*URL) *URL { + var longestMatch *URL + for _, u := range urls { + if !strings.HasPrefix(remoteURL, u.InsteadOf) { + continue + } + + // according to spec if there is more than one match, take the longest + if longestMatch == nil || len(longestMatch.InsteadOf) < len(u.InsteadOf) { + longestMatch = u + } + } + + return longestMatch +} + +func (u *URL) ApplyInsteadOf(url string) string { + if !strings.HasPrefix(url, u.InsteadOf) { + return url + } + + return u.Name + url[len(u.InsteadOf):] +} diff --git a/vendor/github.com/go-git/go-git/v5/internal/path_util/BUILD.bazel b/vendor/github.com/go-git/go-git/v5/internal/path_util/BUILD.bazel new file mode 100644 index 00000000..fe5f700f --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/internal/path_util/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "path_util", + srcs = ["path_util.go"], + importmap = "peridot.resf.org/vendor/github.com/go-git/go-git/v5/internal/path_util", + importpath = "github.com/go-git/go-git/v5/internal/path_util", + visibility = ["//vendor/github.com/go-git/go-git/v5:__subpackages__"], +) diff --git a/vendor/github.com/go-git/go-git/v5/internal/path_util/path_util.go b/vendor/github.com/go-git/go-git/v5/internal/path_util/path_util.go new file mode 100644 index 00000000..48e4a3d0 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/internal/path_util/path_util.go @@ -0,0 +1,29 @@ +package path_util + +import ( + "os" + "os/user" + "strings" +) + +func ReplaceTildeWithHome(path string) (string, error) { + if strings.HasPrefix(path, "~") { + firstSlash := strings.Index(path, "/") + if firstSlash == 1 { + home, err := os.UserHomeDir() + if err != nil { + return path, err + } + return strings.Replace(path, "~", home, 1), nil + } else if firstSlash > 1 { + username := path[1:firstSlash] + userAccount, err := user.Lookup(username) + if err != nil { + return path, err + } + return strings.Replace(path, path[:firstSlash], userAccount.HomeDir, 1), nil + } + } + + return path, nil +} diff --git a/vendor/github.com/go-git/go-git/v5/internal/revision/parser.go b/vendor/github.com/go-git/go-git/v5/internal/revision/parser.go index 8facf17f..8a2a7190 100644 --- a/vendor/github.com/go-git/go-git/v5/internal/revision/parser.go +++ b/vendor/github.com/go-git/go-git/v5/internal/revision/parser.go @@ -322,6 +322,8 @@ func (p *Parser) parseAt() (Revisioner, error) { } return AtDate{t}, nil + case tok == eof: + return nil, &ErrInvalidRevision{s: `missing "}" in @{} structure`} default: date += lit } @@ -424,6 +426,8 @@ func (p *Parser) parseCaretBraces() (Revisioner, error) { p.unscan() case tok != slash && start: return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" is not a valid revision suffix brace component`, lit)} + case tok == eof: + return nil, &ErrInvalidRevision{s: `missing "}" in ^{} structure`} case tok != cbrace: p.unscan() re += lit
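The url.<base>.insteadOf handling added here follows git's rule that when several insteadOf prefixes match, the longest one wins, and the matched prefix is replaced by the subsection name. A standalone sketch of the same idea with hypothetical rule data, independent of the go-git types:

package main

import (
	"fmt"
	"strings"
)

// rewrite maps an insteadOf prefix to the base URL that replaces it.
func rewrite(remoteURL string, rules map[string]string) string {
	longest := ""
	for prefix := range rules {
		if strings.HasPrefix(remoteURL, prefix) && len(prefix) > len(longest) {
			longest = prefix // longest matching prefix wins
		}
	}
	if longest == "" {
		return remoteURL
	}
	return rules[longest] + strings.TrimPrefix(remoteURL, longest)
}

func main() {
	rules := map[string]string{"https://github.com/": "ssh://git@github.com/"}
	fmt.Println(rewrite("https://github.com/org/repo.git", rules))
	// ssh://git@github.com/org/repo.git
}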
diff --git a/vendor/github.com/go-git/go-git/v5/internal/url/url.go b/vendor/github.com/go-git/go-git/v5/internal/url/url.go index 14cf133d..26624486 100644 --- a/vendor/github.com/go-git/go-git/v5/internal/url/url.go +++ b/vendor/github.com/go-git/go-git/v5/internal/url/url.go @@ -5,8 +5,10 @@ import ( ) var ( - isSchemeRegExp = regexp.MustCompile(`^[^:]+://`) - scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5})(?:\/|:))?(?P<path>[^\\].*\/[^\\].*)$`) + isSchemeRegExp = regexp.MustCompile(`^[^:]+://`) + + // Ref: https://github.com/git/git/blob/master/Documentation/urls.txt#L37 + scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5}):)?(?P<path>[^\\].*)$`) ) // MatchesScheme returns true if the given string matches a URL-like diff --git a/vendor/github.com/go-git/go-git/v5/object_walker.go b/vendor/github.com/go-git/go-git/v5/object_walker.go index 3fcdd299..3a537bd8 100644 --- a/vendor/github.com/go-git/go-git/v5/object_walker.go +++ b/vendor/github.com/go-git/go-git/v5/object_walker.go @@ -60,7 +60,7 @@ func (p *objectWalker) walkObjectTree(hash plumbing.Hash) error { // Fetch the object. obj, err := object.GetObject(p.Storer, hash) if err != nil { - return fmt.Errorf("Getting object %s failed: %v", hash, err) + return fmt.Errorf("getting object %s failed: %v", hash, err) } // Walk all children depending on object type. switch obj := obj.(type) { @@ -98,7 +98,7 @@ func (p *objectWalker) walkObjectTree(hash plumbing.Hash) error { return p.walkObjectTree(obj.Target) default: // Error out on unhandled object types. - return fmt.Errorf("Unknown object %X %s %T\n", obj.ID(), obj.Type(), obj) + return fmt.Errorf("unknown object %X %s %T", obj.ID(), obj.Type(), obj) } return nil } diff --git a/vendor/github.com/go-git/go-git/v5/options.go b/vendor/github.com/go-git/go-git/v5/options.go index 2f936315..d7776dad 100644 --- a/vendor/github.com/go-git/go-git/v5/options.go +++ b/vendor/github.com/go-git/go-git/v5/options.go @@ -7,12 +7,13 @@ import ( "strings" "time" + "github.com/ProtonMail/go-crypto/openpgp" "github.com/go-git/go-git/v5/config" "github.com/go-git/go-git/v5/plumbing" + formatcfg "github.com/go-git/go-git/v5/plumbing/format/config" "github.com/go-git/go-git/v5/plumbing/object" "github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband" "github.com/go-git/go-git/v5/plumbing/transport" - "golang.org/x/crypto/openpgp" ) // SubmoduleRescursivity defines how depth will affect any submodule recursive @@ -45,6 +46,14 @@ type CloneOptions struct { ReferenceName plumbing.ReferenceName // Fetch only ReferenceName if true. SingleBranch bool + // Mirror clones the repository as a mirror. + // + // Compared to a bare clone, mirror not only maps local branches of the + // source to local branches of the target, it maps all refs (including + // remote-tracking branches, notes etc.) and sets up a refspec configuration + // such that all these refs are overwritten by a git remote update in the + // target repository. + Mirror bool // No checkout of HEAD after clone if true. NoCheckout bool // Limit fetching to the specified number of commits. @@ -53,6 +62,9 @@ type CloneOptions struct { // within, using their default settings. This option is ignored if the // cloned repository does not have a worktree. RecurseSubmodules SubmoduleRescursivity + // ShallowSubmodules limits cloning submodules to 1 level of depth. + // It matches the git command --shallow-submodules.
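The tightened scpLikeUrlRegExp no longer insists that the path part contain a slash, and only treats a colon-delimited run of digits as a port. A quick demonstration of the named groups using only the standard library:

package main

import (
	"fmt"
	"regexp"
)

// Same shape as the patched scpLikeUrlRegExp above.
var scpLike = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5}):)?(?P<path>[^\\].*)$`)

func main() {
	m := scpLike.FindStringSubmatch("git@github.com:rocky-linux/srpmproc.git")
	if m == nil {
		fmt.Println("no match")
		return
	}
	for i, name := range scpLike.SubexpNames() {
		if i > 0 && name != "" {
			fmt.Printf("%s = %q\n", name, m[i])
		}
	}
	// user = "git", host = "github.com", port = "", path = "rocky-linux/srpmproc.git"
}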
+ ShallowSubmodules bool // Progress is where the human readable information sent by the server is // stored, if nil nothing is stored and the capability (if supported) // no-progress, is sent to the server to avoid send this information. @@ -60,8 +72,42 @@ type CloneOptions struct { // Tags describe how the tags will be fetched from the remote repository, // by default is AllTags. Tags TagMode + // InsecureSkipTLS skips ssl verify if protocol is https + InsecureSkipTLS bool + // CABundle specify additional ca bundle with system cert pool + CABundle []byte + // ProxyOptions provides info required for connecting to a proxy. + ProxyOptions transport.ProxyOptions + // When the repository to clone is on the local machine, instead of + // using hard links, automatically setup .git/objects/info/alternates + // to share the objects with the source repository. + // The resulting repository starts out without any object of its own. + // NOTE: this is a possibly dangerous operation; do not use it unless + // you understand what it does. + // + // [Reference]: https://git-scm.com/docs/git-clone#Documentation/git-clone.txt---shared + Shared bool } +// MergeOptions describes how a merge should be performed. +type MergeOptions struct { + // Strategy defines the merge strategy to be used. + Strategy MergeStrategy +} + +// MergeStrategy represents the different types of merge strategies. +type MergeStrategy int8 + +const ( + // FastForwardMerge represents a Git merge strategy where the current + // branch can be simply updated to point to the HEAD of the branch being + // merged. This is only possible if the history of the branch being merged + // is a linear descendant of the current branch, with no conflicting commits. + // + // This is the default option. + FastForwardMerge MergeStrategy = iota +) + // Validate validates the fields and sets the default values. func (o *CloneOptions) Validate() error { if o.URL == "" { @@ -87,6 +133,8 @@ func (o *CloneOptions) Validate() error { type PullOptions struct { // Name of the remote to be pulled. If empty, uses the default. RemoteName string + // RemoteURL overrides the remote repo address with a custom URL + RemoteURL string // Remote branch to clone. If empty, uses HEAD. ReferenceName plumbing.ReferenceName // Fetch only ReferenceName if true. @@ -105,6 +153,12 @@ type PullOptions struct { // Force allows the pull to update a local branch even when the remote // branch does not descend from it. Force bool + // InsecureSkipTLS skips ssl verify if protocol is https + InsecureSkipTLS bool + // CABundle specify additional ca bundle with system cert pool + CABundle []byte + // ProxyOptions provides info required for connecting to a proxy. + ProxyOptions transport.ProxyOptions } // Validate validates the fields and sets the default values. @@ -131,7 +185,7 @@ const ( // AllTags fetch all tags from the remote (i.e., fetch remote tags // refs/tags/* into local tags with the same name) AllTags - //NoTags fetch no tags from the remote at all + // NoTags fetch no tags from the remote at all NoTags ) @@ -139,7 +193,9 @@ const ( type FetchOptions struct { // Name of the remote to fetch from. Defaults to origin. RemoteName string - RefSpecs []config.RefSpec + // RemoteURL overrides the remote repo address with a custom URL + RemoteURL string + RefSpecs []config.RefSpec // Depth limit fetching to the specified number of commits from the tip of // each remote branch history. 
Depth int @@ -155,6 +211,15 @@ type FetchOptions struct { // Force allows the fetch to update a local branch even when the remote // branch does not descend from it. Force bool + // InsecureSkipTLS skips ssl verify if protocol is https + InsecureSkipTLS bool + // CABundle specify additional ca bundle with system cert pool + CABundle []byte + // ProxyOptions provides info required for connecting to a proxy. + ProxyOptions transport.ProxyOptions + // Prune specifies that local refs that match the given RefSpecs and that do + // not exist remotely will be removed. + Prune bool } // Validate validates the fields and sets the default values. @@ -180,8 +245,16 @@ func (o *FetchOptions) Validate() error { type PushOptions struct { // RemoteName is the name of the remote to be pushed to. RemoteName string - // RefSpecs specify what destination ref to update with what source - // object. A refspec with empty src can be used to delete a reference. + // RemoteURL overrides the remote repo address with a custom URL + RemoteURL string + // RefSpecs specify what destination ref to update with what source object. + // + // The format of a <refspec> parameter is an optional plus +, followed by + // the source object <src>, followed by a colon :, followed by the destination ref <dst>. + // The <src> is often the name of the branch you would want to push, but it can be a SHA-1. + // The <dst> tells which ref on the remote side is updated with this push. + // + // A refspec with empty src can be used to delete a reference. RefSpecs []config.RefSpec // Auth credentials, if required, to use with the remote repository. Auth transport.AuthMethod @@ -194,6 +267,37 @@ type PushOptions struct { // Force allows the push to update a remote branch even when the local // branch does not descend from it. Force bool + // InsecureSkipTLS skips ssl verify if protocol is https + InsecureSkipTLS bool + // CABundle specify additional ca bundle with system cert pool + CABundle []byte + // RequireRemoteRefs only allows a remote ref to be updated if its current + // value is the one specified here. + RequireRemoteRefs []config.RefSpec + // FollowTags will send any annotated tags with a commit target reachable from + // the refs already being pushed + FollowTags bool + // ForceWithLease allows a force push as long as the remote ref adheres to a "lease" + ForceWithLease *ForceWithLease + // PushOptions sets options to be transferred to the server during push. + Options map[string]string + // Atomic sets option to be an atomic push + Atomic bool + // ProxyOptions provides info required for connecting to a proxy. + ProxyOptions transport.ProxyOptions +} + +// ForceWithLease sets fields on the lease. +// If neither RefName nor Hash are set, ForceWithLease protects +// all refs in the refspec by ensuring the ref of the remote in the local repository +// matches the one in the ref advertisement. +type ForceWithLease struct { + // RefName, when set will protect the ref by ensuring it matches the + // hash in the ref advertisement. + RefName plumbing.ReferenceName + // Hash is the expected object id of RefName. The push will be rejected unless this + // matches the corresponding object id of RefName in the refs advertisement. + Hash plumbing.Hash } // Validate validates the fields and sets the default values. @@ -230,6 +334,9 @@ type SubmoduleUpdateOptions struct { RecurseSubmodules SubmoduleRescursivity // Auth credentials, if required, to use with the remote repository.
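ForceWithLease mirrors git push --force-with-lease: with a zero-value lease every pushed ref is protected, and the push is rejected if the remote moved past what the local repository last saw advertised. A hedged usage sketch (the repository path and refspec are placeholders):

package main

import (
	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/config"
)

func main() {
	repo, err := git.PlainOpen("/path/to/repo") // placeholder path
	if err != nil {
		panic(err)
	}

	err = repo.Push(&git.PushOptions{
		RefSpecs:       []config.RefSpec{"refs/heads/main:refs/heads/main"},
		ForceWithLease: &git.ForceWithLease{}, // protect all pushed refs
	})
	if err != nil && err != git.NoErrAlreadyUpToDate {
		panic(err)
	}
}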
Auth transport.AuthMethod + // Depth limit fetching to the specified number of commits from the tip of + // each remote branch history. + Depth int } var ( @@ -239,9 +346,9 @@ var ( // CheckoutOptions describes how a checkout operation should be performed. type CheckoutOptions struct { - // Hash is the hash of the commit to be checked out. If used, HEAD will be - // in detached mode. If Create is not used, Branch and Hash are mutually - // exclusive. + // Hash is the hash of a commit or tag to be checked out. If used, HEAD + // will be in detached mode. If Create is not used, Branch and Hash are + // mutually exclusive. Hash plumbing.Hash // Branch to be checked out, if Branch and Hash are empty is set to `master`. Branch plumbing.ReferenceName @@ -255,6 +362,8 @@ type CheckoutOptions struct { // target branch. Force and Keep are mutually exclusive, should not be both // set to true. Keep bool + // SparseCheckoutDirectories + SparseCheckoutDirectories []string } // Validate validates the fields and sets the default values. @@ -318,6 +427,11 @@ func (o *ResetOptions) Validate(r *Repository) error { } o.Commit = ref.Hash() + } else { + _, err := r.CommitObject(o.Commit) + if err != nil { + return fmt.Errorf("invalid reset option: %w", err) + } } return nil @@ -347,7 +461,7 @@ type LogOptions struct { // Show only those commits in which the specified file was inserted/updated. // It is equivalent to running `git log -- `. - // this field is kept for compatility, it can be replaced with PathFilter + // this field is kept for compatibility, it can be replaced with PathFilter FileName *string // Filter commits based on the path of files that are updated @@ -374,7 +488,7 @@ var ( ErrMissingAuthor = errors.New("author field is required") ) -// AddOptions describes how a add operation should be performed +// AddOptions describes how an `add` operation should be performed type AddOptions struct { // All equivalent to `git add -A`, update the index not only where the // working tree has a file matching `Path` but also where the index already @@ -382,11 +496,16 @@ type AddOptions struct { // working tree. If no `Path` nor `Glob` is given when `All` option is // used, all files in the entire working tree are updated. All bool - // Path is the exact filepath to a the file or directory to be added. + // Path is the exact filepath to the file or directory to be added. Path string // Glob adds all paths, matching pattern, to the index. If pattern matches a // directory path, all directory contents are added to the index recursively. Glob string + // SkipStatus adds the path with no status check. This option is relevant only + // when the `Path` option is specified and does not apply when the `All` option is used. + // Notice that when passing an ignored path it will be added anyway. + // When true it can speed up adding files to the worktree in very large repositories. + SkipStatus bool } // Validate validates the fields and sets the default values. @@ -403,6 +522,10 @@ type CommitOptions struct { // All automatically stage files that have been modified and deleted, but // new files you have not told Git about are not affected. All bool + // AllowEmptyCommits enable empty commits to be created. An empty commit + // is when no changes to the tree were made, but a new commit message is + // provided. The default behavior is false, which results in ErrEmptyCommit. + AllowEmptyCommits bool // Author is the author's signature of the commit. 
If Author is empty the // Name and Email is read from the config, and time.Now it's used as When. Author *object.Signature @@ -416,10 +539,25 @@ type CommitOptions struct { // commit will not be signed. The private key must be present and already // decrypted. SignKey *openpgp.Entity + // Signer denotes a cryptographic signer to sign the commit with. + // A nil value here means the commit will not be signed. + // Takes precedence over SignKey. + Signer Signer + // Amend will create a new commit object and replace the commit that HEAD currently + // points to. Cannot be used with All nor Parents. + Amend bool } // Validate validates the fields and sets the default values. func (o *CommitOptions) Validate(r *Repository) error { + if o.All && o.Amend { + return errors.New("all and amend cannot be used together") + } + + if o.Amend && len(o.Parents) > 0 { + return errors.New("parents cannot be used with amend") + } + if o.Author == nil { if err := o.loadConfigAuthorAndCommitter(r); err != nil { return err @@ -552,8 +690,35 @@ func (o *CreateTagOptions) loadConfigTagger(r *Repository) error { type ListOptions struct { // Auth credentials, if required, to use with the remote repository. Auth transport.AuthMethod + // InsecureSkipTLS skips ssl verify if protocol is https + InsecureSkipTLS bool + // CABundle specify additional ca bundle with system cert pool + CABundle []byte + // PeelingOption defines how peeled objects are handled during a + // remote list. + PeelingOption PeelingOption + // ProxyOptions provides info required for connecting to a proxy. + ProxyOptions transport.ProxyOptions + // Timeout specifies the timeout in seconds for list operations + Timeout int } +// PeelingOption represents the different ways to handle peeled references. +// +// Peeled references represent the underlying object of an annotated +// (or signed) tag. Refer to upstream documentation for more info: +// https://github.com/git/git/blob/master/Documentation/technical/reftable.txt +type PeelingOption uint8 + +const ( + // IgnorePeeled ignores all peeled reference names. This is the default behavior. + IgnorePeeled PeelingOption = 0 + // OnlyPeeled returns only peeled reference names. + OnlyPeeled PeelingOption = 1 + // AppendPeeled appends peeled reference names to the reference list. + AppendPeeled PeelingOption = 2 +) + // CleanOptions describes how a clean should be performed. type CleanOptions struct { Dir bool @@ -578,7 +743,13 @@ var ( ) // Validate validates the fields and sets the default values. +// +// TODO: deprecate in favor of Validate(r *Repository) in v6. func (o *GrepOptions) Validate(w *Worktree) error { + return o.validate(w.r) +} + +func (o *GrepOptions) validate(r *Repository) error { if !o.CommitHash.IsZero() && o.ReferenceName != "" { return ErrHashOrReference } @@ -586,7 +757,7 @@ func (o *GrepOptions) Validate(w *Worktree) error { // If none of CommitHash and ReferenceName are provided, set commit hash of // the repository's head. if o.CommitHash.IsZero() && o.ReferenceName == "" { - ref, err := w.r.Head() + ref, err := r.Head() if err != nil { return err } @@ -609,3 +780,13 @@ type PlainOpenOptions struct { // Validate validates the fields and sets the default values. func (o *PlainOpenOptions) Validate() error { return nil } + +type PlainInitOptions struct { + InitOptions + // Determines if the repository will have a worktree (non-bare) or not (bare). + Bare bool + ObjectFormat formatcfg.ObjectFormat +} + +// Validate validates the fields and sets the default values. 
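Per the Validate rules above, Amend cannot be combined with All or explicit Parents; it creates a new commit object that replaces the one HEAD points to. A minimal sketch of rewording HEAD's message (placeholder path; the author falls back to the configuration, as described above):

package main

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
)

func main() {
	repo, err := git.PlainOpen("/path/to/repo") // placeholder path
	if err != nil {
		panic(err)
	}
	wt, err := repo.Worktree()
	if err != nil {
		panic(err)
	}

	// Replaces the commit HEAD currently points to.
	hash, err := wt.Commit("reworded message", &git.CommitOptions{Amend: true})
	if err != nil {
		panic(err)
	}
	fmt.Println("new HEAD:", hash)
}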
+func (o *PlainInitOptions) Validate() error { return nil } diff --git a/vendor/github.com/go-git/go-git/v5/oss-fuzz.sh b/vendor/github.com/go-git/go-git/v5/oss-fuzz.sh new file mode 100644 index 00000000..885548f4 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/oss-fuzz.sh @@ -0,0 +1,35 @@ +#!/bin/bash -eu +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +################################################################################ + + +go mod download +go get github.com/AdamKorcz/go-118-fuzz-build/testing + +if [ "$SANITIZER" != "coverage" ]; then + sed -i '/func (s \*DecoderSuite) TestDecode(/,/^}/ s/^/\/\//' plumbing/format/config/decoder_test.go + sed -n '35,$p' plumbing/format/packfile/common_test.go >> plumbing/format/packfile/delta_test.go + sed -n '20,53p' plumbing/object/object_test.go >> plumbing/object/tree_test.go + sed -i 's|func Test|// func Test|' plumbing/transport/common_test.go +fi + +compile_native_go_fuzzer $(pwd)/internal/revision FuzzParser fuzz_parser +compile_native_go_fuzzer $(pwd)/plumbing/format/config FuzzDecoder fuzz_decoder_config +compile_native_go_fuzzer $(pwd)/plumbing/format/packfile FuzzPatchDelta fuzz_patch_delta +compile_native_go_fuzzer $(pwd)/plumbing/object FuzzParseSignedBytes fuzz_parse_signed_bytes +compile_native_go_fuzzer $(pwd)/plumbing/object FuzzDecode fuzz_decode +compile_native_go_fuzzer $(pwd)/plumbing/protocol/packp FuzzDecoder fuzz_decoder_packp +compile_native_go_fuzzer $(pwd)/plumbing/transport FuzzNewEndpoint fuzz_new_endpoint diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/BUILD.bazel b/vendor/github.com/go-git/go-git/v5/plumbing/BUILD.bazel index 12ac7e1a..5768855c 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/BUILD.bazel +++ b/vendor/github.com/go-git/go-git/v5/plumbing/BUILD.bazel @@ -13,4 +13,5 @@ go_library( importmap = "peridot.resf.org/vendor/github.com/go-git/go-git/v5/plumbing", importpath = "github.com/go-git/go-git/v5/plumbing", visibility = ["//visibility:public"], + deps = ["//vendor/github.com/go-git/go-git/v5/plumbing/hash"], ) diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go b/vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go index b848a979..ea1a4575 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go @@ -133,7 +133,7 @@ func (m FileMode) IsMalformed() bool { m != Submodule } -// String returns the FileMode as a string in the standatd git format, +// String returns the FileMode as a string in the standard git format, // this is, an octal number padded with ceros to 7 digits. Malformed // modes are printed in that same format, for easier debugging. 
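PlainInitOptions carries the object format down to repository initialisation. A sketch of creating a SHA-256 repository, assuming the PlainInitWithOptions entry point that pairs with these options in this revision (the path is a placeholder, and SHA-256 support is still experimental upstream):

package main

import (
	git "github.com/go-git/go-git/v5"
	formatcfg "github.com/go-git/go-git/v5/plumbing/format/config"
)

func main() {
	_, err := git.PlainInitWithOptions("/tmp/sha256-repo", &git.PlainInitOptions{
		Bare:         false,
		ObjectFormat: formatcfg.SHA256, // recorded as extensions.objectformat
	})
	if err != nil {
		panic(err)
	}
}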
// diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/BUILD.bazel b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/BUILD.bazel index 4e9b6cdd..98631cdf 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/BUILD.bazel +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/BUILD.bazel @@ -7,6 +7,7 @@ go_library( "decoder.go", "doc.go", "encoder.go", + "format.go", "option.go", "section.go", ], diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/encoder.go index 4eac8968..de069aed 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/encoder.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/encoder.go @@ -11,6 +11,10 @@ type Encoder struct { w io.Writer } +var ( + subsectionReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`) + valueReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`, "\n", `\n`, "\t", `\t`, "\b", `\b`) +) // NewEncoder returns a new encoder that writes to w. func NewEncoder(w io.Writer) *Encoder { return &Encoder{w} @@ -48,8 +52,7 @@ func (e *Encoder) encodeSection(s *Section) error { } func (e *Encoder) encodeSubsection(sectionName string, s *Subsection) error { - //TODO: escape - if err := e.printf("[%s \"%s\"]\n", sectionName, s.Name); err != nil { + if err := e.printf("[%s \"%s\"]\n", sectionName, subsectionReplacer.Replace(s.Name)); err != nil { return err } @@ -58,12 +61,14 @@ func (e *Encoder) encodeSubsection(sectionName string, s *Subsection) error { func (e *Encoder) encodeOptions(opts Options) error { for _, o := range opts { - pattern := "\t%s = %s\n" - if strings.Contains(o.Value, "\\") { - pattern = "\t%s = %q\n" + var value string + if strings.ContainsAny(o.Value, "#;\"\t\n\\") || strings.HasPrefix(o.Value, " ") || strings.HasSuffix(o.Value, " ") { + value = `"`+valueReplacer.Replace(o.Value)+`"` + } else { + value = o.Value } - if err := e.printf(pattern, o.Key, o.Value); err != nil { + if err := e.printf("\t%s = %s\n", o.Key, value); err != nil { return err } } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/format.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/format.go new file mode 100644 index 00000000..4873ea92 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/format.go @@ -0,0 +1,53 @@ +package config + +// RepositoryFormatVersion represents the repository format version, +// as per defined at: +// +// https://git-scm.com/docs/repository-version +type RepositoryFormatVersion string + +const ( + // Version_0 is the format defined by the initial version of git, + // including but not limited to the format of the repository + // directory, the repository configuration file, and the object + // and ref storage. + // + // Specifying the complete behavior of git is beyond the scope + // of this document. + Version_0 = "0" + + // Version_1 is identical to version 0, with the following exceptions: + // + // 1. When reading the core.repositoryformatversion variable, a git + // implementation which supports version 1 MUST also read any + // configuration keys found in the extensions section of the + // configuration file. + // + // 2. If a version-1 repository specifies any extensions.* keys that + // the running git has not implemented, the operation MUST NOT proceed. + // Similarly, if the value of any known key is not understood by the + // implementation, the operation MUST NOT proceed. 
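The encoder previously only special-cased backslashes; it now quotes and escapes values containing comment characters, quotes, tabs, newlines, backslashes, or leading/trailing spaces. A small sketch of the observable effect at the plumbing level:

package main

import (
	"bytes"
	"fmt"

	format "github.com/go-git/go-git/v5/plumbing/format/config"
)

func main() {
	cfg := format.New()
	// A value with a comment character must be quoted to survive a round trip.
	cfg.Section("core").SetOption("commentChar", "; semicolons need quoting")

	var buf bytes.Buffer
	if err := format.NewEncoder(&buf).Encode(cfg); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
	// [core]
	//	commentChar = "; semicolons need quoting"
}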
+ // + // Note that if no extensions are specified in the config file, then + // core.repositoryformatversion SHOULD be set to 0 (setting it to 1 provides + // no benefit, and makes the repository incompatible with older + // implementations of git). + Version_1 = "1" + + // DefaultRepositoryFormatVersion holds the default repository format version. + DefaultRepositoryFormatVersion = Version_0 +) + +// ObjectFormat defines the object format. +type ObjectFormat string + +const ( + // SHA1 represents the object format used for SHA1. + SHA1 ObjectFormat = "sha1" + + // SHA256 represents the object format used for SHA256. + SHA256 ObjectFormat = "sha256" + + // DefaultObjectFormat holds the default object format. + DefaultObjectFormat = SHA1 +) diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/section.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/section.go index 07f72f35..4625ac58 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/section.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/section.go @@ -103,7 +103,7 @@ func (s *Section) RemoveSubsection(name string) *Section { return s } -// Option return the value for the specified key. Empty string is returned if +// Option returns the value for the specified key. Empty string is returned if // key does not exists. func (s *Section) Option(key string) string { return s.Options.Get(key) diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/patch.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/patch.go index 39a66a1a..c7678b01 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/patch.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/patch.go @@ -9,7 +9,7 @@ import ( type Operation int const ( - // Equal item represents a equals diff. + // Equal item represents an equals diff. Equal Operation = iota // Add item represents an insert diff. Add @@ -26,15 +26,15 @@ type Patch interface { Message() string } -// FilePatch represents the necessary steps to transform one file to another. +// FilePatch represents the necessary steps to transform one file into another. type FilePatch interface { // IsBinary returns true if this patch is representing a binary file. IsBinary() bool - // Files returns the from and to Files, with all the necessary metadata to + // Files returns the from and to Files, with all the necessary metadata // about them. If the patch creates a new file, "from" will be nil. // If the patch deletes a file, "to" will be nil. Files() (from, to File) - // Chunks returns a slice of ordered changes to transform "from" File to + // Chunks returns a slice of ordered changes to transform "from" File into // "to" File. If the file is a binary one, Chunks will be empty. Chunks() []Chunk } @@ -49,7 +49,7 @@ type File interface { Path() string } -// Chunk represents a portion of a file transformation to another. +// Chunk represents a portion of a file transformation into another. type Chunk interface { // Content contains the portion of the file. Content() string diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go index 413984aa..fa605b19 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go @@ -38,6 +38,10 @@ type UnifiedEncoder struct { // a change. 
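Tying this back to marshalExtensions earlier in the patch: the extensions section is only written for version-1 repositories, because only version 1 obliges a client that does not understand an extension to refuse to operate on the repository. A sketch of the round trip at the config level:

package main

import (
	"fmt"

	gitcfg "github.com/go-git/go-git/v5/config"
	formatcfg "github.com/go-git/go-git/v5/plumbing/format/config"
)

func main() {
	cfg := gitcfg.NewConfig()
	cfg.Core.RepositoryFormatVersion = formatcfg.Version_1
	cfg.Extensions.ObjectFormat = formatcfg.SHA256

	out, err := cfg.Marshal()
	if err != nil {
		panic(err)
	}
	// Output contains core.repositoryformatversion = 1 and
	// extensions.objectformat = sha256; with Version_0 the
	// [extensions] section would be omitted.
	fmt.Print(string(out))
}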
contextLines int + // srcPrefix and dstPrefix are prepended to file paths when encoding a diff. + srcPrefix string + dstPrefix string + // colorConfig is the color configuration. The default is no color. color ColorConfig } @@ -46,6 +50,8 @@ type UnifiedEncoder struct { func NewUnifiedEncoder(w io.Writer, contextLines int) *UnifiedEncoder { return &UnifiedEncoder{ Writer: w, + srcPrefix: "a/", + dstPrefix: "b/", contextLines: contextLines, } } @@ -56,6 +62,18 @@ func (e *UnifiedEncoder) SetColor(colorConfig ColorConfig) *UnifiedEncoder { return e } +// SetSrcPrefix sets e's srcPrefix and returns e. +func (e *UnifiedEncoder) SetSrcPrefix(prefix string) *UnifiedEncoder { + e.srcPrefix = prefix + return e +} + +// SetDstPrefix sets e's dstPrefix and returns e. +func (e *UnifiedEncoder) SetDstPrefix(prefix string) *UnifiedEncoder { + e.dstPrefix = prefix + return e +} + // Encode encodes patch. func (e *UnifiedEncoder) Encode(patch Patch) error { sb := &strings.Builder{} @@ -91,7 +109,8 @@ func (e *UnifiedEncoder) writeFilePatchHeader(sb *strings.Builder, filePatch Fil case from != nil && to != nil: hashEquals := from.Hash() == to.Hash() lines = append(lines, - fmt.Sprintf("diff --git a/%s b/%s", from.Path(), to.Path()), + fmt.Sprintf("diff --git %s%s %s%s", + e.srcPrefix, from.Path(), e.dstPrefix, to.Path()), ) if from.Mode() != to.Mode() { lines = append(lines, @@ -115,22 +134,22 @@ func (e *UnifiedEncoder) writeFilePatchHeader(sb *strings.Builder, filePatch Fil ) } if !hashEquals { - lines = e.appendPathLines(lines, "a/"+from.Path(), "b/"+to.Path(), isBinary) + lines = e.appendPathLines(lines, e.srcPrefix+from.Path(), e.dstPrefix+to.Path(), isBinary) } case from == nil: lines = append(lines, - fmt.Sprintf("diff --git a/%s b/%s", to.Path(), to.Path()), + fmt.Sprintf("diff --git %s %s", e.srcPrefix+to.Path(), e.dstPrefix+to.Path()), fmt.Sprintf("new file mode %o", to.Mode()), fmt.Sprintf("index %s..%s", plumbing.ZeroHash, to.Hash()), ) - lines = e.appendPathLines(lines, "/dev/null", "b/"+to.Path(), isBinary) + lines = e.appendPathLines(lines, "/dev/null", e.dstPrefix+to.Path(), isBinary) case to == nil: lines = append(lines, - fmt.Sprintf("diff --git a/%s b/%s", from.Path(), from.Path()), + fmt.Sprintf("diff --git %s %s", e.srcPrefix+from.Path(), e.dstPrefix+from.Path()), fmt.Sprintf("deleted file mode %o", from.Mode()), fmt.Sprintf("index %s..%s", from.Hash(), plumbing.ZeroHash), ) - lines = e.appendPathLines(lines, "a/"+from.Path(), "/dev/null", isBinary) + lines = e.appendPathLines(lines, e.srcPrefix+from.Path(), "/dev/null", isBinary) } sb.WriteString(e.color[Meta]) diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/BUILD.bazel b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/BUILD.bazel index 542a2d60..4183c92f 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/BUILD.bazel +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/BUILD.bazel @@ -13,6 +13,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/go-git/go-billy/v5:go-billy", + "//vendor/github.com/go-git/go-git/v5/internal/path_util", "//vendor/github.com/go-git/go-git/v5/plumbing/format/config", "//vendor/github.com/go-git/go-git/v5/utils/ioutil", ], diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go index 4a26325f..aca5d0db 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go +++ 
b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go @@ -3,28 +3,32 @@ package gitignore import ( "bufio" "bytes" - "io/ioutil" + "io" "os" - "os/user" "strings" "github.com/go-git/go-billy/v5" + "github.com/go-git/go-git/v5/internal/path_util" "github.com/go-git/go-git/v5/plumbing/format/config" gioutil "github.com/go-git/go-git/v5/utils/ioutil" ) const ( - commentPrefix = "#" - coreSection = "core" - excludesfile = "excludesfile" - gitDir = ".git" - gitignoreFile = ".gitignore" - gitconfigFile = ".gitconfig" - systemFile = "/etc/gitconfig" + commentPrefix = "#" + coreSection = "core" + excludesfile = "excludesfile" + gitDir = ".git" + gitignoreFile = ".gitignore" + gitconfigFile = ".gitconfig" + systemFile = "/etc/gitconfig" + infoExcludeFile = gitDir + "/info/exclude" ) // readIgnoreFile reads a specific git ignore file. func readIgnoreFile(fs billy.Filesystem, path []string, ignoreFile string) (ps []Pattern, err error) { + + ignoreFile, _ = path_util.ReplaceTildeWithHome(ignoreFile) + f, err := fs.Open(fs.Join(append(path, ignoreFile)...)) if err == nil { defer f.Close() @@ -43,10 +47,14 @@ func readIgnoreFile(fs billy.Filesystem, path []string, ignoreFile string) (ps [ return } -// ReadPatterns reads gitignore patterns recursively traversing through the directory -// structure. The result is in the ascending order of priority (last higher). +// ReadPatterns reads the .git/info/exclude and then the gitignore patterns +// recursively traversing through the directory structure. The result is in +// the ascending order of priority (last higher). func ReadPatterns(fs billy.Filesystem, path []string) (ps []Pattern, err error) { - ps, _ = readIgnoreFile(fs, path, gitignoreFile) + ps, _ = readIgnoreFile(fs, path, infoExcludeFile) + + subps, _ := readIgnoreFile(fs, path, gitignoreFile) + ps = append(ps, subps...) var fis []os.FileInfo fis, err = fs.ReadDir(fs.Join(path...)) @@ -82,7 +90,7 @@ func loadPatterns(fs billy.Filesystem, path string) (ps []Pattern, err error) { defer gioutil.CheckClose(f, &err) - b, err := ioutil.ReadAll(f) + b, err := io.ReadAll(f) if err != nil { return } @@ -108,7 +116,7 @@ func loadPatterns(fs billy.Filesystem, path string) (ps []Pattern, err error) { return } -// LoadGlobalPatterns loads gitignore patterns from from the gitignore file +// LoadGlobalPatterns loads gitignore patterns from the gitignore file // declared in a user's ~/.gitconfig file. If the ~/.gitconfig file does not // exist the function will return nil. If the core.excludesfile property // is not declared, the function will return nil. If the file pointed to by @@ -116,16 +124,16 @@ func loadPatterns(fs billy.Filesystem, path string) (ps []Pattern, err error) { // // The function assumes fs is rooted at the root filesystem. func LoadGlobalPatterns(fs billy.Filesystem) (ps []Pattern, err error) { - usr, err := user.Current() + home, err := os.UserHomeDir() if err != nil { return } - return loadPatterns(fs, fs.Join(usr.HomeDir, gitconfigFile)) + return loadPatterns(fs, fs.Join(home, gitconfigFile)) } -// LoadSystemPatterns loads gitignore patterns from from the gitignore file -// declared in a system's /etc/gitconfig file. If the ~/.gitconfig file does +// LoadSystemPatterns loads gitignore patterns from the gitignore file +// declared in a system's /etc/gitconfig file. If the /etc/gitconfig file does // not exist the function will return nil. If the core.excludesfile property // is not declared, the function will return nil. 
If the file pointed to by // the core.excludesfile property does not exist, the function will return nil. diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go index 098cb502..450b3cdf 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go @@ -39,6 +39,8 @@ type pattern struct { // ParsePattern parses a gitignore pattern string into the Pattern structure. func ParsePattern(p string, domain []string) Pattern { + // storing domain, copy it to ensure it isn't changed externally + domain = append([]string(nil), domain...) res := pattern{domain: domain} if strings.HasPrefix(p, inclusionPrefix) { diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/BUILD.bazel b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/BUILD.bazel index 727e0ae9..9f881e00 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/BUILD.bazel +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/BUILD.bazel @@ -14,6 +14,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/go-git/go-git/v5/plumbing", + "//vendor/github.com/go-git/go-git/v5/plumbing/hash", "//vendor/github.com/go-git/go-git/v5/utils/binary", ], ) diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/decoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/decoder.go index 7768bd65..9afdce30 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/decoder.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/decoder.go @@ -6,20 +6,21 @@ import ( "errors" "io" + "github.com/go-git/go-git/v5/plumbing/hash" "github.com/go-git/go-git/v5/utils/binary" ) var ( // ErrUnsupportedVersion is returned by Decode when the idx file version // is not supported. - ErrUnsupportedVersion = errors.New("Unsupported version") + ErrUnsupportedVersion = errors.New("unsupported version") // ErrMalformedIdxFile is returned by Decode when the idx file is corrupted. - ErrMalformedIdxFile = errors.New("Malformed IDX file") + ErrMalformedIdxFile = errors.New("malformed IDX file") ) const ( fanout = 256 - objectIDLength = 20 + objectIDLength = hash.Size ) // Decoder reads and decodes idx files from an input stream. diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/encoder.go index 26b2e4d6..75147376 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/encoder.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/encoder.go @@ -1,10 +1,9 @@ package idxfile import ( - "crypto/sha1" - "hash" "io" + "github.com/go-git/go-git/v5/plumbing/hash" "github.com/go-git/go-git/v5/utils/binary" ) @@ -16,7 +15,7 @@ type Encoder struct { // NewEncoder returns a new stream encoder that writes to w. 
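ParsePattern now copies the domain slice, so a caller that reuses or mutates its slice afterwards can no longer corrupt stored patterns. A small sketch with the package's matcher:

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/format/gitignore"
)

func main() {
	domain := []string{"subdir"}
	ps := []gitignore.Pattern{gitignore.ParsePattern("*.log", domain)}
	domain[0] = "elsewhere" // harmless after the copy added above

	m := gitignore.NewMatcher(ps)
	fmt.Println(m.Match([]string{"subdir", "build.log"}, false)) // true
	fmt.Println(m.Match([]string{"other", "build.log"}, false))  // false
}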
func NewEncoder(w io.Writer) *Encoder { - h := sha1.New() + h := hash.New(hash.CryptoType) mw := io.MultiWriter(w, h) return &Encoder{mw, h} } @@ -133,10 +132,10 @@ func (e *Encoder) encodeChecksums(idx *MemoryIndex) (int, error) { return 0, err } - copy(idx.IdxChecksum[:], e.hash.Sum(nil)[:20]) + copy(idx.IdxChecksum[:], e.hash.Sum(nil)[:hash.Size]) if _, err := e.Write(idx.IdxChecksum[:]); err != nil { return 0, err } - return 40, nil + return hash.HexSize, nil } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go index 64dd8dce..9237a743 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go @@ -8,6 +8,7 @@ import ( encbin "encoding/binary" "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/hash" ) const ( @@ -53,8 +54,8 @@ type MemoryIndex struct { Offset32 [][]byte CRC32 [][]byte Offset64 []byte - PackfileChecksum [20]byte - IdxChecksum [20]byte + PackfileChecksum [hash.Size]byte + IdxChecksum [hash.Size]byte offsetHash map[int64]plumbing.Hash offsetHashIsFull bool diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/writer.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/writer.go index daa16050..c4c21e16 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/writer.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/writer.go @@ -84,11 +84,8 @@ func (w *Writer) OnFooter(h plumbing.Hash) error { w.checksum = h w.finished = true _, err := w.createIndex() - if err != nil { - return err - } - return nil + return err } // creatIndex returns a filled MemoryIndex with the information filled by @@ -139,15 +136,23 @@ func (w *Writer) createIndex() (*MemoryIndex, error) { offset := o.Offset if offset > math.MaxInt32 { - offset = w.addOffset64(offset) + var err error + offset, err = w.addOffset64(offset) + if err != nil { + return nil, err + } } buf.Truncate(0) - binary.WriteUint32(buf, uint32(offset)) + if err := binary.WriteUint32(buf, uint32(offset)); err != nil { + return nil, err + } idx.Offset32[bucket] = append(idx.Offset32[bucket], buf.Bytes()...) buf.Truncate(0) - binary.WriteUint32(buf, o.CRC32) + if err := binary.WriteUint32(buf, o.CRC32); err != nil { + return nil, err + } idx.CRC32[bucket] = append(idx.CRC32[bucket], buf.Bytes()...) } @@ -161,15 +166,17 @@ func (w *Writer) createIndex() (*MemoryIndex, error) { return idx, nil } -func (w *Writer) addOffset64(pos uint64) uint64 { +func (w *Writer) addOffset64(pos uint64) (uint64, error) { buf := new(bytes.Buffer) - binary.WriteUint64(buf, pos) - w.index.Offset64 = append(w.index.Offset64, buf.Bytes()...) + if err := binary.WriteUint64(buf, pos); err != nil { + return 0, err + } + w.index.Offset64 = append(w.index.Offset64, buf.Bytes()...) 
index := uint64(w.offset64 | (1 << 31)) w.offset64++ - return index + return index, nil } func (o objects) Len() int { diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/BUILD.bazel b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/BUILD.bazel index 94d7d6a8..90c5cc85 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/BUILD.bazel +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/BUILD.bazel @@ -15,6 +15,7 @@ go_library( deps = [ "//vendor/github.com/go-git/go-git/v5/plumbing", "//vendor/github.com/go-git/go-git/v5/plumbing/filemode", + "//vendor/github.com/go-git/go-git/v5/plumbing/hash", "//vendor/github.com/go-git/go-git/v5/utils/binary", ], ) diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go index 036b6365..6778cf74 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go @@ -3,15 +3,14 @@ package index import ( "bufio" "bytes" - "crypto/sha1" "errors" - "hash" "io" - "io/ioutil" + "strconv" "time" "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/hash" "github.com/go-git/go-git/v5/utils/binary" ) @@ -49,7 +48,7 @@ type Decoder struct { // NewDecoder returns a new decoder that reads from r. func NewDecoder(r io.Reader) *Decoder { - h := sha1.New() + h := hash.New(hash.CryptoType) return &Decoder{ r: io.TeeReader(r, h), hash: h, @@ -202,7 +201,7 @@ func (d *Decoder) padEntry(idx *Index, e *Entry, read int) error { entrySize := read + len(e.Name) padLen := 8 - entrySize%8 - _, err := io.CopyN(ioutil.Discard, d.r, int64(padLen)) + _, err := io.CopyN(io.Discard, d.r, int64(padLen)) return err } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go index 00d4e7a3..fa2d8144 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go @@ -2,19 +2,18 @@ package index import ( "bytes" - "crypto/sha1" "errors" - "hash" "io" "sort" "time" + "github.com/go-git/go-git/v5/plumbing/hash" "github.com/go-git/go-git/v5/utils/binary" ) var ( // EncodeVersionSupported is the range of supported index versions - EncodeVersionSupported uint32 = 2 + EncodeVersionSupported uint32 = 3 // ErrInvalidTimestamp is returned by Encode if a Index with a Entry with // negative timestamp values @@ -29,16 +28,16 @@ type Encoder struct { // NewEncoder returns a new encoder that writes to w. func NewEncoder(w io.Writer) *Encoder { - h := sha1.New() + h := hash.New(hash.CryptoType) mw := io.MultiWriter(w, h) return &Encoder{mw, h} } // Encode writes the Index to the stream of the encoder. 
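addOffset64 implements the idx-v2 large-offset scheme: offsets that fit in 31 bits are stored inline in the 32-bit table, while anything larger goes into a 64-bit side table, the inline slot holding the side-table index with the most significant bit set. A standalone sketch of the encoding:

package main

import "fmt"

// encodeOffset mirrors the scheme used by addOffset64 above.
func encodeOffset(offset uint64, table *[]uint64) uint32 {
	if offset <= 0x7FFFFFFF {
		return uint32(offset) // fits in 31 bits, store inline
	}
	idx := uint32(len(*table))
	*table = append(*table, offset)
	return idx | (1 << 31) // MSB flags "look in the 64-bit table"
}

func main() {
	var table []uint64
	fmt.Printf("%#x\n", encodeOffset(1024, &table))  // 0x400
	fmt.Printf("%#x\n", encodeOffset(5<<30, &table)) // 0x80000000
	fmt.Println(table)                               // [5368709120]
}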
func (e *Encoder) Encode(idx *Index) error { - // TODO: support versions v3 and v4 + // TODO: support v4 // TODO: support extensions - if idx.Version != EncodeVersionSupported { + if idx.Version > EncodeVersionSupported { return ErrUnsupportedVersion } @@ -68,8 +67,12 @@ func (e *Encoder) encodeEntries(idx *Index) error { if err := e.encodeEntry(entry); err != nil { return err } + entryLength := entryHeaderLength + if entry.IntentToAdd || entry.SkipWorktree { + entryLength += 2 + } - wrote := entryHeaderLength + len(entry.Name) + wrote := entryLength + len(entry.Name) if err := e.padEntry(wrote); err != nil { return err } @@ -79,10 +82,6 @@ func (e *Encoder) encodeEntries(idx *Index) error { } func (e *Encoder) encodeEntry(entry *Entry) error { - if entry.IntentToAdd || entry.SkipWorktree { - return ErrUnsupportedVersion - } - sec, nsec, err := e.timeToUint32(&entry.CreatedAt) if err != nil { return err @@ -110,9 +109,25 @@ func (e *Encoder) encodeEntry(entry *Entry) error { entry.GID, entry.Size, entry.Hash[:], - flags, } + flagsFlow := []interface{}{flags} + + if entry.IntentToAdd || entry.SkipWorktree { + var extendedFlags uint16 + + if entry.IntentToAdd { + extendedFlags |= intentToAddMask + } + if entry.SkipWorktree { + extendedFlags |= skipWorkTreeMask + } + + flagsFlow = []interface{}{flags | entryExtended, extendedFlags} + } + + flow = append(flow, flagsFlow...) + if err := binary.Write(e.w, flow...); err != nil { return err } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/index.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/index.go index 649416a2..f4c7647d 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/index.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/index.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "path/filepath" + "strings" "time" "github.com/go-git/go-git/v5/plumbing" @@ -211,3 +212,20 @@ type EndOfIndexEntry struct { // their contents). 
Hash plumbing.Hash } + +// SkipUnless applies patterns in the form of A, A/B, A/B/C +// to the index to prevent the files from being checked out +func (i *Index) SkipUnless(patterns []string) { + for _, e := range i.Entries { + var include bool + for _, pattern := range patterns { + if strings.HasPrefix(e.Name, pattern) { + include = true + break + } + } + if !include { + e.SkipWorktree = true + } + } +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/BUILD.bazel b/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/BUILD.bazel index f246a5e5..a805efc0 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/BUILD.bazel +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/BUILD.bazel @@ -13,5 +13,6 @@ go_library( deps = [ "//vendor/github.com/go-git/go-git/v5/plumbing", "//vendor/github.com/go-git/go-git/v5/plumbing/format/packfile", + "//vendor/github.com/go-git/go-git/v5/utils/sync", ], ) diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/reader.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/reader.go index b6b2ca06..d7932f4e 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/reader.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/reader.go @@ -1,13 +1,13 @@ package objfile import ( - "compress/zlib" "errors" "io" "strconv" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/format/packfile" + "github.com/go-git/go-git/v5/utils/sync" ) var ( @@ -20,20 +20,22 @@ var ( // Reader implements io.ReadCloser. Close should be called when finished with // the Reader. Close will not close the underlying io.Reader. type Reader struct { - multi io.Reader - zlib io.ReadCloser - hasher plumbing.Hasher + multi io.Reader + zlib io.Reader + zlibref sync.ZLibReader + hasher plumbing.Hasher } // NewReader returns a new Reader reading from r. func NewReader(r io.Reader) (*Reader, error) { - zlib, err := zlib.NewReader(r) + zlib, err := sync.GetZlibReader(r) if err != nil { return nil, packfile.ErrZLib.AddDetails(err.Error()) } return &Reader{ - zlib: zlib, + zlib: zlib.Reader, + zlibref: zlib, }, nil } @@ -110,5 +112,6 @@ func (r *Reader) Hash() plumbing.Hash { // Close releases any resources consumed by the Reader. Calling Close does not // close the wrapped io.Reader originally passed to NewReader. func (r *Reader) Close() error { - return r.zlib.Close() + sync.PutZlibReader(r.zlibref) + return nil } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/writer.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/writer.go index 2a96a437..0d0f1549 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/writer.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/writer.go @@ -7,6 +7,7 @@ import ( "strconv" "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/utils/sync" ) var ( @@ -18,9 +19,9 @@ var ( // not close the underlying io.Writer. type Writer struct { raw io.Writer - zlib io.WriteCloser hasher plumbing.Hasher multi io.Writer + zlib *zlib.Writer closed bool pending int64 // number of unwritten bytes @@ -31,9 +32,10 @@ type Writer struct { // The returned Writer implements io.WriteCloser. Close should be called when // finished with the Writer. Close will not close the underlying io.Writer. 
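SkipUnless is the index-level primitive behind the new sparse-checkout support: every entry whose path falls outside the given prefixes gets the skip-worktree bit, which the worktree code then honours during checkout. A small sketch:

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/format/index"
)

func main() {
	idx := &index.Index{Entries: []*index.Entry{
		{Name: "docs/readme.md"},
		{Name: "src/main.go"},
	}}

	idx.SkipUnless([]string{"docs/"})

	for _, e := range idx.Entries {
		fmt.Println(e.Name, "skip-worktree:", e.SkipWorktree)
	}
	// docs/readme.md skip-worktree: false
	// src/main.go skip-worktree: true
}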
func NewWriter(w io.Writer) *Writer { + zlib := sync.GetZlibWriter(w) return &Writer{ raw: w, - zlib: zlib.NewWriter(w), + zlib: zlib, } } @@ -100,6 +102,7 @@ func (w *Writer) Hash() plumbing.Hash { // Calling Close does not close the wrapped io.Writer originally passed to // NewWriter. func (w *Writer) Close() error { + defer sync.PutZlibWriter(w.zlib) if err := w.zlib.Close(); err != nil { return err } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/BUILD.bazel b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/BUILD.bazel index 27334fcd..58135b9a 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/BUILD.bazel +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/BUILD.bazel @@ -25,8 +25,10 @@ go_library( "//vendor/github.com/go-git/go-git/v5/plumbing", "//vendor/github.com/go-git/go-git/v5/plumbing/cache", "//vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile", + "//vendor/github.com/go-git/go-git/v5/plumbing/hash", "//vendor/github.com/go-git/go-git/v5/plumbing/storer", "//vendor/github.com/go-git/go-git/v5/utils/binary", "//vendor/github.com/go-git/go-git/v5/utils/ioutil", + "//vendor/github.com/go-git/go-git/v5/utils/sync", ], ) diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/common.go index df423ad5..36c5ef5b 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/common.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/common.go @@ -1,10 +1,7 @@ package packfile import ( - "bytes" - "compress/zlib" "io" - "sync" "github.com/go-git/go-git/v5/plumbing/storer" "github.com/go-git/go-git/v5/utils/ioutil" @@ -61,18 +58,3 @@ func WritePackfileToObjectStorage( return err } - -var bufPool = sync.Pool{ - New: func() interface{} { - return bytes.NewBuffer(nil) - }, -} - -var zlibInitBytes = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01} - -var zlibReaderPool = sync.Pool{ - New: func() interface{} { - r, _ := zlib.NewReader(bytes.NewReader(zlibInitBytes)) - return r - }, -} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/diff_delta.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/diff_delta.go index 1951b34e..8898e583 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/diff_delta.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/diff_delta.go @@ -5,6 +5,7 @@ import ( "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/utils/ioutil" + "github.com/go-git/go-git/v5/utils/sync" ) // See https://github.com/jelmer/dulwich/blob/master/dulwich/pack.py and @@ -16,8 +17,11 @@ const ( s = 16 // https://github.com/git/git/blob/f7466e94375b3be27f229c78873f0acf8301c0a5/diff-delta.c#L428 - // Max size of a copy operation (64KB) + // Max size of a copy operation (64KB). maxCopySize = 64 * 1024 + + // Min size of a copy operation. + minCopySize = 4 ) // GetDelta returns an EncodedObject of type OFSDeltaObject. 
Base and Target object, @@ -43,18 +47,16 @@ func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (o plumbin defer ioutil.CheckClose(tr, &err) - bb := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(bb) - bb.Reset() + bb := sync.GetBytesBuffer() + defer sync.PutBytesBuffer(bb) _, err = bb.ReadFrom(br) if err != nil { return nil, err } - tb := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(tb) - tb.Reset() + tb := sync.GetBytesBuffer() + defer sync.PutBytesBuffer(tb) _, err = tb.ReadFrom(tr) if err != nil { @@ -80,9 +82,8 @@ func DiffDelta(src, tgt []byte) []byte { } func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte { - buf := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(buf) - buf.Reset() + buf := sync.GetBytesBuffer() + defer sync.PutBytesBuffer(buf) buf.Write(deltaEncodeSize(len(src))) buf.Write(deltaEncodeSize(len(tgt))) @@ -90,9 +91,8 @@ func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte { index.init(src) } - ibuf := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(ibuf) - ibuf.Reset() + ibuf := sync.GetBytesBuffer() + defer sync.PutBytesBuffer(ibuf) for i := 0; i < len(tgt); i++ { offset, l := index.findMatch(src, tgt, i) diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/encoder.go index 5501f886..804f5a87 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/encoder.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/encoder.go @@ -2,11 +2,11 @@ package packfile import ( "compress/zlib" - "crypto/sha1" "fmt" "io" "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/hash" "github.com/go-git/go-git/v5/plumbing/storer" "github.com/go-git/go-git/v5/utils/binary" "github.com/go-git/go-git/v5/utils/ioutil" @@ -28,7 +28,7 @@ type Encoder struct { // OFSDeltaObject. To use Reference deltas, set useRefDeltas to true. func NewEncoder(w io.Writer, s storer.EncodedObjectStorer, useRefDeltas bool) *Encoder { h := plumbing.Hasher{ - Hash: sha1.New(), + Hash: hash.New(hash.CryptoType), } mw := io.MultiWriter(w, h) ow := newOffsetWriter(mw) @@ -131,11 +131,7 @@ func (e *Encoder) entry(o *ObjectToPack) (err error) { defer ioutil.CheckClose(or, &err) _, err = io.Copy(e.zw, or) - if err != nil { - return err - } - - return nil + return err } func (e *Encoder) writeBaseIfDelta(o *ObjectToPack) error { diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/fsobject.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/fsobject.go index c5edaf52..238339da 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/fsobject.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/fsobject.go @@ -7,19 +7,20 @@ import ( "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/cache" "github.com/go-git/go-git/v5/plumbing/format/idxfile" + "github.com/go-git/go-git/v5/utils/ioutil" ) // FSObject is an object from the packfile on the filesystem. type FSObject struct { - hash plumbing.Hash - h *ObjectHeader - offset int64 - size int64 - typ plumbing.ObjectType - index idxfile.Index - fs billy.Filesystem - path string - cache cache.Object + hash plumbing.Hash + offset int64 + size int64 + typ plumbing.ObjectType + index idxfile.Index + fs billy.Filesystem + path string + cache cache.Object + largeObjectThreshold int64 } // NewFSObject creates a new filesystem object. 
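// A note on the pooling change: the hunks above delete the package-local
// bufPool (and zlibReaderPool) from packfile/common.go and move diff_delta.go
// onto shared helpers in utils/sync. A minimal sketch of that pooled-buffer
// pattern, standard library only; the names mirror the helpers called above,
// but the real utils/sync implementation may differ in detail:

package syncutil // hypothetical stand-in for go-git's utils/sync

import (
    "bytes"
    "sync"
)

var bufPool = sync.Pool{
    New: func() interface{} { return bytes.NewBuffer(nil) },
}

// GetBytesBuffer returns an already-reset buffer, folding in the
// Get/Reset/Put boilerplate that used to repeat at every call site.
func GetBytesBuffer() *bytes.Buffer {
    buf := bufPool.Get().(*bytes.Buffer)
    buf.Reset()
    return buf
}

// PutBytesBuffer hands the buffer back to the pool for reuse.
func PutBytesBuffer(buf *bytes.Buffer) {
    bufPool.Put(buf)
}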
@@ -32,16 +33,18 @@ func NewFSObject( fs billy.Filesystem, path string, cache cache.Object, + largeObjectThreshold int64, ) *FSObject { return &FSObject{ - hash: hash, - offset: offset, - size: contentSize, - typ: finalType, - index: index, - fs: fs, - path: path, - cache: cache, + hash: hash, + offset: offset, + size: contentSize, + typ: finalType, + index: index, + fs: fs, + path: path, + cache: cache, + largeObjectThreshold: largeObjectThreshold, } } @@ -62,7 +65,21 @@ func (o *FSObject) Reader() (io.ReadCloser, error) { return nil, err } - p := NewPackfileWithCache(o.index, nil, f, o.cache) + p := NewPackfileWithCache(o.index, nil, f, o.cache, o.largeObjectThreshold) + if o.largeObjectThreshold > 0 && o.size > o.largeObjectThreshold { + // We have a big object + h, err := p.objectHeaderAtOffset(o.offset) + if err != nil { + return nil, err + } + + r, err := p.getReaderDirect(h) + if err != nil { + _ = f.Close() + return nil, err + } + return ioutil.NewReadCloserWithCloser(r, f.Close), nil + } r, err := p.getObjectContent(o.offset) if err != nil { _ = f.Close() @@ -100,17 +117,3 @@ func (o *FSObject) Type() plumbing.ObjectType { func (o *FSObject) Writer() (io.WriteCloser, error) { return nil, nil } - -type objectReader struct { - io.ReadCloser - f billy.File -} - -func (r *objectReader) Close() error { - if err := r.ReadCloser.Close(); err != nil { - _ = r.f.Close() - return err - } - - return r.f.Close() -} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/packfile.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/packfile.go index ddd7f62f..68527022 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/packfile.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/packfile.go @@ -2,6 +2,7 @@ package packfile import ( "bytes" + "fmt" "io" "os" @@ -11,6 +12,7 @@ import ( "github.com/go-git/go-git/v5/plumbing/format/idxfile" "github.com/go-git/go-git/v5/plumbing/storer" "github.com/go-git/go-git/v5/utils/ioutil" + "github.com/go-git/go-git/v5/utils/sync" ) var ( @@ -35,11 +37,12 @@ const smallObjectThreshold = 16 * 1024 // Packfile allows retrieving information from inside a packfile. type Packfile struct { idxfile.Index - fs billy.Filesystem - file billy.File - s *Scanner - deltaBaseCache cache.Object - offsetToType map[int64]plumbing.ObjectType + fs billy.Filesystem + file billy.File + s *Scanner + deltaBaseCache cache.Object + offsetToType map[int64]plumbing.ObjectType + largeObjectThreshold int64 } // NewPackfileWithCache creates a new Packfile with the given object cache. @@ -50,6 +53,7 @@ func NewPackfileWithCache( fs billy.Filesystem, file billy.File, cache cache.Object, + largeObjectThreshold int64, ) *Packfile { s := NewScanner(file) return &Packfile{ @@ -59,6 +63,7 @@ func NewPackfileWithCache( s, cache, make(map[int64]plumbing.ObjectType), + largeObjectThreshold, } } @@ -66,8 +71,8 @@ func NewPackfileWithCache( // and packfile idx. // If the filesystem is provided, the packfile will return FSObjects, otherwise // it will return MemoryObjects. -func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File) *Packfile { - return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault()) +func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File, largeObjectThreshold int64) *Packfile { + return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault(), largeObjectThreshold) } // Get retrieves the encoded object in the packfile with the given hash. 
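// Usage note for the signature change above: NewPackfile and
// NewPackfileWithCache now thread a largeObjectThreshold down to FSObject.
// Per the code above, a threshold of 0 keeps the old fully-buffered
// behaviour, while a positive value makes any object bigger than the
// threshold stream directly from disk (see getReaderDirect in the next
// hunks). A hedged sketch; openPack and the 100 MiB value are illustrative,
// not go-git API:

package packexample

import (
    "github.com/go-git/go-billy/v5"
    "github.com/go-git/go-git/v5/plumbing/format/idxfile"
    "github.com/go-git/go-git/v5/plumbing/format/packfile"
)

// openPack opens a parsed packfile in which blobs over 100 MiB are
// streamed rather than held in memory or the object cache.
func openPack(idx idxfile.Index, fs billy.Filesystem, f billy.File) *packfile.Packfile {
    const largeObjectThreshold = 100 * 1024 * 1024
    return packfile.NewPackfile(idx, fs, f, largeObjectThreshold)
}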
@@ -133,9 +138,8 @@ func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) { case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject: return h.Length, nil case plumbing.REFDeltaObject, plumbing.OFSDeltaObject: - buf := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(buf) - buf.Reset() + buf := sync.GetBytesBuffer() + defer sync.PutBytesBuffer(buf) if _, _, err := p.s.NextObject(buf); err != nil { return 0, err @@ -222,9 +226,9 @@ func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing. // For delta objects we read the delta data and apply the small object // optimization only if the expanded version of the object still meets // the small object threshold condition. - buf := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(buf) - buf.Reset() + buf := sync.GetBytesBuffer() + defer sync.PutBytesBuffer(buf) + if _, _, err := p.s.NextObject(buf); err != nil { return nil, err } @@ -263,6 +267,7 @@ func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing. p.fs, p.file.Name(), p.deltaBaseCache, + p.largeObjectThreshold, ), nil } @@ -282,6 +287,49 @@ func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) { return obj.Reader() } +func asyncReader(p *Packfile) (io.ReadCloser, error) { + reader := ioutil.NewReaderUsingReaderAt(p.file, p.s.r.offset) + zr, err := sync.GetZlibReader(reader) + if err != nil { + return nil, fmt.Errorf("zlib reset error: %s", err) + } + + return ioutil.NewReadCloserWithCloser(zr.Reader, func() error { + sync.PutZlibReader(zr) + return nil + }), nil + +} + +func (p *Packfile) getReaderDirect(h *ObjectHeader) (io.ReadCloser, error) { + switch h.Type { + case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject: + return asyncReader(p) + case plumbing.REFDeltaObject: + deltaRc, err := asyncReader(p) + if err != nil { + return nil, err + } + r, err := p.readREFDeltaObjectContent(h, deltaRc) + if err != nil { + return nil, err + } + return r, nil + case plumbing.OFSDeltaObject: + deltaRc, err := asyncReader(p) + if err != nil { + return nil, err + } + r, err := p.readOFSDeltaObjectContent(h, deltaRc) + if err != nil { + return nil, err + } + return r, nil + default: + return nil, ErrInvalidObject.AddDetails("type %q", h.Type) + } +} + func (p *Packfile) getNextMemoryObject(h *ObjectHeader) (plumbing.EncodedObject, error) { var obj = new(plumbing.MemoryObject) obj.SetSize(h.Length) @@ -323,9 +371,9 @@ func (p *Packfile) fillRegularObjectContent(obj plumbing.EncodedObject) (err err } func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) error { - buf := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(buf) - buf.Reset() + buf := sync.GetBytesBuffer() + defer sync.PutBytesBuffer(buf) + _, _, err := p.s.NextObject(buf) if err != nil { return err @@ -334,6 +382,20 @@ func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plu return p.fillREFDeltaObjectContentWithBuffer(obj, ref, buf) } +func (p *Packfile) readREFDeltaObjectContent(h *ObjectHeader, deltaRC io.Reader) (io.ReadCloser, error) { + var err error + + base, ok := p.cacheGet(h.Reference) + if !ok { + base, err = p.Get(h.Reference) + if err != nil { + return nil, err + } + } + + return ReaderFromDelta(base, deltaRC) +} + func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, ref plumbing.Hash, buf *bytes.Buffer) error { var err error @@ -353,9 +415,9 @@ func (p *Packfile) 
fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObjec } func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) error { - buf := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(buf) - buf.Reset() + buf := sync.GetBytesBuffer() + defer sync.PutBytesBuffer(buf) + _, _, err := p.s.NextObject(buf) if err != nil { return err @@ -364,6 +426,20 @@ func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset return p.fillOFSDeltaObjectContentWithBuffer(obj, offset, buf) } +func (p *Packfile) readOFSDeltaObjectContent(h *ObjectHeader, deltaRC io.Reader) (io.ReadCloser, error) { + hash, err := p.FindHash(h.OffsetReference) + if err != nil { + return nil, err + } + + base, err := p.objectAtOffset(h.OffsetReference, hash) + if err != nil { + return nil, err + } + + return ReaderFromDelta(base, deltaRC) +} + func (p *Packfile) fillOFSDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, offset int64, buf *bytes.Buffer) error { hash, err := p.FindHash(offset) if err != nil { diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/parser.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/parser.go index 4b5a5708..62f1d13c 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/parser.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/parser.go @@ -3,13 +3,14 @@ package packfile import ( "bytes" "errors" + "fmt" "io" - stdioutil "io/ioutil" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/cache" "github.com/go-git/go-git/v5/plumbing/storer" "github.com/go-git/go-git/v5/utils/ioutil" + "github.com/go-git/go-git/v5/utils/sync" ) var ( @@ -46,7 +47,6 @@ type Parser struct { oi []*objectInfo oiByHash map[plumbing.Hash]*objectInfo oiByOffset map[int64]*objectInfo - hashOffset map[plumbing.Hash]int64 checksum plumbing.Hash cache *cache.BufferLRU @@ -175,12 +175,25 @@ func (p *Parser) init() error { return nil } +type objectHeaderWriter func(typ plumbing.ObjectType, sz int64) error + +type lazyObjectWriter interface { + // LazyWriter enables an object to be lazily written. + // It returns: + // - w: a writer to receive the object's content. + // - lwh: a func to write the object header. + // - err: any error from the initial writer creation process. + // + // Note that if the object header is not written BEFORE the writer + // is used, this will result in an invalid object. + LazyWriter() (w io.WriteCloser, lwh objectHeaderWriter, err error) +} + func (p *Parser) indexObjects() error { - buf := new(bytes.Buffer) + buf := sync.GetBytesBuffer() + defer sync.PutBytesBuffer(buf) for i := uint32(0); i < p.count; i++ { - buf.Reset() - oh, err := p.scanner.NextObjectHeader() if err != nil { return err @@ -220,39 +233,76 @@ func (p *Parser) indexObjects() error { ota = newBaseObject(oh.Offset, oh.Length, t) } - _, crc, err := p.scanner.NextObject(buf) + hasher := plumbing.NewHasher(oh.Type, oh.Length) + writers := []io.Writer{hasher} + var obj *plumbing.MemoryObject + + // Lazy writing is only available for non-delta objects. + if p.storage != nil && !delta { + // When a storage is set and supports lazy writing, + // use that instead of creating a memory object. 
+ if low, ok := p.storage.(lazyObjectWriter); ok { + ow, lwh, err := low.LazyWriter() + if err != nil { + return err + } + + if err = lwh(oh.Type, oh.Length); err != nil { + return err + } + + defer ow.Close() + writers = append(writers, ow) + } else { + obj = new(plumbing.MemoryObject) + obj.SetSize(oh.Length) + obj.SetType(oh.Type) + + writers = append(writers, obj) + } + } + if delta && !p.scanner.IsSeekable { + buf.Reset() + buf.Grow(int(oh.Length)) + writers = append(writers, buf) + } + + mw := io.MultiWriter(writers...) + + _, crc, err := p.scanner.NextObject(mw) if err != nil { return err } + // Non delta objects needs to be added into the storage. This + // is only required when lazy writing is not supported. + if obj != nil { + if _, err := p.storage.SetEncodedObject(obj); err != nil { + return err + } + } + ota.Crc32 = crc ota.Length = oh.Length - data := buf.Bytes() if !delta { - sha1, err := getSHA1(ota.Type, data) - if err != nil { - return err + sha1 := hasher.Sum() + + // Move children of placeholder parent into actual parent, in case this + // was a non-external delta reference. + if placeholder, ok := p.oiByHash[sha1]; ok { + ota.Children = placeholder.Children + for _, c := range ota.Children { + c.Parent = ota + } } ota.SHA1 = sha1 p.oiByHash[ota.SHA1] = ota } - if p.storage != nil && !delta { - obj := new(plumbing.MemoryObject) - obj.SetSize(oh.Length) - obj.SetType(oh.Type) - if _, err := obj.Write(data); err != nil { - return err - } - - if _, err := p.storage.SetEncodedObject(obj); err != nil { - return err - } - } - if delta && !p.scanner.IsSeekable { + data := buf.Bytes() p.deltas[oh.Offset] = make([]byte, len(data)) copy(p.deltas[oh.Offset], data) } @@ -265,28 +315,37 @@ func (p *Parser) indexObjects() error { } func (p *Parser) resolveDeltas() error { - buf := &bytes.Buffer{} + buf := sync.GetBytesBuffer() + defer sync.PutBytesBuffer(buf) + for _, obj := range p.oi { buf.Reset() + buf.Grow(int(obj.Length)) err := p.get(obj, buf) if err != nil { return err } - content := buf.Bytes() if err := p.onInflatedObjectHeader(obj.Type, obj.Length, obj.Offset); err != nil { return err } - if err := p.onInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32, content); err != nil { + if err := p.onInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32, nil); err != nil { return err } if !obj.IsDelta() && len(obj.Children) > 0 { + // Dealing with an io.ReaderAt object, means we can + // create it once and reuse across all children. + r := bytes.NewReader(buf.Bytes()) for _, child := range obj.Children { - if err := p.resolveObject(stdioutil.Discard, child, content); err != nil { + // Even though we are discarding the output, we still need to read it to + // so that the scanner can advance to the next object, and the SHA1 can be + // calculated. + if err := p.resolveObject(io.Discard, child, r); err != nil { return err } + p.resolveExternalRef(child) } // Remove the delta from the cache. 
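// A sketch of the lazy-writing hook used by indexObjects above: when the
// target storage implements lazyObjectWriter, non-delta objects stream
// straight into storage instead of being staged in a MemoryObject first.
// Everything below (lazyStore, nopWriteCloser, the buffer backing) is
// invented for illustration; go-git wires the real implementation up in
// its own storage layer, and the exact interface match is an internal
// detail of the packfile package:

package packexample

import (
    "bytes"
    "fmt"
    "io"

    "github.com/go-git/go-git/v5/plumbing"
)

type nopWriteCloser struct{ io.Writer }

func (nopWriteCloser) Close() error { return nil }

// lazyStore captures a single object: header first, then content.
type lazyStore struct {
    buf bytes.Buffer
}

// LazyWriter returns the content writer plus a header callback. Per the
// interface contract documented above, the callback must run before any
// content is written, otherwise the stored object is invalid.
func (s *lazyStore) LazyWriter() (io.WriteCloser, func(plumbing.ObjectType, int64) error, error) {
    writeHeader := func(typ plumbing.ObjectType, sz int64) error {
        // Loose-object style header: "<type> <size>\x00".
        _, err := fmt.Fprintf(&s.buf, "%s %d\x00", typ, sz)
        return err
    }
    return nopWriteCloser{&s.buf}, writeHeader, nil
}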
@@ -299,6 +358,16 @@ func (p *Parser) resolveDeltas() error { return nil } +func (p *Parser) resolveExternalRef(o *objectInfo) { + if ref, ok := p.oiByHash[o.SHA1]; ok && ref.ExternalRef { + p.oiByHash[o.SHA1] = o + o.Children = ref.Children + for _, c := range o.Children { + c.Parent = o + } + } +} + func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) { if !o.ExternalRef { // skip cache check for placeholder parents b, ok := p.cache.Get(o.Offset) @@ -336,16 +405,15 @@ func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) { } if o.DiskType.IsDelta() { - b := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(b) - b.Reset() + b := sync.GetBytesBuffer() + defer sync.PutBytesBuffer(b) + buf.Grow(int(o.Length)) err := p.get(o.Parent, b) if err != nil { return err } - base := b.Bytes() - err = p.resolveObject(buf, o, base) + err = p.resolveObject(buf, o, bytes.NewReader(b.Bytes())) if err != nil { return err } @@ -356,6 +424,13 @@ func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) { } } + // If the scanner is seekable, caching this data into + // memory by offset seems wasteful. + // There is a trade-off to be considered here in terms + // of execution time vs memory consumption. + // + // TODO: improve seekable execution time, so that we can + // skip this cache. if len(o.Children) > 0 { data := make([]byte, buf.Len()) copy(data, buf.Bytes()) @@ -364,41 +439,75 @@ func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) { return nil } +// resolveObject resolves an object from base, using information +// provided by o. +// +// This call has the side-effect of changing field values +// from the object info o: +// - Type: OFSDeltaObject may become the target type (e.g. Blob). +// - Size: The size may be update with the target size. +// - Hash: Zero hashes will be calculated as part of the object +// resolution. Hence why this process can't be avoided even when w +// is an io.Discard. +// +// base must be an io.ReaderAt, which is a requirement from +// patchDeltaStream. The main reason being that reversing an +// delta object may lead to going backs and forths within base, +// which is not supported by io.Reader. func (p *Parser) resolveObject( w io.Writer, o *objectInfo, - base []byte, + base io.ReaderAt, ) error { if !o.DiskType.IsDelta() { return nil } - buf := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(buf) - buf.Reset() + buf := sync.GetBytesBuffer() + defer sync.PutBytesBuffer(buf) err := p.readData(buf, o) if err != nil { return err } - data := buf.Bytes() - data, err = applyPatchBase(o, data, base) + writers := []io.Writer{w} + var obj *plumbing.MemoryObject + var lwh objectHeaderWriter + + if p.storage != nil { + if low, ok := p.storage.(lazyObjectWriter); ok { + ow, wh, err := low.LazyWriter() + if err != nil { + return err + } + lwh = wh + + defer ow.Close() + writers = append(writers, ow) + } else { + obj = new(plumbing.MemoryObject) + ow, err := obj.Writer() + if err != nil { + return err + } + + writers = append(writers, ow) + } + } + + mw := io.MultiWriter(writers...) + + err = applyPatchBase(o, base, buf, mw, lwh) if err != nil { return err } - if p.storage != nil { - obj := new(plumbing.MemoryObject) - obj.SetSize(o.Size()) + if obj != nil { obj.SetType(o.Type) - if _, err := obj.Write(data); err != nil { - return err - } - + obj.SetSize(o.Size()) // Size here is correct as it was populated by applyPatchBase. 
if _, err := p.storage.SetEncodedObject(obj); err != nil { return err } } - _, err = w.Write(data) return err } @@ -422,24 +531,31 @@ func (p *Parser) readData(w io.Writer, o *objectInfo) error { return nil } -func applyPatchBase(ota *objectInfo, data, base []byte) ([]byte, error) { - patched, err := PatchDelta(base, data) +// applyPatchBase applies the patch to target. +// +// Note that ota will be updated based on the description in resolveObject. +func applyPatchBase(ota *objectInfo, base io.ReaderAt, delta io.Reader, target io.Writer, wh objectHeaderWriter) error { + if target == nil { + return fmt.Errorf("cannot apply patch against nil target") + } + + typ := ota.Type + if ota.SHA1 == plumbing.ZeroHash { + typ = ota.Parent.Type + } + + sz, h, err := patchDeltaWriter(target, base, delta, typ, wh) if err != nil { - return nil, err + return err } if ota.SHA1 == plumbing.ZeroHash { - ota.Type = ota.Parent.Type - sha1, err := getSHA1(ota.Type, patched) - if err != nil { - return nil, err - } - - ota.SHA1 = sha1 - ota.Length = int64(len(patched)) + ota.Type = typ + ota.Length = int64(sz) + ota.SHA1 = h } - return patched, nil + return nil } func getSHA1(t plumbing.ObjectType, data []byte) (plumbing.Hash, error) { diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go index 9e90f30a..960769c7 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go @@ -1,12 +1,16 @@ package packfile import ( + "bufio" "bytes" "errors" + "fmt" "io" + "math" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/utils/ioutil" + "github.com/go-git/go-git/v5/utils/sync" ) // See https://github.com/git/git/blob/49fa3dc76179e04b0833542fa52d0f287a4955ac/delta.h @@ -14,7 +18,33 @@ import ( // and https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js // for details about the delta format. -const deltaSizeMin = 4 +var ( + ErrInvalidDelta = errors.New("invalid delta") + ErrDeltaCmd = errors.New("wrong delta command") +) + +const ( + payload = 0x7f // 0111 1111 + continuation = 0x80 // 1000 0000 +) + +type offset struct { + mask byte + shift uint +} + +var offsets = []offset{ + {mask: 0x01, shift: 0}, + {mask: 0x02, shift: 8}, + {mask: 0x04, shift: 16}, + {mask: 0x08, shift: 24}, +} + +var sizes = []offset{ + {mask: 0x10, shift: 0}, + {mask: 0x20, shift: 8}, + {mask: 0x40, shift: 16}, +} // ApplyDelta writes to target the result of applying the modification deltas in delta to base. 
func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) { @@ -32,18 +62,16 @@ func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) { defer ioutil.CheckClose(w, &err) - buf := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(buf) - buf.Reset() + buf := sync.GetBytesBuffer() + defer sync.PutBytesBuffer(buf) _, err = buf.ReadFrom(r) if err != nil { return err } src := buf.Bytes() - dst := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(dst) - dst.Reset() + dst := sync.GetBytesBuffer() + defer sync.PutBytesBuffer(dst) err = patchDelta(dst, src, delta) if err != nil { return err @@ -51,17 +79,12 @@ func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) { target.SetSize(int64(dst.Len())) - b := byteSlicePool.Get().([]byte) - _, err = io.CopyBuffer(w, dst, b) - byteSlicePool.Put(b) + b := sync.GetByteSlice() + _, err = io.CopyBuffer(w, dst, *b) + sync.PutByteSlice(b) return err } -var ( - ErrInvalidDelta = errors.New("invalid delta") - ErrDeltaCmd = errors.New("wrong delta command") -) - // PatchDelta returns the result of applying the modification deltas in delta to src. // An error will be returned if delta is corrupted (ErrDeltaLen) or an action command // is not copy from source or copy from delta (ErrDeltaCmd). @@ -73,8 +96,137 @@ func PatchDelta(src, delta []byte) ([]byte, error) { return b.Bytes(), nil } +func ReaderFromDelta(base plumbing.EncodedObject, deltaRC io.Reader) (io.ReadCloser, error) { + deltaBuf := bufio.NewReaderSize(deltaRC, 1024) + srcSz, err := decodeLEB128ByteReader(deltaBuf) + if err != nil { + if err == io.EOF { + return nil, ErrInvalidDelta + } + return nil, err + } + if srcSz != uint(base.Size()) { + return nil, ErrInvalidDelta + } + + targetSz, err := decodeLEB128ByteReader(deltaBuf) + if err != nil { + if err == io.EOF { + return nil, ErrInvalidDelta + } + return nil, err + } + remainingTargetSz := targetSz + + dstRd, dstWr := io.Pipe() + + go func() { + baseRd, err := base.Reader() + if err != nil { + _ = dstWr.CloseWithError(ErrInvalidDelta) + return + } + defer baseRd.Close() + + baseBuf := bufio.NewReader(baseRd) + basePos := uint(0) + + for { + cmd, err := deltaBuf.ReadByte() + if err == io.EOF { + _ = dstWr.CloseWithError(ErrInvalidDelta) + return + } + if err != nil { + _ = dstWr.CloseWithError(err) + return + } + + switch { + case isCopyFromSrc(cmd): + offset, err := decodeOffsetByteReader(cmd, deltaBuf) + if err != nil { + _ = dstWr.CloseWithError(err) + return + } + sz, err := decodeSizeByteReader(cmd, deltaBuf) + if err != nil { + _ = dstWr.CloseWithError(err) + return + } + + if invalidSize(sz, targetSz) || + invalidOffsetSize(offset, sz, srcSz) { + _ = dstWr.Close() + return + } + + discard := offset - basePos + if basePos > offset { + _ = baseRd.Close() + baseRd, err = base.Reader() + if err != nil { + _ = dstWr.CloseWithError(ErrInvalidDelta) + return + } + baseBuf.Reset(baseRd) + discard = offset + } + for discard > math.MaxInt32 { + n, err := baseBuf.Discard(math.MaxInt32) + if err != nil { + _ = dstWr.CloseWithError(err) + return + } + basePos += uint(n) + discard -= uint(n) + } + for discard > 0 { + n, err := baseBuf.Discard(int(discard)) + if err != nil { + _ = dstWr.CloseWithError(err) + return + } + basePos += uint(n) + discard -= uint(n) + } + if _, err := io.Copy(dstWr, io.LimitReader(baseBuf, int64(sz))); err != nil { + _ = dstWr.CloseWithError(err) + return + } + remainingTargetSz -= sz + basePos += sz + + case isCopyFromDelta(cmd): + sz := uint(cmd) // cmd is the 
size itself + if invalidSize(sz, targetSz) { + _ = dstWr.CloseWithError(ErrInvalidDelta) + return + } + if _, err := io.Copy(dstWr, io.LimitReader(deltaBuf, int64(sz))); err != nil { + _ = dstWr.CloseWithError(err) + return + } + + remainingTargetSz -= sz + + default: + _ = dstWr.CloseWithError(ErrDeltaCmd) + return + } + + if remainingTargetSz <= 0 { + _ = dstWr.Close() + return + } + } + }() + + return dstRd, nil +} + func patchDelta(dst *bytes.Buffer, src, delta []byte) error { - if len(delta) < deltaSizeMin { + if len(delta) < minCopySize { return ErrInvalidDelta } @@ -95,7 +247,9 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error { cmd = delta[0] delta = delta[1:] - if isCopyFromSrc(cmd) { + + switch { + case isCopyFromSrc(cmd): var offset, sz uint var err error offset, delta, err = decodeOffset(cmd, delta) @@ -114,7 +268,8 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error { } dst.Write(src[offset : offset+sz]) remainingTargetSz -= sz - } else if isCopyFromDelta(cmd) { + + case isCopyFromDelta(cmd): sz := uint(cmd) // cmd is the size itself if invalidSize(sz, targetSz) { return ErrInvalidDelta @@ -127,7 +282,8 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error { dst.Write(delta[0:sz]) remainingTargetSz -= sz delta = delta[sz:] - } else { + + default: return ErrDeltaCmd } @@ -139,6 +295,107 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error { return nil } +func patchDeltaWriter(dst io.Writer, base io.ReaderAt, delta io.Reader, + typ plumbing.ObjectType, writeHeader objectHeaderWriter) (uint, plumbing.Hash, error) { + deltaBuf := bufio.NewReaderSize(delta, 1024) + srcSz, err := decodeLEB128ByteReader(deltaBuf) + if err != nil { + if err == io.EOF { + return 0, plumbing.ZeroHash, ErrInvalidDelta + } + return 0, plumbing.ZeroHash, err + } + + if r, ok := base.(*bytes.Reader); ok && srcSz != uint(r.Size()) { + return 0, plumbing.ZeroHash, ErrInvalidDelta + } + + targetSz, err := decodeLEB128ByteReader(deltaBuf) + if err != nil { + if err == io.EOF { + return 0, plumbing.ZeroHash, ErrInvalidDelta + } + return 0, plumbing.ZeroHash, err + } + + // If header still needs to be written, caller will provide + // a LazyObjectWriterHeader. This seems to be the case when + // dealing with thin-packs. + if writeHeader != nil { + err = writeHeader(typ, int64(targetSz)) + if err != nil { + return 0, plumbing.ZeroHash, fmt.Errorf("could not lazy write header: %w", err) + } + } + + remainingTargetSz := targetSz + + hasher := plumbing.NewHasher(typ, int64(targetSz)) + mw := io.MultiWriter(dst, hasher) + + bufp := sync.GetByteSlice() + defer sync.PutByteSlice(bufp) + + sr := io.NewSectionReader(base, int64(0), int64(srcSz)) + // Keep both the io.LimitedReader types, so we can reset N. 
+	baselr := io.LimitReader(sr, 0).(*io.LimitedReader)
+	deltalr := io.LimitReader(deltaBuf, 0).(*io.LimitedReader)
+
+	for {
+		buf := *bufp
+		cmd, err := deltaBuf.ReadByte()
+		if err == io.EOF {
+			return 0, plumbing.ZeroHash, ErrInvalidDelta
+		}
+		if err != nil {
+			return 0, plumbing.ZeroHash, err
+		}
+
+		if isCopyFromSrc(cmd) {
+			offset, err := decodeOffsetByteReader(cmd, deltaBuf)
+			if err != nil {
+				return 0, plumbing.ZeroHash, err
+			}
+			sz, err := decodeSizeByteReader(cmd, deltaBuf)
+			if err != nil {
+				return 0, plumbing.ZeroHash, err
+			}
+
+			if invalidSize(sz, targetSz) ||
+				invalidOffsetSize(offset, sz, srcSz) {
+				return 0, plumbing.ZeroHash, ErrInvalidDelta
+			}
+
+			if _, err := sr.Seek(int64(offset), io.SeekStart); err != nil {
+				return 0, plumbing.ZeroHash, err
+			}
+			baselr.N = int64(sz)
+			if _, err := io.CopyBuffer(mw, baselr, buf); err != nil {
+				return 0, plumbing.ZeroHash, err
+			}
+			remainingTargetSz -= sz
+		} else if isCopyFromDelta(cmd) {
+			sz := uint(cmd) // cmd is the size itself
+			if invalidSize(sz, targetSz) {
+				return 0, plumbing.ZeroHash, ErrInvalidDelta
+			}
+			deltalr.N = int64(sz)
+			if _, err := io.CopyBuffer(mw, deltalr, buf); err != nil {
+				return 0, plumbing.ZeroHash, err
+			}
+
+			remainingTargetSz -= sz
+		} else {
+			return 0, plumbing.ZeroHash, ErrDeltaCmd
+		}
+		if remainingTargetSz <= 0 {
+			break
+		}
+	}
+
+	return targetSz, hasher.Sum(), nil
+}
+
 // Decodes a number encoded as an unsigned LEB128 at the start of some
 // binary data and returns the decoded number and the rest of the
 // stream.
@@ -161,78 +418,95 @@ func decodeLEB128(input []byte) (uint, []byte) {
 	return num, input[sz:]
 }
 
-const (
-	payload      = 0x7f // 0111 1111
-	continuation = 0x80 // 1000 0000
-)
+func decodeLEB128ByteReader(input io.ByteReader) (uint, error) {
+	var num, sz uint
+	for {
+		b, err := input.ReadByte()
+		if err != nil {
+			return 0, err
+		}
+
+		num |= (uint(b) & payload) << (sz * 7) // concatenates 7-bit chunks
+		sz++
+
+		if uint(b)&continuation == 0 {
+			break
+		}
+	}
+
+	return num, nil
+}
 
 func isCopyFromSrc(cmd byte) bool {
-	return (cmd & 0x80) != 0
+	return (cmd & continuation) != 0
 }
 
 func isCopyFromDelta(cmd byte) bool {
-	return (cmd&0x80) == 0 && cmd != 0
+	return (cmd&continuation) == 0 && cmd != 0
+}
+
+func decodeOffsetByteReader(cmd byte, delta io.ByteReader) (uint, error) {
+	var offset uint
+	for _, o := range offsets {
+		if (cmd & o.mask) != 0 {
+			next, err := delta.ReadByte()
+			if err != nil {
+				return 0, err
+			}
+			offset |= uint(next) << o.shift
+		}
+	}
+
+	return offset, nil
 }
 
 func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) {
 	var offset uint
-	if (cmd & 0x01) != 0 {
-		if len(delta) == 0 {
-			return 0, nil, ErrInvalidDelta
+	for _, o := range offsets {
+		if (cmd & o.mask) != 0 {
+			if len(delta) == 0 {
+				return 0, nil, ErrInvalidDelta
+			}
+			offset |= uint(delta[0]) << o.shift
+			delta = delta[1:]
 		}
-		offset = uint(delta[0])
-		delta = delta[1:]
-	}
-	if (cmd & 0x02) != 0 {
-		if len(delta) == 0 {
-			return 0, nil, ErrInvalidDelta
-		}
-		offset |= uint(delta[0]) << 8
-		delta = delta[1:]
-	}
-	if (cmd & 0x04) != 0 {
-		if len(delta) == 0 {
-			return 0, nil, ErrInvalidDelta
-		}
-		offset |= uint(delta[0]) << 16
-		delta = delta[1:]
-	}
-	if (cmd & 0x08) != 0 {
-		if len(delta) == 0 {
-			return 0, nil, ErrInvalidDelta
-		}
-		offset |= uint(delta[0]) << 24
-		delta = delta[1:]
 	}
 
 	return offset, delta, nil
 }
 
+func decodeSizeByteReader(cmd byte, delta io.ByteReader) (uint, error) {
+	var sz uint
+	for _, s := range sizes {
+		if (cmd & s.mask) != 0 {
+			next, err := delta.ReadByte()
+			if err != nil {
+				return 0, err
+			}
+			sz |= uint(next) << s.shift
+		}
+	}
+
+	if sz == 0 {
+		sz = maxCopySize
+	}
+
+	return sz, nil
+}
+
 func decodeSize(cmd byte, delta []byte) (uint, []byte, error) {
 	var sz uint
-	if (cmd & 0x10) != 0 {
-		if len(delta) == 0 {
-			return 0, nil, ErrInvalidDelta
+	for _, s := range sizes {
+		if (cmd & s.mask) != 0 {
+			if len(delta) == 0 {
+				return 0, nil, ErrInvalidDelta
+			}
+			sz |= uint(delta[0]) << s.shift
+			delta = delta[1:]
 		}
-		sz = uint(delta[0])
-		delta = delta[1:]
-	}
-	if (cmd & 0x20) != 0 {
-		if len(delta) == 0 {
-			return 0, nil, ErrInvalidDelta
-		}
-		sz |= uint(delta[0]) << 8
-		delta = delta[1:]
-	}
-	if (cmd & 0x40) != 0 {
-		if len(delta) == 0 {
-			return 0, nil, ErrInvalidDelta
-		}
-		sz |= uint(delta[0]) << 16
-		delta = delta[1:]
 	}
 
 	if sz == 0 {
-		sz = 0x10000
+		sz = maxCopySize
 	}
 
 	return sz, delta, nil
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/scanner.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/scanner.go
index 6e6a6878..730343ee 100644
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/scanner.go
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/scanner.go
@@ -3,17 +3,15 @@ package packfile
 
 import (
 	"bufio"
 	"bytes"
-	"compress/zlib"
 	"fmt"
 	"hash"
 	"hash/crc32"
 	"io"
-	stdioutil "io/ioutil"
-	"sync"
 
 	"github.com/go-git/go-git/v5/plumbing"
 	"github.com/go-git/go-git/v5/utils/binary"
 	"github.com/go-git/go-git/v5/utils/ioutil"
+	"github.com/go-git/go-git/v5/utils/sync"
 )
 
 var (
@@ -114,7 +112,7 @@ func (s *Scanner) Header() (version, objects uint32, err error) {
 	return
 }
 
-// readSignature reads an returns the signature field in the packfile.
+// readSignature reads and returns the signature field in the packfile.
 func (s *Scanner) readSignature() ([]byte, error) {
 	var sig = make([]byte, 4)
 	if _, err := io.ReadFull(s.r, sig); err != nil {
@@ -243,7 +241,7 @@ func (s *Scanner) discardObjectIfNeeded() error {
 	}
 
 	h := s.pendingObject
-	n, _, err := s.NextObject(stdioutil.Discard)
+	n, _, err := s.NextObject(io.Discard)
 	if err != nil {
 		return err
 	}
@@ -320,29 +318,38 @@ func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err erro
 	return
 }
 
+// ReadObject returns a reader for the object content and an error
+func (s *Scanner) ReadObject() (io.ReadCloser, error) {
+	s.pendingObject = nil
+	zr, err := sync.GetZlibReader(s.r)
+
+	if err != nil {
+		return nil, fmt.Errorf("zlib reset error: %s", err)
+	}
+
+	return ioutil.NewReadCloserWithCloser(zr.Reader, func() error {
+		sync.PutZlibReader(zr)
+		return nil
+	}), nil
+}
+
 // ReadRegularObject reads and write a non-deltified object
 // from it zlib stream in an object entry in the packfile.
 func (s *Scanner) copyObject(w io.Writer) (n int64, err error) {
-	zr := zlibReaderPool.Get().(io.ReadCloser)
-	defer zlibReaderPool.Put(zr)
+	zr, err := sync.GetZlibReader(s.r)
+	defer sync.PutZlibReader(zr)
 
-	if err = zr.(zlib.Resetter).Reset(s.r, nil); err != nil {
+	if err != nil {
 		return 0, fmt.Errorf("zlib reset error: %s", err)
 	}
 
-	defer ioutil.CheckClose(zr, &err)
-	buf := byteSlicePool.Get().([]byte)
-	n, err = io.CopyBuffer(w, zr, buf)
-	byteSlicePool.Put(buf)
+	defer ioutil.CheckClose(zr.Reader, &err)
+	buf := sync.GetByteSlice()
+	n, err = io.CopyBuffer(w, zr.Reader, *buf)
+	sync.PutByteSlice(buf)
 
 	return
 }
 
-var byteSlicePool = sync.Pool{
-	New: func() interface{} {
-		return make([]byte, 32*1024)
-	},
-}
-
 // SeekFromStart sets a new offset from start, returns the old position before
 // the change.
func (s *Scanner) SeekFromStart(offset int64) (previous int64, err error) { @@ -372,9 +379,10 @@ func (s *Scanner) Checksum() (plumbing.Hash, error) { // Close reads the reader until io.EOF func (s *Scanner) Close() error { - buf := byteSlicePool.Get().([]byte) - _, err := io.CopyBuffer(stdioutil.Discard, s.r, buf) - byteSlicePool.Put(buf) + buf := sync.GetByteSlice() + _, err := io.CopyBuffer(io.Discard, s.r, *buf) + sync.PutByteSlice(buf) + return err } @@ -384,13 +392,13 @@ func (s *Scanner) Flush() error { } // scannerReader has the following characteristics: -// - Provides an io.SeekReader impl for bufio.Reader, when the underlying -// reader supports it. -// - Keeps track of the current read position, for when the underlying reader -// isn't an io.SeekReader, but we still want to know the current offset. -// - Writes to the hash writer what it reads, with the aid of a smaller buffer. -// The buffer helps avoid a performance penality for performing small writes -// to the crc32 hash writer. +// - Provides an io.SeekReader impl for bufio.Reader, when the underlying +// reader supports it. +// - Keeps track of the current read position, for when the underlying reader +// isn't an io.SeekReader, but we still want to know the current offset. +// - Writes to the hash writer what it reads, with the aid of a smaller buffer. +// The buffer helps avoid a performance penalty for performing small writes +// to the crc32 hash writer. type scannerReader struct { reader io.Reader crc io.Writer diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/BUILD.bazel b/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/BUILD.bazel index 0ed96438..129860df 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/BUILD.bazel +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/BUILD.bazel @@ -4,9 +4,11 @@ go_library( name = "pktline", srcs = [ "encoder.go", + "error.go", "scanner.go", ], importmap = "peridot.resf.org/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline", importpath = "github.com/go-git/go-git/v5/plumbing/format/pktline", visibility = ["//visibility:public"], + deps = ["//vendor/github.com/go-git/go-git/v5/utils/trace"], ) diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/encoder.go index 6d409795..b6144faf 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/encoder.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/encoder.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "io" + + "github.com/go-git/go-git/v5/utils/trace" ) // An Encoder writes pkt-lines to an output stream. @@ -43,6 +45,7 @@ func NewEncoder(w io.Writer) *Encoder { // Flush encodes a flush-pkt to the output stream. 
func (e *Encoder) Flush() error { + defer trace.Packet.Print("packet: > 0000") _, err := e.w.Write(FlushPkt) return err } @@ -70,6 +73,7 @@ func (e *Encoder) encodeLine(p []byte) error { } n := len(p) + 4 + defer trace.Packet.Printf("packet: > %04x %s", n, p) if _, err := e.w.Write(asciiHex16(n)); err != nil { return err } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/error.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/error.go new file mode 100644 index 00000000..2c0e5a72 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/error.go @@ -0,0 +1,51 @@ +package pktline + +import ( + "bytes" + "errors" + "io" + "strings" +) + +var ( + // ErrInvalidErrorLine is returned by Decode when the packet line is not an + // error line. + ErrInvalidErrorLine = errors.New("expected an error-line") + + errPrefix = []byte("ERR ") +) + +// ErrorLine is a packet line that contains an error message. +// Once this packet is sent by client or server, the data transfer process is +// terminated. +// See https://git-scm.com/docs/pack-protocol#_pkt_line_format +type ErrorLine struct { + Text string +} + +// Error implements the error interface. +func (e *ErrorLine) Error() string { + return e.Text +} + +// Encode encodes the ErrorLine into a packet line. +func (e *ErrorLine) Encode(w io.Writer) error { + p := NewEncoder(w) + return p.Encodef("%s%s\n", string(errPrefix), e.Text) +} + +// Decode decodes a packet line into an ErrorLine. +func (e *ErrorLine) Decode(r io.Reader) error { + s := NewScanner(r) + if !s.Scan() { + return s.Err() + } + + line := s.Bytes() + if !bytes.HasPrefix(line, errPrefix) { + return ErrInvalidErrorLine + } + + e.Text = strings.TrimSpace(string(line[4:])) + return nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go index 99aab46e..fbb137de 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go @@ -1,8 +1,12 @@ package pktline import ( + "bytes" "errors" "io" + "strings" + + "github.com/go-git/go-git/v5/utils/trace" ) const ( @@ -65,6 +69,14 @@ func (s *Scanner) Scan() bool { return false } s.payload = s.payload[:l] + trace.Packet.Printf("packet: < %04x %s", l, s.payload) + + if bytes.HasPrefix(s.payload, errPrefix) { + s.err = &ErrorLine{ + Text: strings.TrimSpace(string(s.payload[4:])), + } + return false + } return true } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/hash.go b/vendor/github.com/go-git/go-git/v5/plumbing/hash.go index afc602a9..39bb73fb 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/hash.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/hash.go @@ -2,15 +2,15 @@ package plumbing import ( "bytes" - "crypto/sha1" "encoding/hex" - "hash" "sort" "strconv" + + "github.com/go-git/go-git/v5/plumbing/hash" ) // Hash SHA1 hashed content -type Hash [20]byte +type Hash [hash.Size]byte // ZeroHash is Hash with value zero var ZeroHash Hash @@ -46,7 +46,7 @@ type Hasher struct { } func NewHasher(t ObjectType, size int64) Hasher { - h := Hasher{sha1.New()} + h := Hasher{hash.New(hash.CryptoType)} h.Write(t.Bytes()) h.Write([]byte(" ")) h.Write([]byte(strconv.FormatInt(size, 10))) @@ -74,10 +74,11 @@ func (p HashSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } // IsHash returns true if the given string is a valid hash. 
func IsHash(s string) bool { - if len(s) != 40 { + switch len(s) { + case hash.HexSize: + _, err := hex.DecodeString(s) + return err == nil + default: return false } - - _, err := hex.DecodeString(s) - return err == nil } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/hash/BUILD.bazel b/vendor/github.com/go-git/go-git/v5/plumbing/hash/BUILD.bazel new file mode 100644 index 00000000..edecdfc1 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/hash/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "hash", + srcs = [ + "hash.go", + "hash_sha1.go", + ], + importmap = "peridot.resf.org/vendor/github.com/go-git/go-git/v5/plumbing/hash", + importpath = "github.com/go-git/go-git/v5/plumbing/hash", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/pjbgf/sha1cd"], +) diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/hash/hash.go b/vendor/github.com/go-git/go-git/v5/plumbing/hash/hash.go new file mode 100644 index 00000000..8609848f --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/hash/hash.go @@ -0,0 +1,60 @@ +// package hash provides a way for managing the +// underlying hash implementations used across go-git. +package hash + +import ( + "crypto" + "fmt" + "hash" + + "github.com/pjbgf/sha1cd" +) + +// algos is a map of hash algorithms. +var algos = map[crypto.Hash]func() hash.Hash{} + +func init() { + reset() +} + +// reset resets the default algos value. Can be used after running tests +// that registers new algorithms to avoid side effects. +func reset() { + algos[crypto.SHA1] = sha1cd.New + algos[crypto.SHA256] = crypto.SHA256.New +} + +// RegisterHash allows for the hash algorithm used to be overridden. +// This ensures the hash selection for go-git must be explicit, when +// overriding the default value. +func RegisterHash(h crypto.Hash, f func() hash.Hash) error { + if f == nil { + return fmt.Errorf("cannot register hash: f is nil") + } + + switch h { + case crypto.SHA1: + algos[h] = f + case crypto.SHA256: + algos[h] = f + default: + return fmt.Errorf("unsupported hash function: %v", h) + } + return nil +} + +// Hash is the same as hash.Hash. This allows consumers +// to not having to import this package alongside "hash". +type Hash interface { + hash.Hash +} + +// New returns a new Hash for the given hash function. +// It panics if the hash function is not registered. +func New(h crypto.Hash) Hash { + hh, ok := algos[h] + if !ok { + panic(fmt.Sprintf("hash algorithm not registered: %v", h)) + } + return hh() +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/hash/hash_sha1.go b/vendor/github.com/go-git/go-git/v5/plumbing/hash/hash_sha1.go new file mode 100644 index 00000000..e3cb60fe --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/hash/hash_sha1.go @@ -0,0 +1,15 @@ +//go:build !sha256 +// +build !sha256 + +package hash + +import "crypto" + +const ( + // CryptoType defines what hash algorithm is being used. + CryptoType = crypto.SHA1 + // Size defines the amount of bytes the hash yields. + Size = 20 + // HexSize defines the strings size of the hash when represented in hexadecimal. 
+ HexSize = 40 +) diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/hash/hash_sha256.go b/vendor/github.com/go-git/go-git/v5/plumbing/hash/hash_sha256.go new file mode 100644 index 00000000..1c52b897 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/hash/hash_sha256.go @@ -0,0 +1,15 @@ +//go:build sha256 +// +build sha256 + +package hash + +import "crypto" + +const ( + // CryptoType defines what hash algorithm is being used. + CryptoType = crypto.SHA256 + // Size defines the amount of bytes the hash yields. + Size = 32 + // HexSize defines the strings size of the hash when represented in hexadecimal. + HexSize = 64 +) diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/memory.go b/vendor/github.com/go-git/go-git/v5/plumbing/memory.go index 21337cc0..6d11271d 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/memory.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/memory.go @@ -25,13 +25,13 @@ func (o *MemoryObject) Hash() Hash { return o.h } -// Type return the ObjectType +// Type returns the ObjectType func (o *MemoryObject) Type() ObjectType { return o.t } // SetType sets the ObjectType func (o *MemoryObject) SetType(t ObjectType) { o.t = t } -// Size return the size of the object +// Size returns the size of the object func (o *MemoryObject) Size() int64 { return o.sz } // SetSize set the object size, a content of the given size should be written diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object.go b/vendor/github.com/go-git/go-git/v5/plumbing/object.go index 2655dee4..3ee9de9f 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object.go @@ -82,7 +82,7 @@ func (t ObjectType) Valid() bool { return t >= CommitObject && t <= REFDeltaObject } -// IsDelta returns true for any ObjectTyoe that represents a delta (i.e. +// IsDelta returns true for any ObjectType that represents a delta (i.e. // REFDeltaObject or OFSDeltaObject). func (t ObjectType) IsDelta() bool { return t == REFDeltaObject || t == OFSDeltaObject diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/BUILD.bazel b/vendor/github.com/go-git/go-git/v5/plumbing/object/BUILD.bazel index e9a16b7d..acb285de 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/BUILD.bazel +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/BUILD.bazel @@ -13,13 +13,13 @@ go_library( "commit_walker_ctime.go", "commit_walker_limit.go", "commit_walker_path.go", - "common.go", "difftree.go", "file.go", "merge_base.go", "object.go", "patch.go", "rename.go", + "signature.go", "tag.go", "tree.go", "treenoder.go", @@ -39,7 +39,8 @@ go_library( "//vendor/github.com/go-git/go-git/v5/utils/ioutil", "//vendor/github.com/go-git/go-git/v5/utils/merkletrie", "//vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder", + "//vendor/github.com/go-git/go-git/v5/utils/sync", "//vendor/github.com/sergi/go-diff/diffmatchpatch", - "//vendor/golang.org/x/crypto/openpgp", + "@com_github_protonmail_go_crypto//openpgp", ], ) diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/change.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/change.go index 8b119bc9..3c619df8 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/change.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/change.go @@ -39,7 +39,7 @@ func (c *Change) Action() (merkletrie.Action, error) { return merkletrie.Modify, nil } -// Files return the files before and after a change. +// Files returns the files before and after a change. 
// For insertions from will be nil. For deletions to will be nil. func (c *Change) Files() (from, to *File, err error) { action, err := c.Action() diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/change_adaptor.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/change_adaptor.go index f7011882..b96ee84d 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/change_adaptor.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/change_adaptor.go @@ -16,11 +16,11 @@ func newChange(c merkletrie.Change) (*Change, error) { var err error if ret.From, err = newChangeEntry(c.From); err != nil { - return nil, fmt.Errorf("From field: %s", err) + return nil, fmt.Errorf("from field: %s", err) } if ret.To, err = newChangeEntry(c.To); err != nil { - return nil, fmt.Errorf("To field: %s", err) + return nil, fmt.Errorf("to field: %s", err) } return ret, nil diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go index 98664a1e..3d096e18 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go @@ -1,7 +1,6 @@ package object import ( - "bufio" "bytes" "context" "errors" @@ -9,22 +8,34 @@ import ( "io" "strings" - "golang.org/x/crypto/openpgp" + "github.com/ProtonMail/go-crypto/openpgp" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/storer" "github.com/go-git/go-git/v5/utils/ioutil" + "github.com/go-git/go-git/v5/utils/sync" ) const ( - beginpgp string = "-----BEGIN PGP SIGNATURE-----" - endpgp string = "-----END PGP SIGNATURE-----" - headerpgp string = "gpgsig" + beginpgp string = "-----BEGIN PGP SIGNATURE-----" + endpgp string = "-----END PGP SIGNATURE-----" + headerpgp string = "gpgsig" + headerencoding string = "encoding" + + // https://github.com/git/git/blob/bcb6cae2966cc407ca1afc77413b3ef11103c175/Documentation/gitformat-signature.txt#L153 + // When a merge commit is created from a signed tag, the tag is embedded in + // the commit with the "mergetag" header. + headermergetag string = "mergetag" + + defaultUtf8CommitMessageEncoding MessageEncoding = "UTF-8" ) // Hash represents the hash of an object type Hash plumbing.Hash +// MessageEncoding represents the encoding of a commit +type MessageEncoding string + // Commit points to a single tree, marking it as what the project looked like // at a certain point in time. It contains meta-information about that point // in time, such as a timestamp, the author of the changes since the last @@ -38,6 +49,9 @@ type Commit struct { // Committer is the one performing the commit, might be different from // Author. Committer Signature + // MergeTag is the embedded tag object when a merge commit is created by + // merging a signed tag. + MergeTag string // PGPSignature is the PGP signature of the commit. PGPSignature string // Message is the commit message, contains arbitrary text. @@ -46,6 +60,8 @@ type Commit struct { TreeHash plumbing.Hash // ParentHashes are the hashes of the parent commits of the commit. ParentHashes []plumbing.Hash + // Encoding is the encoding of the commit. 
+ Encoding MessageEncoding s storer.EncodedObjectStorer } @@ -173,6 +189,7 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) { } c.Hash = o.Hash() + c.Encoding = defaultUtf8CommitMessageEncoding reader, err := o.Reader() if err != nil { @@ -180,11 +197,11 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) { } defer ioutil.CheckClose(reader, &err) - r := bufPool.Get().(*bufio.Reader) - defer bufPool.Put(r) - r.Reset(reader) + r := sync.GetBufioReader(reader) + defer sync.PutBufioReader(r) var message bool + var mergetag bool var pgpsig bool var msgbuf bytes.Buffer for { @@ -193,6 +210,16 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) { return err } + if mergetag { + if len(line) > 0 && line[0] == ' ' { + line = bytes.TrimLeft(line, " ") + c.MergeTag += string(line) + continue + } else { + mergetag = false + } + } + if pgpsig { if len(line) > 0 && line[0] == ' ' { line = bytes.TrimLeft(line, " ") @@ -226,6 +253,11 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) { c.Author.Decode(data) case "committer": c.Committer.Decode(data) + case headermergetag: + c.MergeTag += string(data) + "\n" + mergetag = true + case headerencoding: + c.Encoding = MessageEncoding(data) case headerpgp: c.PGPSignature += string(data) + "\n" pgpsig = true @@ -287,6 +319,28 @@ func (c *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) { return err } + if c.MergeTag != "" { + if _, err = fmt.Fprint(w, "\n"+headermergetag+" "); err != nil { + return err + } + + // Split tag information lines and re-write with a left padding and + // newline. Use join for this so it's clear that a newline should not be + // added after this section. The newline will be added either as part of + // the PGP signature or the commit message. 
+ mergetag := strings.TrimSuffix(c.MergeTag, "\n") + lines := strings.Split(mergetag, "\n") + if _, err = fmt.Fprint(w, strings.Join(lines, "\n ")); err != nil { + return err + } + } + + if string(c.Encoding) != "" && c.Encoding != defaultUtf8CommitMessageEncoding { + if _, err = fmt.Fprintf(w, "\n%s %s", headerencoding, c.Encoding); err != nil { + return err + } + } + if c.PGPSignature != "" && includeSig { if _, err = fmt.Fprint(w, "\n"+headerpgp+" "); err != nil { return err @@ -374,7 +428,18 @@ func (c *Commit) Verify(armoredKeyRing string) (*openpgp.Entity, error) { return nil, err } - return openpgp.CheckArmoredDetachedSignature(keyring, er, signature) + return openpgp.CheckArmoredDetachedSignature(keyring, er, signature, nil) +} + +// Less defines a compare function to determine which commit is 'earlier' by: +// - First use Committer.When +// - If Committer.When are equal then use Author.When +// - If Author.When also equal then compare the string value of the hash +func (c *Commit) Less(rhs *Commit) bool { + return c.Committer.When.Before(rhs.Committer.When) || + (c.Committer.When.Equal(rhs.Committer.When) && + (c.Author.When.Before(rhs.Author.When) || + (c.Author.When.Equal(rhs.Author.When) && bytes.Compare(c.Hash[:], rhs.Hash[:]) < 0))) } func indent(t string) string { diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go index aa0ca15f..c1ec8ba7 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go @@ -57,6 +57,8 @@ func (c *commitPathIter) Next() (*Commit, error) { } func (c *commitPathIter) getNextFileCommit() (*Commit, error) { + var parentTree, currentTree *Tree + for { // Parent-commit can be nil if the current-commit is the initial commit parentCommit, parentCommitErr := c.sourceIter.Next() @@ -68,13 +70,17 @@ func (c *commitPathIter) getNextFileCommit() (*Commit, error) { parentCommit = nil } - // Fetch the trees of the current and parent commits - currentTree, currTreeErr := c.currentCommit.Tree() - if currTreeErr != nil { - return nil, currTreeErr + if parentTree == nil { + var currTreeErr error + currentTree, currTreeErr = c.currentCommit.Tree() + if currTreeErr != nil { + return nil, currTreeErr + } + } else { + currentTree = parentTree + parentTree = nil } - var parentTree *Tree if parentCommit != nil { var parentTreeErr error parentTree, parentTreeErr = parentCommit.Tree() @@ -115,7 +121,8 @@ func (c *commitPathIter) hasFileChange(changes Changes, parent *Commit) bool { // filename matches, now check if source iterator contains all commits (from all refs) if c.checkParent { - if parent != nil && isParentHash(parent.Hash, c.currentCommit) { + // Check if parent is beyond the initial commit + if parent == nil || isParentHash(parent.Hash, c.currentCommit) { return true } continue diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/common.go deleted file mode 100644 index 3591f5f0..00000000 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/common.go +++ /dev/null @@ -1,12 +0,0 @@ -package object - -import ( - "bufio" - "sync" -) - -var bufPool = sync.Pool{ - New: func() interface{} { - return bufio.NewReader(nil) - }, -} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go index 
9b5f438c..3c61f626 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go @@ -6,7 +6,7 @@ import ( "errors" "fmt" "io" - "math" + "strconv" "strings" "github.com/go-git/go-git/v5/plumbing" @@ -96,10 +96,6 @@ func filePatchWithContext(ctx context.Context, c *Change) (fdiff.FilePatch, erro } -func filePatch(c *Change) (fdiff.FilePatch, error) { - return filePatchWithContext(context.Background(), c) -} - func fileContent(f *File) (content string, isBinary bool, err error) { if f == nil { return @@ -238,61 +234,56 @@ func (fileStats FileStats) String() string { return printStat(fileStats) } +// printStat prints the stats of changes in content of files. +// Original implementation: https://github.com/git/git/blob/1a87c842ece327d03d08096395969aca5e0a6996/diff.c#L2615 +// Parts of the output: +// |<+++/---> +// example: " main.go | 10 +++++++--- " func printStat(fileStats []FileStat) string { - padLength := float64(len(" ")) - newlineLength := float64(len("\n")) - separatorLength := float64(len("|")) - // Soft line length limit. The text length calculation below excludes - // length of the change number. Adding that would take it closer to 80, - // but probably not more than 80, until it's a huge number. - lineLength := 72.0 + maxGraphWidth := uint(53) + maxNameLen := 0 + maxChangeLen := 0 - // Get the longest filename and longest total change. - var longestLength float64 - var longestTotalChange float64 - for _, fs := range fileStats { - if int(longestLength) < len(fs.Name) { - longestLength = float64(len(fs.Name)) + scaleLinear := func(it, width, max uint) uint { + if it == 0 || max == 0 { + return 0 } - totalChange := fs.Addition + fs.Deletion - if int(longestTotalChange) < totalChange { - longestTotalChange = float64(totalChange) + + return 1 + (it * (width - 1) / max) + } + + for _, fs := range fileStats { + if len(fs.Name) > maxNameLen { + maxNameLen = len(fs.Name) + } + + changes := strconv.Itoa(fs.Addition + fs.Deletion) + if len(changes) > maxChangeLen { + maxChangeLen = len(changes) } } - // Parts of the output: - // |<+++/---> - // example: " main.go | 10 +++++++--- " - - // - leftTextLength := padLength + longestLength + padLength - - // <+++++/-----> - // Excluding number length here. - rightTextLength := padLength + padLength + newlineLength - - totalTextArea := leftTextLength + separatorLength + rightTextLength - heightOfHistogram := lineLength - totalTextArea - - // Scale the histogram. - var scaleFactor float64 - if longestTotalChange > heightOfHistogram { - // Scale down to heightOfHistogram. 
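// A minimal sketch (not part of the vendored patch) of the replacement
// scaling: instead of a floating-point scale factor, the rewritten printStat
// squeezes the +/- histogram linearly into a 53-column budget, as in git's
// diff.c.
package main

import "fmt"

func scaleLinear(it, width, max uint) uint {
	if it == 0 || max == 0 {
		return 0
	}
	// Any non-zero count keeps at least one glyph.
	return 1 + (it*(width-1))/max
}

func main() {
	const maxGraphWidth = 53
	add, del := uint(900), uint(100)
	if total := add + del; total > maxGraphWidth {
		add = scaleLinear(add, maxGraphWidth, total)
		del = scaleLinear(del, maxGraphWidth, total)
	}
	fmt.Println(add, del) // 47 6
}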
- scaleFactor = longestTotalChange / heightOfHistogram - } else { - scaleFactor = 1.0 - } - - finalOutput := "" + result := "" for _, fs := range fileStats { - addn := float64(fs.Addition) - deln := float64(fs.Deletion) - adds := strings.Repeat("+", int(math.Floor(addn/scaleFactor))) - dels := strings.Repeat("-", int(math.Floor(deln/scaleFactor))) - finalOutput += fmt.Sprintf(" %s | %d %s%s\n", fs.Name, (fs.Addition + fs.Deletion), adds, dels) - } + add := uint(fs.Addition) + del := uint(fs.Deletion) + np := maxNameLen - len(fs.Name) + cp := maxChangeLen - len(strconv.Itoa(fs.Addition+fs.Deletion)) - return finalOutput + total := add + del + if total > maxGraphWidth { + add = scaleLinear(add, maxGraphWidth, total) + del = scaleLinear(del, maxGraphWidth, total) + } + + adds := strings.Repeat("+", int(add)) + dels := strings.Repeat("-", int(del)) + namePad := strings.Repeat(" ", np) + changePad := strings.Repeat(" ", cp) + + result += fmt.Sprintf(" %s%s | %s%d %s%s\n", fs.Name, namePad, changePad, total, adds, dels) + } + return result } func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats { @@ -313,8 +304,8 @@ func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats { // File is deleted. cs.Name = from.Path() } else if from.Path() != to.Path() { - // File is renamed. Not supported. - // cs.Name = fmt.Sprintf("%s => %s", from.Path(), to.Path()) + // File is renamed. + cs.Name = fmt.Sprintf("%s => %s", from.Path(), to.Path()) } else { cs.Name = from.Path() } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/rename.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/rename.go index 7fed72c2..ad2b902c 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/rename.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/rename.go @@ -403,10 +403,16 @@ func min(a, b int) int { return b } +const maxMatrixSize = 10000 + func buildSimilarityMatrix(srcs, dsts []*Change, renameScore int) (similarityMatrix, error) { // Allocate for the worst-case scenario where every pair has a score // that we need to consider. We might not need that many. - matrix := make(similarityMatrix, 0, len(srcs)*len(dsts)) + matrixSize := len(srcs) * len(dsts) + if matrixSize > maxMatrixSize { + matrixSize = maxMatrixSize + } + matrix := make(similarityMatrix, 0, matrixSize) srcSizes := make([]int64, len(srcs)) dstSizes := make([]int64, len(dsts)) dstTooLarge := make(map[int]bool) @@ -735,10 +741,7 @@ func (i *similarityIndex) add(key int, cnt uint64) error { // It's the same key, so increment the counter. var err error i.hashes[j], err = newKeyCountPair(key, v.count()+cnt) - if err != nil { - return err - } - return nil + return err } else if j+1 >= len(i.hashes) { j = 0 } else { diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/signature.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/signature.go new file mode 100644 index 00000000..91cf371f --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/signature.go @@ -0,0 +1,101 @@ +package object + +import "bytes" + +const ( + signatureTypeUnknown signatureType = iota + signatureTypeOpenPGP + signatureTypeX509 + signatureTypeSSH +) + +var ( + // openPGPSignatureFormat is the format of an OpenPGP signature. + openPGPSignatureFormat = signatureFormat{ + []byte("-----BEGIN PGP SIGNATURE-----"), + []byte("-----BEGIN PGP MESSAGE-----"), + } + // x509SignatureFormat is the format of an X509 signature, which is + // a PKCS#7 (S/MIME) signature. 
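// A minimal standalone sketch (not part of the vendored patch) of the
// detection idea used here: a signature block is recognized by a known marker
// at the start of a line, and the last match wins. The message and base64
// payload are placeholders.
package main

import (
	"bytes"
	"fmt"
)

func main() {
	msg := []byte("Signed tag\n\n-----BEGIN SSH SIGNATURE-----\nU1NIU0lH\n")

	last := -1
	for n := 0; n < len(msg); {
		if bytes.HasPrefix(msg[n:], []byte("-----BEGIN SSH SIGNATURE-----")) {
			last = n // remember the most recent line that opens a signature
		}
		eol := bytes.IndexByte(msg[n:], '\n')
		if eol < 0 {
			break
		}
		n += eol + 1
	}
	fmt.Printf("message: %q\nsignature: %q\n", msg[:last], msg[last:])
}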
+ x509SignatureFormat = signatureFormat{ + []byte("-----BEGIN CERTIFICATE-----"), + } + + // sshSignatureFormat is the format of an SSH signature. + sshSignatureFormat = signatureFormat{ + []byte("-----BEGIN SSH SIGNATURE-----"), + } +) + +var ( + // knownSignatureFormats is a map of known signature formats, indexed by + // their signatureType. + knownSignatureFormats = map[signatureType]signatureFormat{ + signatureTypeOpenPGP: openPGPSignatureFormat, + signatureTypeX509: x509SignatureFormat, + signatureTypeSSH: sshSignatureFormat, + } +) + +// signatureType represents the type of the signature. +type signatureType int8 + +// signatureFormat represents the beginning of a signature. +type signatureFormat [][]byte + +// typeForSignature returns the type of the signature based on its format. +func typeForSignature(b []byte) signatureType { + for t, i := range knownSignatureFormats { + for _, begin := range i { + if bytes.HasPrefix(b, begin) { + return t + } + } + } + return signatureTypeUnknown +} + +// parseSignedBytes returns the position of the last signature block found in +// the given bytes. If no signature block is found, it returns -1. +// +// When multiple signature blocks are found, the position of the last one is +// returned. Any tailing bytes after this signature block start should be +// considered part of the signature. +// +// Given this, it would be safe to use the returned position to split the bytes +// into two parts: the first part containing the message, the second part +// containing the signature. +// +// Example: +// +// message := []byte(`Message with signature +// +// -----BEGIN SSH SIGNATURE----- +// ...`) +// +// var signature string +// if pos, _ := parseSignedBytes(message); pos != -1 { +// signature = string(message[pos:]) +// message = message[:pos] +// } +// +// This logic is on par with git's gpg-interface.c:parse_signed_buffer(). +// https://github.com/git/git/blob/7c2ef319c52c4997256f5807564523dfd4acdfc7/gpg-interface.c#L668 +func parseSignedBytes(b []byte) (int, signatureType) { + var n, match = 0, -1 + var t signatureType + for n < len(b) { + var i = b[n:] + if st := typeForSignature(i); st != signatureTypeUnknown { + match = n + t = st + } + if eol := bytes.IndexByte(i, '\n'); eol >= 0 { + n += eol + 1 + continue + } + // If we reach this point, we've reached the end. + break + } + return match, t +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/tag.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/tag.go index 46416580..cf46c08e 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/tag.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/tag.go @@ -1,18 +1,16 @@ package object import ( - "bufio" "bytes" "fmt" "io" - stdioutil "io/ioutil" "strings" - "golang.org/x/crypto/openpgp" - + "github.com/ProtonMail/go-crypto/openpgp" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/storer" "github.com/go-git/go-git/v5/utils/ioutil" + "github.com/go-git/go-git/v5/utils/sync" ) // Tag represents an annotated tag object. 
It points to a single git object of @@ -93,9 +91,9 @@ func (t *Tag) Decode(o plumbing.EncodedObject) (err error) { } defer ioutil.CheckClose(reader, &err) - r := bufPool.Get().(*bufio.Reader) - defer bufPool.Put(r) - r.Reset(reader) + r := sync.GetBufioReader(reader) + defer sync.PutBufioReader(r) + for { var line []byte line, err = r.ReadBytes('\n') @@ -128,40 +126,15 @@ func (t *Tag) Decode(o plumbing.EncodedObject) (err error) { } } - data, err := stdioutil.ReadAll(r) + data, err := io.ReadAll(r) if err != nil { return err } - - var pgpsig bool - // Check if data contains PGP signature. - if bytes.Contains(data, []byte(beginpgp)) { - // Split the lines at newline. - messageAndSig := bytes.Split(data, []byte("\n")) - - for _, l := range messageAndSig { - if pgpsig { - if bytes.Contains(l, []byte(endpgp)) { - t.PGPSignature += endpgp + "\n" - break - } else { - t.PGPSignature += string(l) + "\n" - } - continue - } - - // Check if it's the beginning of a PGP signature. - if bytes.Contains(l, []byte(beginpgp)) { - t.PGPSignature += beginpgp + "\n" - pgpsig = true - continue - } - - t.Message += string(l) + "\n" - } - } else { - t.Message = string(data) + if sm, _ := parseSignedBytes(data); sm >= 0 { + t.PGPSignature = string(data[sm:]) + data = data[:sm] } + t.Message = string(data) return nil } @@ -304,7 +277,7 @@ func (t *Tag) Verify(armoredKeyRing string) (*openpgp.Entity, error) { return nil, err } - return openpgp.CheckArmoredDetachedSignature(keyring, er, signature) + return openpgp.CheckArmoredDetachedSignature(keyring, er, signature, nil) } // TagIter provides an iterator for a set of tags. diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go index 5e6378ca..0fd0e513 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go @@ -1,19 +1,20 @@ package object import ( - "bufio" "context" "errors" "fmt" "io" "path" "path/filepath" + "sort" "strings" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/filemode" "github.com/go-git/go-git/v5/plumbing/storer" "github.com/go-git/go-git/v5/utils/ioutil" + "github.com/go-git/go-git/v5/utils/sync" ) const ( @@ -27,6 +28,7 @@ var ( ErrFileNotFound = errors.New("file not found") ErrDirectoryNotFound = errors.New("directory not found") ErrEntryNotFound = errors.New("entry not found") + ErrEntriesNotSorted = errors.New("entries in tree are not sorted") ) // Tree is basically like a directory - it references a bunch of other trees @@ -230,9 +232,9 @@ func (t *Tree) Decode(o plumbing.EncodedObject) (err error) { } defer ioutil.CheckClose(reader, &err) - r := bufPool.Get().(*bufio.Reader) - defer bufPool.Put(r) - r.Reset(reader) + r := sync.GetBufioReader(reader) + defer sync.PutBufioReader(r) + for { str, err := r.ReadString(' ') if err != nil { @@ -270,6 +272,28 @@ func (t *Tree) Decode(o plumbing.EncodedObject) (err error) { return nil } +type TreeEntrySorter []TreeEntry + +func (s TreeEntrySorter) Len() int { + return len(s) +} + +func (s TreeEntrySorter) Less(i, j int) bool { + name1 := s[i].Name + name2 := s[j].Name + if s[i].Mode == filemode.Dir { + name1 += "/" + } + if s[j].Mode == filemode.Dir { + name2 += "/" + } + return name1 < name2 +} + +func (s TreeEntrySorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + // Encode transforms a Tree into a plumbing.EncodedObject. 
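// Since Encode (below) now rejects unsorted entries with ErrEntriesNotSorted,
// callers that assemble trees by hand must sort first. A minimal sketch (not
// part of the vendored patch) showing the git ordering rule that directories
// compare as "name/":
package main

import (
	"fmt"
	"sort"

	"github.com/go-git/go-git/v5/plumbing/filemode"
	"github.com/go-git/go-git/v5/plumbing/object"
)

func main() {
	entries := []object.TreeEntry{
		{Name: "foo", Mode: filemode.Dir}, // compares as "foo/"
		{Name: "foo.bar", Mode: filemode.Regular},
	}
	sort.Sort(object.TreeEntrySorter(entries))
	fmt.Println(entries[0].Name, entries[1].Name) // foo.bar foo ('.' < '/')
}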
func (t *Tree) Encode(o plumbing.EncodedObject) (err error) { o.SetType(plumbing.TreeObject) @@ -279,7 +303,15 @@ func (t *Tree) Encode(o plumbing.EncodedObject) (err error) { } defer ioutil.CheckClose(w, &err) + + if !sort.IsSorted(TreeEntrySorter(t.Entries)) { + return ErrEntriesNotSorted + } + for _, entry := range t.Entries { + if strings.IndexByte(entry.Name, 0) != -1 { + return fmt.Errorf("malformed filename %q", entry.Name) + } if _, err = fmt.Fprintf(w, "%o %s", entry.Mode, entry.Name); err != nil { return err } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go index b4891b95..2adb6452 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go @@ -38,6 +38,10 @@ func NewTreeRootNode(t *Tree) noder.Noder { } } +func (t *treeNoder) Skip() bool { + return false +} + func (t *treeNoder) isRoot() bool { return t.name == "" } @@ -84,7 +88,9 @@ func (t *treeNoder) Children() ([]noder.Noder, error) { } } - return transformChildren(parent) + var err error + t.children, err = transformChildren(parent) + return t.children, err } // Returns the children of a tree as treenoders. diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/BUILD.bazel b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/BUILD.bazel index bde371d1..726aea8a 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/BUILD.bazel +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/BUILD.bazel @@ -8,6 +8,7 @@ go_library( "advrefs_encode.go", "common.go", "doc.go", + "gitproto.go", "report_status.go", "shallowupd.go", "srvresp.go", diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs.go index 1bd724ca..f93ad304 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs.go @@ -57,7 +57,7 @@ func (a *AdvRefs) AddReference(r *plumbing.Reference) error { switch r.Type() { case plumbing.SymbolicReference: v := fmt.Sprintf("%s:%s", r.Name().String(), r.Target().String()) - a.Capabilities.Add(capability.SymRef, v) + return a.Capabilities.Add(capability.SymRef, v) case plumbing.HashReference: a.References[r.Name().String()] = r.Hash() default: @@ -96,12 +96,12 @@ func (a *AdvRefs) addRefs(s storer.ReferenceStorer) error { // // Git versions prior to 1.8.4.3 has an special procedure to get // the reference where is pointing to HEAD: -// - Check if a reference called master exists. If exists and it -// has the same hash as HEAD hash, we can say that HEAD is pointing to master -// - If master does not exists or does not have the same hash as HEAD, -// order references and check in that order if that reference has the same -// hash than HEAD. If yes, set HEAD pointing to that branch hash -// - If no reference is found, throw an error +// - Check if a reference called master exists. If exists and it +// has the same hash as HEAD hash, we can say that HEAD is pointing to master +// - If master does not exists or does not have the same hash as HEAD, +// order references and check in that order if that reference has the same +// hash than HEAD. 
If yes, set HEAD pointing to that branch hash +// - If no reference is found, throw an error func (a *AdvRefs) resolveHead(s storer.ReferenceStorer) error { if a.Head == nil { return nil diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_decode.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_decode.go index 63bbe5ab..f8d26a28 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_decode.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_decode.go @@ -133,6 +133,7 @@ func decodeFirstHash(p *advRefsDecoder) decoderStateFn { return nil } + // TODO: Use object-format (when available) for hash size. Git 2.41+ if len(p.line) < hashSize { p.error("cannot read hash, pkt-line too short") return nil diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/capability.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/capability.go index a1297811..b52e8a49 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/capability.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/capability.go @@ -1,6 +1,11 @@ // Package capability defines the server and client capabilities. package capability +import ( + "fmt" + "os" +) + // Capability describes a server or client capability. type Capability string @@ -230,9 +235,23 @@ const ( PushCert Capability = "push-cert" // SymRef symbolic reference support for better negotiation. SymRef Capability = "symref" + // ObjectFormat takes a hash algorithm as an argument, indicates that the + // server supports the given hash algorithms. + ObjectFormat Capability = "object-format" + // Filter if present, fetch-pack may send "filter" commands to request a + // partial clone or partial fetch and request that the server omit various objects from the packfile + Filter Capability = "filter" ) -const DefaultAgent = "go-git/4.x" +const userAgent = "go-git/5.x" + +// DefaultAgent provides the user agent string. 
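// DefaultAgent is now a function, so the advertised agent string can be
// extended at run time. A minimal sketch (not part of the vendored patch);
// the "peridot" suffix is an arbitrary example value:
package main

import (
	"fmt"
	"os"

	"github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
)

func main() {
	fmt.Println(capability.DefaultAgent()) // go-git/5.x

	os.Setenv("GO_GIT_USER_AGENT_EXTRA", "peridot")
	fmt.Println(capability.DefaultAgent()) // go-git/5.x peridot
}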
+func DefaultAgent() string { + if envUserAgent, ok := os.LookupEnv("GO_GIT_USER_AGENT_EXTRA"); ok { + return fmt.Sprintf("%s %s", userAgent, envUserAgent) + } + return userAgent +} var known = map[Capability]bool{ MultiACK: true, MultiACKDetailed: true, NoDone: true, ThinPack: true, @@ -241,10 +260,11 @@ var known = map[Capability]bool{ NoProgress: true, IncludeTag: true, ReportStatus: true, DeleteRefs: true, Quiet: true, Atomic: true, PushOptions: true, AllowTipSHA1InWant: true, AllowReachableSHA1InWant: true, PushCert: true, SymRef: true, + ObjectFormat: true, Filter: true, } var requiresArgument = map[Capability]bool{ - Agent: true, PushCert: true, SymRef: true, + Agent: true, PushCert: true, SymRef: true, ObjectFormat: true, } var multipleArgument = map[Capability]bool{ diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/list.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/list.go index f41ec799..553d81cb 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/list.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/list.go @@ -86,7 +86,9 @@ func (l *List) Get(capability Capability) []string { // Set sets a capability removing the previous values func (l *List) Set(capability Capability, values ...string) error { - delete(l.m, capability) + if _, ok := l.m[capability]; ok { + l.m[capability].Values = l.m[capability].Values[:0] + } return l.Add(capability, values...) } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/common.go index ab07ac8f..a858323e 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/common.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/common.go @@ -19,7 +19,6 @@ var ( // common sp = []byte(" ") eol = []byte("\n") - eq = []byte{'='} // advertised-refs null = []byte("\x00") @@ -49,6 +48,11 @@ func isFlush(payload []byte) bool { return len(payload) == 0 } +var ( + // ErrNilWriter is returned when a nil writer is passed to the encoder. + ErrNilWriter = fmt.Errorf("nil writer") +) + // ErrUnexpectedData represents an unexpected data decoding a message type ErrUnexpectedData struct { Msg string diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/gitproto.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/gitproto.go new file mode 100644 index 00000000..0b7ff8f8 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/gitproto.go @@ -0,0 +1,120 @@ +package packp + +import ( + "fmt" + "io" + "strings" + + "github.com/go-git/go-git/v5/plumbing/format/pktline" +) + +var ( + // ErrInvalidGitProtoRequest is returned by Decode if the input is not a + // valid git protocol request. + ErrInvalidGitProtoRequest = fmt.Errorf("invalid git protocol request") +) + +// GitProtoRequest is a command request for the git protocol. +// It is used to send the command, endpoint, and extra parameters to the +// remote. +// See https://git-scm.com/docs/pack-protocol#_git_transport +type GitProtoRequest struct { + RequestCommand string + Pathname string + + // Optional + Host string + + // Optional + ExtraParams []string +} + +// validate validates the request. 
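// A minimal usage sketch (not part of the vendored patch): encoding the
// request yields a single pkt-line, e.g.
// "002fgit-upload-pack /repo.git\x00host=example.com\x00". The repository
// path and host below are placeholders.
package main

import (
	"bytes"
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/protocol/packp"
)

func main() {
	var buf bytes.Buffer
	req := packp.GitProtoRequest{
		RequestCommand: "git-upload-pack",
		Pathname:       "/repo.git",
		Host:           "example.com",
	}
	if err := req.Encode(&buf); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", buf.String())
}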
+func (g *GitProtoRequest) validate() error { + if g.RequestCommand == "" { + return fmt.Errorf("%w: empty request command", ErrInvalidGitProtoRequest) + } + + if g.Pathname == "" { + return fmt.Errorf("%w: empty pathname", ErrInvalidGitProtoRequest) + } + + return nil +} + +// Encode encodes the request into the writer. +func (g *GitProtoRequest) Encode(w io.Writer) error { + if w == nil { + return ErrNilWriter + } + + if err := g.validate(); err != nil { + return err + } + + p := pktline.NewEncoder(w) + req := fmt.Sprintf("%s %s\x00", g.RequestCommand, g.Pathname) + if host := g.Host; host != "" { + req += fmt.Sprintf("host=%s\x00", host) + } + + if len(g.ExtraParams) > 0 { + req += "\x00" + for _, param := range g.ExtraParams { + req += param + "\x00" + } + } + + if err := p.Encode([]byte(req)); err != nil { + return err + } + + return nil +} + +// Decode decodes the request from the reader. +func (g *GitProtoRequest) Decode(r io.Reader) error { + s := pktline.NewScanner(r) + if !s.Scan() { + err := s.Err() + if err == nil { + return ErrInvalidGitProtoRequest + } + return err + } + + line := string(s.Bytes()) + if len(line) == 0 { + return io.EOF + } + + if line[len(line)-1] != 0 { + return fmt.Errorf("%w: missing null terminator", ErrInvalidGitProtoRequest) + } + + parts := strings.SplitN(line, " ", 2) + if len(parts) != 2 { + return fmt.Errorf("%w: short request", ErrInvalidGitProtoRequest) + } + + g.RequestCommand = parts[0] + params := strings.Split(parts[1], string(null)) + if len(params) < 1 { + return fmt.Errorf("%w: missing pathname", ErrInvalidGitProtoRequest) + } + + g.Pathname = params[0] + if len(params) > 1 { + g.Host = strings.TrimPrefix(params[1], "host=") + } + + if len(params) > 2 { + for _, param := range params[2:] { + if param != "" { + g.ExtraParams = append(g.ExtraParams, param) + } + } + } + + return nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go index b3a7ee80..a9ddb538 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go @@ -21,11 +21,6 @@ type ServerResponse struct { // Decode decodes the response into the struct, isMultiACK should be true, if // the request was done with multi_ack or multi_ack_detailed capabilities. func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error { - // TODO: implement support for multi_ack or multi_ack_detailed responses - if isMultiACK { - return errors.New("multi_ack and multi_ack_detailed are not supported") - } - s := pktline.NewScanner(reader) for s.Scan() { @@ -48,7 +43,23 @@ func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error { } } - return s.Err() + // isMultiACK is true when the remote server advertises the related + // capabilities when they are not in transport.UnsupportedCapabilities. + // + // Users may decide to remove multi_ack and multi_ack_detailed from the + // unsupported capabilities list, which allows them to do initial clones + // from Azure DevOps. + // + // Follow-up fetches may error, therefore errors are wrapped with additional + // information highlighting that this capabilities are not supported by go-git. + // + // TODO: Implement support for multi_ack or multi_ack_detailed responses. 
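// A sketch (not part of the vendored patch) of the opt-in the comment above
// refers to, assuming the exported transport.UnsupportedCapabilities list:
// removing multi_ack/multi_ack_detailed lets initial clones from servers such
// as Azure DevOps proceed, while follow-up fetches may still fail with the
// wrapped error.
package main

import (
	"github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
	"github.com/go-git/go-git/v5/plumbing/transport"
)

func main() {
	// Keep stripping thin-pack, but stop stripping the multi_ack family from
	// server advertisements.
	transport.UnsupportedCapabilities = []capability.Capability{
		capability.ThinPack,
	}
}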
+ err := s.Err() + if err != nil && isMultiACK { + return fmt.Errorf("multi_ack and multi_ack_detailed are not supported: %w", err) + } + + return err } // stopReading detects when a valid command such as ACK or NAK is found to be @@ -90,12 +101,14 @@ func (r *ServerResponse) decodeLine(line []byte) error { return fmt.Errorf("unexpected flush") } - if bytes.Equal(line[0:3], ack) { - return r.decodeACKLine(line) - } + if len(line) >= 3 { + if bytes.Equal(line[0:3], ack) { + return r.decodeACKLine(line) + } - if bytes.Equal(line[0:3], nak) { - return nil + if bytes.Equal(line[0:3], nak) { + return nil + } } return fmt.Errorf("unexpected content %q", string(line)) @@ -113,8 +126,9 @@ func (r *ServerResponse) decodeACKLine(line []byte) error { } // Encode encodes the ServerResponse into a writer. -func (r *ServerResponse) Encode(w io.Writer) error { - if len(r.ACKs) > 1 { +func (r *ServerResponse) Encode(w io.Writer, isMultiACK bool) error { + if len(r.ACKs) > 1 && !isMultiACK { + // For further information, refer to comments in the Decode func above. return errors.New("multi_ack and multi_ack_detailed are not supported") } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go index ddec06e9..344f8c7e 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go @@ -95,7 +95,7 @@ func NewUploadRequestFromCapabilities(adv *capability.List) *UploadRequest { } if adv.Supports(capability.Agent) { - r.Capabilities.Set(capability.Agent, capability.DefaultAgent) + r.Capabilities.Set(capability.Agent, capability.DefaultAgent()) } return r diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_decode.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_decode.go index 895a3bf6..3da29985 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_decode.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_decode.go @@ -43,7 +43,7 @@ func (d *ulReqDecoder) Decode(v *UploadRequest) error { return d.err } -// fills out the parser stiky error +// fills out the parser sticky error func (d *ulReqDecoder) error(format string, a ...interface{}) { msg := fmt.Sprintf( "pkt-line %d: %s", d.nLine, diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq.go index 4d927d8b..8f39b39c 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq.go @@ -19,6 +19,7 @@ var ( type ReferenceUpdateRequest struct { Capabilities *capability.List Commands []*Command + Options []*Option Shallow *plumbing.Hash // Packfile contains an optional packfile reader. 
Packfile io.ReadCloser @@ -58,7 +59,7 @@ func NewReferenceUpdateRequestFromCapabilities(adv *capability.List) *ReferenceU r := NewReferenceUpdateRequest() if adv.Supports(capability.Agent) { - r.Capabilities.Set(capability.Agent, capability.DefaultAgent) + r.Capabilities.Set(capability.Agent, capability.DefaultAgent()) } if adv.Supports(capability.ReportStatus) { @@ -86,9 +87,9 @@ type Action string const ( Create Action = "create" - Update = "update" - Delete = "delete" - Invalid = "invalid" + Update Action = "update" + Delete Action = "delete" + Invalid Action = "invalid" ) type Command struct { @@ -120,3 +121,8 @@ func (c *Command) validate() error { return nil } + +type Option struct { + Key string + Value string +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_decode.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_decode.go index 2c9843a5..076de545 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_decode.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_decode.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/format/pktline" @@ -81,7 +80,7 @@ func (req *ReferenceUpdateRequest) Decode(r io.Reader) error { var ok bool rc, ok = r.(io.ReadCloser) if !ok { - rc = ioutil.NopCloser(r) + rc = io.NopCloser(r) } d := &updReqDecoder{r: rc, s: pktline.NewScanner(r)} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_encode.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_encode.go index 2545e935..1205cfaf 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_encode.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_encode.go @@ -9,10 +9,6 @@ import ( "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" ) -var ( - zeroHashString = plumbing.ZeroHash.String() -) - // Encode writes the ReferenceUpdateRequest encoding to the stream. 
func (req *ReferenceUpdateRequest) Encode(w io.Writer) error { if err := req.validate(); err != nil { @@ -29,6 +25,12 @@ func (req *ReferenceUpdateRequest) Encode(w io.Writer) error { return err } + if req.Capabilities.Supports(capability.PushOptions) { + if err := req.encodeOptions(e, req.Options); err != nil { + return err + } + } + if req.Packfile != nil { if _, err := io.Copy(w, req.Packfile); err != nil { return err @@ -73,3 +75,15 @@ func formatCommand(cmd *Command) string { n := cmd.New.String() return fmt.Sprintf("%s %s %s", o, n, cmd.Name) } + +func (req *ReferenceUpdateRequest) encodeOptions(e *pktline.Encoder, + opts []*Option) error { + + for _, opt := range opts { + if err := e.Encodef("%s=%s", opt.Key, opt.Value); err != nil { + return err + } + } + + return e.Flush() +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackreq.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackreq.go index de2206b3..48f44385 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackreq.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackreq.go @@ -38,10 +38,10 @@ func NewUploadPackRequestFromCapabilities(adv *capability.List) *UploadPackReque } } -// IsEmpty a request if empty if Haves are contained in the Wants, or if Wants -// length is zero +// IsEmpty returns whether a request is empty - it is empty if Haves are contained +// in the Wants, or if Wants length is zero, and we don't have any shallows func (r *UploadPackRequest) IsEmpty() bool { - return isSubset(r.Wants, r.Haves) + return isSubset(r.Wants, r.Haves) && len(r.Shallows) == 0 } func isSubset(needle []plumbing.Hash, haystack []plumbing.Hash) bool { diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackresp.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackresp.go index a9a7192e..a485cb7b 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackresp.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackresp.go @@ -24,7 +24,6 @@ type UploadPackResponse struct { r io.ReadCloser isShallow bool isMultiACK bool - isOk bool } // NewUploadPackResponse create a new UploadPackResponse instance, the request @@ -79,7 +78,7 @@ func (r *UploadPackResponse) Encode(w io.Writer) (err error) { } } - if err := r.ServerResponse.Encode(w); err != nil { + if err := r.ServerResponse.Encode(w, r.isMultiACK); err != nil { return err } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/reference.go b/vendor/github.com/go-git/go-git/v5/plumbing/reference.go index 08e908f1..ddba9302 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/reference.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/reference.go @@ -3,6 +3,7 @@ package plumbing import ( "errors" "fmt" + "regexp" "strings" ) @@ -15,10 +16,11 @@ const ( symrefPrefix = "ref: " ) -// RefRevParseRules are a set of rules to parse references into short names. -// These are the same rules as used by git in shorten_unambiguous_ref. +// RefRevParseRules are a set of rules to parse references into short names, or expand into a full reference. +// These are the same rules as used by git in shorten_unambiguous_ref and expand_ref. 
// See: https://github.com/git/git/blob/e0aaa1b6532cfce93d87af9bc813fb2e7a7ce9d7/refs.c#L417 var RefRevParseRules = []string{ + "%s", "refs/%s", "refs/tags/%s", "refs/heads/%s", @@ -28,6 +30,9 @@ var RefRevParseRules = []string{ var ( ErrReferenceNotFound = errors.New("reference not found") + + // ErrInvalidReferenceName is returned when a reference name is invalid. + ErrInvalidReferenceName = errors.New("invalid reference name") ) // ReferenceType reference type's @@ -113,7 +118,7 @@ func (r ReferenceName) String() string { func (r ReferenceName) Short() string { s := string(r) res := s - for _, format := range RefRevParseRules { + for _, format := range RefRevParseRules[1:] { _, err := fmt.Sscanf(s, format, &res) if err == nil { continue @@ -123,9 +128,95 @@ func (r ReferenceName) Short() string { return res } +var ( + ctrlSeqs = regexp.MustCompile(`[\000-\037\177]`) +) + +// Validate validates a reference name. +// This follows the git-check-ref-format rules. +// See https://git-scm.com/docs/git-check-ref-format +// +// It is important to note that this function does not check if the reference +// exists in the repository. +// It only checks if the reference name is valid. +// This functions does not support the --refspec-pattern, --normalize, and +// --allow-onelevel options. +// +// Git imposes the following rules on how references are named: +// +// 1. They can include slash / for hierarchical (directory) grouping, but no +// slash-separated component can begin with a dot . or end with the +// sequence .lock. +// 2. They must contain at least one /. This enforces the presence of a +// category like heads/, tags/ etc. but the actual names are not +// restricted. If the --allow-onelevel option is used, this rule is +// waived. +// 3. They cannot have two consecutive dots .. anywhere. +// 4. They cannot have ASCII control characters (i.e. bytes whose values are +// lower than \040, or \177 DEL), space, tilde ~, caret ^, or colon : +// anywhere. +// 5. They cannot have question-mark ?, asterisk *, or open bracket [ +// anywhere. See the --refspec-pattern option below for an exception to this +// rule. +// 6. They cannot begin or end with a slash / or contain multiple consecutive +// slashes (see the --normalize option below for an exception to this rule). +// 7. They cannot end with a dot .. +// 8. They cannot contain a sequence @{. +// 9. They cannot be the single character @. +// 10. They cannot contain a \. 
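// A few inputs against the rules above; a minimal sketch (not part of the
// vendored patch) using the exported Validate:
package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	for _, name := range []string{
		"refs/heads/main",     // ok: slash-separated, no forbidden sequences
		"refs/heads/foo..bar", // rule 3: consecutive dots
		"refs/heads/foo.lock", // rule 1: component ends with .lock
		"main",                // rule 2: no category such as refs/heads/
	} {
		err := plumbing.ReferenceName(name).Validate()
		fmt.Printf("%-21s -> %v\n", name, err)
	}
}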
+func (r ReferenceName) Validate() error { + s := string(r) + if len(s) == 0 { + return ErrInvalidReferenceName + } + + // HEAD is a special case + if r == HEAD { + return nil + } + + // rule 7 + if strings.HasSuffix(s, ".") { + return ErrInvalidReferenceName + } + + // rule 2 + parts := strings.Split(s, "/") + if len(parts) < 2 { + return ErrInvalidReferenceName + } + + isBranch := r.IsBranch() + isTag := r.IsTag() + for _, part := range parts { + // rule 6 + if len(part) == 0 { + return ErrInvalidReferenceName + } + + if strings.HasPrefix(part, ".") || // rule 1 + strings.Contains(part, "..") || // rule 3 + ctrlSeqs.MatchString(part) || // rule 4 + strings.ContainsAny(part, "~^:?*[ \t\n") || // rule 4 & 5 + strings.Contains(part, "@{") || // rule 8 + part == "@" || // rule 9 + strings.Contains(part, "\\") || // rule 10 + strings.HasSuffix(part, ".lock") { // rule 1 + return ErrInvalidReferenceName + } + + if (isBranch || isTag) && strings.HasPrefix(part, "-") { // branches & tags can't start with - + return ErrInvalidReferenceName + } + } + + return nil +} + const ( HEAD ReferenceName = "HEAD" Master ReferenceName = "refs/heads/master" + Main ReferenceName = "refs/heads/main" ) // Reference is a representation of git reference @@ -168,22 +259,22 @@ func NewHashReference(n ReferenceName, h Hash) *Reference { } } -// Type return the type of a reference +// Type returns the type of a reference func (r *Reference) Type() ReferenceType { return r.t } -// Name return the name of a reference +// Name returns the name of a reference func (r *Reference) Name() ReferenceName { return r.n } -// Hash return the hash of a hash reference +// Hash returns the hash of a hash reference func (r *Reference) Hash() Hash { return r.h } -// Target return the target of a symbolic reference +// Target returns the target of a symbolic reference func (r *Reference) Target() ReferenceName { return r.target } @@ -204,6 +295,21 @@ func (r *Reference) Strings() [2]string { } func (r *Reference) String() string { - s := r.Strings() - return fmt.Sprintf("%s %s", s[1], s[0]) + ref := "" + switch r.Type() { + case HashReference: + ref = r.Hash().String() + case SymbolicReference: + ref = symrefPrefix + r.Target().String() + default: + return "" + } + + name := r.Name().String() + var v strings.Builder + v.Grow(len(ref) + len(name) + 1) + v.WriteString(ref) + v.WriteString(" ") + v.WriteString(name) + return v.String() } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/storer/object.go b/vendor/github.com/go-git/go-git/v5/plumbing/storer/object.go index dfe309db..126b3742 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/storer/object.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/storer/object.go @@ -42,6 +42,7 @@ type EncodedObjectStorer interface { HasEncodedObject(plumbing.Hash) error // EncodedObjectSize returns the plaintext size of the encoded object. EncodedObjectSize(plumbing.Hash) (int64, error) + AddAlternate(remote string) error } // DeltaObjectStorer is an EncodedObjectStorer that can return delta @@ -52,8 +53,8 @@ type DeltaObjectStorer interface { DeltaObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error) } -// Transactioner is a optional method for ObjectStorer, it enable transaction -// base write and read operations in the storage +// Transactioner is a optional method for ObjectStorer, it enables transactional read and write +// operations. type Transactioner interface { // Begin starts a transaction. 
Begin() Transaction @@ -87,8 +88,8 @@ type PackedObjectStorer interface { DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) error } -// PackfileWriter is a optional method for ObjectStorer, it enable direct write -// of packfile to the storage +// PackfileWriter is an optional method for ObjectStorer, it enables directly writing +// a packfile to storage. type PackfileWriter interface { // PackfileWriter returns a writer for writing a packfile to the storage // diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/client/client.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/client/client.go index 4f6d210e..1948c230 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/client/client.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/client/client.go @@ -35,6 +35,10 @@ func InstallProtocol(scheme string, c transport.Transport) { // http://, https://, ssh:// and file://. // See `InstallProtocol` to add or modify protocols. func NewClient(endpoint *transport.Endpoint) (transport.Transport, error) { + return getTransport(endpoint) +} + +func getTransport(endpoint *transport.Endpoint) (transport.Transport, error) { f, ok := Protocols[endpoint.Protocol] if !ok { return nil, fmt.Errorf("unsupported scheme %q", endpoint.Protocol) @@ -43,6 +47,5 @@ func NewClient(endpoint *transport.Endpoint) (transport.Transport, error) { if f == nil { return nil, fmt.Errorf("malformed client for scheme %q, client is defined as nil", endpoint.Protocol) } - return f, nil } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go index ead21555..b05437fb 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go @@ -58,6 +58,11 @@ type Session interface { // If the repository does not exist, returns ErrRepositoryNotFound. // If the repository exists, but is empty, returns ErrEmptyRemoteRepository. AdvertisedReferences() (*packp.AdvRefs, error) + // AdvertisedReferencesContext retrieves the advertised references for a + // repository. + // If the repository does not exist, returns ErrRepositoryNotFound. + // If the repository exists, but is empty, returns ErrEmptyRemoteRepository. + AdvertisedReferencesContext(context.Context) (*packp.AdvRefs, error) io.Closer } @@ -103,10 +108,45 @@ type Endpoint struct { // Host is the host. Host string // Port is the port to connect, if 0 the default port for the given protocol - // wil be used. + // will be used. Port int // Path is the repository path. Path string + // InsecureSkipTLS skips ssl verify if protocol is https + InsecureSkipTLS bool + // CaBundle specify additional ca bundle with system cert pool + CaBundle []byte + // Proxy provides info required for connecting to a proxy. 
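// A minimal sketch (not part of the vendored patch) of the new per-endpoint
// proxy options; the proxy host and credentials are placeholders:
package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/transport"
)

func main() {
	ep, err := transport.NewEndpoint("https://github.com/org/repo.git")
	if err != nil {
		panic(err)
	}
	ep.Proxy = transport.ProxyOptions{
		URL:      "http://proxy.internal:3128",
		Username: "build",
		Password: "secret",
	}
	u, err := ep.Proxy.FullURL()
	if err != nil {
		panic(err)
	}
	fmt.Println(u) // http://build:secret@proxy.internal:3128
}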
+ Proxy ProxyOptions +} + +type ProxyOptions struct { + URL string + Username string + Password string +} + +func (o *ProxyOptions) Validate() error { + if o.URL != "" { + _, err := url.Parse(o.URL) + return err + } + return nil +} + +func (o *ProxyOptions) FullURL() (*url.URL, error) { + proxyURL, err := url.Parse(o.URL) + if err != nil { + return nil, err + } + if o.Username != "" { + if o.Password != "" { + proxyURL.User = url.UserPassword(o.Username, o.Password) + } else { + proxyURL.User = url.User(o.Username) + } + } + return proxyURL, nil } var defaultPorts = map[string]int{ @@ -187,11 +227,17 @@ func parseURL(endpoint string) (*Endpoint, error) { pass, _ = u.User.Password() } + host := u.Hostname() + if strings.Contains(host, ":") { + // IPv6 address + host = "[" + host + "]" + } + return &Endpoint{ Protocol: u.Scheme, User: user, Password: pass, - Host: u.Hostname(), + Host: host, Port: getPort(u), Path: getPath(u), }, nil diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/BUILD.bazel b/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/BUILD.bazel index 160510cf..f703763e 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/BUILD.bazel +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/BUILD.bazel @@ -14,5 +14,6 @@ go_library( "//vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common", "//vendor/github.com/go-git/go-git/v5/plumbing/transport/server", "//vendor/github.com/go-git/go-git/v5/utils/ioutil", + "//vendor/golang.org/x/sys/execabs", ], ) diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go index f6e23652..38714e2a 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go @@ -6,12 +6,12 @@ import ( "errors" "io" "os" - "os/exec" "path/filepath" "strings" "github.com/go-git/go-git/v5/plumbing/transport" "github.com/go-git/go-git/v5/plumbing/transport/internal/common" + "golang.org/x/sys/execabs" ) // DefaultClient is the default local client. @@ -36,7 +36,7 @@ func NewClient(uploadPackBin, receivePackBin string) transport.Transport { func prefixExecPath(cmd string) (string, error) { // Use `git --exec-path` to find the exec path. - execCmd := exec.Command("git", "--exec-path") + execCmd := execabs.Command("git", "--exec-path") stdout, err := execCmd.StdoutPipe() if err != nil { @@ -54,7 +54,7 @@ func prefixExecPath(cmd string) (string, error) { return "", err } if isPrefix { - return "", errors.New("Couldn't read exec-path line all at once") + return "", errors.New("couldn't read exec-path line all at once") } err = execCmd.Wait() @@ -66,7 +66,7 @@ func prefixExecPath(cmd string) (string, error) { cmd = filepath.Join(execPath, cmd) // Make sure it actually exists. 
- _, err = exec.LookPath(cmd) + _, err = execabs.LookPath(cmd) if err != nil { return "", err } @@ -83,9 +83,9 @@ func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.Auth cmd = r.ReceivePackBin } - _, err := exec.LookPath(cmd) + _, err := execabs.LookPath(cmd) if err != nil { - if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound { + if e, ok := err.(*execabs.Error); ok && e.Err == execabs.ErrNotFound { cmd, err = prefixExecPath(cmd) if err != nil { return nil, err @@ -95,11 +95,11 @@ func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.Auth } } - return &command{cmd: exec.Command(cmd, ep.Path)}, nil + return &command{cmd: execabs.Command(cmd, ep.Path)}, nil } type command struct { - cmd *exec.Cmd + cmd *execabs.Cmd stderrCloser io.Closer closed bool } @@ -148,7 +148,7 @@ func (c *command) Close() error { } // When a repository does not exist, the command exits with code 128. - if _, ok := err.(*exec.ExitError); ok { + if _, ok := err.(*execabs.ExitError); ok { return nil } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/git/BUILD.bazel b/vendor/github.com/go-git/go-git/v5/plumbing/transport/git/BUILD.bazel index 94681bde..a0c8af63 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/git/BUILD.bazel +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/git/BUILD.bazel @@ -7,7 +7,7 @@ go_library( importpath = "github.com/go-git/go-git/v5/plumbing/transport/git", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/go-git/go-git/v5/plumbing/format/pktline", + "//vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp", "//vendor/github.com/go-git/go-git/v5/plumbing/transport", "//vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common", "//vendor/github.com/go-git/go-git/v5/utils/ioutil", diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/git/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/git/common.go index 306aae26..2b878b03 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/git/common.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/git/common.go @@ -2,11 +2,11 @@ package git import ( - "fmt" "io" "net" + "strconv" - "github.com/go-git/go-git/v5/plumbing/format/pktline" + "github.com/go-git/go-git/v5/plumbing/protocol/packp" "github.com/go-git/go-git/v5/plumbing/transport" "github.com/go-git/go-git/v5/plumbing/transport/internal/common" "github.com/go-git/go-git/v5/utils/ioutil" @@ -41,10 +41,18 @@ type command struct { // Start executes the command sending the required message to the TCP connection func (c *command) Start() error { - cmd := endpointToCommand(c.command, c.endpoint) + req := packp.GitProtoRequest{ + RequestCommand: c.command, + Pathname: c.endpoint.Path, + } + host := c.endpoint.Host + if c.endpoint.Port != DefaultPort { + host = net.JoinHostPort(c.endpoint.Host, strconv.Itoa(c.endpoint.Port)) + } - e := pktline.NewEncoder(c.conn) - return e.Encode([]byte(cmd)) + req.Host = host + + return req.Encode(c.conn) } func (c *command) connect() error { @@ -69,7 +77,7 @@ func (c *command) getHostWithPort() string { port = DefaultPort } - return fmt.Sprintf("%s:%d", host, port) + return net.JoinHostPort(host, strconv.Itoa(port)) } // StderrPipe git protocol doesn't have any dedicated error channel @@ -77,27 +85,18 @@ func (c *command) StderrPipe() (io.Reader, error) { return nil, nil } -// StdinPipe return the underlying connection as WriteCloser, wrapped to prevent +// StdinPipe returns 
the underlying connection as WriteCloser, wrapped to prevent // call to the Close function from the connection, a command execution in git // protocol can't be closed or killed func (c *command) StdinPipe() (io.WriteCloser, error) { return ioutil.WriteNopCloser(c.conn), nil } -// StdoutPipe return the underlying connection as Reader +// StdoutPipe returns the underlying connection as Reader func (c *command) StdoutPipe() (io.Reader, error) { return c.conn, nil } -func endpointToCommand(cmd string, ep *transport.Endpoint) string { - host := ep.Host - if ep.Port != DefaultPort { - host = fmt.Sprintf("%s:%d", ep.Host, ep.Port) - } - - return fmt.Sprintf("%s %s%chost=%s%c", cmd, ep.Path, 0, host, 0) -} - // Close closes the TCP connection and connection. func (c *command) Close() error { if !c.connected { diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/BUILD.bazel b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/BUILD.bazel index 8e0576c4..33160020 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/BUILD.bazel +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/BUILD.bazel @@ -5,6 +5,7 @@ go_library( srcs = [ "common.go", "receive_pack.go", + "transport.go", "upload_pack.go", ], importmap = "peridot.resf.org/vendor/github.com/go-git/go-git/v5/plumbing/transport/http", @@ -19,5 +20,6 @@ go_library( "//vendor/github.com/go-git/go-git/v5/plumbing/transport", "//vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common", "//vendor/github.com/go-git/go-git/v5/utils/ioutil", + "//vendor/github.com/golang/groupcache/lru", ], ) diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go index aeedc5bb..1c4ceee6 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go @@ -3,16 +3,23 @@ package http import ( "bytes" + "context" + "crypto/tls" + "crypto/x509" "fmt" "net" "net/http" + "net/url" + "reflect" "strconv" "strings" + "sync" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/protocol/packp" "github.com/go-git/go-git/v5/plumbing/transport" "github.com/go-git/go-git/v5/utils/ioutil" + "github.com/golang/groupcache/lru" ) // it requires a bytes.Buffer, because we need to know the length @@ -32,7 +39,7 @@ func applyHeadersToRequest(req *http.Request, content *bytes.Buffer, host string const infoRefsPath = "/info/refs" -func advertisedReferences(s *session, serviceName string) (ref *packp.AdvRefs, err error) { +func advertisedReferences(ctx context.Context, s *session, serviceName string) (ref *packp.AdvRefs, err error) { url := fmt.Sprintf( "%s%s?service=%s", s.endpoint.String(), infoRefsPath, serviceName, @@ -45,7 +52,7 @@ func advertisedReferences(s *session, serviceName string) (ref *packp.AdvRefs, e s.ApplyAuthToRequest(req) applyHeadersToRequest(req, nil, s.endpoint.Host, serviceName) - res, err := s.client.Do(req) + res, err := s.client.Do(req.WithContext(ctx)) if err != nil { return nil, err } @@ -66,6 +73,17 @@ func advertisedReferences(s *session, serviceName string) (ref *packp.AdvRefs, e return nil, err } + // Git 2.41+ returns a zero-id plus capabilities when an empty + // repository is being cloned. This skips the existing logic within + // advrefs_decode.decodeFirstHash, which expects a flush-pkt instead. + // + // This logic aligns with plumbing/transport/internal/common/common.go. 
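// A minimal sketch (not part of the vendored patch) of the context-aware
// session API this change wires through the HTTP transport; the repository
// URL is a placeholder:
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/transport"
	githttp "github.com/go-git/go-git/v5/plumbing/transport/http"
)

func main() {
	ep, err := transport.NewEndpoint("https://github.com/org/repo.git")
	if err != nil {
		panic(err)
	}
	sess, err := githttp.DefaultClient.NewUploadPackSession(ep, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	refs, err := sess.AdvertisedReferencesContext(context.Background())
	if errors.Is(err, transport.ErrEmptyRemoteRepository) {
		fmt.Println("remote exists but has no refs yet (e.g. Git 2.41+ zero-id)")
		return
	}
	if err != nil {
		panic(err)
	}
	fmt.Println(len(refs.References), "refs advertised")
}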
+ if ar.IsEmpty() && + // Empty repositories are valid for git-receive-pack. + transport.ReceivePackServiceName != serviceName { + return nil, transport.ErrEmptyRemoteRepository + } + transport.FilterUnsupportedCapabilities(ar.Capabilities) s.advRefs = ar @@ -73,40 +91,83 @@ func advertisedReferences(s *session, serviceName string) (ref *packp.AdvRefs, e } type client struct { - c *http.Client + client *http.Client + transports *lru.Cache + mutex sync.RWMutex } -// DefaultClient is the default HTTP client, which uses `http.DefaultClient`. -var DefaultClient = NewClient(nil) +// ClientOptions holds user configurable options for the client. +type ClientOptions struct { + // CacheMaxEntries is the max no. of entries that the transport objects + // cache will hold at any given point of time. It must be a positive integer. + // Calling `client.addTransport()` after the cache has reached the specified + // size, will result in the least recently used transport getting deleted + // before the provided transport is added to the cache. + CacheMaxEntries int +} + +var ( + // defaultTransportCacheSize is the default capacity of the transport objects cache. + // Its value is 0 because transport caching is turned off by default and is an + // opt-in feature. + defaultTransportCacheSize = 0 + + // DefaultClient is the default HTTP client, which uses a net/http client configured + // with http.DefaultTransport. + DefaultClient = NewClient(nil) +) // NewClient creates a new client with a custom net/http client. // See `InstallProtocol` to install and override default http client. -// Unless a properly initialized client is given, it will fall back into -// `http.DefaultClient`. +// If the net/http client is nil or empty, it will use a net/http client configured +// with http.DefaultTransport. // // Note that for HTTP client cannot distinguish between private repositories and // unexistent repositories on GitHub. So it returns `ErrAuthorizationRequired` // for both. func NewClient(c *http.Client) transport.Transport { if c == nil { - return &client{http.DefaultClient} + c = &http.Client{ + Transport: http.DefaultTransport, + } + } + return NewClientWithOptions(c, &ClientOptions{ + CacheMaxEntries: defaultTransportCacheSize, + }) +} + +// NewClientWithOptions returns a new client configured with the provided net/http client +// and other custom options specific to the client. +// If the net/http client is nil or empty, it will use a net/http client configured +// with http.DefaultTransport. 
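// A minimal sketch (not part of the vendored patch): opting in to the
// transport cache and installing the client for https, so per-endpoint
// TLS/proxy transports are reused instead of rebuilt for every session. The
// cache size is an arbitrary example value.
package main

import (
	nethttp "net/http"

	"github.com/go-git/go-git/v5/plumbing/transport/client"
	githttp "github.com/go-git/go-git/v5/plumbing/transport/http"
)

func main() {
	cached := githttp.NewClientWithOptions(
		&nethttp.Client{Transport: nethttp.DefaultTransport},
		&githttp.ClientOptions{CacheMaxEntries: 8},
	)
	client.InstallProtocol("https", cached)
}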
+func NewClientWithOptions(c *http.Client, opts *ClientOptions) transport.Transport { + if c == nil { + c = &http.Client{ + Transport: http.DefaultTransport, + } + } + cl := &client{ + client: c, } - return &client{ - c: c, + if opts != nil { + if opts.CacheMaxEntries > 0 { + cl.transports = lru.New(opts.CacheMaxEntries) + } } + return cl } func (c *client) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) ( transport.UploadPackSession, error) { - return newUploadPackSession(c.c, ep, auth) + return newUploadPackSession(c, ep, auth) } func (c *client) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) ( transport.ReceivePackSession, error) { - return newReceivePackSession(c.c, ep, auth) + return newReceivePackSession(c, ep, auth) } type session struct { @@ -116,10 +177,106 @@ type session struct { advRefs *packp.AdvRefs } -func newSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (*session, error) { +func transportWithInsecureTLS(transport *http.Transport) { + if transport.TLSClientConfig == nil { + transport.TLSClientConfig = &tls.Config{} + } + transport.TLSClientConfig.InsecureSkipVerify = true +} + +func transportWithCABundle(transport *http.Transport, caBundle []byte) error { + rootCAs, err := x509.SystemCertPool() + if err != nil { + return err + } + if rootCAs == nil { + rootCAs = x509.NewCertPool() + } + rootCAs.AppendCertsFromPEM(caBundle) + if transport.TLSClientConfig == nil { + transport.TLSClientConfig = &tls.Config{} + } + transport.TLSClientConfig.RootCAs = rootCAs + return nil +} + +func transportWithProxy(transport *http.Transport, proxyURL *url.URL) { + transport.Proxy = http.ProxyURL(proxyURL) +} + +func configureTransport(transport *http.Transport, ep *transport.Endpoint) error { + if len(ep.CaBundle) > 0 { + if err := transportWithCABundle(transport, ep.CaBundle); err != nil { + return err + } + } + if ep.InsecureSkipTLS { + transportWithInsecureTLS(transport) + } + + if ep.Proxy.URL != "" { + proxyURL, err := ep.Proxy.FullURL() + if err != nil { + return err + } + transportWithProxy(transport, proxyURL) + } + return nil +} + +func newSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (*session, error) { + var httpClient *http.Client + + // We need to configure the http transport if there are transport specific + // options present in the endpoint. + if len(ep.CaBundle) > 0 || ep.InsecureSkipTLS || ep.Proxy.URL != "" { + var transport *http.Transport + // if the client wasn't configured to have a cache for transports then just configure + // the transport and use it directly, otherwise try to use the cache. 
+ if c.transports == nil { + tr, ok := c.client.Transport.(*http.Transport) + if !ok { + return nil, fmt.Errorf("expected underlying client transport to be of type: %s; got: %s", + reflect.TypeOf(transport), reflect.TypeOf(c.client.Transport)) + } + + transport = tr.Clone() + configureTransport(transport, ep) + } else { + transportOpts := transportOptions{ + caBundle: string(ep.CaBundle), + insecureSkipTLS: ep.InsecureSkipTLS, + } + if ep.Proxy.URL != "" { + proxyURL, err := ep.Proxy.FullURL() + if err != nil { + return nil, err + } + transportOpts.proxyURL = *proxyURL + } + var found bool + transport, found = c.fetchTransport(transportOpts) + + if !found { + transport = c.client.Transport.(*http.Transport).Clone() + configureTransport(transport, ep) + c.addTransport(transportOpts, transport) + } + } + + httpClient = &http.Client{ + Transport: transport, + CheckRedirect: c.client.CheckRedirect, + Jar: c.client.Jar, + Timeout: c.client.Timeout, + } + } else { + httpClient = c.client + } + s := &session{ auth: basicAuthFromEndpoint(ep), - client: c, + client: httpClient, endpoint: ep, } if auth != nil { @@ -249,14 +406,28 @@ func (a *TokenAuth) String() string { // Err is a dedicated error to return errors based on status code type Err struct { Response *http.Response + Reason string } -// NewErr returns a new Err based on a http response +// NewErr returns a new Err based on a http response and closes response body +// if needed func NewErr(r *http.Response) error { if r.StatusCode >= http.StatusOK && r.StatusCode < http.StatusMultipleChoices { return nil } + var reason string + + // If a response message is present, add it to error + var messageBuffer bytes.Buffer + if r.Body != nil { + messageLength, _ := messageBuffer.ReadFrom(r.Body) + if messageLength > 0 { + reason = messageBuffer.String() + } + _ = r.Body.Close() + } + switch r.StatusCode { case http.StatusUnauthorized: return transport.ErrAuthenticationRequired @@ -266,7 +437,7 @@ func NewErr(r *http.Response) error { return transport.ErrRepositoryNotFound } - return plumbing.NewUnexpectedError(&Err{r}) + return plumbing.NewUnexpectedError(&Err{r, reason}) } // StatusCode returns the status code of the response diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/receive_pack.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/receive_pack.go index 433dfcfd..3e736cd9 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/receive_pack.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/receive_pack.go @@ -19,13 +19,17 @@ type rpSession struct { *session } -func newReceivePackSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.ReceivePackSession, error) { +func newReceivePackSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.ReceivePackSession, error) { s, err := newSession(c, ep, auth) return &rpSession{s}, err } func (s *rpSession) AdvertisedReferences() (*packp.AdvRefs, error) { - return advertisedReferences(s.session, transport.ReceivePackServiceName) + return advertisedReferences(context.TODO(), s.session, transport.ReceivePackServiceName) +} + +func (s *rpSession) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) { + return advertisedReferences(ctx, s.session, transport.ReceivePackServiceName) } func (s *rpSession) ReceivePack(ctx context.Context, req *packp.ReferenceUpdateRequest) ( @@ -98,7 +102,6 @@ func (s *rpSession) doRequest( } if err := NewErr(res); err != nil { - _ = 
res.Body.Close() return nil, err } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/transport.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/transport.go new file mode 100644 index 00000000..c8db3892 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/transport.go @@ -0,0 +1,40 @@ +package http + +import ( + "net/http" + "net/url" +) + +// transportOptions contains transport specific configuration. +type transportOptions struct { + insecureSkipTLS bool + // []byte is not comparable. + caBundle string + proxyURL url.URL +} + +func (c *client) addTransport(opts transportOptions, transport *http.Transport) { + c.mutex.Lock() + c.transports.Add(opts, transport) + c.mutex.Unlock() +} + +func (c *client) removeTransport(opts transportOptions) { + c.mutex.Lock() + c.transports.Remove(opts) + c.mutex.Unlock() +} + +func (c *client) fetchTransport(opts transportOptions) (*http.Transport, bool) { + c.mutex.RLock() + t, ok := c.transports.Get(opts) + c.mutex.RUnlock() + if !ok { + return nil, false + } + transport, ok := t.(*http.Transport) + if !ok { + return nil, false + } + return transport, true +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/upload_pack.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/upload_pack.go index db370894..3432618a 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/upload_pack.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/upload_pack.go @@ -19,13 +19,17 @@ type upSession struct { *session } -func newUploadPackSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.UploadPackSession, error) { +func newUploadPackSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.UploadPackSession, error) { s, err := newSession(c, ep, auth) return &upSession{s}, err } func (s *upSession) AdvertisedReferences() (*packp.AdvRefs, error) { - return advertisedReferences(s.session, transport.UploadPackServiceName) + return advertisedReferences(context.TODO(), s.session, transport.UploadPackServiceName) +} + +func (s *upSession) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) { + return advertisedReferences(ctx, s.session, transport.UploadPackServiceName) } func (s *upSession) UploadPack( @@ -96,7 +100,6 @@ func (s *upSession) doRequest( } if err := NewErr(res); err != nil { - _ = res.Body.Close() return nil, err } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/BUILD.bazel b/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/BUILD.bazel index 9506f6b8..64c0a112 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/BUILD.bazel +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/BUILD.bazel @@ -4,6 +4,7 @@ go_library( name = "common", srcs = [ "common.go", + "mocks.go", "server.go", ], importmap = "peridot.resf.org/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common", diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go index 89432e34..9e1d0235 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go @@ -11,7 +11,7 @@ import ( "errors" "fmt" "io" - stdioutil "io/ioutil" + "regexp" "strings" "time" @@ 
-29,6 +29,10 @@ const ( var ( ErrTimeoutExceeded = errors.New("timeout exceeded") + // stdErrSkipPattern is used for skipping lines from a command's stderr output. + // Any line matching this pattern will be skipped from further + // processing and not be returned to calling code. + stdErrSkipPattern = regexp.MustCompile("^remote:( =*){0,1}$") ) // Commander creates Command instances. This is the main entry point for @@ -150,26 +154,37 @@ func (c *client) listenFirstError(r io.Reader) chan string { errLine := make(chan string, 1) go func() { s := bufio.NewScanner(r) - if s.Scan() { - errLine <- s.Text() - } else { - close(errLine) + for { + if s.Scan() { + line := s.Text() + if !stdErrSkipPattern.MatchString(line) { + errLine <- line + break + } + } else { + close(errLine) + break + } } - _, _ = io.Copy(stdioutil.Discard, r) + _, _ = io.Copy(io.Discard, r) }() return errLine } -// AdvertisedReferences retrieves the advertised references from the server. func (s *session) AdvertisedReferences() (*packp.AdvRefs, error) { + return s.AdvertisedReferencesContext(context.TODO()) +} + +// AdvertisedReferences retrieves the advertised references from the server. +func (s *session) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) { if s.advRefs != nil { return s.advRefs, nil } ar := packp.NewAdvRefs() - if err := ar.Decode(s.Stdout); err != nil { + if err := ar.Decode(s.StdoutContext(ctx)); err != nil { if err := s.handleAdvRefDecodeError(err); err != nil { return nil, err } @@ -188,9 +203,22 @@ func (s *session) AdvertisedReferences() (*packp.AdvRefs, error) { } func (s *session) handleAdvRefDecodeError(err error) error { + var errLine *pktline.ErrorLine + if errors.As(err, &errLine) { + if isRepoNotFoundError(errLine.Text) { + return transport.ErrRepositoryNotFound + } + + return errLine + } + // If repository is not found, we get empty stdout and server writes an // error to stderr. - if err == packp.ErrEmptyInput { + if errors.Is(err, packp.ErrEmptyInput) { + // TODO:(v6): handle this error in a better way. + // Instead of checking the stderr output for a specific error message, + // define an ExitError and embed the stderr output and exit (if one + // exists) in the error struct. Just like exec.ExitError. s.finished = true if err := s.checkNotFoundError(); err != nil { return err @@ -230,6 +258,12 @@ func (s *session) handleAdvRefDecodeError(err error) error { // returned with the packfile content. The reader must be closed after reading. func (s *session) UploadPack(ctx context.Context, req *packp.UploadPackRequest) (*packp.UploadPackResponse, error) { if req.IsEmpty() { + // XXX: IsEmpty means haves are a subset of wants, in that case we have + // everything we asked for. Close the connection and return nil. 
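// Illustrative sketch (editor's note, not part of the upstream patch): the
// stdErrSkipPattern added at the top of this file drops the decorative
// "remote:" filler lines some servers emit, while substantive stderr lines
// still reach listenFirstError's channel. Given that regexp:
//
//	stdErrSkipPattern.MatchString("remote:")               // true,  skipped
//	stdErrSkipPattern.MatchString("remote: ==========")    // true,  skipped
//	stdErrSkipPattern.MatchString("remote: access denied") // false, surfaced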
+ if err := s.finish(); err != nil { + return nil, err + } + // TODO:(v6) return nil here return nil, transport.ErrEmptyUploadPackRequest } @@ -237,7 +271,7 @@ func (s *session) UploadPack(ctx context.Context, req *packp.UploadPackRequest) return nil, err } - if _, err := s.AdvertisedReferences(); err != nil { + if _, err := s.AdvertisedReferencesContext(ctx); err != nil { return nil, err } @@ -370,7 +404,7 @@ func (s *session) checkNotFoundError() error { case <-t.C: return ErrTimeoutExceeded case line, ok := <-s.firstErrLine: - if !ok { + if !ok || len(line) == 0 { return nil } @@ -378,59 +412,43 @@ func (s *session) checkNotFoundError() error { return transport.ErrRepositoryNotFound } + // TODO:(v6): return server error just as it is without a prefix return fmt.Errorf("unknown error: %s", line) } } -var ( - githubRepoNotFoundErr = "ERROR: Repository not found." - bitbucketRepoNotFoundErr = "conq: repository does not exist." +const ( + githubRepoNotFoundErr = "Repository not found." + bitbucketRepoNotFoundErr = "repository does not exist." localRepoNotFoundErr = "does not appear to be a git repository" - gitProtocolNotFoundErr = "ERR \n Repository not found." - gitProtocolNoSuchErr = "ERR no such repository" - gitProtocolAccessDeniedErr = "ERR access denied" - gogsAccessDeniedErr = "Gogs: Repository does not exist or you do not have access" + gitProtocolNotFoundErr = "Repository not found." + gitProtocolNoSuchErr = "no such repository" + gitProtocolAccessDeniedErr = "access denied" + gogsAccessDeniedErr = "Repository does not exist or you do not have access" + gitlabRepoNotFoundErr = "The project you were looking for could not be found" ) func isRepoNotFoundError(s string) bool { - if strings.HasPrefix(s, githubRepoNotFoundErr) { - return true - } - - if strings.HasPrefix(s, bitbucketRepoNotFoundErr) { - return true - } - - if strings.HasSuffix(s, localRepoNotFoundErr) { - return true - } - - if strings.HasPrefix(s, gitProtocolNotFoundErr) { - return true - } - - if strings.HasPrefix(s, gitProtocolNoSuchErr) { - return true - } - - if strings.HasPrefix(s, gitProtocolAccessDeniedErr) { - return true - } - - if strings.HasPrefix(s, gogsAccessDeniedErr) { - return true + for _, err := range []string{ + githubRepoNotFoundErr, + bitbucketRepoNotFoundErr, + localRepoNotFoundErr, + gitProtocolNotFoundErr, + gitProtocolNoSuchErr, + gitProtocolAccessDeniedErr, + gogsAccessDeniedErr, + gitlabRepoNotFoundErr, + } { + if strings.Contains(s, err) { + return true + } } return false } -var ( - nak = []byte("NAK") - eol = []byte("\n") -) - // uploadPack implements the git-upload-pack protocol. 
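// Illustrative sketch (editor's note, not part of the upstream patch): after
// the isRepoNotFoundError rewrite above, provider detection is a substring
// match rather than a prefix/suffix match, so decorated messages still map to
// transport.ErrRepositoryNotFound:
//
//	isRepoNotFoundError("ERROR: Repository not found.")     // true (GitHub)
//	isRepoNotFoundError("conq: repository does not exist.") // true (Bitbucket)
//	isRepoNotFoundError("fatal: unrelated failure")         // false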
-func uploadPack(w io.WriteCloser, r io.Reader, req *packp.UploadPackRequest) error { +func uploadPack(w io.WriteCloser, _ io.Reader, req *packp.UploadPackRequest) error { // TODO support multi_ack mode // TODO support multi_ack_detailed mode // TODO support acks for common objects diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/mocks.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/mocks.go new file mode 100644 index 00000000..bc18b27e --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/mocks.go @@ -0,0 +1,46 @@ +package common + +import ( + "bytes" + "io" + + gogitioutil "github.com/go-git/go-git/v5/utils/ioutil" + + "github.com/go-git/go-git/v5/plumbing/transport" +) + +type MockCommand struct { + stdin bytes.Buffer + stdout bytes.Buffer + stderr bytes.Buffer +} + +func (c MockCommand) StderrPipe() (io.Reader, error) { + return &c.stderr, nil +} + +func (c MockCommand) StdinPipe() (io.WriteCloser, error) { + return gogitioutil.WriteNopCloser(&c.stdin), nil +} + +func (c MockCommand) StdoutPipe() (io.Reader, error) { + return &c.stdout, nil +} + +func (c MockCommand) Start() error { + return nil +} + +func (c MockCommand) Close() error { + panic("not implemented") +} + +type MockCommander struct { + stderr string +} + +func (c MockCommander) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (Command, error) { + return &MockCommand{ + stderr: *bytes.NewBufferString(c.stderr), + }, nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/server.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/server.go index 727f9021..cf5d6f43 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/server.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/server.go @@ -108,6 +108,10 @@ type upSession struct { } func (s *upSession) AdvertisedReferences() (*packp.AdvRefs, error) { + return s.AdvertisedReferencesContext(context.TODO()) +} + +func (s *upSession) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) { ar := packp.NewAdvRefs() if err := s.setSupportedCapabilities(ar.Capabilities); err != nil { @@ -185,7 +189,7 @@ func (s *upSession) objectsToUpload(req *packp.UploadPackRequest) ([]plumbing.Ha } func (*upSession) setSupportedCapabilities(c *capability.List) error { - if err := c.Set(capability.Agent, capability.DefaultAgent); err != nil { + if err := c.Set(capability.Agent, capability.DefaultAgent()); err != nil { return err } @@ -204,6 +208,10 @@ type rpSession struct { } func (s *rpSession) AdvertisedReferences() (*packp.AdvRefs, error) { + return s.AdvertisedReferencesContext(context.TODO()) +} + +func (s *rpSession) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) { ar := packp.NewAdvRefs() if err := s.setSupportedCapabilities(ar.Capabilities); err != nil { @@ -347,7 +355,7 @@ func (s *rpSession) reportStatus() *packp.ReportStatus { } func (*rpSession) setSupportedCapabilities(c *capability.List) error { - if err := c.Set(capability.Agent, capability.DefaultAgent); err != nil { + if err := c.Set(capability.Agent, capability.DefaultAgent()); err != nil { return err } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/BUILD.bazel b/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/BUILD.bazel index cf777302..cfa265bc 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/BUILD.bazel +++ 
b/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/BUILD.bazel @@ -13,10 +13,9 @@ go_library( "//vendor/github.com/go-git/go-git/v5/plumbing/transport", "//vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common", "//vendor/github.com/kevinburke/ssh_config", - "//vendor/github.com/mitchellh/go-homedir", + "//vendor/github.com/skeema/knownhosts", "//vendor/github.com/xanzy/ssh-agent", "//vendor/golang.org/x/crypto/ssh", - "//vendor/golang.org/x/crypto/ssh/knownhosts", "@org_golang_x_net//proxy", ], ) diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/auth_method.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/auth_method.go index b79a74e4..ac4e3583 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/auth_method.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/auth_method.go @@ -1,21 +1,17 @@ package ssh import ( - "crypto/x509" - "encoding/pem" "errors" "fmt" - "io/ioutil" "os" "os/user" "path/filepath" "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/mitchellh/go-homedir" - "github.com/xanzy/ssh-agent" + "github.com/skeema/knownhosts" + sshagent "github.com/xanzy/ssh-agent" "golang.org/x/crypto/ssh" - "golang.org/x/crypto/ssh/knownhosts" ) const DefaultUsername = "git" @@ -121,27 +117,15 @@ type PublicKeys struct { // NewPublicKeys returns a PublicKeys from a PEM encoded private key. An // encryption password should be given if the pemBytes contains a password // encrypted PEM block otherwise password should be empty. It supports RSA -// (PKCS#1), DSA (OpenSSL), and ECDSA private keys. +// (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys. func NewPublicKeys(user string, pemBytes []byte, password string) (*PublicKeys, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("invalid PEM data") - } - if x509.IsEncryptedPEMBlock(block) { - key, err := x509.DecryptPEMBlock(block, []byte(password)) - if err != nil { - return nil, err - } - - block = &pem.Block{Type: block.Type, Bytes: key} - pemBytes = pem.EncodeToMemory(block) - } - signer, err := ssh.ParsePrivateKey(pemBytes) + if _, ok := err.(*ssh.PassphraseMissingError); ok { + signer, err = ssh.ParsePrivateKeyWithPassphrase(pemBytes, []byte(password)) + } if err != nil { return nil, err } - return &PublicKeys{User: user, Signer: signer}, nil } @@ -149,7 +133,7 @@ func NewPublicKeys(user string, pemBytes []byte, password string) (*PublicKeys, // encoded private key. An encryption password should be given if the pemBytes // contains a password encrypted PEM block otherwise password should be empty. func NewPublicKeysFromFile(user, pemFile, password string) (*PublicKeys, error) { - bytes, err := ioutil.ReadFile(pemFile) + bytes, err := os.ReadFile(pemFile) if err != nil { return nil, err } @@ -238,12 +222,19 @@ func (a *PublicKeysCallback) ClientConfig() (*ssh.ClientConfig, error) { // // If list of files is empty, then it will be read from the SSH_KNOWN_HOSTS // environment variable, example: -// /home/foo/custom_known_hosts_file:/etc/custom_known/hosts_file +// +// /home/foo/custom_known_hosts_file:/etc/custom_known/hosts_file // // If SSH_KNOWN_HOSTS is not set the following file locations will be used: -// ~/.ssh/known_hosts -// /etc/ssh/ssh_known_hosts +// +// ~/.ssh/known_hosts +// /etc/ssh/ssh_known_hosts func NewKnownHostsCallback(files ...string) (ssh.HostKeyCallback, error) { + kh, err := newKnownHosts(files...) 
+ return ssh.HostKeyCallback(kh), err +} + +func newKnownHosts(files ...string) (knownhosts.HostKeyCallback, error) { var err error if len(files) == 0 { @@ -265,7 +256,7 @@ func getDefaultKnownHostsFiles() ([]string, error) { return files, nil } - homeDirPath, err := homedir.Dir() + homeDirPath, err := os.UserHomeDir() if err != nil { return nil, err } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go index c05ded98..05dea448 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go @@ -4,11 +4,14 @@ package ssh import ( "context" "fmt" + "net" "reflect" "strconv" + "strings" "github.com/go-git/go-git/v5/plumbing/transport" "github.com/go-git/go-git/v5/plumbing/transport/internal/common" + "github.com/skeema/knownhosts" "github.com/kevinburke/ssh_config" "golang.org/x/crypto/ssh" @@ -46,7 +49,9 @@ type runner struct { func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (common.Command, error) { c := &command{command: cmd, endpoint: ep, config: r.config} if auth != nil { - c.setAuth(auth) + if err := c.setAuth(auth); err != nil { + return nil, err + } } if err := c.connect(); err != nil { @@ -90,8 +95,14 @@ func (c *command) Close() error { //XXX: If did read the full packfile, then the session might be already // closed. _ = c.Session.Close() + err := c.client.Close() - return c.client.Close() + //XXX: in go1.16+ we can use errors.Is(err, net.ErrClosed) + if err != nil && strings.HasSuffix(err.Error(), "use of closed network connection") { + return nil + } + + return err } // connect connects to the SSH server, unless a AuthMethod was set with @@ -114,10 +125,24 @@ func (c *command) connect() error { if err != nil { return err } + hostWithPort := c.getHostWithPort() + if config.HostKeyCallback == nil { + kh, err := newKnownHosts() + if err != nil { + return err + } + config.HostKeyCallback = kh.HostKeyCallback() + config.HostKeyAlgorithms = kh.HostKeyAlgorithms(hostWithPort) + } else if len(config.HostKeyAlgorithms) == 0 { + // Set the HostKeyAlgorithms based on HostKeyCallback. + // For background see https://github.com/go-git/go-git/issues/411 as well as + // https://github.com/golang/go/issues/29286 for root cause. + config.HostKeyAlgorithms = knownhosts.HostKeyAlgorithms(config.HostKeyCallback, hostWithPort) + } overrideConfig(c.config, config) - c.client, err = dial("tcp", c.getHostWithPort(), config) + c.client, err = dial("tcp", hostWithPort, c.endpoint.Proxy, config) if err != nil { return err } @@ -132,7 +157,7 @@ func (c *command) connect() error { return nil } -func dial(network, addr string, config *ssh.ClientConfig) (*ssh.Client, error) { +func dial(network, addr string, proxyOpts transport.ProxyOptions, config *ssh.ClientConfig) (*ssh.Client, error) { var ( ctx = context.Background() cancel context.CancelFunc @@ -144,10 +169,33 @@ func dial(network, addr string, config *ssh.ClientConfig) (*ssh.Client, error) { } defer cancel() - conn, err := proxy.Dial(ctx, network, addr) - if err != nil { - return nil, err + var conn net.Conn + var dialErr error + + if proxyOpts.URL != "" { + proxyUrl, err := proxyOpts.FullURL() + if err != nil { + return nil, err + } + dialer, err := proxy.FromURL(proxyUrl, proxy.Direct) + if err != nil { + return nil, err + } + + // Try to use a ContextDialer, but fall back to a Dialer if that goes south. 
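// Illustrative sketch (editor's note, not part of the upstream patch): with
// proxy support plumbed through dial, an SSH remote can now be reached via a
// SOCKS5 proxy carried on the endpoint. A minimal caller-side example,
// assuming go-git's public CloneOptions and transport.ProxyOptions (URLs are
// placeholders):
//
//	_, err := git.PlainClone("/tmp/repo", false, &git.CloneOptions{
//		URL: "ssh://git@example.com/org/repo.git",
//		ProxyOptions: transport.ProxyOptions{
//			URL: "socks5://127.0.0.1:1080",
//		},
//	})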
+ ctxDialer, ok := dialer.(proxy.ContextDialer) + if !ok { + return nil, fmt.Errorf("expected ssh proxy dialer to be of type %s; got %s", + reflect.TypeOf(ctxDialer), reflect.TypeOf(dialer)) + } + conn, dialErr = ctxDialer.DialContext(ctx, "tcp", addr) + } else { + conn, dialErr = proxy.Dial(ctx, network, addr) } + if dialErr != nil { + return nil, dialErr + } + c, chans, reqs, err := ssh.NewClientConn(conn, addr, config) if err != nil { return nil, err @@ -166,7 +214,7 @@ func (c *command) getHostWithPort() string { port = DefaultPort } - return fmt.Sprintf("%s:%d", host, port) + return net.JoinHostPort(host, strconv.Itoa(port)) } func (c *command) doGetHostWithPortFromSSHConfig() (addr string, found bool) { @@ -194,7 +242,7 @@ func (c *command) doGetHostWithPortFromSSHConfig() (addr string, found bool) { } } - addr = fmt.Sprintf("%s:%d", host, port) + addr = net.JoinHostPort(host, strconv.Itoa(port)) return } diff --git a/vendor/github.com/go-git/go-git/v5/prune.go b/vendor/github.com/go-git/go-git/v5/prune.go index cc5907a1..8e35b994 100644 --- a/vendor/github.com/go-git/go-git/v5/prune.go +++ b/vendor/github.com/go-git/go-git/v5/prune.go @@ -17,7 +17,7 @@ type PruneOptions struct { Handler PruneHandler } -var ErrLooseObjectsNotSupported = errors.New("Loose objects not supported") +var ErrLooseObjectsNotSupported = errors.New("loose objects not supported") // DeleteObject deletes an object from a repository. // The type conveniently matches PruneHandler. diff --git a/vendor/github.com/go-git/go-git/v5/references.go b/vendor/github.com/go-git/go-git/v5/references.go deleted file mode 100644 index 6d96035a..00000000 --- a/vendor/github.com/go-git/go-git/v5/references.go +++ /dev/null @@ -1,264 +0,0 @@ -package git - -import ( - "io" - "sort" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/utils/diff" - - "github.com/sergi/go-diff/diffmatchpatch" -) - -// References returns a slice of Commits for the file at "path", starting from -// the commit provided that contains the file from the provided path. The last -// commit into the returned slice is the commit where the file was created. -// If the provided commit does not contains the specified path, a nil slice is -// returned. The commits are sorted in commit order, newer to older. -// -// Caveats: -// -// - Moves and copies are not currently supported. -// -// - Cherry-picks are not detected unless there are no commits between them and -// therefore can appear repeated in the list. (see git path-id for hints on how -// to fix this). -func references(c *object.Commit, path string) ([]*object.Commit, error) { - var result []*object.Commit - seen := make(map[plumbing.Hash]struct{}) - if err := walkGraph(&result, &seen, c, path); err != nil { - return nil, err - } - - // TODO result should be returned without ordering - sortCommits(result) - - // for merges of identical cherry-picks - return removeComp(path, result, equivalent) -} - -type commitSorterer struct { - l []*object.Commit -} - -func (s commitSorterer) Len() int { - return len(s.l) -} - -func (s commitSorterer) Less(i, j int) bool { - return s.l[i].Committer.When.Before(s.l[j].Committer.When) || - s.l[i].Committer.When.Equal(s.l[j].Committer.When) && - s.l[i].Author.When.Before(s.l[j].Author.When) -} - -func (s commitSorterer) Swap(i, j int) { - s.l[i], s.l[j] = s.l[j], s.l[i] -} - -// SortCommits sorts a commit list by commit date, from older to newer. 
-func sortCommits(l []*object.Commit) { - s := &commitSorterer{l} - sort.Sort(s) -} - -// Recursive traversal of the commit graph, generating a linear history of the -// path. -func walkGraph(result *[]*object.Commit, seen *map[plumbing.Hash]struct{}, current *object.Commit, path string) error { - // check and update seen - if _, ok := (*seen)[current.Hash]; ok { - return nil - } - (*seen)[current.Hash] = struct{}{} - - // if the path is not in the current commit, stop searching. - if _, err := current.File(path); err != nil { - return nil - } - - // optimization: don't traverse branches that does not - // contain the path. - parents, err := parentsContainingPath(path, current) - if err != nil { - return err - } - switch len(parents) { - // if the path is not found in any of its parents, the path was - // created by this commit; we must add it to the revisions list and - // stop searching. This includes the case when current is the - // initial commit. - case 0: - *result = append(*result, current) - return nil - case 1: // only one parent contains the path - // if the file contents has change, add the current commit - different, err := differentContents(path, current, parents) - if err != nil { - return err - } - if len(different) == 1 { - *result = append(*result, current) - } - // in any case, walk the parent - return walkGraph(result, seen, parents[0], path) - default: // more than one parent contains the path - // TODO: detect merges that had a conflict, because they must be - // included in the result here. - for _, p := range parents { - err := walkGraph(result, seen, p, path) - if err != nil { - return err - } - } - } - return nil -} - -func parentsContainingPath(path string, c *object.Commit) ([]*object.Commit, error) { - // TODO: benchmark this method making git.object.Commit.parent public instead of using - // an iterator - var result []*object.Commit - iter := c.Parents() - for { - parent, err := iter.Next() - if err == io.EOF { - return result, nil - } - if err != nil { - return nil, err - } - if _, err := parent.File(path); err == nil { - result = append(result, parent) - } - } -} - -// Returns an slice of the commits in "cs" that has the file "path", but with different -// contents than what can be found in "c". -func differentContents(path string, c *object.Commit, cs []*object.Commit) ([]*object.Commit, error) { - result := make([]*object.Commit, 0, len(cs)) - h, found := blobHash(path, c) - if !found { - return nil, object.ErrFileNotFound - } - for _, cx := range cs { - if hx, found := blobHash(path, cx); found && h != hx { - result = append(result, cx) - } - } - return result, nil -} - -// blobHash returns the hash of a path in a commit -func blobHash(path string, commit *object.Commit) (hash plumbing.Hash, found bool) { - file, err := commit.File(path) - if err != nil { - var empty plumbing.Hash - return empty, found - } - return file.Hash, true -} - -type contentsComparatorFn func(path string, a, b *object.Commit) (bool, error) - -// Returns a new slice of commits, with duplicates removed. Expects a -// sorted commit list. Duplication is defined according to "comp". It -// will always keep the first commit of a series of duplicated commits. 
-func removeComp(path string, cs []*object.Commit, comp contentsComparatorFn) ([]*object.Commit, error) { - result := make([]*object.Commit, 0, len(cs)) - if len(cs) == 0 { - return result, nil - } - result = append(result, cs[0]) - for i := 1; i < len(cs); i++ { - equals, err := comp(path, cs[i], cs[i-1]) - if err != nil { - return nil, err - } - if !equals { - result = append(result, cs[i]) - } - } - return result, nil -} - -// Equivalent commits are commits whose patch is the same. -func equivalent(path string, a, b *object.Commit) (bool, error) { - numParentsA := a.NumParents() - numParentsB := b.NumParents() - - // the first commit is not equivalent to anyone - // and "I think" merges can not be equivalent to anything - if numParentsA != 1 || numParentsB != 1 { - return false, nil - } - - diffsA, err := patch(a, path) - if err != nil { - return false, err - } - diffsB, err := patch(b, path) - if err != nil { - return false, err - } - - return sameDiffs(diffsA, diffsB), nil -} - -func patch(c *object.Commit, path string) ([]diffmatchpatch.Diff, error) { - // get contents of the file in the commit - file, err := c.File(path) - if err != nil { - return nil, err - } - content, err := file.Contents() - if err != nil { - return nil, err - } - - // get contents of the file in the first parent of the commit - var contentParent string - iter := c.Parents() - parent, err := iter.Next() - if err != nil { - return nil, err - } - file, err = parent.File(path) - if err != nil { - contentParent = "" - } else { - contentParent, err = file.Contents() - if err != nil { - return nil, err - } - } - - // compare the contents of parent and child - return diff.Do(content, contentParent), nil -} - -func sameDiffs(a, b []diffmatchpatch.Diff) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if !sameDiff(a[i], b[i]) { - return false - } - } - return true -} - -func sameDiff(a, b diffmatchpatch.Diff) bool { - if a.Type != b.Type { - return false - } - switch a.Type { - case 0: - return countLines(a.Text) == countLines(b.Text) - case 1, -1: - return a.Text == b.Text - default: - panic("unreachable") - } -} diff --git a/vendor/github.com/go-git/go-git/v5/remote.go b/vendor/github.com/go-git/go-git/v5/remote.go index d88e8e6f..7cc0db9b 100644 --- a/vendor/github.com/go-git/go-git/v5/remote.go +++ b/vendor/github.com/go-git/go-git/v5/remote.go @@ -5,9 +5,12 @@ import ( "errors" "fmt" "io" + "strings" + "time" "github.com/go-git/go-billy/v5/osfs" "github.com/go-git/go-git/v5/config" + "github.com/go-git/go-git/v5/internal/url" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/cache" "github.com/go-git/go-git/v5/plumbing/format/packfile" @@ -30,6 +33,7 @@ var ( ErrDeleteRefNotSupported = errors.New("server does not support delete-refs") ErrForceNeeded = errors.New("some refs were not updated") ErrExactSHA1NotSupported = errors.New("server does not support exact SHA1 refspec") + ErrEmptyUrls = errors.New("URLs cannot be empty") ) type NoMatchingRefSpecError struct { @@ -51,6 +55,9 @@ const ( // repo containing this remote, when not using the multi-ack // protocol. Setting this to 0 means there is no limit. maxHavesToVisitPerRef = 100 + + // peeledSuffix is the suffix used to build peeled reference names. + peeledSuffix = "^{}" ) // Remote represents a connection to a remote repository. @@ -91,7 +98,7 @@ func (r *Remote) Push(o *PushOptions) error { // the remote was already up-to-date. // // The provided Context must be non-nil. 
If the context expires before the -// operation is complete, an error is returned. The context only affects to the +// operation is complete, an error is returned. The context only affects the // transport operations. func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) { if err := o.Validate(); err != nil { @@ -102,14 +109,18 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) { return fmt.Errorf("remote names don't match: %s != %s", o.RemoteName, r.c.Name) } - s, err := newSendPackSession(r.c.URLs[0], o.Auth) + if o.RemoteURL == "" { + o.RemoteURL = r.c.URLs[0] + } + + s, err := newSendPackSession(o.RemoteURL, o.Auth, o.InsecureSkipTLS, o.CABundle, o.ProxyOptions) if err != nil { return err } defer ioutil.CheckClose(s, &err) - ar, err := s.AdvertisedReferences() + ar, err := s.AdvertisedReferencesContext(ctx) if err != nil { return err } @@ -119,6 +130,10 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) { return err } + if err := r.checkRequireRemoteRefs(o.RequireRemoteRefs, remoteRefs); err != nil { + return err + } + isDelete := false allDelete := true for _, rs := range o.RefSpecs { @@ -178,12 +193,12 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) { var hashesToPush []plumbing.Hash // Avoid the expensive revlist operation if we're only doing deletes. if !allDelete { - if r.c.IsFirstURLLocal() { + if url.IsLocalEndpoint(o.RemoteURL) { // If we're are pushing to a local repo, it might be much // faster to use a local storage layer to get the commits // to ignore, when calculating the object revlist. localStorer := filesystem.NewStorage( - osfs.New(r.c.URLs[0]), cache.NewObjectLRUDefault()) + osfs.New(o.RemoteURL), cache.NewObjectLRUDefault()) hashesToPush, err = revlist.ObjectsWithStorageForIgnores( r.s, localStorer, objects, haves) } else { @@ -209,17 +224,87 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) { return err } - if err = rs.Error(); err != nil { - return err + if rs != nil { + if err = rs.Error(); err != nil { + return err + } } - return r.updateRemoteReferenceStorage(req, rs) + return r.updateRemoteReferenceStorage(req) } func (r *Remote) useRefDeltas(ar *packp.AdvRefs) bool { return !ar.Capabilities.Supports(capability.OFSDelta) } +func (r *Remote) addReachableTags(localRefs []*plumbing.Reference, remoteRefs storer.ReferenceStorer, req *packp.ReferenceUpdateRequest) error { + tags := make(map[plumbing.Reference]struct{}) + // get a list of all tags locally + for _, ref := range localRefs { + if strings.HasPrefix(string(ref.Name()), "refs/tags") { + tags[*ref] = struct{}{} + } + } + + remoteRefIter, err := remoteRefs.IterReferences() + if err != nil { + return err + } + + // remove any that are already on the remote + if err := remoteRefIter.ForEach(func(reference *plumbing.Reference) error { + delete(tags, *reference) + return nil + }); err != nil { + return err + } + + for tag := range tags { + tagObject, err := object.GetObject(r.s, tag.Hash()) + var tagCommit *object.Commit + if err != nil { + return fmt.Errorf("get tag object: %w", err) + } + + if tagObject.Type() != plumbing.TagObject { + continue + } + + annotatedTag, ok := tagObject.(*object.Tag) + if !ok { + return errors.New("could not get annotated tag object") + } + + tagCommit, err = object.GetCommit(r.s, annotatedTag.Target) + if err != nil { + return fmt.Errorf("get annotated tag commit: %w", err) + } + + // only include tags that are reachable from one of 
the refs + // already being pushed + for _, cmd := range req.Commands { + if tag.Name() == cmd.Name { + continue + } + + if strings.HasPrefix(cmd.Name.String(), "refs/tags") { + continue + } + + c, err := object.GetCommit(r.s, cmd.New) + if err != nil { + return fmt.Errorf("get commit %v: %w", cmd.Name, err) + } + + if isAncestor, err := tagCommit.IsAncestor(c); err == nil && isAncestor { + req.Commands = append(req.Commands, &packp.Command{Name: tag.Name(), New: tag.Hash()}) + } + } + } + + return nil +} + func (r *Remote) newReferenceUpdateRequest( o *PushOptions, localRefs []*plumbing.Reference, @@ -237,16 +322,33 @@ func (r *Remote) newReferenceUpdateRequest( } } - if err := r.addReferencesToUpdate(o.RefSpecs, localRefs, remoteRefs, req, o.Prune); err != nil { + if ar.Capabilities.Supports(capability.PushOptions) { + _ = req.Capabilities.Set(capability.PushOptions) + for k, v := range o.Options { + req.Options = append(req.Options, &packp.Option{Key: k, Value: v}) + } + } + + if o.Atomic && ar.Capabilities.Supports(capability.Atomic) { + _ = req.Capabilities.Set(capability.Atomic) + } + + if err := r.addReferencesToUpdate(o.RefSpecs, localRefs, remoteRefs, req, o.Prune, o.ForceWithLease); err != nil { + return nil, err } + if o.FollowTags { + if err := r.addReachableTags(localRefs, remoteRefs, req); err != nil { + return nil, err + } + } + return req, nil } func (r *Remote) updateRemoteReferenceStorage( req *packp.ReferenceUpdateRequest, - result *packp.ReportStatus, ) error { for _, spec := range r.c.Fetch { @@ -280,7 +382,7 @@ func (r *Remote) updateRemoteReferenceStorage( // no changes to be fetched, or an error. // // The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the +// operation is complete, an error is returned. The context only affects the // transport operations. 
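// Illustrative sketch (editor's note, not part of the upstream patch): the
// PushOptions fields consumed by newReferenceUpdateRequest above map onto
// server capabilities: FollowTags triggers the addReachableTags walk, Atomic
// requests an all-or-nothing ref update, and Options are forwarded when the
// server advertises push-options. Assuming go-git's public API:
//
//	err := repo.Push(&git.PushOptions{
//		RemoteName: "origin",
//		FollowTags: true,
//		Atomic:     true,
//		Options:    map[string]string{"ci.skip": "true"}, // example key
//	})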
func (r *Remote) FetchContext(ctx context.Context, o *FetchOptions) error { _, err := r.fetch(ctx, o) @@ -309,14 +411,18 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen o.RefSpecs = r.c.Fetch } - s, err := newUploadPackSession(r.c.URLs[0], o.Auth) + if o.RemoteURL == "" { + o.RemoteURL = r.c.URLs[0] + } + + s, err := newUploadPackSession(o.RemoteURL, o.Auth, o.InsecureSkipTLS, o.CABundle, o.ProxyOptions) if err != nil { return nil, err } defer ioutil.CheckClose(s, &err) - ar, err := s.AdvertisedReferences() + ar, err := s.AdvertisedReferencesContext(ctx) if err != nil { return nil, err } @@ -340,14 +446,21 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen return nil, err } - refs, err := calculateRefs(o.RefSpecs, remoteRefs, o.Tags) + refs, specToRefs, err := calculateRefs(o.RefSpecs, remoteRefs, o.Tags) if err != nil { return nil, err } - req.Wants, err = getWants(r.s, refs) + if !req.Depth.IsZero() { + req.Shallows, err = r.s.Shallow() + if err != nil { + return nil, fmt.Errorf("existing checkout is not shallow") + } + } + + req.Wants, err = getWants(r.s, refs, o.Depth) if len(req.Wants) > 0 { - req.Haves, err = getHaves(localRefs, remoteRefs, r.s) + req.Haves, err = getHaves(localRefs, remoteRefs, r.s, o.Depth) if err != nil { return nil, err } @@ -357,20 +470,58 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen } } - updated, err := r.updateLocalReferenceStorage(o.RefSpecs, refs, remoteRefs, o.Tags, o.Force) + var updatedPrune bool + if o.Prune { + updatedPrune, err = r.pruneRemotes(o.RefSpecs, localRefs, remoteRefs) + if err != nil { + return nil, err + } + } + + updated, err := r.updateLocalReferenceStorage(o.RefSpecs, refs, remoteRefs, specToRefs, o.Tags, o.Force) if err != nil { return nil, err } if !updated { + updated, err = depthChanged(req.Shallows, r.s) + if err != nil { + return nil, fmt.Errorf("error checking depth change: %v", err) + } + } + + if !updated && !updatedPrune { return remoteRefs, NoErrAlreadyUpToDate } return remoteRefs, nil } -func newUploadPackSession(url string, auth transport.AuthMethod) (transport.UploadPackSession, error) { - c, ep, err := newClient(url) +func depthChanged(before []plumbing.Hash, s storage.Storer) (bool, error) { + after, err := s.Shallow() + if err != nil { + return false, err + } + + if len(before) != len(after) { + return true, nil + } + + bm := make(map[plumbing.Hash]bool, len(before)) + for _, b := range before { + bm[b] = true + } + for _, a := range after { + if _, ok := bm[a]; !ok { + return true, nil + } + } + + return false, nil +} + +func newUploadPackSession(url string, auth transport.AuthMethod, insecure bool, cabundle []byte, proxyOpts transport.ProxyOptions) (transport.UploadPackSession, error) { + c, ep, err := newClient(url, insecure, cabundle, proxyOpts) if err != nil { return nil, err } @@ -378,8 +529,8 @@ func newUploadPackSession(url string, auth transport.AuthMethod) (transport.Uplo return c.NewUploadPackSession(ep, auth) } -func newSendPackSession(url string, auth transport.AuthMethod) (transport.ReceivePackSession, error) { - c, ep, err := newClient(url) +func newSendPackSession(url string, auth transport.AuthMethod, insecure bool, cabundle []byte, proxyOpts transport.ProxyOptions) (transport.ReceivePackSession, error) { + c, ep, err := newClient(url, insecure, cabundle, proxyOpts) if err != nil { return nil, err } @@ -387,11 +538,14 @@ func newSendPackSession(url string, auth transport.AuthMethod) 
(transport.Receiv return c.NewReceivePackSession(ep, auth) } -func newClient(url string) (transport.Transport, *transport.Endpoint, error) { +func newClient(url string, insecure bool, cabundle []byte, proxyOpts transport.ProxyOptions) (transport.Transport, *transport.Endpoint, error) { ep, err := transport.NewEndpoint(url) if err != nil { return nil, nil, err } + ep.InsecureSkipTLS = insecure + ep.CaBundle = cabundle + ep.Proxy = proxyOpts c, err := client.NewClient(ep) if err != nil { @@ -406,6 +560,10 @@ func (r *Remote) fetchPack(ctx context.Context, o *FetchOptions, s transport.Upl reader, err := s.UploadPack(ctx, req) if err != nil { + if errors.Is(err, transport.ErrEmptyUploadPackRequest) { + // XXX: no packfile provided, everything is up-to-date. + return nil + } return err } @@ -424,12 +582,34 @@ func (r *Remote) fetchPack(ctx context.Context, o *FetchOptions, s transport.Upl return err } +func (r *Remote) pruneRemotes(specs []config.RefSpec, localRefs []*plumbing.Reference, remoteRefs memory.ReferenceStorage) (bool, error) { + var updatedPrune bool + for _, spec := range specs { + rev := spec.Reverse() + for _, ref := range localRefs { + if !rev.Match(ref.Name()) { + continue + } + _, err := remoteRefs.Reference(rev.Dst(ref.Name())) + if errors.Is(err, plumbing.ErrReferenceNotFound) { + updatedPrune = true + err := r.s.RemoveReference(ref.Name()) + if err != nil { + return false, err + } + } + } + } + return updatedPrune, nil +} + func (r *Remote) addReferencesToUpdate( refspecs []config.RefSpec, localRefs []*plumbing.Reference, remoteRefs storer.ReferenceStorer, req *packp.ReferenceUpdateRequest, prune bool, + forceWithLease *ForceWithLease, ) error { // This references dictionary will be used to search references by name. refsDict := make(map[string]*plumbing.Reference) @@ -443,7 +623,7 @@ func (r *Remote) addReferencesToUpdate( return err } } else { - err := r.addOrUpdateReferences(rs, localRefs, refsDict, remoteRefs, req) + err := r.addOrUpdateReferences(rs, localRefs, refsDict, remoteRefs, req, forceWithLease) if err != nil { return err } @@ -465,20 +645,25 @@ func (r *Remote) addOrUpdateReferences( refsDict map[string]*plumbing.Reference, remoteRefs storer.ReferenceStorer, req *packp.ReferenceUpdateRequest, + forceWithLease *ForceWithLease, ) error { - // If it is not a wilcard refspec we can directly search for the reference + // If it is not a wildcard refspec we can directly search for the reference // in the references dictionary. 
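// Illustrative sketch (editor's note, not part of the upstream patch): the
// depth and prune plumbing above surfaces through FetchOptions. A positive
// Depth sends the current shallow roots with the request, and Prune removes
// local refs that were deleted on the remote, which now counts as an update
// even when no new objects arrive:
//
//	err := repo.Fetch(&git.FetchOptions{
//		RemoteName: "origin",
//		Depth:      1,    // shallow fetch
//		Prune:      true, // drop refs deleted upstream
//	})
//	if errors.Is(err, git.NoErrAlreadyUpToDate) {
//		// nothing new and nothing pruned
//	}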
if !rs.IsWildcard() { ref, ok := refsDict[rs.Src()] if !ok { + commit, err := object.GetCommit(r.s, plumbing.NewHash(rs.Src())) + if err == nil { + return r.addCommit(rs, remoteRefs, commit.Hash, req) + } return nil } - return r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req) + return r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req, forceWithLease) } for _, ref := range localRefs { - err := r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req) + err := r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req, forceWithLease) if err != nil { return err } @@ -525,9 +710,46 @@ func (r *Remote) deleteReferences(rs config.RefSpec, }) } +func (r *Remote) addCommit(rs config.RefSpec, + remoteRefs storer.ReferenceStorer, localCommit plumbing.Hash, + req *packp.ReferenceUpdateRequest) error { + + if rs.IsWildcard() { + return errors.New("can't use wildcard together with hash refspecs") + } + + cmd := &packp.Command{ + Name: rs.Dst(""), + Old: plumbing.ZeroHash, + New: localCommit, + } + remoteRef, err := remoteRefs.Reference(cmd.Name) + if err == nil { + if remoteRef.Type() != plumbing.HashReference { + // TODO: check actual git behavior here + return nil + } + + cmd.Old = remoteRef.Hash() + } else if err != plumbing.ErrReferenceNotFound { + return err + } + if cmd.Old == cmd.New { + return nil + } + if !rs.IsForceUpdate() { + if err := checkFastForwardUpdate(r.s, remoteRefs, cmd); err != nil { + return err + } + } + + req.Commands = append(req.Commands, cmd) + return nil +} + func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec, remoteRefs storer.ReferenceStorer, localRef *plumbing.Reference, - req *packp.ReferenceUpdateRequest) error { + req *packp.ReferenceUpdateRequest, forceWithLease *ForceWithLease) error { if localRef.Type() != plumbing.HashReference { return nil @@ -546,7 +768,7 @@ func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec, remoteRef, err := remoteRefs.Reference(cmd.Name) if err == nil { if remoteRef.Type() != plumbing.HashReference { - //TODO: check actual git behavior here + // TODO: check actual git behavior here return nil } @@ -559,7 +781,11 @@ func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec, return nil } - if !rs.IsForceUpdate() { + if forceWithLease != nil { + if err = r.checkForceWithLease(localRef, cmd, forceWithLease); err != nil { + return err + } + } else if !rs.IsForceUpdate() { if err := checkFastForwardUpdate(r.s, remoteRefs, cmd); err != nil { return err } @@ -569,6 +795,31 @@ func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec, return nil } +func (r *Remote) checkForceWithLease(localRef *plumbing.Reference, cmd *packp.Command, forceWithLease *ForceWithLease) error { + remotePrefix := fmt.Sprintf("refs/remotes/%s/", r.Config().Name) + + ref, err := storer.ResolveReference( + r.s, + plumbing.ReferenceName(remotePrefix+strings.Replace(localRef.Name().String(), "refs/heads/", "", -1))) + if err != nil { + return err + } + + if forceWithLease.RefName.String() == "" || (forceWithLease.RefName == cmd.Name) { + expectedOID := ref.Hash() + + if !forceWithLease.Hash.IsZero() { + expectedOID = forceWithLease.Hash + } + + if cmd.Old != expectedOID { + return fmt.Errorf("non-fast-forward update: %s", cmd.Name.String()) + } + } + + return nil +} + func (r *Remote) references() ([]*plumbing.Reference, error) { var localRefs []*plumbing.Reference @@ -620,6 +871,7 @@ func getHavesFromRef( remoteRefs map[plumbing.Hash]bool, s storage.Storer, haves map[plumbing.Hash]bool, + depth int, ) error { h := ref.Hash() if 
haves[h] { @@ -645,7 +897,13 @@ func getHavesFromRef( // commits from the history of each ref. walker := object.NewCommitPreorderIter(commit, haves, nil) toVisit := maxHavesToVisitPerRef - return walker.ForEach(func(c *object.Commit) error { + // But only need up to the requested depth + if depth > 0 && depth < maxHavesToVisitPerRef { + toVisit = depth + } + // It is safe to ignore any error here as we are just trying to find the references that we already have + // An example of a legitimate failure is we have a shallow clone and don't have the previous commit(s) + _ = walker.ForEach(func(c *object.Commit) error { haves[c.Hash] = true toVisit-- // If toVisit starts out at 0 (indicating there is no @@ -656,12 +914,15 @@ func getHavesFromRef( } return nil }) + + return nil } func getHaves( localRefs []*plumbing.Reference, remoteRefStorer storer.ReferenceStorer, s storage.Storer, + depth int, ) ([]plumbing.Hash, error) { haves := map[plumbing.Hash]bool{} @@ -682,7 +943,7 @@ func getHaves( continue } - err = getHavesFromRef(ref, remoteRefs, s, haves) + err = getHavesFromRef(ref, remoteRefs, s, haves, depth) if err != nil { return nil, err } @@ -702,42 +963,41 @@ func calculateRefs( spec []config.RefSpec, remoteRefs storer.ReferenceStorer, tagMode TagMode, -) (memory.ReferenceStorage, error) { +) (memory.ReferenceStorage, [][]*plumbing.Reference, error) { if tagMode == AllTags { spec = append(spec, refspecAllTags) } refs := make(memory.ReferenceStorage) - for _, s := range spec { - if err := doCalculateRefs(s, remoteRefs, refs); err != nil { - return nil, err + // list of references matched for each spec + specToRefs := make([][]*plumbing.Reference, len(spec)) + for i := range spec { + var err error + specToRefs[i], err = doCalculateRefs(spec[i], remoteRefs, refs) + if err != nil { + return nil, nil, err } } - return refs, nil + return refs, specToRefs, nil } func doCalculateRefs( s config.RefSpec, remoteRefs storer.ReferenceStorer, refs memory.ReferenceStorage, -) error { - iter, err := remoteRefs.IterReferences() - if err != nil { - return err - } +) ([]*plumbing.Reference, error) { + var refList []*plumbing.Reference if s.IsExactSHA1() { ref := plumbing.NewHashReference(s.Dst(""), plumbing.NewHash(s.Src())) - return refs.SetReference(ref) + + refList = append(refList, ref) + return refList, refs.SetReference(ref) } var matched bool - err = iter.ForEach(func(ref *plumbing.Reference) error { - if !s.Match(ref.Name()) { - return nil - } - + onMatched := func(ref *plumbing.Reference) error { if ref.Type() == plumbing.SymbolicReference { target, err := storer.ResolveReference(remoteRefs, ref.Name()) if err != nil { @@ -752,25 +1012,49 @@ func doCalculateRefs( } matched = true - if err := refs.SetReference(ref); err != nil { - return err - } - - if !s.IsWildcard() { - return storer.ErrStop - } - - return nil - }) - - if !matched && !s.IsWildcard() { - return NoMatchingRefSpecError{refSpec: s} + refList = append(refList, ref) + return refs.SetReference(ref) } - return err + var ret error + if s.IsWildcard() { + iter, err := remoteRefs.IterReferences() + if err != nil { + return nil, err + } + ret = iter.ForEach(func(ref *plumbing.Reference) error { + if !s.Match(ref.Name()) { + return nil + } + + return onMatched(ref) + }) + } else { + var resolvedRef *plumbing.Reference + src := s.Src() + resolvedRef, ret = expand_ref(remoteRefs, plumbing.ReferenceName(src)) + if ret == nil { + ret = onMatched(resolvedRef) + } + } + + if !matched && !s.IsWildcard() { + return nil, NoMatchingRefSpecError{refSpec: 
s} + } + + return refList, ret } -func getWants(localStorer storage.Storer, refs memory.ReferenceStorage) ([]plumbing.Hash, error) { +func getWants(localStorer storage.Storer, refs memory.ReferenceStorage, depth int) ([]plumbing.Hash, error) { + // If depth is anything other than 1 and the repo has shallow commits then just because we have the commit + // at the reference doesn't mean that we don't still need to fetch the parents + shallow := false + if depth != 1 { + if s, _ := localStorer.Shallow(); len(s) > 0 { + shallow = true + } + } + wants := map[plumbing.Hash]bool{} for _, ref := range refs { hash := ref.Hash() @@ -779,7 +1063,7 @@ func getWants(localStorer storage.Storer, refs memory.ReferenceStorage) ([]plumb return nil, err } - if !exists { + if !exists || shallow { wants[hash] = true } } @@ -815,7 +1099,7 @@ func checkFastForwardUpdate(s storer.EncodedObjectStorer, remoteRefs storer.Refe return fmt.Errorf("non-fast-forward update: %s", cmd.Name.String()) } - ff, err := isFastForward(s, cmd.Old, cmd.New) + ff, err := isFastForward(s, cmd.Old, cmd.New, nil) if err != nil { return err } @@ -827,14 +1111,28 @@ func checkFastForwardUpdate(s storer.EncodedObjectStorer, remoteRefs storer.Refe return nil } -func isFastForward(s storer.EncodedObjectStorer, old, new plumbing.Hash) (bool, error) { +func isFastForward(s storer.EncodedObjectStorer, old, new plumbing.Hash, earliestShallow *plumbing.Hash) (bool, error) { c, err := object.GetCommit(s, new) if err != nil { return false, err } + parentsToIgnore := []plumbing.Hash{} + if earliestShallow != nil { + earliestCommit, err := object.GetCommit(s, *earliestShallow) + if err != nil { + return false, err + } + + parentsToIgnore = earliestCommit.ParentHashes + } + found := false - iter := object.NewCommitPreorderIter(c, nil, nil) + // stop iterating at the earliest shallow commit, ignoring its parents + // note: when pull depth is smaller than the number of new changes on the remote, this fails due to missing parents. + // as far as i can tell, without the commits in-between the shallow pull and the earliest shallow, there's no + // real way of telling whether it will be a fast-forward merge. + iter := object.NewCommitPreorderIter(c, nil, parentsToIgnore) err = iter.ForEach(func(c *object.Commit) error { if c.Hash != old { return nil @@ -922,34 +1220,35 @@ func buildSidebandIfSupported(l *capability.List, reader io.Reader, p sideband.P func (r *Remote) updateLocalReferenceStorage( specs []config.RefSpec, fetchedRefs, remoteRefs memory.ReferenceStorage, + specToRefs [][]*plumbing.Reference, tagMode TagMode, force bool, ) (updated bool, err error) { isWildcard := true forceNeeded := false - for _, spec := range specs { + for i, spec := range specs { if !spec.IsWildcard() { isWildcard = false } - for _, ref := range fetchedRefs { - if !spec.Match(ref.Name()) && !spec.IsExactSHA1() { - continue - } - + for _, ref := range specToRefs[i] { if ref.Type() != plumbing.HashReference { continue } localName := spec.Dst(ref.Name()) + // If localName doesn't start with "refs/" then treat as a branch. 
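// Illustrative sketch (editor's note, not part of the upstream patch): the
// checkForceWithLease logic above compares the remote-tracking ref (or an
// explicitly pinned Hash) against the server's advertised value, mirroring
// `git push --force-with-lease`. Assuming go-git's public API:
//
//	err := repo.Push(&git.PushOptions{
//		RemoteName: "origin",
//		ForceWithLease: &git.ForceWithLease{
//			RefName: "refs/heads/main", // empty RefName would cover all pushed refs
//		},
//	})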
+ if !strings.HasPrefix(localName.String(), "refs/") { + localName = plumbing.NewBranchReferenceName(localName.String()) + } old, _ := storer.ResolveReference(r.s, localName) new := plumbing.NewHashReference(localName, ref.Hash()) - // If the ref exists locally as a branch and force is not specified, - // only update if the new ref is an ancestor of the old - if old != nil && old.Name().IsBranch() && !force && !spec.IsForceUpdate() { - ff, err := isFastForward(r.s, old.Hash(), new.Hash()) + // If the ref exists locally as a non-tag and force is not + // specified, only update if the new ref is an ancestor of the old + if old != nil && !old.Name().IsTag() && !force && !spec.IsForceUpdate() { + ff, err := isFastForward(r.s, old.Hash(), new.Hash(), nil) if err != nil { return updated, err } @@ -1024,15 +1323,40 @@ func (r *Remote) buildFetchedTags(refs memory.ReferenceStorage) (updated bool, e } // List the references on the remote repository. +// The provided Context must be non-nil. If the context expires before the +// operation is complete, an error is returned. The context only affects to the +// transport operations. +func (r *Remote) ListContext(ctx context.Context, o *ListOptions) (rfs []*plumbing.Reference, err error) { + return r.list(ctx, o) +} + func (r *Remote) List(o *ListOptions) (rfs []*plumbing.Reference, err error) { - s, err := newUploadPackSession(r.c.URLs[0], o.Auth) + timeout := o.Timeout + // Default to the old hardcoded 10s value if a timeout is not explicitly set. + if timeout == 0 { + timeout = 10 + } + if timeout < 0 { + return nil, fmt.Errorf("invalid timeout: %d", timeout) + } + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeout)*time.Second) + defer cancel() + return r.ListContext(ctx, o) +} + +func (r *Remote) list(ctx context.Context, o *ListOptions) (rfs []*plumbing.Reference, err error) { + if r.c == nil || len(r.c.URLs) == 0 { + return nil, ErrEmptyUrls + } + + s, err := newUploadPackSession(r.c.URLs[0], o.Auth, o.InsecureSkipTLS, o.CABundle, o.ProxyOptions) if err != nil { return nil, err } defer ioutil.CheckClose(s, &err) - ar, err := s.AdvertisedReferences() + ar, err := s.AdvertisedReferencesContext(ctx) if err != nil { return nil, err } @@ -1048,13 +1372,22 @@ func (r *Remote) List(o *ListOptions) (rfs []*plumbing.Reference, err error) { } var resultRefs []*plumbing.Reference - err = refs.ForEach(func(ref *plumbing.Reference) error { - resultRefs = append(resultRefs, ref) - return nil - }) - if err != nil { - return nil, err + if o.PeelingOption == AppendPeeled || o.PeelingOption == IgnorePeeled { + err = refs.ForEach(func(ref *plumbing.Reference) error { + resultRefs = append(resultRefs, ref) + return nil + }) + if err != nil { + return nil, err + } } + + if o.PeelingOption == AppendPeeled || o.PeelingOption == OnlyPeeled { + for k, v := range ar.Peeled { + resultRefs = append(resultRefs, plumbing.NewReferenceFromStrings(k+"^{}", v.String())) + } + } + return resultRefs, nil } @@ -1100,7 +1433,6 @@ func pushHashes( useRefDeltas bool, allDelete bool, ) (*packp.ReportStatus, error) { - rd, wr := io.Pipe() config, err := s.Config() @@ -1164,3 +1496,33 @@ outer: return r.s.SetShallow(shallows) } + +func (r *Remote) checkRequireRemoteRefs(requires []config.RefSpec, remoteRefs storer.ReferenceStorer) error { + for _, require := range requires { + if require.IsWildcard() { + return fmt.Errorf("wildcards not supported in RequireRemoteRefs, got %s", require.String()) + } + + name := require.Dst("") + remote, err := 
remoteRefs.Reference(name) + if err != nil { + return fmt.Errorf("remote ref %s required to be %s but is absent", name.String(), require.Src()) + } + + var requireHash string + if require.IsExactSHA1() { + requireHash = require.Src() + } else { + target, err := storer.ResolveReference(remoteRefs, plumbing.ReferenceName(require.Src())) + if err != nil { + return fmt.Errorf("could not resolve ref %s in RequireRemoteRefs", require.Src()) + } + requireHash = target.Hash().String() + } + + if remote.Hash().String() != requireHash { + return fmt.Errorf("remote ref %s required to be %s but is %s", name.String(), requireHash, remote.Hash().String()) + } + } + return nil +} diff --git a/vendor/github.com/go-git/go-git/v5/repository.go b/vendor/github.com/go-git/go-git/v5/repository.go index 36fc1f5b..a57c7141 100644 --- a/vendor/github.com/go-git/go-git/v5/repository.go +++ b/vendor/github.com/go-git/go-git/v5/repository.go @@ -3,34 +3,37 @@ package git import ( "bytes" "context" + "crypto" "encoding/hex" "errors" "fmt" "io" - stdioutil "io/ioutil" "os" "path" "path/filepath" "strings" "time" - "github.com/go-git/go-git/v5/storage/filesystem/dotgit" - + "dario.cat/mergo" + "github.com/ProtonMail/go-crypto/openpgp" + "github.com/go-git/go-billy/v5" + "github.com/go-git/go-billy/v5/osfs" + "github.com/go-git/go-billy/v5/util" "github.com/go-git/go-git/v5/config" + "github.com/go-git/go-git/v5/internal/path_util" "github.com/go-git/go-git/v5/internal/revision" + "github.com/go-git/go-git/v5/internal/url" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/cache" + formatcfg "github.com/go-git/go-git/v5/plumbing/format/config" "github.com/go-git/go-git/v5/plumbing/format/packfile" + "github.com/go-git/go-git/v5/plumbing/hash" "github.com/go-git/go-git/v5/plumbing/object" "github.com/go-git/go-git/v5/plumbing/storer" "github.com/go-git/go-git/v5/storage" "github.com/go-git/go-git/v5/storage/filesystem" + "github.com/go-git/go-git/v5/storage/filesystem/dotgit" "github.com/go-git/go-git/v5/utils/ioutil" - "github.com/imdario/mergo" - "golang.org/x/crypto/openpgp" - - "github.com/go-git/go-billy/v5" - "github.com/go-git/go-billy/v5/osfs" ) // GitDirName this is a special folder where all the git stuff is. 
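The remote.go hunks above add a caller-controlled timeout surface (ListContext) and a push precondition check (checkRequireRemoteRefs). A minimal sketch of how these might be used, not part of the patch, assuming a hypothetical clone at /tmp/example-repo and a placeholder 40-hex hash:

package main

import (
	"context"
	"fmt"
	"time"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/config"
)

func main() {
	// Hypothetical existing clone.
	repo, err := git.PlainOpen("/tmp/example-repo")
	if err != nil {
		panic(err)
	}

	remote, err := repo.Remote("origin")
	if err != nil {
		panic(err)
	}

	// ListContext lets the caller bound the transport operation directly,
	// instead of relying on List's default 10-second timeout.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	refs, err := remote.ListContext(ctx, &git.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("advertised refs:", len(refs))

	// RequireRemoteRefs turns the push into a compare-and-swap: it fails if
	// refs/heads/main no longer points at the expected commit. The hash below
	// is a placeholder, not a real object.
	err = repo.Push(&git.PushOptions{
		RequireRemoteRefs: []config.RefSpec{
			"0123456789012345678901234567890123456789:refs/heads/main",
		},
	})
	if err != nil && err != git.NoErrAlreadyUpToDate {
		fmt.Println("push rejected:", err)
	}
}

Per checkRequireRemoteRefs above, the refspec's source side carries the expected hash (or a ref that resolves to it) and the destination side names the remote ref that must still point at it.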
@@ -48,17 +51,21 @@ var ( // ErrFetching is returned when the packfile could not be downloaded ErrFetching = errors.New("unable to fetch packfile") - ErrInvalidReference = errors.New("invalid reference, should be a tag or a branch") - ErrRepositoryNotExists = errors.New("repository does not exist") - ErrRepositoryIncomplete = errors.New("repository's commondir path does not exist") - ErrRepositoryAlreadyExists = errors.New("repository already exists") - ErrRemoteNotFound = errors.New("remote not found") - ErrRemoteExists = errors.New("remote already exists") - ErrAnonymousRemoteName = errors.New("anonymous remote name must be 'anonymous'") - ErrWorktreeNotProvided = errors.New("worktree should be provided") - ErrIsBareRepository = errors.New("worktree not available in a bare repository") - ErrUnableToResolveCommit = errors.New("unable to resolve commit") - ErrPackedObjectsNotSupported = errors.New("Packed objects not supported") + ErrInvalidReference = errors.New("invalid reference, should be a tag or a branch") + ErrRepositoryNotExists = errors.New("repository does not exist") + ErrRepositoryIncomplete = errors.New("repository's commondir path does not exist") + ErrRepositoryAlreadyExists = errors.New("repository already exists") + ErrRemoteNotFound = errors.New("remote not found") + ErrRemoteExists = errors.New("remote already exists") + ErrAnonymousRemoteName = errors.New("anonymous remote name must be 'anonymous'") + ErrWorktreeNotProvided = errors.New("worktree should be provided") + ErrIsBareRepository = errors.New("worktree not available in a bare repository") + ErrUnableToResolveCommit = errors.New("unable to resolve commit") + ErrPackedObjectsNotSupported = errors.New("packed objects not supported") + ErrSHA256NotSupported = errors.New("go-git was not compiled with SHA256 support") + ErrAlternatePathNotSupported = errors.New("alternate path must use the file scheme") + ErrUnsupportedMergeStrategy = errors.New("unsupported merge strategy") + ErrFastForwardMergeNotPossible = errors.New("not possible to fast-forward merge changes") ) // Repository represents a git repository @@ -69,14 +76,34 @@ type Repository struct { wt billy.Filesystem } +type InitOptions struct { + // The default branch (e.g. "refs/heads/master") + DefaultBranch plumbing.ReferenceName +} + // Init creates an empty git repository, based on the given Storer and worktree. // The worktree Filesystem is optional, if nil a bare repository is created. 
If // the given storer is not empty ErrRepositoryAlreadyExists is returned func Init(s storage.Storer, worktree billy.Filesystem) (*Repository, error) { + options := InitOptions{ + DefaultBranch: plumbing.Master, + } + return InitWithOptions(s, worktree, options) +} + +func InitWithOptions(s storage.Storer, worktree billy.Filesystem, options InitOptions) (*Repository, error) { if err := initStorer(s); err != nil { return nil, err } + if options.DefaultBranch == "" { + options.DefaultBranch = plumbing.Master + } + + if err := options.DefaultBranch.Validate(); err != nil { + return nil, err + } + r := newRepository(s, worktree) _, err := r.Reference(plumbing.HEAD, false) switch err { @@ -87,7 +114,7 @@ func Init(s storage.Storer, worktree billy.Filesystem) (*Repository, error) { return nil, err } - h := plumbing.NewSymbolicReference(plumbing.HEAD, plumbing.Master) + h := plumbing.NewSymbolicReference(plumbing.HEAD, options.DefaultBranch) if err := s.SetReference(h); err != nil { return nil, err } @@ -190,10 +217,6 @@ func Open(s storage.Storer, worktree billy.Filesystem) (*Repository, error) { // Clone a repository into the given Storer and worktree Filesystem with the // given options, if worktree is nil a bare repository is created. If the given // storer is not empty ErrRepositoryAlreadyExists is returned. -// -// The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the -// transport operations. func Clone(s storage.Storer, worktree billy.Filesystem, o *CloneOptions) (*Repository, error) { return CloneContext(context.Background(), s, worktree, o) } @@ -203,7 +226,7 @@ func Clone(s storage.Storer, worktree billy.Filesystem, o *CloneOptions) (*Repos // given storer is not empty ErrRepositoryAlreadyExists is returned. // // The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the +// operation is complete, an error is returned. The context only affects the // transport operations. func CloneContext( ctx context.Context, s storage.Storer, worktree billy.Filesystem, o *CloneOptions, @@ -220,9 +243,19 @@ func CloneContext( // if the repository will have worktree (non-bare) or not (bare), if the path // is not empty ErrRepositoryAlreadyExists is returned. func PlainInit(path string, isBare bool) (*Repository, error) { + return PlainInitWithOptions(path, &PlainInitOptions{ + Bare: isBare, + }) +} + +func PlainInitWithOptions(path string, opts *PlainInitOptions) (*Repository, error) { + if opts == nil { + opts = &PlainInitOptions{} + } + var wt, dot billy.Filesystem - if isBare { + if opts.Bare { dot = osfs.New(path) } else { wt = osfs.New(path) @@ -231,7 +264,31 @@ func PlainInit(path string, isBare bool) (*Repository, error) { s := filesystem.NewStorage(dot, cache.NewObjectLRUDefault()) - return Init(s, wt) + r, err := InitWithOptions(s, wt, opts.InitOptions) + if err != nil { + return nil, err + } + + cfg, err := r.Config() + if err != nil { + return nil, err + } + + if opts.ObjectFormat != "" { + if opts.ObjectFormat == formatcfg.SHA256 && hash.CryptoType != crypto.SHA256 { + return nil, ErrSHA256NotSupported + } + + cfg.Core.RepositoryFormatVersion = formatcfg.Version_1 + cfg.Extensions.ObjectFormat = opts.ObjectFormat + } + + err = r.Storer.SetConfig(cfg) + if err != nil { + return nil, err + } + + return r, err } // PlainOpen opens a git repository from the given path. 
It detects if the @@ -275,21 +332,30 @@ func PlainOpenWithOptions(path string, o *PlainOpenOptions) (*Repository, error) } func dotGitToOSFilesystems(path string, detect bool) (dot, wt billy.Filesystem, err error) { - if path, err = filepath.Abs(path); err != nil { + path, err = path_util.ReplaceTildeWithHome(path) + if err != nil { return nil, nil, err } - pathinfo, err := os.Stat(path) - if !os.IsNotExist(err) { - if !pathinfo.IsDir() && detect { - path = filepath.Dir(path) - } + if path, err = filepath.Abs(path); err != nil { + return nil, nil, err } var fs billy.Filesystem var fi os.FileInfo for { fs = osfs.New(path) + + pathinfo, err := fs.Stat("/") + if !os.IsNotExist(err) { + if pathinfo == nil { + return nil, nil, err + } + if !pathinfo.IsDir() && detect { + fs = osfs.New(filepath.Dir(path)) + } + } + fi, err = fs.Stat(GitDirName) if err == nil { // no error; stop @@ -332,7 +398,7 @@ func dotGitFileToOSFilesystem(path string, fs billy.Filesystem) (bfs billy.Files } defer ioutil.CheckClose(f, &err) - b, err := stdioutil.ReadAll(f) + b, err := io.ReadAll(f) if err != nil { return nil, err } @@ -361,7 +427,7 @@ func dotGitCommonDirectory(fs billy.Filesystem) (commonDir billy.Filesystem, err return nil, err } - b, err := stdioutil.ReadAll(f) + b, err := io.ReadAll(f) if err != nil { return nil, err } @@ -398,7 +464,7 @@ func PlainClone(path string, isBare bool, o *CloneOptions) (*Repository, error) // ErrRepositoryAlreadyExists is returned. // // The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the +// operation is complete, an error is returned. The context only affects the // transport operations. // // TODO(mcuadros): move isBare to CloneOptions in v5 @@ -409,6 +475,9 @@ func PlainCloneContext(ctx context.Context, path string, isBare bool, o *CloneOp return nil, err } + if o.Mirror { + isBare = true + } r, err := PlainInit(path, isBare) if err != nil { return nil, err @@ -433,7 +502,7 @@ func newRepository(s storage.Storer, worktree billy.Filesystem) *Repository { } func checkIfCleanupIsNeeded(path string) (cleanup bool, cleanParent bool, err error) { - fi, err := os.Stat(path) + fi, err := osfs.Default.Stat(path) if err != nil { if os.IsNotExist(err) { return true, true, nil @@ -446,44 +515,30 @@ func checkIfCleanupIsNeeded(path string) (cleanup bool, cleanParent bool, err er return false, false, fmt.Errorf("path is not a directory: %s", path) } - f, err := os.Open(path) + files, err := osfs.Default.ReadDir(path) if err != nil { return false, false, err } - defer ioutil.CheckClose(f, &err) - - _, err = f.Readdirnames(1) - if err == io.EOF { + if len(files) == 0 { return true, false, nil } - if err != nil { - return false, false, err - } - return false, false, nil } func cleanUpDir(path string, all bool) error { if all { - return os.RemoveAll(path) + return util.RemoveAll(osfs.Default, path) } - f, err := os.Open(path) + files, err := osfs.Default.ReadDir(path) if err != nil { return err } - defer ioutil.CheckClose(f, &err) - - names, err := f.Readdirnames(-1) - if err != nil { - return err - } - - for _, name := range names { - if err := os.RemoveAll(filepath.Join(path, name)); err != nil { + for _, fi := range files { + if err := util.RemoveAll(osfs.Default, osfs.Default.Join(path, fi.Name())); err != nil { return err } } @@ -675,7 +730,10 @@ func (r *Repository) DeleteBranch(name string) error { // CreateTag creates a tag. 
If opts is included, the tag is an annotated tag, // otherwise a lightweight tag is created. func (r *Repository) CreateTag(name string, hash plumbing.Hash, opts *CreateTagOptions) (*plumbing.Reference, error) { - rname := plumbing.ReferenceName(path.Join("refs", "tags", name)) + rname := plumbing.NewTagReferenceName(name) + if err := rname.Validate(); err != nil { + return nil, err + } _, err := r.Storer.Reference(rname) switch err { @@ -766,21 +824,20 @@ func (r *Repository) buildTagSignature(tag *object.Tag, signKey *openpgp.Entity) // If you want to check to see if the tag is an annotated tag, you can call // TagObject on the hash of the reference in ForEach: // -// ref, err := r.Tag("v0.1.0") -// if err != nil { -// // Handle error -// } -// -// obj, err := r.TagObject(ref.Hash()) -// switch err { -// case nil: -// // Tag object present -// case plumbing.ErrObjectNotFound: -// // Not a tag object -// default: -// // Some other error -// } +// ref, err := r.Tag("v0.1.0") +// if err != nil { +// // Handle error +// } // +// obj, err := r.TagObject(ref.Hash()) +// switch err { +// case nil: +// // Tag object present +// case plumbing.ErrObjectNotFound: +// // Not a tag object +// default: +// // Some other error +// } func (r *Repository) Tag(name string) (*plumbing.Reference, error) { ref, err := r.Reference(plumbing.ReferenceName(path.Join("refs", "tags", name)), false) if err != nil { @@ -831,22 +888,50 @@ func (r *Repository) clone(ctx context.Context, o *CloneOptions) error { } c := &config.RemoteConfig{ - Name: o.RemoteName, - URLs: []string{o.URL}, - Fetch: r.cloneRefSpec(o), + Name: o.RemoteName, + URLs: []string{o.URL}, + Fetch: r.cloneRefSpec(o), + Mirror: o.Mirror, } if _, err := r.CreateRemote(c); err != nil { return err } + // When the repository to clone is on the local machine, + // instead of using hard links, automatically setup .git/objects/info/alternates + // to share the objects with the source repository + if o.Shared { + if !url.IsLocalEndpoint(o.URL) { + return ErrAlternatePathNotSupported + } + altpath := o.URL + remoteRepo, err := PlainOpen(o.URL) + if err != nil { + return fmt.Errorf("failed to open remote repository: %w", err) + } + conf, err := remoteRepo.Config() + if err != nil { + return fmt.Errorf("failed to read remote repository configuration: %w", err) + } + if !conf.Core.IsBare { + altpath = path.Join(altpath, GitDirName) + } + if err := r.Storer.AddAlternate(altpath); err != nil { + return fmt.Errorf("failed to add alternate file to git objects dir: %w", err) + } + } + ref, err := r.fetchAndUpdateReferences(ctx, &FetchOptions{ - RefSpecs: c.Fetch, - Depth: o.Depth, - Auth: o.Auth, - Progress: o.Progress, - Tags: o.Tags, - RemoteName: o.RemoteName, + RefSpecs: c.Fetch, + Depth: o.Depth, + Auth: o.Auth, + Progress: o.Progress, + Tags: o.Tags, + RemoteName: o.RemoteName, + InsecureSkipTLS: o.InsecureSkipTLS, + CABundle: o.CABundle, + ProxyOptions: o.ProxyOptions, }, o.ReferenceName) if err != nil { return err @@ -873,7 +958,13 @@ func (r *Repository) clone(ctx context.Context, o *CloneOptions) error { if o.RecurseSubmodules != NoRecurseSubmodules { if err := w.updateSubmodules(&SubmoduleUpdateOptions{ RecurseSubmodules: o.RecurseSubmodules, - Auth: o.Auth, + Depth: func() int { + if o.ShallowSubmodules { + return 1 + } + return 0 + }(), + Auth: o.Auth, }); err != nil { return err } @@ -884,7 +975,7 @@ func (r *Repository) clone(ctx context.Context, o *CloneOptions) error { return err } - if ref.Name().IsBranch() { + if !o.Mirror && ref.Name().IsBranch() 
{ branchRef := ref.Name() branchName := strings.Split(string(branchRef), "refs/heads/")[1] @@ -892,11 +983,13 @@ func (r *Repository) clone(ctx context.Context, o *CloneOptions) error { Name: branchName, Merge: branchRef, } + if o.RemoteName == "" { b.Remote = "origin" } else { b.Remote = o.RemoteName } + if err := r.CreateBranch(b); err != nil { return err } @@ -913,6 +1006,8 @@ const ( func (r *Repository) cloneRefSpec(o *CloneOptions) []config.RefSpec { switch { + case o.Mirror: + return []config.RefSpec{"+refs/*:refs/*"} case o.ReferenceName.IsTag(): return []config.RefSpec{ config.RefSpec(fmt.Sprintf(refspecTag, o.ReferenceName.Short())), @@ -920,7 +1015,6 @@ func (r *Repository) cloneRefSpec(o *CloneOptions) []config.RefSpec { case o.SingleBranch && o.ReferenceName == plumbing.HEAD: return []config.RefSpec{ config.RefSpec(fmt.Sprintf(refspecSingleBranchHEAD, o.RemoteName)), - config.RefSpec(fmt.Sprintf(refspecSingleBranch, plumbing.Master.Short(), o.RemoteName)), } case o.SingleBranch: return []config.RefSpec{ @@ -982,7 +1076,7 @@ func (r *Repository) fetchAndUpdateReferences( return nil, err } - resolvedRef, err := storer.ResolveReference(remoteRefs, ref) + resolvedRef, err := expand_ref(remoteRefs, ref) if err != nil { return nil, err } @@ -1099,7 +1193,7 @@ func (r *Repository) Fetch(o *FetchOptions) error { // no changes to be fetched, or an error. // // The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the +// operation is complete, an error is returned. The context only affects the // transport operations. func (r *Repository) FetchContext(ctx context.Context, o *FetchOptions) error { if err := o.Validate(); err != nil { @@ -1126,7 +1220,7 @@ func (r *Repository) Push(o *PushOptions) error { // FetchOptions.RemoteName. // // The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the +// operation is complete, an error is returned. The context only affects the // transport operations. 
func (r *Repository) PushContext(ctx context.Context, o *PushOptions) error { if err := o.Validate(); err != nil { @@ -1253,26 +1347,25 @@ func commitIterFunc(order LogOrder) func(c *object.Commit) object.CommitIter { // If you want to check to see if the tag is an annotated tag, you can call // TagObject on the hash Reference passed in through ForEach: // -// iter, err := r.Tags() -// if err != nil { -// // Handle error -// } -// -// if err := iter.ForEach(func (ref *plumbing.Reference) error { -// obj, err := r.TagObject(ref.Hash()) -// switch err { -// case nil: -// // Tag object present -// case plumbing.ErrObjectNotFound: -// // Not a tag object -// default: -// // Some other error -// return err -// } -// }); err != nil { -// // Handle outer iterator error -// } +// iter, err := r.Tags() +// if err != nil { +// // Handle error +// } // +// if err := iter.ForEach(func (ref *plumbing.Reference) error { +// obj, err := r.TagObject(ref.Hash()) +// switch err { +// case nil: +// // Tag object present +// case plumbing.ErrObjectNotFound: +// // Not a tag object +// default: +// // Some other error +// return err +// } +// }); err != nil { +// // Handle outer iterator error +// } func (r *Repository) Tags() (storer.ReferenceIter, error) { refIter, err := r.Storer.IterReferences() if err != nil { @@ -1431,14 +1524,35 @@ func (r *Repository) Worktree() (*Worktree, error) { return &Worktree{r: r, Filesystem: r.wt}, nil } +func expand_ref(s storer.ReferenceStorer, ref plumbing.ReferenceName) (*plumbing.Reference, error) { + // For improved troubleshooting, this preserves the error for the provided `ref`, + // and returns the error for that specific ref in case all parse rules fail. + var ret error + for _, rule := range plumbing.RefRevParseRules { + resolvedRef, err := storer.ResolveReference(s, plumbing.ReferenceName(fmt.Sprintf(rule, ref))) + + if err == nil { + return resolvedRef, nil + } else if ret == nil { + ret = err + } + } + + return nil, ret +} + // ResolveRevision resolves revision to corresponding hash. It will always // resolve to a commit hash, not a tree or annotated tag. // // Implemented resolvers : HEAD, branch, tag, heads/branch, refs/heads/branch, // refs/tags/tag, refs/remotes/origin/branch, refs/remotes/origin/HEAD, tilde and caret (HEAD~1, master~^, tag~2, ref/heads/master~1, ...), selection by text (HEAD^{/fix nasty bug}), hash (prefix and full) -func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, error) { - p := revision.NewParserFromString(string(rev)) +func (r *Repository) ResolveRevision(in plumbing.Revision) (*plumbing.Hash, error) { + rev := in.String() + if rev == "" { + return &plumbing.ZeroHash, plumbing.ErrReferenceNotFound + } + p := revision.NewParserFromString(rev) items, err := p.Parse() if err != nil { @@ -1456,13 +1570,9 @@ func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, err tryHashes = append(tryHashes, r.resolveHashPrefix(string(revisionRef))...) - for _, rule := range append([]string{"%s"}, plumbing.RefRevParseRules...)
{ - ref, err := storer.ResolveReference(r.Storer, plumbing.ReferenceName(fmt.Sprintf(rule, revisionRef))) - - if err == nil { - tryHashes = append(tryHashes, ref.Hash()) - break - } + ref, err := expand_ref(r.Storer, plumbing.ReferenceName(revisionRef)) + if err == nil { + tryHashes = append(tryHashes, ref.Hash()) } // in ambiguous cases, `git rev-parse` will emit a warning, but @@ -1562,13 +1672,17 @@ func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, err } if c == nil { - return &plumbing.ZeroHash, fmt.Errorf(`No commit message match regexp : "%s"`, re.String()) + return &plumbing.ZeroHash, fmt.Errorf("no commit message match regexp: %q", re.String()) } commit = c } } + if commit == nil { + return &plumbing.ZeroHash, plumbing.ErrReferenceNotFound + } + return &commit.Hash, nil } @@ -1657,8 +1771,43 @@ func (r *Repository) RepackObjects(cfg *RepackConfig) (err error) { return nil } +// Merge merges the reference branch into the current branch. +// +// If the merge is not possible (or supported) it returns an error without changing +// the HEAD for the current branch. Possible errors include: +// - The merge strategy is not supported. +// - The specific strategy cannot be used (e.g. using FastForwardMerge when one is not possible). +func (r *Repository) Merge(ref plumbing.Reference, opts MergeOptions) error { + if opts.Strategy != FastForwardMerge { + return ErrUnsupportedMergeStrategy + } + + // Ignore error as not having a shallow list is optional here. + shallowList, _ := r.Storer.Shallow() + var earliestShallow *plumbing.Hash + if len(shallowList) > 0 { + earliestShallow = &shallowList[0] + } + + head, err := r.Head() + if err != nil { + return err + } + + ff, err := isFastForward(r.Storer, head.Hash(), ref.Hash(), earliestShallow) + if err != nil { + return err + } + + if !ff { + return ErrFastForwardMergeNotPossible + } + + return r.Storer.SetReference(plumbing.NewHashReference(head.Name(), ref.Hash())) +} + // createNewObjectPack is a helper for RepackObjects taking care -// of creating a new pack. It is used so the the PackfileWriter +// of creating a new pack. It is used so the PackfileWriter // deferred close has the right scope. func (r *Repository) createNewObjectPack(cfg *RepackConfig) (h plumbing.Hash, err error) { ow := newObjectWalker(r.Storer) diff --git a/vendor/github.com/go-git/go-git/v5/signer.go b/vendor/github.com/go-git/go-git/v5/signer.go new file mode 100644 index 00000000..e3ef7ebd --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/signer.go @@ -0,0 +1,33 @@ +package git + +import ( + "io" + + "github.com/go-git/go-git/v5/plumbing" +) + +// signableObject is an object which can be signed. +type signableObject interface { + EncodeWithoutSignature(o plumbing.EncodedObject) error +} + +// Signer is an interface for signing git objects. +// message is a reader containing the encoded object to be signed. +// Implementors should return the encoded signature and an error if any. +// See https://git-scm.com/docs/gitformat-signature for more information.
+type Signer interface { + Sign(message io.Reader) ([]byte, error) +} + +func signObject(signer Signer, obj signableObject) ([]byte, error) { + encoded := &plumbing.MemoryObject{} + if err := obj.EncodeWithoutSignature(encoded); err != nil { + return nil, err + } + r, err := encoded.Reader() + if err != nil { + return nil, err + } + + return signer.Sign(r) +} diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/BUILD.bazel b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/BUILD.bazel index a9145815..fde201a4 100644 --- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/BUILD.bazel +++ b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/BUILD.bazel @@ -6,6 +6,7 @@ go_library( "dotgit.go", "dotgit_rewrite_packed_refs.go", "dotgit_setref.go", + "reader.go", "repository_filesystem.go", "writers.go", ], @@ -14,11 +15,12 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/go-git/go-billy/v5:go-billy", - "//vendor/github.com/go-git/go-billy/v5/osfs", + "//vendor/github.com/go-git/go-billy/v5/helper/chroot", "//vendor/github.com/go-git/go-git/v5/plumbing", "//vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile", "//vendor/github.com/go-git/go-git/v5/plumbing/format/objfile", "//vendor/github.com/go-git/go-git/v5/plumbing/format/packfile", + "//vendor/github.com/go-git/go-git/v5/plumbing/hash", "//vendor/github.com/go-git/go-git/v5/storage", "//vendor/github.com/go-git/go-git/v5/utils/ioutil", ], diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go index 6c386f79..31c46948 100644 --- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go +++ b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go @@ -7,19 +7,22 @@ import ( "errors" "fmt" "io" - stdioutil "io/ioutil" "os" + "path" "path/filepath" + "reflect" + "runtime" "sort" "strings" "time" - "github.com/go-git/go-billy/v5/osfs" "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/hash" "github.com/go-git/go-git/v5/storage" "github.com/go-git/go-git/v5/utils/ioutil" "github.com/go-git/go-billy/v5" + "github.com/go-git/go-billy/v5/helper/chroot" ) const ( @@ -38,6 +41,7 @@ const ( remotesPath = "remotes" logsPath = "logs" worktreesPath = "worktrees" + alternatesPath = "alternates" tmpPackedRefsPrefix = "._packed-refs" @@ -78,6 +82,10 @@ type Options struct { // KeepDescriptors makes the file descriptors to be reused but they will // need to be manually closed calling Close(). KeepDescriptors bool + // AlternatesFS provides the billy filesystem to be used for Git Alternates. + // If none is provided, it falls back to using the underlying instance used for + // DotGit. + AlternatesFS billy.Filesystem } // The DotGit type represents a local git repository on disk. 
This @@ -552,8 +560,8 @@ func (d *DotGit) hasPack(h plumbing.Hash) error { } func (d *DotGit) objectPath(h plumbing.Hash) string { - hash := h.String() - return d.fs.Join(objectsPath, hash[0:2], hash[2:40]) + hex := h.String() + return d.fs.Join(objectsPath, hex[0:2], hex[2:hash.HexSize]) } // incomingObjectPath is intended to add support for a git pre-receive hook @@ -563,15 +571,16 @@ func (d *DotGit) objectPath(h plumbing.Hash) string { // // More on git hooks found here : https://git-scm.com/docs/githooks // More on 'quarantine'/incoming directory here: -// https://git-scm.com/docs/git-receive-pack +// +// https://git-scm.com/docs/git-receive-pack func (d *DotGit) incomingObjectPath(h plumbing.Hash) string { hString := h.String() if d.incomingDirName == "" { - return d.fs.Join(objectsPath, hString[0:2], hString[2:40]) + return d.fs.Join(objectsPath, hString[0:2], hString[2:hash.HexSize]) } - return d.fs.Join(objectsPath, d.incomingDirName, hString[0:2], hString[2:40]) + return d.fs.Join(objectsPath, d.incomingDirName, hString[0:2], hString[2:hash.HexSize]) } // hasIncomingObjects searches for an incoming directory and keeps its name @@ -581,7 +590,9 @@ func (d *DotGit) hasIncomingObjects() bool { directoryContents, err := d.fs.ReadDir(objectsPath) if err == nil { for _, file := range directoryContents { - if strings.HasPrefix(file.Name(), "incoming-") && file.IsDir() { + if file.IsDir() && (strings.HasPrefix(file.Name(), "tmp_objdir-incoming-") || + // Before Git 2.35 incoming commits directory had another prefix + strings.HasPrefix(file.Name(), "incoming-")) { d.incomingDirName = file.Name() } } @@ -645,7 +656,7 @@ func (d *DotGit) ObjectDelete(h plumbing.Hash) error { } func (d *DotGit) readReferenceFrom(rd io.Reader, name string) (ref *plumbing.Reference, err error) { - b, err := stdioutil.ReadAll(rd) + b, err := io.ReadAll(rd) if err != nil { return nil, err } @@ -716,48 +727,56 @@ func (d *DotGit) Ref(name plumbing.ReferenceName) (*plumbing.Reference, error) { return d.packedRef(name) } -func (d *DotGit) findPackedRefsInFile(f billy.File) ([]*plumbing.Reference, error) { +func (d *DotGit) findPackedRefsInFile(f billy.File, recv refsRecv) error { s := bufio.NewScanner(f) - var refs []*plumbing.Reference for s.Scan() { ref, err := d.processLine(s.Text()) if err != nil { - return nil, err + return err } - if ref != nil { - refs = append(refs, ref) + if !recv(ref) { + // skip parse + return nil } } - - return refs, s.Err() + if err := s.Err(); err != nil { + return err + } + return nil } -func (d *DotGit) findPackedRefs() (r []*plumbing.Reference, err error) { +// refsRecv: returning true continues the reference scan; returning false stops it early, which speeds up the lookup of a single reference.
+type refsRecv func(*plumbing.Reference) bool + +func (d *DotGit) findPackedRefs(recv refsRecv) error { f, err := d.fs.Open(packedRefsPath) if err != nil { if os.IsNotExist(err) { - return nil, nil + return nil } - return nil, err + return err } defer ioutil.CheckClose(f, &err) - return d.findPackedRefsInFile(f) + return d.findPackedRefsInFile(f, recv) } func (d *DotGit) packedRef(name plumbing.ReferenceName) (*plumbing.Reference, error) { - refs, err := d.findPackedRefs() - if err != nil { + var ref *plumbing.Reference + if err := d.findPackedRefs(func(r *plumbing.Reference) bool { + if r != nil && r.Name() == name { + ref = r + // ref found + return false + } + return true + }); err != nil { return nil, err } - - for _, ref := range refs { - if ref.Name() == name { - return ref, nil - } + if ref != nil { + return ref, nil } - return nil, plumbing.ErrReferenceNotFound } @@ -777,34 +796,22 @@ func (d *DotGit) RemoveRef(name plumbing.ReferenceName) error { return d.rewritePackedRefsWithoutRef(name) } -func (d *DotGit) addRefsFromPackedRefs(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) (err error) { - packedRefs, err := d.findPackedRefs() - if err != nil { - return err - } - - for _, ref := range packedRefs { - if !seen[ref.Name()] { - *refs = append(*refs, ref) - seen[ref.Name()] = true +func refsRecvFunc(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) refsRecv { + return func(r *plumbing.Reference) bool { + if r != nil && !seen[r.Name()] { + *refs = append(*refs, r) + seen[r.Name()] = true } + return true } - return nil +} + +func (d *DotGit) addRefsFromPackedRefs(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) (err error) { + return d.findPackedRefs(refsRecvFunc(refs, seen)) } func (d *DotGit) addRefsFromPackedRefsFile(refs *[]*plumbing.Reference, f billy.File, seen map[plumbing.ReferenceName]bool) (err error) { - packedRefs, err := d.findPackedRefsInFile(f) - if err != nil { - return err - } - - for _, ref := range packedRefs { - if !seen[ref.Name()] { - *refs = append(*refs, ref) - seen[ref.Name()] = true - } - } - return nil + return d.findPackedRefsInFile(f, refsRecvFunc(refs, seen)) } func (d *DotGit) openAndLockPackedRefs(doCreate bool) ( @@ -943,6 +950,7 @@ func (d *DotGit) walkReferencesTree(refs *[]*plumbing.Reference, relPath []strin files, err := d.fs.ReadDir(d.fs.Join(relPath...)) if err != nil { if os.IsNotExist(err) { + // a race happened, and our directory is gone now return nil } @@ -960,6 +968,10 @@ func (d *DotGit) walkReferencesTree(refs *[]*plumbing.Reference, relPath []strin } ref, err := d.readReferenceFile(".", strings.Join(newRelPath, "/")) + if os.IsNotExist(err) { + // a race happened, and our file is gone now + continue + } if err != nil { return err } @@ -1101,38 +1113,93 @@ func (d *DotGit) Module(name string) (billy.Filesystem, error) { return d.fs.Chroot(d.fs.Join(modulePath, name)) } +func (d *DotGit) AddAlternate(remote string) error { + altpath := d.fs.Join(objectsPath, infoPath, alternatesPath) + + f, err := d.fs.OpenFile(altpath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0640) + if err != nil { + return fmt.Errorf("cannot open file: %w", err) + } + defer f.Close() + + // locking in windows throws an error, based on comments + // https://github.com/go-git/go-git/pull/860#issuecomment-1751823044 + // do not lock on windows platform. 
+ if runtime.GOOS != "windows" { + if err = f.Lock(); err != nil { + return fmt.Errorf("cannot lock file: %w", err) + } + defer f.Unlock() + } + + line := path.Join(remote, objectsPath) + "\n" + _, err = io.WriteString(f, line) + if err != nil { + return fmt.Errorf("error writing 'alternates' file: %w", err) + } + + return nil +} + // Alternates returns DotGit(s) based off paths in objects/info/alternates if // available. This can be used to check if it's a shared repository. func (d *DotGit) Alternates() ([]*DotGit, error) { - altpath := d.fs.Join("objects", "info", "alternates") + altpath := d.fs.Join(objectsPath, infoPath, alternatesPath) f, err := d.fs.Open(altpath) if err != nil { return nil, err } defer f.Close() + fs := d.options.AlternatesFS + if fs == nil { + fs = d.fs + } + var alternates []*DotGit + seen := make(map[string]struct{}) // Read alternate paths line-by-line and create DotGit objects. scanner := bufio.NewScanner(f) for scanner.Scan() { path := scanner.Text() - if !filepath.IsAbs(path) { - // For relative paths, we can perform an internal conversion to - // slash so that they work cross-platform. - slashPath := filepath.ToSlash(path) - // If the path is not absolute, it must be relative to object - // database (.git/objects/info). - // https://www.kernel.org/pub/software/scm/git/docs/gitrepository-layout.html - // Hence, derive a path relative to DotGit's root. - // "../../../reponame/.git/" -> "../../reponame/.git" - // Remove the first ../ - relpath := filepath.Join(strings.Split(slashPath, "/")[1:]...) - normalPath := filepath.FromSlash(relpath) - path = filepath.Join(d.fs.Root(), normalPath) + + // Avoid creating multiple dotgits for the same alternative path. + if _, ok := seen[path]; ok { + continue } - fs := osfs.New(filepath.Dir(path)) - alternates = append(alternates, New(fs)) + + seen[path] = struct{}{} + + if filepath.IsAbs(path) { + // Handling absolute paths should be straightforward. However, the default osfs (Chroot) + // tries to concatenate an abs path with the root path in some operations (e.g. Stat), + // which leads to unexpected errors. Therefore, make the path relative to the current FS instead. + if reflect.TypeOf(fs) == reflect.TypeOf(&chroot.ChrootHelper{}) { + path, err = filepath.Rel(fs.Root(), path) + if err != nil { + return nil, fmt.Errorf("cannot make path %q relative: %w", path, err) + } + } + } else { + // By Git conventions, relative paths should be based on the object database (.git/objects/info) + // location as per: https://www.kernel.org/pub/software/scm/git/docs/gitrepository-layout.html + // However, due to the nature of go-git and its filesystem handling via Billy, paths cannot + // cross its "chroot boundaries". Therefore, ignore any "../" and treat the path from the + // fs root. If this is not correct based on the dotgit fs, set a different one via AlternatesFS. + abs := filepath.Join(string(filepath.Separator), filepath.ToSlash(path)) + path = filepath.FromSlash(abs) + } + + // Aligns with upstream behavior: exit if target path is not a valid directory.
+ if fi, err := fs.Stat(path); err != nil || !fi.IsDir() { + return nil, fmt.Errorf("invalid object directory %q: %w", path, err) + } + afs, err := fs.Chroot(filepath.Dir(path)) + if err != nil { + return nil, fmt.Errorf("cannot chroot %q: %w", path, err) + } + alternates = append(alternates, New(afs)) } if err = scanner.Err(); err != nil { diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/reader.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/reader.go new file mode 100644 index 00000000..975f92ac --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/reader.go @@ -0,0 +1,79 @@ +package dotgit + +import ( + "fmt" + "io" + "os" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/format/objfile" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +var _ (plumbing.EncodedObject) = &EncodedObject{} + +type EncodedObject struct { + dir *DotGit + h plumbing.Hash + t plumbing.ObjectType + sz int64 +} + +func (e *EncodedObject) Hash() plumbing.Hash { + return e.h +} + +func (e *EncodedObject) Reader() (io.ReadCloser, error) { + f, err := e.dir.Object(e.h) + if err != nil { + if os.IsNotExist(err) { + return nil, plumbing.ErrObjectNotFound + } + + return nil, err + } + r, err := objfile.NewReader(f) + if err != nil { + return nil, err + } + + t, size, err := r.Header() + if err != nil { + _ = r.Close() + return nil, err + } + if t != e.t { + _ = r.Close() + return nil, objfile.ErrHeader + } + if size != e.sz { + _ = r.Close() + return nil, objfile.ErrHeader + } + return ioutil.NewReadCloserWithCloser(r, f.Close), nil +} + +func (e *EncodedObject) SetType(plumbing.ObjectType) {} + +func (e *EncodedObject) Type() plumbing.ObjectType { + return e.t +} + +func (e *EncodedObject) Size() int64 { + return e.sz +} + +func (e *EncodedObject) SetSize(int64) {} + +func (e *EncodedObject) Writer() (io.WriteCloser, error) { + return nil, fmt.Errorf("not supported") +} + +func NewEncodedObject(dir *DotGit, h plumbing.Hash, t plumbing.ObjectType, size int64) *EncodedObject { + return &EncodedObject{ + dir: dir, + h: h, + t: t, + sz: size, + } +} diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/writers.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/writers.go index e2ede938..849b7a17 100644 --- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/writers.go +++ b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/writers.go @@ -9,6 +9,7 @@ import ( "github.com/go-git/go-git/v5/plumbing/format/idxfile" "github.com/go-git/go-git/v5/plumbing/format/objfile" "github.com/go-git/go-git/v5/plumbing/format/packfile" + "github.com/go-git/go-git/v5/plumbing/hash" "github.com/go-git/go-billy/v5" ) @@ -277,8 +278,8 @@ func (w *ObjectWriter) Close() error { } func (w *ObjectWriter) save() error { - hash := w.Hash().String() - file := w.fs.Join(objectsPath, hash[0:2], hash[2:40]) + hex := w.Hash().String() + file := w.fs.Join(objectsPath, hex[0:2], hex[2:hash.HexSize]) return w.fs.Rename(w.f.Name(), file) } diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/object.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/object.go index 0c25dad6..e812fe93 100644 --- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/object.go +++ b/vendor/github.com/go-git/go-git/v5/storage/filesystem/object.go @@ -4,6 +4,7 @@ import ( "bytes" "io" "os" + "sync" "time" "github.com/go-git/go-git/v5/plumbing" @@ -145,6 +146,19 @@ func (s *ObjectStorage) 
SetEncodedObject(o plumbing.EncodedObject) (h plumbing.H return o.Hash(), err } +// LazyWriter returns a lazy ObjectWriter that is bound to a DotGit file. +// It first writes the header, passing on the object type and size, so +// that the object contents can be written later, without the need to +// create a MemoryObject and buffer its entire contents into memory. +func (s *ObjectStorage) LazyWriter() (w io.WriteCloser, wh func(typ plumbing.ObjectType, sz int64) error, err error) { + ow, err := s.dir.NewObject() + if err != nil { + return nil, nil, err + } + + return ow, ow.WriteHeader, nil +} + // HasEncodedObject returns nil if the object exists, without actually // reading the object data from storage. func (s *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) { @@ -204,9 +218,9 @@ func (s *ObjectStorage) packfile(idx idxfile.Index, pack plumbing.Hash) (*packfi var p *packfile.Packfile if s.objectCache != nil { - p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache) + p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache, s.options.LargeObjectThreshold) } else { - p = packfile.NewPackfile(idx, s.dir.Fs(), f) + p = packfile.NewPackfile(idx, s.dir.Fs(), f, s.options.LargeObjectThreshold) } return p, s.storePackfileInCache(pack, p) @@ -389,7 +403,6 @@ func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedOb return cacheObj, nil } - obj = s.NewEncodedObject() r, err := objfile.NewReader(f) if err != nil { return nil, err } @@ -402,6 +415,13 @@ func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedOb return nil, err } + if s.options.LargeObjectThreshold > 0 && size > s.options.LargeObjectThreshold { + obj = dotgit.NewEncodedObject(s.dir, h, t, size) + return obj, nil + } + + obj = s.NewEncodedObject() + obj.SetType(t) obj.SetSize(size) w, err := obj.Writer() @@ -413,10 +433,21 @@ func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedOb s.objectCache.Put(obj) - _, err = io.Copy(w, r) + bufp := copyBufferPool.Get().(*[]byte) + buf := *bufp + _, err = io.CopyBuffer(w, r, buf) + copyBufferPool.Put(bufp) + return obj, err } +var copyBufferPool = sync.Pool{ + New: func() interface{} { + b := make([]byte, 32*1024) + return &b + }, +} + // Get returns the object with the given hash, by searching for it in // the packfile. func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) ( @@ -519,14 +550,21 @@ func (s *ObjectStorage) findObjectInPackfile(h plumbing.Hash) (plumbing.Hash, pl return plumbing.ZeroHash, plumbing.ZeroHash, -1 } +// HashesWithPrefix returns all objects with a hash that starts with a prefix by searching for +// them in the packfile and the git object directories. func (s *ObjectStorage) HashesWithPrefix(prefix []byte) ([]plumbing.Hash, error) { hashes, err := s.dir.ObjectsWithPrefix(prefix) if err != nil { return nil, err } + seen := hashListAsMap(hashes) + // TODO: This could be faster with some idxfile changes, // or diving into the packfile.
+ if err := s.requireIndex(); err != nil { + return nil, err + } for _, index := range s.index { ei, err := index.Entries() if err != nil { @@ -540,6 +578,9 @@ func (s *ObjectStorage) HashesWithPrefix(prefix []byte) ([]plumbing.Hash, error) return nil, err } if bytes.HasPrefix(e.Hash[:], prefix) { + if _, ok := seen[e.Hash]; ok { + continue + } hashes = append(hashes, e.Hash) } } @@ -595,6 +636,7 @@ func (s *ObjectStorage) buildPackfileIters( return newPackfileIter( s.dir.Fs(), pack, t, seen, s.index[h], s.objectCache, s.options.KeepDescriptors, + s.options.LargeObjectThreshold, ) }, }, nil @@ -684,6 +726,7 @@ func NewPackfileIter( idxFile billy.File, t plumbing.ObjectType, keepPack bool, + largeObjectThreshold int64, ) (storer.EncodedObjectIter, error) { idx := idxfile.NewMemoryIndex() if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil { @@ -695,7 +738,7 @@ func NewPackfileIter( } seen := make(map[plumbing.Hash]struct{}) - return newPackfileIter(fs, f, t, seen, idx, nil, keepPack) + return newPackfileIter(fs, f, t, seen, idx, nil, keepPack, largeObjectThreshold) } func newPackfileIter( @@ -706,12 +749,13 @@ func newPackfileIter( index idxfile.Index, cache cache.Object, keepPack bool, + largeObjectThreshold int64, ) (storer.EncodedObjectIter, error) { var p *packfile.Packfile if cache != nil { - p = packfile.NewPackfileWithCache(index, fs, f, cache) + p = packfile.NewPackfileWithCache(index, fs, f, cache, largeObjectThreshold) } else { - p = packfile.NewPackfile(index, fs, f) + p = packfile.NewPackfile(index, fs, f, largeObjectThreshold) } iter, err := p.GetByType(t) diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/shallow.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/shallow.go index afb600cf..ac48fdfb 100644 --- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/shallow.go +++ b/vendor/github.com/go-git/go-git/v5/storage/filesystem/shallow.go @@ -34,7 +34,7 @@ func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) error { return err } -// Shallow return the shallow commits reading from shallo file from .git +// Shallow returns the shallow commits read from the shallow file in .git func (s *ShallowStorage) Shallow() ([]plumbing.Hash, error) { f, err := s.dir.Shallow() if f == nil || err != nil { diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/storage.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/storage.go index 8b69b27b..951ea00c 100644 --- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/storage.go +++ b/vendor/github.com/go-git/go-git/v5/storage/filesystem/storage.go @@ -34,6 +34,13 @@ type Options struct { // MaxOpenDescriptors is the max number of file descriptors to keep // open. If KeepDescriptors is true, all file descriptors will remain open. MaxOpenDescriptors int + // LargeObjectThreshold is the maximum object size (in bytes) that will be read into memory. + // If left unset or set to 0 there is no limit. + LargeObjectThreshold int64 + // AlternatesFS provides the billy filesystem to be used for Git Alternates. + // If none is provided, it falls back to using the underlying instance used for + // DotGit. + AlternatesFS billy.Filesystem } // NewStorage returns a new Storage backed by a given `fs.Filesystem` and cache.
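The storage.Options additions above (LargeObjectThreshold, AlternatesFS) are plumbed through NewStorageWithOptions. A minimal sketch of wiring them up, assuming an existing repository at a hypothetical path:

package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/osfs"
	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing/cache"
	"github.com/go-git/go-git/v5/storage/filesystem"
)

func main() {
	// Hypothetical .git directory of an existing repository.
	dot := osfs.New("/tmp/example-repo/.git")

	st := filesystem.NewStorageWithOptions(dot, cache.NewObjectLRUDefault(), filesystem.Options{
		// Loose objects above 1 MiB come back as streaming dotgit.EncodedObject
		// values instead of being buffered into a MemoryObject.
		LargeObjectThreshold: 1 << 20,
		// AlternatesFS is left nil here, so alternate lookups fall back to the
		// DotGit filesystem itself.
	})

	repo, err := git.Open(st, nil)
	if err != nil {
		panic(err)
	}

	head, err := repo.Head()
	if err != nil {
		panic(err)
	}
	fmt.Println("HEAD:", head.Hash())
}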
@@ -46,6 +53,7 @@ func NewStorage(fs billy.Filesystem, cache cache.Object) *Storage { func NewStorageWithOptions(fs billy.Filesystem, cache cache.Object, ops Options) *Storage { dirOps := dotgit.Options{ ExclusiveAccess: ops.ExclusiveAccess, + AlternatesFS: ops.AlternatesFS, } dir := dotgit.NewWithOptions(fs, dirOps) @@ -71,3 +79,7 @@ func (s *Storage) Filesystem() billy.Filesystem { func (s *Storage) Init() error { return s.dir.Initialize() } + +func (s *Storage) AddAlternate(remote string) error { + return s.dir.AddAlternate(remote) +} diff --git a/vendor/github.com/go-git/go-git/v5/storage/memory/storage.go b/vendor/github.com/go-git/go-git/v5/storage/memory/storage.go index a8e56697..79211c7c 100644 --- a/vendor/github.com/go-git/go-git/v5/storage/memory/storage.go +++ b/vendor/github.com/go-git/go-git/v5/storage/memory/storage.go @@ -193,7 +193,7 @@ func (o *ObjectStorage) DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) er return nil } -var errNotSupported = fmt.Errorf("Not supported") +var errNotSupported = fmt.Errorf("not supported") func (o *ObjectStorage) LooseObjectTime(hash plumbing.Hash) (time.Time, error) { return time.Time{}, errNotSupported @@ -202,6 +202,10 @@ func (o *ObjectStorage) DeleteLooseObject(plumbing.Hash) error { return errNotSupported } +func (o *ObjectStorage) AddAlternate(remote string) error { + return errNotSupported +} + type TxObjectStorage struct { Storage *ObjectStorage Objects map[plumbing.Hash]plumbing.EncodedObject diff --git a/vendor/github.com/go-git/go-git/v5/submodule.go b/vendor/github.com/go-git/go-git/v5/submodule.go index dff26b0d..84f020dc 100644 --- a/vendor/github.com/go-git/go-git/v5/submodule.go +++ b/vendor/github.com/go-git/go-git/v5/submodule.go @@ -5,11 +5,13 @@ import ( "context" "errors" "fmt" + "path" "github.com/go-git/go-billy/v5" "github.com/go-git/go-git/v5/config" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/format/index" + "github.com/go-git/go-git/v5/plumbing/transport" ) var ( @@ -131,9 +133,29 @@ func (s *Submodule) Repository() (*Repository, error) { return nil, err } + moduleEndpoint, err := transport.NewEndpoint(s.c.URL) + if err != nil { + return nil, err + } + + if !path.IsAbs(moduleEndpoint.Path) && moduleEndpoint.Protocol == "file" { + remotes, err := s.w.r.Remotes() + if err != nil { + return nil, err + } + + rootEndpoint, err := transport.NewEndpoint(remotes[0].c.URLs[0]) + if err != nil { + return nil, err + } + + rootEndpoint.Path = path.Join(rootEndpoint.Path, moduleEndpoint.Path) + *moduleEndpoint = *rootEndpoint + } + _, err = r.CreateRemote(&config.RemoteConfig{ Name: DefaultRemoteName, - URLs: []string{s.c.URL}, + URLs: []string{moduleEndpoint.String()}, }) return r, err @@ -151,7 +173,7 @@ func (s *Submodule) Update(o *SubmoduleUpdateOptions) error { // setting in the options SubmoduleUpdateOptions.Init equals true. // // The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the +// operation is complete, an error is returned. The context only affects the // transport operations. 
func (s *Submodule) UpdateContext(ctx context.Context, o *SubmoduleUpdateOptions) error { return s.update(ctx, o, plumbing.ZeroHash) @@ -221,7 +243,7 @@ func (s *Submodule) fetchAndCheckout( ctx context.Context, r *Repository, o *SubmoduleUpdateOptions, hash plumbing.Hash, ) error { if !o.NoFetch { - err := r.FetchContext(ctx, &FetchOptions{Auth: o.Auth}) + err := r.FetchContext(ctx, &FetchOptions{Auth: o.Auth, Depth: o.Depth}) if err != nil && err != NoErrAlreadyUpToDate { return err } @@ -232,6 +254,25 @@ func (s *Submodule) fetchAndCheckout( return err } + // Handle a case when submodule refers to an orphaned commit that's still reachable + // through Git server using a special protocol capability[1]. + // + // [1]: https://git-scm.com/docs/protocol-capabilities#_allow_reachable_sha1_in_want + if !o.NoFetch { + if _, err := w.r.Object(plumbing.AnyObject, hash); err != nil { + refSpec := config.RefSpec("+" + hash.String() + ":" + hash.String()) + + err := r.FetchContext(ctx, &FetchOptions{ + Auth: o.Auth, + RefSpecs: []config.RefSpec{refSpec}, + Depth: o.Depth, + }) + if err != nil && err != NoErrAlreadyUpToDate && err != ErrExactSHA1NotSupported { + return err + } + } + } + if err := w.Checkout(&CheckoutOptions{Hash: hash}); err != nil { return err } @@ -262,7 +303,7 @@ func (s Submodules) Update(o *SubmoduleUpdateOptions) error { // UpdateContext updates all the submodules in this list. // // The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the +// operation is complete, an error is returned. The context only affects the // transport operations. func (s Submodules) UpdateContext(ctx context.Context, o *SubmoduleUpdateOptions) error { for _, sub := range s { diff --git a/vendor/github.com/go-git/go-git/v5/utils/binary/read.go b/vendor/github.com/go-git/go-git/v5/utils/binary/read.go index a14d48db..b8f9df1a 100644 --- a/vendor/github.com/go-git/go-git/v5/utils/binary/read.go +++ b/vendor/github.com/go-git/go-git/v5/utils/binary/read.go @@ -1,4 +1,4 @@ -// Package binary implements sintax-sugar functions on top of the standard +// Package binary implements syntax-sugar functions on top of the standard // library binary package package binary diff --git a/vendor/github.com/go-git/go-git/v5/utils/ioutil/common.go b/vendor/github.com/go-git/go-git/v5/utils/ioutil/common.go index e9dcbfe4..235af717 100644 --- a/vendor/github.com/go-git/go-git/v5/utils/ioutil/common.go +++ b/vendor/github.com/go-git/go-git/v5/utils/ioutil/common.go @@ -7,7 +7,7 @@ import ( "errors" "io" - "github.com/jbenet/go-context/io" + ctxio "github.com/jbenet/go-context/io" ) type readPeeker interface { @@ -55,6 +55,28 @@ func NewReadCloser(r io.Reader, c io.Closer) io.ReadCloser { return &readCloser{Reader: r, closer: c} } +type readCloserCloser struct { + io.ReadCloser + closer func() error +} + +func (r *readCloserCloser) Close() (err error) { + defer func() { + if err == nil { + err = r.closer() + return + } + _ = r.closer() + }() + return r.ReadCloser.Close() +} + +// NewReadCloserWithCloser creates an `io.ReadCloser` with the given `io.ReadCloser` and +// `io.Closer` that ensures that the closer is closed on close +func NewReadCloserWithCloser(r io.ReadCloser, c func() error) io.ReadCloser { + return &readCloserCloser{ReadCloser: r, closer: c} +} + type writeCloser struct { io.Writer closer io.Closer @@ -82,6 +104,24 @@ func WriteNopCloser(w io.Writer) io.WriteCloser { return writeNopCloser{w} } +type 
readerAtAsReader struct { + io.ReaderAt + offset int64 +} + +func (r *readerAtAsReader) Read(bs []byte) (int, error) { + n, err := r.ReaderAt.ReadAt(bs, r.offset) + r.offset += int64(n) + return n, err +} + +func NewReaderUsingReaderAt(r io.ReaderAt, offset int64) io.Reader { + return &readerAtAsReader{ + ReaderAt: r, + offset: offset, + } +} + // CheckClose calls Close on the given io.Closer. If the given *error points to // nil, it will be assigned the error returned by Close. Otherwise, any error // returned by Close will be ignored. CheckClose is usually called with defer. @@ -155,7 +195,7 @@ func NewWriterOnError(w io.Writer, notify func(error)) io.Writer { } // NewWriteCloserOnError returns an io.WriteCloser that calls the notify function -//when an unexpected (!io.EOF) error happens, after call Write function. +// when an unexpected (!io.EOF) error happens, after calling the Write function. func NewWriteCloserOnError(w io.WriteCloser, notify func(error)) io.WriteCloser { return NewWriteCloser(NewWriterOnError(w, notify), w) } diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go index bd084b2a..8090942d 100644 --- a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go +++ b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go @@ -55,7 +55,7 @@ package merkletrie // Here is a full list of all the cases that are similar and how to // merge them together into more general cases. Each general case // is labeled with an uppercase letter for further reference, and it -// is followed by the pseudocode of the checks you have to perfrom +// is followed by the pseudocode of the checks you have to perform // on both noders to see if you are in such a case, the actions to // perform (i.e. what changes to output) and how to advance the // iterators of each tree to continue the comparison process. @@ -304,13 +304,38 @@ func DiffTreeContext(ctx context.Context, fromTree, toTree noder.Noder, return nil, err } case onlyToRemains: - if err = ret.AddRecursiveInsert(to); err != nil { - return nil, err + if to.Skip() { + if err = ret.AddRecursiveDelete(to); err != nil { + return nil, err + } + } else { + if err = ret.AddRecursiveInsert(to); err != nil { + return nil, err + } } if err = ii.nextTo(); err != nil { return nil, err } case bothHaveNodes: + if from.Skip() { + if err = ret.AddRecursiveDelete(from); err != nil { + return nil, err + } + if err := ii.nextBoth(); err != nil { + return nil, err + } + break + } + if to.Skip() { + if err = ret.AddRecursiveDelete(to); err != nil { + return nil, err + } + if err := ii.nextBoth(); err != nil { + return nil, err + } + break + } + if err = diffNodes(&ret, ii); err != nil { return nil, err } diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go index 2fc3d7a6..33800627 100644 --- a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go +++ b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go @@ -29,6 +29,8 @@ type node struct { hash []byte children []noder.Noder isDir bool + mode os.FileMode + size int64 } // NewRootNode returns the root node based on a given billy.Filesystem. @@ -48,8 +50,15 @@ func NewRootNode( // difftree algorithm will detect changes in the contents of files and also in // their mode.
// +// Please note that the hash is calculated on first invocation of Hash(), +// meaning that it will not update when the underlying file changes +// between invocations. +// // The hash of a directory is always a 24-bytes slice of zero values func (n *node) Hash() []byte { + if n.hash == nil { + n.calculateHash() + } return n.hash } @@ -61,6 +70,10 @@ func (n *node) IsDir() bool { return n.isDir } +func (n *node) Skip() bool { + return false +} + func (n *node) Children() ([]noder.Noder, error) { if err := n.calculateChildren(); err != nil { return nil, err @@ -99,6 +112,10 @@ func (n *node) calculateChildren() error { continue } + if file.Mode()&os.ModeSocket != 0 { + continue + } + c, err := n.newChildNode(file) if err != nil { return err @@ -113,81 +130,74 @@ func (n *node) calculateChildren() error { func (n *node) newChildNode(file os.FileInfo) (*node, error) { path := path.Join(n.path, file.Name()) - hash, err := n.calculateHash(path, file) - if err != nil { - return nil, err - } - node := &node{ fs: n.fs, submodules: n.submodules, path: path, - hash: hash, isDir: file.IsDir(), + size: file.Size(), + mode: file.Mode(), } - if hash, isSubmodule := n.submodules[path]; isSubmodule { - node.hash = append(hash[:], filemode.Submodule.Bytes()...) + if _, isSubmodule := n.submodules[path]; isSubmodule { node.isDir = false } return node, nil } -func (n *node) calculateHash(path string, file os.FileInfo) ([]byte, error) { - if file.IsDir() { - return make([]byte, 24), nil +func (n *node) calculateHash() { + if n.isDir { + n.hash = make([]byte, 24) + return + } + mode, err := filemode.NewFromOSFileMode(n.mode) + if err != nil { + n.hash = plumbing.ZeroHash[:] + return + } + if submoduleHash, isSubmodule := n.submodules[n.path]; isSubmodule { + n.hash = append(submoduleHash[:], filemode.Submodule.Bytes()...) + return } - var hash plumbing.Hash - var err error - if file.Mode()&os.ModeSymlink != 0 { - hash, err = n.doCalculateHashForSymlink(path, file) + if n.mode&os.ModeSymlink != 0 { + hash = n.doCalculateHashForSymlink() } else { - hash, err = n.doCalculateHashForRegular(path, file) + hash = n.doCalculateHashForRegular() } - - if err != nil { - return nil, err - } - - mode, err := filemode.NewFromOSFileMode(file.Mode()) - if err != nil { - return nil, err - } - - return append(hash[:], mode.Bytes()...), nil + n.hash = append(hash[:], mode.Bytes()...) 
} -func (n *node) doCalculateHashForRegular(path string, file os.FileInfo) (plumbing.Hash, error) { - f, err := n.fs.Open(path) +func (n *node) doCalculateHashForRegular() plumbing.Hash { + f, err := n.fs.Open(n.path) if err != nil { - return plumbing.ZeroHash, err + return plumbing.ZeroHash } defer f.Close() - h := plumbing.NewHasher(plumbing.BlobObject, file.Size()) + h := plumbing.NewHasher(plumbing.BlobObject, n.size) if _, err := io.Copy(h, f); err != nil { - return plumbing.ZeroHash, err + return plumbing.ZeroHash } - return h.Sum(), nil + return h.Sum() } -func (n *node) doCalculateHashForSymlink(path string, file os.FileInfo) (plumbing.Hash, error) { - target, err := n.fs.Readlink(path) +func (n *node) doCalculateHashForSymlink() plumbing.Hash { + target, err := n.fs.Readlink(n.path) if err != nil { - return plumbing.ZeroHash, err + return plumbing.ZeroHash } - h := plumbing.NewHasher(plumbing.BlobObject, file.Size()) + h := plumbing.NewHasher(plumbing.BlobObject, n.size) if _, err := h.Write([]byte(target)); err != nil { - return plumbing.ZeroHash, err + return plumbing.ZeroHash } - return h.Sum(), nil + return h.Sum() } func (n *node) String() string { diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/index/node.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/index/node.go index d05b0c69..c1809f7e 100644 --- a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/index/node.go +++ b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/index/node.go @@ -19,6 +19,7 @@ type node struct { entry *index.Entry children []noder.Noder isDir bool + skip bool } // NewRootNode returns the root node of a computed tree from a index.Index, @@ -39,7 +40,7 @@ func NewRootNode(idx *index.Index) noder.Noder { continue } - n := &node{path: fullpath} + n := &node{path: fullpath, skip: e.SkipWorktree} if fullpath == e.Name { n.entry = e } else { @@ -58,6 +59,10 @@ func (n *node) String() string { return n.path } +func (n *node) Skip() bool { + return n.skip +} + // Hash the hash of a filesystem is a 24-byte slice, is the result of // concatenating the computed plumbing.Hash of the file as a Blob and its // plumbing.FileMode; that way the difftree algorithm will detect changes in the diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/noder.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/noder.go index d6b3de4a..6d22b8c1 100644 --- a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/noder.go +++ b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/noder.go @@ -53,6 +53,7 @@ type Noder interface { // implement NumChildren in O(1) while Children is usually more // complex. NumChildren() (int, error) + Skip() bool } // NoChildren represents the children of a noder without children. diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/path.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/path.go index 1c7ef54e..6c1d3633 100644 --- a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/path.go +++ b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/path.go @@ -15,6 +15,14 @@ import ( // not be used. type Path []Noder +func (p Path) Skip() bool { + if len(p) > 0 { + return p.Last().Skip() + } + + return false +} + // String returns the full path of the final noder as a string, using // "/" as the separator. 
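Skip is a new method on the noder.Noder interface, so any implementation outside this tree now has to provide it as well. A minimal sketch of a conforming no-op implementation (the staticNode type is illustrative, not part of this change):

    package example

    import "github.com/go-git/go-git/v5/utils/merkletrie/noder"

    // staticNode is a hypothetical leaf Noder with no skip-worktree
    // semantics, so Skip always reports false.
    type staticNode struct {
    	name string
    }

    func (n staticNode) Skip() bool                       { return false }
    func (n staticNode) Hash() []byte                     { return make([]byte, 24) }
    func (n staticNode) Name() string                     { return n.name }
    func (n staticNode) IsDir() bool                      { return false }
    func (n staticNode) Children() ([]noder.Noder, error) { return noder.NoChildren, nil }
    func (n staticNode) NumChildren() (int, error)        { return 0, nil }
    func (n staticNode) String() string                   { return n.name }

    // Compile-time check that the interface is satisfied.
    var _ noder.Noder = staticNode{}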
func (p Path) String() string { diff --git a/vendor/github.com/go-git/go-git/v5/utils/sync/BUILD.bazel b/vendor/github.com/go-git/go-git/v5/utils/sync/BUILD.bazel new file mode 100644 index 00000000..9a7b6566 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/utils/sync/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "sync", + srcs = [ + "bufio.go", + "bytes.go", + "zlib.go", + ], + importmap = "peridot.resf.org/vendor/github.com/go-git/go-git/v5/utils/sync", + importpath = "github.com/go-git/go-git/v5/utils/sync", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/go-git/go-git/v5/utils/sync/bufio.go b/vendor/github.com/go-git/go-git/v5/utils/sync/bufio.go new file mode 100644 index 00000000..5009ea80 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/utils/sync/bufio.go @@ -0,0 +1,29 @@ +package sync + +import ( + "bufio" + "io" + "sync" +) + +var bufioReader = sync.Pool{ + New: func() interface{} { + return bufio.NewReader(nil) + }, +} + +// GetBufioReader returns a *bufio.Reader that is managed by a sync.Pool. +// Returns a bufio.Reader that is resetted with reader and ready for use. +// +// After use, the *bufio.Reader should be put back into the sync.Pool +// by calling PutBufioReader. +func GetBufioReader(reader io.Reader) *bufio.Reader { + r := bufioReader.Get().(*bufio.Reader) + r.Reset(reader) + return r +} + +// PutBufioReader puts reader back into its sync.Pool. +func PutBufioReader(reader *bufio.Reader) { + bufioReader.Put(reader) +} diff --git a/vendor/github.com/go-git/go-git/v5/utils/sync/bytes.go b/vendor/github.com/go-git/go-git/v5/utils/sync/bytes.go new file mode 100644 index 00000000..dd06fc0b --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/utils/sync/bytes.go @@ -0,0 +1,51 @@ +package sync + +import ( + "bytes" + "sync" +) + +var ( + byteSlice = sync.Pool{ + New: func() interface{} { + b := make([]byte, 16*1024) + return &b + }, + } + bytesBuffer = sync.Pool{ + New: func() interface{} { + return bytes.NewBuffer(nil) + }, + } +) + +// GetByteSlice returns a *[]byte that is managed by a sync.Pool. +// The initial slice length will be 16384 (16kb). +// +// After use, the *[]byte should be put back into the sync.Pool +// by calling PutByteSlice. +func GetByteSlice() *[]byte { + buf := byteSlice.Get().(*[]byte) + return buf +} + +// PutByteSlice puts buf back into its sync.Pool. +func PutByteSlice(buf *[]byte) { + byteSlice.Put(buf) +} + +// GetBytesBuffer returns a *bytes.Buffer that is managed by a sync.Pool. +// Returns a buffer that is resetted and ready for use. +// +// After use, the *bytes.Buffer should be put back into the sync.Pool +// by calling PutBytesBuffer. +func GetBytesBuffer() *bytes.Buffer { + buf := bytesBuffer.Get().(*bytes.Buffer) + buf.Reset() + return buf +} + +// PutBytesBuffer puts buf back into its sync.Pool. 
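These Get/Put pairs are meant to bracket short-lived allocations. A minimal usage sketch, assuming only the helpers added above (the gitsync alias is just for readability):

    package main

    import (
    	"fmt"

    	gitsync "github.com/go-git/go-git/v5/utils/sync"
    )

    func main() {
    	// Borrow a pooled buffer and hand it back when done; the deferred
    	// Put is the intended pattern for all of the Get/Put pairs here.
    	buf := gitsync.GetBytesBuffer()
    	defer gitsync.PutBytesBuffer(buf)

    	buf.WriteString("pooled write")
    	fmt.Println(buf.Len())
    }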
+func PutBytesBuffer(buf *bytes.Buffer) { + bytesBuffer.Put(buf) +} diff --git a/vendor/github.com/go-git/go-git/v5/utils/sync/zlib.go b/vendor/github.com/go-git/go-git/v5/utils/sync/zlib.go new file mode 100644 index 00000000..c6138859 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/utils/sync/zlib.go @@ -0,0 +1,74 @@ +package sync + +import ( + "bytes" + "compress/zlib" + "io" + "sync" +) + +var ( + zlibInitBytes = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01} + zlibReader = sync.Pool{ + New: func() interface{} { + r, _ := zlib.NewReader(bytes.NewReader(zlibInitBytes)) + return ZLibReader{ + Reader: r.(zlibReadCloser), + } + }, + } + zlibWriter = sync.Pool{ + New: func() interface{} { + return zlib.NewWriter(nil) + }, + } +) + +type zlibReadCloser interface { + io.ReadCloser + zlib.Resetter +} + +type ZLibReader struct { + dict *[]byte + Reader zlibReadCloser +} + +// GetZlibReader returns a ZLibReader that is managed by a sync.Pool. +// Returns a ZLibReader that is resetted using a dictionary that is +// also managed by a sync.Pool. +// +// After use, the ZLibReader should be put back into the sync.Pool +// by calling PutZlibReader. +func GetZlibReader(r io.Reader) (ZLibReader, error) { + z := zlibReader.Get().(ZLibReader) + z.dict = GetByteSlice() + + err := z.Reader.Reset(r, *z.dict) + + return z, err +} + +// PutZlibReader puts z back into its sync.Pool, first closing the reader. +// The Byte slice dictionary is also put back into its sync.Pool. +func PutZlibReader(z ZLibReader) { + z.Reader.Close() + PutByteSlice(z.dict) + zlibReader.Put(z) +} + +// GetZlibWriter returns a *zlib.Writer that is managed by a sync.Pool. +// Returns a writer that is resetted with w and ready for use. +// +// After use, the *zlib.Writer should be put back into the sync.Pool +// by calling PutZlibWriter. +func GetZlibWriter(w io.Writer) *zlib.Writer { + z := zlibWriter.Get().(*zlib.Writer) + z.Reset(w) + return z +} + +// PutZlibWriter puts w back into its sync.Pool. +func PutZlibWriter(w *zlib.Writer) { + zlibWriter.Put(w) +} diff --git a/vendor/github.com/go-git/go-git/v5/utils/trace/BUILD.bazel b/vendor/github.com/go-git/go-git/v5/utils/trace/BUILD.bazel new file mode 100644 index 00000000..7ad18bf3 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/utils/trace/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "trace", + srcs = ["trace.go"], + importmap = "peridot.resf.org/vendor/github.com/go-git/go-git/v5/utils/trace", + importpath = "github.com/go-git/go-git/v5/utils/trace", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/go-git/go-git/v5/utils/trace/trace.go b/vendor/github.com/go-git/go-git/v5/utils/trace/trace.go new file mode 100644 index 00000000..3e15c5b9 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/utils/trace/trace.go @@ -0,0 +1,55 @@ +package trace + +import ( + "fmt" + "log" + "os" + "sync/atomic" +) + +var ( + // logger is the logger to use for tracing. + logger = newLogger() + + // current is the targets that are enabled for tracing. + current atomic.Int32 +) + +func newLogger() *log.Logger { + return log.New(os.Stderr, "", log.Ltime|log.Lmicroseconds|log.Lshortfile) +} + +// Target is a tracing target. +type Target int32 + +const ( + // General traces general operations. + General Target = 1 << iota + + // Packet traces git packets. + Packet +) + +// SetTarget sets the tracing targets. 
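Target is a bit mask, so multiple targets combine with bitwise OR and are tested the same way by the Print helpers that follow. A small usage sketch, assuming only the trace package added above:

    package main

    import "github.com/go-git/go-git/v5/utils/trace"

    func main() {
    	// Target values are bit flags, so they combine with OR.
    	trace.SetTarget(trace.General | trace.Packet)

    	// Only targets enabled via SetTarget produce output (see
    	// Print/Printf just below).
    	trace.General.Printf("fetching %s", "origin")
    	trace.Packet.Print("raw pkt-line received")
    }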
+func SetTarget(target Target) { + current.Store(int32(target)) +} + +// SetLogger sets the logger to use for tracing. +func SetLogger(l *log.Logger) { + logger = l +} + +// Print prints the given message only if the target is enabled. +func (t Target) Print(args ...interface{}) { + if int32(t)&current.Load() != 0 { + logger.Output(2, fmt.Sprint(args...)) // nolint: errcheck + } +} + +// Printf prints the given message only if the target is enabled. +func (t Target) Printf(format string, args ...interface{}) { + if int32(t)&current.Load() != 0 { + logger.Output(2, fmt.Sprintf(format, args...)) // nolint: errcheck + } +} diff --git a/vendor/github.com/go-git/go-git/v5/worktree.go b/vendor/github.com/go-git/go-git/v5/worktree.go index 62ad03b9..ab11d42d 100644 --- a/vendor/github.com/go-git/go-git/v5/worktree.go +++ b/vendor/github.com/go-git/go-git/v5/worktree.go @@ -5,12 +5,13 @@ import ( "errors" "fmt" "io" - stdioutil "io/ioutil" "os" "path/filepath" + "runtime" "strings" - "sync" + "github.com/go-git/go-billy/v5" + "github.com/go-git/go-billy/v5/util" "github.com/go-git/go-git/v5/config" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/filemode" @@ -20,9 +21,7 @@ import ( "github.com/go-git/go-git/v5/plumbing/storer" "github.com/go-git/go-git/v5/utils/ioutil" "github.com/go-git/go-git/v5/utils/merkletrie" - - "github.com/go-git/go-billy/v5" - "github.com/go-git/go-billy/v5/util" + "github.com/go-git/go-git/v5/utils/sync" ) var ( @@ -59,7 +58,7 @@ func (w *Worktree) Pull(o *PullOptions) error { // Pull only supports merges that can be resolved as a fast-forward. // // The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the +// operation is complete, an error is returned. The context only affects the // transport operations.
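Since PullContext now forwards RemoteURL, InsecureSkipTLS, CABundle and ProxyOptions on to fetch, a caller can bound the transport phase with a deadline in the usual way. A minimal sketch (the pull helper is illustrative):

    package main

    import (
    	"context"
    	"errors"
    	"time"

    	git "github.com/go-git/go-git/v5"
    )

    // pull is an illustrative helper, not part of the change.
    func pull(w *git.Worktree) error {
    	// Per the doc comment below, the context only affects the
    	// transport operations.
    	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    	defer cancel()

    	err := w.PullContext(ctx, &git.PullOptions{RemoteName: "origin"})
    	if errors.Is(err, git.NoErrAlreadyUpToDate) {
    		return nil // nothing to fetch or merge
    	}
    	return err
    }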
func (w *Worktree) PullContext(ctx context.Context, o *PullOptions) error { if err := o.Validate(); err != nil { @@ -72,11 +71,15 @@ func (w *Worktree) PullContext(ctx context.Context, o *PullOptions) error { } fetchHead, err := remote.fetch(ctx, &FetchOptions{ - RemoteName: o.RemoteName, - Depth: o.Depth, - Auth: o.Auth, - Progress: o.Progress, - Force: o.Force, + RemoteName: o.RemoteName, + RemoteURL: o.RemoteURL, + Depth: o.Depth, + Auth: o.Auth, + Progress: o.Progress, + Force: o.Force, + InsecureSkipTLS: o.InsecureSkipTLS, + CABundle: o.CABundle, + ProxyOptions: o.ProxyOptions, }) updated := true @@ -93,7 +96,15 @@ func (w *Worktree) PullContext(ctx context.Context, o *PullOptions) error { head, err := w.r.Head() if err == nil { - headAheadOfRef, err := isFastForward(w.r.Storer, ref.Hash(), head.Hash()) + // if we don't have a shallows list, just ignore it + shallowList, _ := w.r.Storer.Shallow() + + var earliestShallow *plumbing.Hash + if len(shallowList) > 0 { + earliestShallow = &shallowList[0] + } + + headAheadOfRef, err := isFastForward(w.r.Storer, ref.Hash(), head.Hash(), earliestShallow) if err != nil { return err } @@ -102,7 +113,7 @@ func (w *Worktree) PullContext(ctx context.Context, o *PullOptions) error { return NoErrAlreadyUpToDate } - ff, err := isFastForward(w.r.Storer, head.Hash(), ref.Hash()) + ff, err := isFastForward(w.r.Storer, head.Hash(), ref.Hash(), earliestShallow) if err != nil { return err } @@ -180,9 +191,18 @@ func (w *Worktree) Checkout(opts *CheckoutOptions) error { return err } + if len(opts.SparseCheckoutDirectories) > 0 { + return w.ResetSparsely(ro, opts.SparseCheckoutDirectories) + } + return w.Reset(ro) } + func (w *Worktree) createBranch(opts *CheckoutOptions) error { + if err := opts.Branch.Validate(); err != nil { + return err + } + _, err := w.r.Storer.Reference(opts.Branch) if err == nil { return fmt.Errorf("a branch named %q already exists", opts.Branch) @@ -207,20 +227,17 @@ func (w *Worktree) createBranch(opts *CheckoutOptions) error { } func (w *Worktree) getCommitFromCheckoutOptions(opts *CheckoutOptions) (plumbing.Hash, error) { - if !opts.Hash.IsZero() { - return opts.Hash, nil + hash := opts.Hash + if hash.IsZero() { + b, err := w.r.Reference(opts.Branch, true) + if err != nil { + return plumbing.ZeroHash, err + } + + hash = b.Hash() } - b, err := w.r.Reference(opts.Branch, true) - if err != nil { - return plumbing.ZeroHash, err - } - - if !b.Name().IsTag() { - return b.Hash(), nil - } - - o, err := w.r.Object(plumbing.AnyObject, b.Hash()) + o, err := w.r.Object(plumbing.AnyObject, hash) if err != nil { return plumbing.ZeroHash, err } @@ -228,7 +245,7 @@ func (w *Worktree) getCommitFromCheckoutOptions(opts *CheckoutOptions) (plumbing switch o := o.(type) { case *object.Tag: if o.TargetType != plumbing.CommitObject { - return plumbing.ZeroHash, fmt.Errorf("unsupported tag object target %q", o.TargetType) + return plumbing.ZeroHash, fmt.Errorf("%w: tag target %q", object.ErrUnsupportedObject, o.TargetType) } return o.Target, nil @@ -236,7 +253,7 @@ func (w *Worktree) getCommitFromCheckoutOptions(opts *CheckoutOptions) (plumbing return o.Hash, nil } - return plumbing.ZeroHash, fmt.Errorf("unsupported tag target %q", o.Type()) + return plumbing.ZeroHash, fmt.Errorf("%w: %q", object.ErrUnsupportedObject, o.Type()) } func (w *Worktree) setHEADToCommit(commit plumbing.Hash) error { @@ -260,8 +277,7 @@ func (w *Worktree) setHEADToBranch(branch plumbing.ReferenceName, commit plumbin return w.r.Storer.SetReference(head) } -// Reset the worktree 
to a specified state. -func (w *Worktree) Reset(opts *ResetOptions) error { +func (w *Worktree) ResetSparsely(opts *ResetOptions, dirs []string) error { if err := opts.Validate(w.r); err != nil { return err } @@ -285,13 +301,13 @@ func (w *Worktree) Reset(opts *ResetOptions) error { return nil } - t, err := w.getTreeFromCommitHash(opts.Commit) + t, err := w.r.getTreeFromCommitHash(opts.Commit) if err != nil { return err } if opts.Mode == MixedReset || opts.Mode == MergeReset || opts.Mode == HardReset { - if err := w.resetIndex(t); err != nil { + if err := w.resetIndex(t, dirs); err != nil { return err } } @@ -305,8 +321,17 @@ func (w *Worktree) Reset(opts *ResetOptions) error { return nil } -func (w *Worktree) resetIndex(t *object.Tree) error { +// Reset the worktree to a specified state. +func (w *Worktree) Reset(opts *ResetOptions) error { + return w.ResetSparsely(opts, nil) +} + +func (w *Worktree) resetIndex(t *object.Tree, dirs []string) error { idx, err := w.r.Storer.Index() + if len(dirs) > 0 { + idx.SkipUnless(dirs) + } + if err != nil { return err } @@ -355,7 +380,7 @@ func (w *Worktree) resetIndex(t *object.Tree) error { } func (w *Worktree) resetWorktree(t *object.Tree) error { - changes, err := w.diffStagingWithWorktree(true) + changes, err := w.diffStagingWithWorktree(true, false) if err != nil { return err } @@ -367,6 +392,9 @@ func (w *Worktree) resetWorktree(t *object.Tree) error { b := newIndexBuilder(idx) for _, ch := range changes { + if err := w.validChange(ch); err != nil { + return err + } if err := w.checkoutChange(ch, t, b); err != nil { return err } @@ -376,6 +404,108 @@ func (w *Worktree) resetWorktree(t *object.Tree) error { return w.r.Storer.SetIndex(idx) } +// worktreeDeny is a list of paths that are not allowed +// to be used when resetting the worktree. +var worktreeDeny = map[string]struct{}{ + // .git + GitDirName: {}, + + // For other historical reasons, file names that do not conform to the 8.3 + // format (up to eight characters for the basename, three for the file + // extension, certain characters not allowed such as `+`, etc) are associated + // with a so-called "short name", at least on the `C:` drive by default. + // Which means that `git~1/` is a valid way to refer to `.git/`. + "git~1": {}, +} + +// validPath checks whether paths are valid. +// The rules around invalid paths could differ from upstream based on how +// filesystems are managed within go-git, but they are largely the same. +// +// For upstream rules: +// https://github.com/git/git/blob/564d0252ca632e0264ed670534a51d18a689ef5d/read-cache.c#L946 +// https://github.com/git/git/blob/564d0252ca632e0264ed670534a51d18a689ef5d/path.c#L1383 +func validPath(paths ...string) error { + for _, p := range paths { + parts := strings.FieldsFunc(p, func(r rune) bool { return (r == '\\' || r == '/') }) + if len(parts) == 0 { + return fmt.Errorf("invalid path: %q", p) + } + + if _, denied := worktreeDeny[strings.ToLower(parts[0])]; denied { + return fmt.Errorf("invalid path prefix: %q", p) + } + + if runtime.GOOS == "windows" { + // Volume names are not supported, in both formats: \\ and :. + if vol := filepath.VolumeName(p); vol != "" { + return fmt.Errorf("invalid path: %q", p) + } + + if !windowsValidPath(parts[0]) { + return fmt.Errorf("invalid path: %q", p) + } + } + + for _, part := range parts { + if part == ".." 
{ + return fmt.Errorf("invalid path %q: cannot use '..'", p) + } + } + } + return nil +} + +// windowsPathReplacer defines the chars that need to be replaced +// as part of windowsValidPath. +var windowsPathReplacer *strings.Replacer + +func init() { + windowsPathReplacer = strings.NewReplacer(" ", "", ".", "") +} + +func windowsValidPath(part string) bool { + if len(part) > 3 && strings.EqualFold(part[:4], GitDirName) { + // For historical reasons, file names that end in spaces or periods are + // automatically trimmed. Therefore, `.git . . ./` is a valid way to refer + // to `.git/`. + if windowsPathReplacer.Replace(part[4:]) == "" { + return false + } + + // For yet other historical reasons, NTFS supports so-called "Alternate Data + // Streams", i.e. metadata associated with a given file, referred to via + // `<filename>:<stream name>:<stream type>`. There exists a default stream + // type for directories, allowing `.git/` to be accessed via + // `.git::$INDEX_ALLOCATION/`. + // + // For performance reasons, _all_ Alternate Data Streams of `.git/` are + // forbidden, not just `::$INDEX_ALLOCATION`. + if len(part) > 4 && part[4:5] == ":" { + return false + } + } + return true +} + +func (w *Worktree) validChange(ch merkletrie.Change) error { + action, err := ch.Action() + if err != nil { + return nil + } + + switch action { + case merkletrie.Delete: + return validPath(ch.From.String()) + case merkletrie.Insert: + return validPath(ch.To.String()) + case merkletrie.Modify: + return validPath(ch.From.String(), ch.To.String()) + } + + return nil +} + func (w *Worktree) checkoutChange(ch merkletrie.Change, t *object.Tree, idx *indexBuilder) error { a, err := ch.Action() if err != nil { @@ -396,7 +526,7 @@ func (w *Worktree) checkoutChange(ch merkletrie.Change, t *object.Tree, idx *ind isSubmodule = e.Mode == filemode.Submodule case merkletrie.Delete: - return rmFileAndDirIfEmpty(w.Filesystem, ch.From.String()) + return rmFileAndDirsIfEmpty(w.Filesystem, ch.From.String()) } if isSubmodule { @@ -407,7 +537,7 @@ func (w *Worktree) checkoutChange(ch merkletrie.Change, t *object.Tree, idx *ind } func (w *Worktree) containsUnstagedChanges() (bool, error) { - ch, err := w.diffStagingWithWorktree(false) + ch, err := w.diffStagingWithWorktree(false, true) if err != nil { return false, err } @@ -518,12 +648,6 @@ func (w *Worktree) checkoutChangeRegularFile(name string, return nil } -var copyBufferPool = sync.Pool{ - New: func() interface{} { - return make([]byte, 32*1024) - }, -} - func (w *Worktree) checkoutFile(f *object.File) (err error) { mode, err := f.Mode.ToOSFileMode() if err != nil { @@ -547,13 +671,18 @@ func (w *Worktree) checkoutFile(f *object.File) (err error) { } defer ioutil.CheckClose(to, &err) - buf := copyBufferPool.Get().([]byte) - _, err = io.CopyBuffer(to, from, buf) - copyBufferPool.Put(buf) + buf := sync.GetByteSlice() + _, err = io.CopyBuffer(to, from, *buf) + sync.PutByteSlice(buf) return } func (w *Worktree) checkoutFileSymlink(f *object.File) (err error) { + // https://github.com/git/git/commit/10ecfa76491e4923988337b2e2243b05376b40de + if strings.EqualFold(f.Name, gitmodulesFile) { + return ErrGitModulesSymlink + } + from, err := f.Reader() if err != nil { return @@ -561,7 +690,7 @@ func (w *Worktree) checkoutFileSymlink(f *object.File) (err error) { defer ioutil.CheckClose(from, &err) - bytes, err := stdioutil.ReadAll(from) + bytes, err := io.ReadAll(from) if err != nil { return } @@ -625,8 +754,8 @@ func (w *Worktree) addIndexFromFile(name string, h plumbing.Hash, idx *indexBuil return nil } -func (w
*Worktree) getTreeFromCommitHash(commit plumbing.Hash) (*object.Tree, error) { - c, err := w.r.CommitObject(commit) +func (r *Repository) getTreeFromCommitHash(commit plumbing.Hash) (*object.Tree, error) { + c, err := r.CommitObject(commit) if err != nil { return nil, err } @@ -710,13 +839,17 @@ func (w *Worktree) readGitmodulesFile() (*config.Modules, error) { } defer f.Close() - input, err := stdioutil.ReadAll(f) + input, err := io.ReadAll(f) if err != nil { return nil, err } m := config.NewModules() - return m, m.Unmarshal(input) + if err := m.Unmarshal(input); err != nil { + return m, err + } + + return m, nil } // Clean the worktree by removing untracked files. @@ -765,9 +898,11 @@ func (w *Worktree) doClean(status Status, opts *CleanOptions, dir string, files } } - if opts.Dir { - return doCleanDirectories(w.Filesystem, dir) + if opts.Dir && dir != "" { + _, err := removeDirIfEmpty(w.Filesystem, dir) + return err } + return nil } @@ -788,9 +923,9 @@ func (gr GrepResult) String() string { return fmt.Sprintf("%s:%s:%d:%s", gr.TreeName, gr.FileName, gr.LineNumber, gr.Content) } -// Grep performs grep on a worktree. -func (w *Worktree) Grep(opts *GrepOptions) ([]GrepResult, error) { - if err := opts.Validate(w); err != nil { +// Grep performs grep on a repository. +func (r *Repository) Grep(opts *GrepOptions) ([]GrepResult, error) { + if err := opts.validate(r); err != nil { return nil, err } @@ -800,7 +935,7 @@ func (w *Worktree) Grep(opts *GrepOptions) ([]GrepResult, error) { var treeName string if opts.ReferenceName != "" { - ref, err := w.r.Reference(opts.ReferenceName, true) + ref, err := r.Reference(opts.ReferenceName, true) if err != nil { return nil, err } @@ -813,7 +948,7 @@ func (w *Worktree) Grep(opts *GrepOptions) ([]GrepResult, error) { // Obtain a tree from the commit hash and get a tracked files iterator from // the tree. - tree, err := w.getTreeFromCommitHash(commitHash) + tree, err := r.getTreeFromCommitHash(commitHash) if err != nil { return nil, err } @@ -822,6 +957,11 @@ func (w *Worktree) Grep(opts *GrepOptions) ([]GrepResult, error) { return findMatchInFiles(fileiter, treeName, opts) } +// Grep performs grep on a worktree. +func (w *Worktree) Grep(opts *GrepOptions) ([]GrepResult, error) { + return w.r.Grep(opts) +} + // findMatchInFiles takes a FileIter, worktree name and GrepOptions, and // returns a slice of GrepResult containing the result of regex pattern matching // in content of all the files. 
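With this refactor Grep is implemented on Repository and Worktree.Grep simply delegates to it, so grepping no longer requires a worktree. A minimal sketch (grepTodos is illustrative):

    package main

    import (
    	"fmt"
    	"regexp"

    	git "github.com/go-git/go-git/v5"
    )

    // grepTodos is illustrative; GrepOptions.Patterns takes compiled regexps.
    func grepTodos(r *git.Repository) error {
    	results, err := r.Grep(&git.GrepOptions{
    		Patterns: []*regexp.Regexp{regexp.MustCompile(`TODO`)},
    	})
    	if err != nil {
    		return err
    	}
    	for _, res := range results {
    		fmt.Println(res) // TreeName:FileName:LineNumber:Content
    	}
    	return nil
    }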
@@ -908,25 +1048,52 @@ func findMatchInFile(file *object.File, treeName string, opts *GrepOptions) ([]G return grepResults, nil } -func rmFileAndDirIfEmpty(fs billy.Filesystem, name string) error { +// will walk up the directory tree removing all encountered empty +// directories, not just the one containing this file +func rmFileAndDirsIfEmpty(fs billy.Filesystem, name string) error { if err := util.RemoveAll(fs, name); err != nil { return err } dir := filepath.Dir(name) - return doCleanDirectories(fs, dir) + for { + removed, err := removeDirIfEmpty(fs, dir) + if err != nil { + return err + } + + if !removed { + // directory was not empty and not removed, + // stop checking parents + break + } + + // move to parent directory + dir = filepath.Dir(dir) + } + + return nil } -// doCleanDirectories removes empty subdirs (without files) -func doCleanDirectories(fs billy.Filesystem, dir string) error { +// removeDirIfEmpty will remove the supplied directory `dir` if +// `dir` is empty +// returns true if the directory was removed +func removeDirIfEmpty(fs billy.Filesystem, dir string) (bool, error) { files, err := fs.ReadDir(dir) if err != nil { - return err + return false, err } - if len(files) == 0 { - return fs.Remove(dir) + + if len(files) > 0 { + return false, nil } - return nil + + err = fs.Remove(dir) + if err != nil { + return false, err + } + + return true, nil } type indexBuilder struct { diff --git a/vendor/github.com/go-git/go-git/v5/worktree_bsd.go b/vendor/github.com/go-git/go-git/v5/worktree_bsd.go index d4ea3275..d4682eb8 100644 --- a/vendor/github.com/go-git/go-git/v5/worktree_bsd.go +++ b/vendor/github.com/go-git/go-git/v5/worktree_bsd.go @@ -12,7 +12,7 @@ import ( func init() { fillSystemInfo = func(e *index.Entry, sys interface{}) { if os, ok := sys.(*syscall.Stat_t); ok { - e.CreatedAt = time.Unix(int64(os.Atimespec.Sec), int64(os.Atimespec.Nsec)) + e.CreatedAt = time.Unix(os.Atimespec.Unix()) e.Dev = uint32(os.Dev) e.Inode = uint32(os.Ino) e.GID = os.Gid diff --git a/vendor/github.com/go-git/go-git/v5/worktree_commit.go b/vendor/github.com/go-git/go-git/v5/worktree_commit.go index a9d0e040..f62054bc 100644 --- a/vendor/github.com/go-git/go-git/v5/worktree_commit.go +++ b/vendor/github.com/go-git/go-git/v5/worktree_commit.go @@ -2,6 +2,8 @@ package git import ( "bytes" + "errors" + "io" "path" "sort" "strings" @@ -12,8 +14,15 @@ import ( "github.com/go-git/go-git/v5/plumbing/object" "github.com/go-git/go-git/v5/storage" + "github.com/ProtonMail/go-crypto/openpgp" + "github.com/ProtonMail/go-crypto/openpgp/packet" "github.com/go-git/go-billy/v5" - "golang.org/x/crypto/openpgp" +) + +var ( + // ErrEmptyCommit occurs when a commit is attempted using a clean + // working tree, with no changes to be committed. 
+ ErrEmptyCommit = errors.New("cannot create empty commit: clean working tree") ) // Commit stores the current contents of the index in a new commit along with @@ -29,6 +38,24 @@ func (w *Worktree) Commit(msg string, opts *CommitOptions) (plumbing.Hash, error } } + var treeHash plumbing.Hash + + if opts.Amend { + head, err := w.r.Head() + if err != nil { + return plumbing.ZeroHash, err + } + headCommit, err := w.r.CommitObject(head.Hash()) + if err != nil { + return plumbing.ZeroHash, err + } + + opts.Parents = nil + if len(headCommit.ParentHashes) != 0 { + opts.Parents = []plumbing.Hash{headCommit.ParentHashes[0]} + } + } + idx, err := w.r.Storer.Index() if err != nil { return plumbing.ZeroHash, err @@ -39,12 +66,12 @@ func (w *Worktree) Commit(msg string, opts *CommitOptions) (plumbing.Hash, error s: w.r.Storer, } - tree, err := h.BuildTree(idx) + treeHash, err = h.BuildTree(idx, opts) if err != nil { return plumbing.ZeroHash, err } - commit, err := w.buildCommitObject(msg, opts, tree) + commit, err := w.buildCommitObject(msg, opts, treeHash) if err != nil { return plumbing.ZeroHash, err } @@ -101,12 +128,17 @@ func (w *Worktree) buildCommitObject(msg string, opts *CommitOptions, tree plumb ParentHashes: opts.Parents, } - if opts.SignKey != nil { - sig, err := w.buildCommitSignature(commit, opts.SignKey) + // Convert SignKey into a Signer if set. Existing Signer should take priority. + signer := opts.Signer + if signer == nil && opts.SignKey != nil { + signer = &gpgSigner{key: opts.SignKey} + } + if signer != nil { + sig, err := signObject(signer, commit) if err != nil { return plumbing.ZeroHash, err } - commit.PGPSignature = sig + commit.PGPSignature = string(sig) } obj := w.r.Storer.NewEncodedObject() @@ -116,20 +148,17 @@ func (w *Worktree) buildCommitObject(msg string, opts *CommitOptions, tree plumb return w.r.Storer.SetEncodedObject(obj) } -func (w *Worktree) buildCommitSignature(commit *object.Commit, signKey *openpgp.Entity) (string, error) { - encoded := &plumbing.MemoryObject{} - if err := commit.Encode(encoded); err != nil { - return "", err - } - r, err := encoded.Reader() - if err != nil { - return "", err - } +type gpgSigner struct { + key *openpgp.Entity + cfg *packet.Config +} + +func (s *gpgSigner) Sign(message io.Reader) ([]byte, error) { var b bytes.Buffer - if err := openpgp.ArmoredDetachSign(&b, signKey, r, nil); err != nil { - return "", err + if err := openpgp.ArmoredDetachSign(&b, s.key, message, s.cfg); err != nil { + return nil, err } - return b.String(), nil + return b.Bytes(), nil } // buildTreeHelper converts a given index.Index file into multiple git objects @@ -145,7 +174,11 @@ type buildTreeHelper struct { // BuildTree builds the tree objects and push its to the storer, the hash // of the root tree is returned. 
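The commit path now accepts any Signer, with SignKey wrapped into the gpgSigner adapter above only as a fallback. A sketch of plugging in a custom signer (fakeSigner and its signature bytes are hypothetical, not part of this change):

    package main

    import (
    	"io"
    	"time"

    	git "github.com/go-git/go-git/v5"
    	"github.com/go-git/go-git/v5/plumbing/object"
    )

    // fakeSigner stands in for any detached-signature backend (SSH, KMS,
    // a hardware token); the returned bytes here are obviously not real.
    type fakeSigner struct{}

    func (fakeSigner) Sign(message io.Reader) ([]byte, error) {
    	// message is the encoded commit to be signed.
    	if _, err := io.ReadAll(message); err != nil {
    		return nil, err
    	}
    	return []byte("fake detached signature"), nil
    }

    // commitSigned is illustrative. When both Signer and SignKey are set,
    // Signer wins, matching the priority rule above.
    func commitSigned(w *git.Worktree) error {
    	_, err := w.Commit("signed commit", &git.CommitOptions{
    		Author: &object.Signature{Name: "dev", Email: "dev@example.com", When: time.Now()},
    		Signer: fakeSigner{},
    	})
    	return err
    }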
-func (h *buildTreeHelper) BuildTree(idx *index.Index) (plumbing.Hash, error) { +func (h *buildTreeHelper) BuildTree(idx *index.Index, opts *CommitOptions) (plumbing.Hash, error) { + if len(idx.Entries) == 0 && (opts == nil || !opts.AllowEmptyCommits) { + return plumbing.ZeroHash, ErrEmptyCommit + } + const rootNode = "" h.trees = map[string]*object.Tree{rootNode: {}} h.entries = map[string]*object.TreeEntry{} @@ -230,5 +263,9 @@ func (h *buildTreeHelper) copyTreeToStorageRecursive(parent string, t *object.Tr return plumbing.ZeroHash, err } + hash := o.Hash() + if h.s.HasEncodedObject(hash) == nil { + return hash, nil + } return h.s.SetEncodedObject(o) } diff --git a/vendor/github.com/go-git/go-git/v5/worktree_js.go b/vendor/github.com/go-git/go-git/v5/worktree_js.go new file mode 100644 index 00000000..7267d055 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/worktree_js.go @@ -0,0 +1,26 @@ +// +build js + +package git + +import ( + "syscall" + "time" + + "github.com/go-git/go-git/v5/plumbing/format/index" +) + +func init() { + fillSystemInfo = func(e *index.Entry, sys interface{}) { + if os, ok := sys.(*syscall.Stat_t); ok { + e.CreatedAt = time.Unix(int64(os.Ctime), int64(os.CtimeNsec)) + e.Dev = uint32(os.Dev) + e.Inode = uint32(os.Ino) + e.GID = os.Gid + e.UID = os.Uid + } + } +} + +func isSymlinkWindowsNonAdmin(err error) bool { + return false +} diff --git a/vendor/github.com/go-git/go-git/v5/worktree_linux.go b/vendor/github.com/go-git/go-git/v5/worktree_linux.go index cf0db252..6fcace2f 100644 --- a/vendor/github.com/go-git/go-git/v5/worktree_linux.go +++ b/vendor/github.com/go-git/go-git/v5/worktree_linux.go @@ -12,7 +12,7 @@ import ( func init() { fillSystemInfo = func(e *index.Entry, sys interface{}) { if os, ok := sys.(*syscall.Stat_t); ok { - e.CreatedAt = time.Unix(int64(os.Ctim.Sec), int64(os.Ctim.Nsec)) + e.CreatedAt = time.Unix(os.Ctim.Unix()) e.Dev = uint32(os.Dev) e.Inode = uint32(os.Ino) e.GID = os.Gid diff --git a/vendor/github.com/go-git/go-git/v5/worktree_status.go b/vendor/github.com/go-git/go-git/v5/worktree_status.go index c639f132..dd9b2439 100644 --- a/vendor/github.com/go-git/go-git/v5/worktree_status.go +++ b/vendor/github.com/go-git/go-git/v5/worktree_status.go @@ -74,7 +74,7 @@ func (w *Worktree) status(commit plumbing.Hash) (Status, error) { } } - right, err := w.diffStagingWithWorktree(false) + right, err := w.diffStagingWithWorktree(false, true) if err != nil { return nil, err } @@ -113,7 +113,7 @@ func nameFromAction(ch *merkletrie.Change) string { return name } -func (w *Worktree) diffStagingWithWorktree(reverse bool) (merkletrie.Changes, error) { +func (w *Worktree) diffStagingWithWorktree(reverse, excludeIgnoredChanges bool) (merkletrie.Changes, error) { idx, err := w.r.Storer.Index() if err != nil { return nil, err @@ -138,7 +138,10 @@ func (w *Worktree) diffStagingWithWorktree(reverse bool) (merkletrie.Changes, er return nil, err } - return w.excludeIgnoredChanges(c), nil + if excludeIgnoredChanges { + return w.excludeIgnoredChanges(c), nil + } + return c, nil } func (w *Worktree) excludeIgnoredChanges(changes merkletrie.Changes) merkletrie.Changes { @@ -169,7 +172,9 @@ func (w *Worktree) excludeIgnoredChanges(changes merkletrie.Changes) merkletrie. 
if len(path) != 0 { isDir := (len(ch.To) > 0 && ch.To.IsDir()) || (len(ch.From) > 0 && ch.From.IsDir()) if m.Match(path, isDir) { - continue + if len(ch.From) == 0 { + continue + } } } res = append(res, ch) @@ -266,14 +271,10 @@ func diffTreeIsEquals(a, b noder.Hasher) bool { // no error is returned. When path is a file, the blob.Hash is returned. func (w *Worktree) Add(path string) (plumbing.Hash, error) { // TODO(mcuadros): deprecate in favor of AddWithOption in v6. - return w.doAdd(path, make([]gitignore.Pattern, 0)) + return w.doAdd(path, make([]gitignore.Pattern, 0), false) } func (w *Worktree) doAddDirectory(idx *index.Index, s Status, directory string, ignorePattern []gitignore.Pattern) (added bool, err error) { - files, err := w.Filesystem.ReadDir(directory) - if err != nil { - return false, err - } if len(ignorePattern) > 0 { m := gitignore.NewMatcher(ignorePattern) matchPath := strings.Split(directory, string(os.PathSeparator)) @@ -283,32 +284,29 @@ func (w *Worktree) doAddDirectory(idx *index.Index, s Status, directory string, } } - for _, file := range files { - name := path.Join(directory, file.Name()) + directory = filepath.ToSlash(filepath.Clean(directory)) - var a bool - if file.IsDir() { - if file.Name() == GitDirName { - // ignore special git directory - continue - } - a, err = w.doAddDirectory(idx, s, name, ignorePattern) - } else { - a, _, err = w.doAddFile(idx, s, name, ignorePattern) + for name := range s { + if !isPathInDirectory(name, directory) { + continue } + var a bool + a, _, err = w.doAddFile(idx, s, name, ignorePattern) if err != nil { return } - if !added && a { - added = true - } + added = added || a } return } +func isPathInDirectory(path, directory string) bool { + return directory == "." || strings.HasPrefix(path, directory+"/") +} + // AddWithOptions file contents to the index, updates the index using the // current content found in the working tree, to prepare the content staged for // the next commit. @@ -323,7 +321,7 @@ func (w *Worktree) AddWithOptions(opts *AddOptions) error { } if opts.All { - _, err := w.doAdd(".", w.Excludes) + _, err := w.doAdd(".", w.Excludes, false) return err } @@ -331,16 +329,11 @@ func (w *Worktree) AddWithOptions(opts *AddOptions) error { return w.AddGlob(opts.Glob) } - _, err := w.Add(opts.Path) + _, err := w.doAdd(opts.Path, make([]gitignore.Pattern, 0), opts.SkipStatus) return err } -func (w *Worktree) doAdd(path string, ignorePattern []gitignore.Pattern) (plumbing.Hash, error) { - s, err := w.Status() - if err != nil { - return plumbing.ZeroHash, err - } - +func (w *Worktree) doAdd(path string, ignorePattern []gitignore.Pattern, skipStatus bool) (plumbing.Hash, error) { idx, err := w.r.Storer.Index() if err != nil { return plumbing.ZeroHash, err @@ -350,6 +343,17 @@ func (w *Worktree) doAdd(path string, ignorePattern []gitignore.Pattern) (plumbi var added bool fi, err := w.Filesystem.Lstat(path) + + // status is required for doAddDirectory + var s Status + var err2 error + if !skipStatus || fi == nil || fi.IsDir() { + s, err2 = w.Status() + if err2 != nil { + return plumbing.ZeroHash, err2 + } + } + if err != nil || !fi.IsDir() { added, h, err = w.doAddFile(idx, s, path, ignorePattern) } else { @@ -423,8 +427,9 @@ func (w *Worktree) AddGlob(pattern string) error { // doAddFile create a new blob from path and update the index, added is true if // the file added is different from the index. 
+// if s status is nil will skip the status check and update the index anyway func (w *Worktree) doAddFile(idx *index.Index, s Status, path string, ignorePattern []gitignore.Pattern) (added bool, h plumbing.Hash, err error) { - if s.File(path).Worktree == Unmodified { + if s != nil && s.File(path).Worktree == Unmodified { return false, h, nil } if len(ignorePattern) > 0 { diff --git a/vendor/github.com/go-git/go-git/v5/worktree_unix_other.go b/vendor/github.com/go-git/go-git/v5/worktree_unix_other.go index f45966be..5b16e70b 100644 --- a/vendor/github.com/go-git/go-git/v5/worktree_unix_other.go +++ b/vendor/github.com/go-git/go-git/v5/worktree_unix_other.go @@ -12,7 +12,7 @@ import ( func init() { fillSystemInfo = func(e *index.Entry, sys interface{}) { if os, ok := sys.(*syscall.Stat_t); ok { - e.CreatedAt = time.Unix(int64(os.Atim.Sec), int64(os.Atim.Nsec)) + e.CreatedAt = time.Unix(os.Atim.Unix()) e.Dev = uint32(os.Dev) e.Inode = uint32(os.Ino) e.GID = os.Gid diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index 8969526a..7c7f0c69 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -1,6 +1,7 @@ # A minimal logging API for Go [![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-logr/logr)](https://goreportcard.com/report/github.com/go-logr/logr) [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr) logr offers an(other) opinion on how Go programs and libraries can do logging diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go index fb2f866f..30568e76 100644 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -236,15 +236,14 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { // implementation. It should be constructed with NewFormatter. Some of // its methods directly implement logr.LogSink. type Formatter struct { - outputFormat outputFormat - prefix string - values []any - valuesStr string - parentValuesStr string - depth int - opts *Options - group string // for slog groups - groupDepth int + outputFormat outputFormat + prefix string + values []any + valuesStr string + depth int + opts *Options + groupName string // for slog groups + groups []groupDef } // outputFormat indicates which outputFormat to use. @@ -257,6 +256,13 @@ const ( outputJSON ) +// groupDef represents a saved group. The values may be empty, but we don't +// know if we need to render the group until the final record is rendered. +type groupDef struct { + name string + values string +} + // PseudoStruct is a list of key-value pairs that gets logged as a struct. type PseudoStruct []any @@ -264,76 +270,102 @@ type PseudoStruct []any func (f Formatter) render(builtins, args []any) string { // Empirically bytes.Buffer is faster than strings.Builder for this. 
buf := bytes.NewBuffer(make([]byte, 0, 1024)) + if f.outputFormat == outputJSON { - buf.WriteByte('{') // for the whole line + buf.WriteByte('{') // for the whole record } + // Render builtins vals := builtins if hook := f.opts.RenderBuiltinsHook; hook != nil { vals = hook(f.sanitize(vals)) } - f.flatten(buf, vals, false, false) // keys are ours, no need to escape + f.flatten(buf, vals, false) // keys are ours, no need to escape continuing := len(builtins) > 0 - if f.parentValuesStr != "" { + // Turn the inner-most group into a string + argsStr := func() string { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + vals = args + if hook := f.opts.RenderArgsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, true) // escape user-provided keys + + return buf.String() + }() + + // Render the stack of groups from the inside out. + bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr) + for i := len(f.groups) - 1; i >= 0; i-- { + grp := &f.groups[i] + if grp.values == "" && bodyStr == "" { + // no contents, so we must elide the whole group + continue + } + bodyStr = f.renderGroup(grp.name, grp.values, bodyStr) + } + + if bodyStr != "" { if continuing { buf.WriteByte(f.comma()) } - buf.WriteString(f.parentValuesStr) - continuing = true - } - - groupDepth := f.groupDepth - if f.group != "" { - if f.valuesStr != "" || len(args) != 0 { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys - buf.WriteByte(f.colon()) - buf.WriteByte('{') // for the group - continuing = false - } else { - // The group was empty - groupDepth-- - } - } - - if f.valuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.valuesStr) - continuing = true - } - - vals = args - if hook := f.opts.RenderArgsHook; hook != nil { - vals = hook(f.sanitize(vals)) - } - f.flatten(buf, vals, continuing, true) // escape user-provided keys - - for i := 0; i < groupDepth; i++ { - buf.WriteByte('}') // for the groups + buf.WriteString(bodyStr) } if f.outputFormat == outputJSON { - buf.WriteByte('}') // for the whole line + buf.WriteByte('}') // for the whole record } return buf.String() } -// flatten renders a list of key-value pairs into a buffer. If continuing is -// true, it assumes that the buffer has previous values and will emit a -// separator (which depends on the output format) before the first pair it -// writes. If escapeKeys is true, the keys are assumed to have -// non-JSON-compatible characters in them and must be evaluated for escapes. +// renderGroup returns a string representation of the named group with rendered +// values and args. If the name is empty, this will return the values and args, +// joined. If the name is not empty, this will return a single key-value pair, +// where the value is a grouping of the values and args. If the values and +// args are both empty, this will return an empty string, even if the name was +// specified. 
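The saved-group rewrite above mainly changes how slog groups are rendered: a group only materializes in the output when it has contents, and nested groups are closed from the inside out by renderGroup below. A rough sketch of the observable behavior (output shape is approximate):

    package main

    import (
    	"fmt"
    	"log/slog"

    	"github.com/go-logr/logr"
    	"github.com/go-logr/logr/funcr"
    )

    func main() {
    	// funcr renders each record as a single JSON object; groups created
    	// via slog nest inside it, and empty groups are elided entirely.
    	base := funcr.NewJSON(func(obj string) { fmt.Println(obj) }, funcr.Options{})

    	slogger := slog.New(logr.ToSlogHandler(base))
    	slogger.WithGroup("req").Info("done", "status", 200)
    	// prints roughly: {"logger":"","level":0,"msg":"done","req":{"status":200}}
    }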
+func (f Formatter) renderGroup(name string, values string, args string) string { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + needClosingBrace := false + if name != "" && (values != "" || args != "") { + buf.WriteString(f.quoted(name, true)) // escape user-provided keys + buf.WriteByte(f.colon()) + buf.WriteByte('{') + needClosingBrace = true + } + + continuing := false + if values != "" { + buf.WriteString(values) + continuing = true + } + + if args != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(args) + } + + if needClosingBrace { + buf.WriteByte('}') + } + + return buf.String() +} + +// flatten renders a list of key-value pairs into a buffer. If escapeKeys is +// true, the keys are assumed to have non-JSON-compatible characters in them +// and must be evaluated for escapes. // // This function returns a potentially modified version of kvList, which // ensures that there is a value for every key (adding a value if needed) and // that each key is a string (substituting a key if needed). -func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any { +func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any { // This logic overlaps with sanitize() but saves one type-cast per key, // which can be measurable. if len(kvList)%2 != 0 { @@ -354,7 +386,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc } v := kvList[i+1] - if i > 0 || continuing { + if i > 0 { if f.outputFormat == outputJSON { buf.WriteByte(f.comma()) } else { @@ -766,46 +798,17 @@ func (f Formatter) sanitize(kvList []any) []any { // startGroup opens a new group scope (basically a sub-struct), which locks all // the current saved values and starts them anew. This is needed to satisfy // slog. -func (f *Formatter) startGroup(group string) { +func (f *Formatter) startGroup(name string) { // Unnamed groups are just inlined. - if group == "" { + if name == "" { return } - // Any saved values can no longer be changed. - buf := bytes.NewBuffer(make([]byte, 0, 1024)) - continuing := false - - if f.parentValuesStr != "" { - buf.WriteString(f.parentValuesStr) - continuing = true - } - - if f.group != "" && f.valuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys - buf.WriteByte(f.colon()) - buf.WriteByte('{') // for the group - continuing = false - } - - if f.valuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.valuesStr) - } - - // NOTE: We don't close the scope here - that's done later, when a log line - // is actually rendered (because we have N scopes to close). - - f.parentValuesStr = buf.String() + n := len(f.groups) + f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr}) // Start collecting new values. - f.group = group - f.groupDepth++ + f.groupName = name f.valuesStr = "" f.values = nil } @@ -900,7 +903,7 @@ func (f *Formatter) AddValues(kvList []any) { // Pre-render values, so we don't have to do it on each Info/Error call. 
buf := bytes.NewBuffer(make([]byte, 0, 1024)) - f.flatten(buf, vals, false, true) // escape user-provided keys + f.flatten(buf, vals, true) // escape user-provided keys f.valuesStr = buf.String() } diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index 85f9f573..fdff3fdb 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -127,9 +127,10 @@ func Is(any *anypb.Any, m proto.Message) bool { // The allocated message is stored in the embedded proto.Message. // // Example: -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) // // Deprecated: Use the any.UnmarshalNew method instead to unmarshal // the any message contents into a new instance of the underlying message. diff --git a/vendor/github.com/google/gnostic-models/compiler/BUILD.bazel b/vendor/github.com/google/gnostic-models/compiler/BUILD.bazel index 1db63bda..66b5f80f 100644 --- a/vendor/github.com/google/gnostic-models/compiler/BUILD.bazel +++ b/vendor/github.com/google/gnostic-models/compiler/BUILD.bazel @@ -17,7 +17,7 @@ go_library( "//vendor/github.com/google/gnostic-models/extensions", "//vendor/github.com/google/gnostic-models/jsonschema", "//vendor/gopkg.in/yaml.v3:yaml_v3", - "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//proto", "@com_github_golang_protobuf//ptypes/any", ], ) diff --git a/vendor/github.com/google/gnostic-models/extensions/BUILD.bazel b/vendor/github.com/google/gnostic-models/extensions/BUILD.bazel index 840bf871..3510dacf 100644 --- a/vendor/github.com/google/gnostic-models/extensions/BUILD.bazel +++ b/vendor/github.com/google/gnostic-models/extensions/BUILD.bazel @@ -10,8 +10,8 @@ go_library( importpath = "github.com/google/gnostic-models/extensions", visibility = ["//visibility:public"], deps = [ - "@com_github_golang_protobuf//proto:go_default_library", - "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@com_github_golang_protobuf//proto", + "@com_github_golang_protobuf//ptypes", "@org_golang_google_protobuf//reflect/protoreflect", "@org_golang_google_protobuf//runtime/protoimpl", "@org_golang_google_protobuf//types/known/anypb", diff --git a/vendor/github.com/google/s2a-go/BUILD.bazel b/vendor/github.com/google/s2a-go/BUILD.bazel index 7fe6b60f..cfd05138 100644 --- a/vendor/github.com/google/s2a-go/BUILD.bazel +++ b/vendor/github.com/google/s2a-go/BUILD.bazel @@ -20,7 +20,7 @@ go_library( "//vendor/github.com/google/s2a-go/internal/v2:internal", "//vendor/github.com/google/s2a-go/retry", "//vendor/github.com/google/s2a-go/stream", - "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//proto", "@org_golang_google_grpc//credentials", "@org_golang_google_grpc//grpclog", "@org_golang_google_grpc//peer", diff --git a/vendor/github.com/google/s2a-go/internal/handshaker/BUILD.bazel b/vendor/github.com/google/s2a-go/internal/handshaker/BUILD.bazel index fc1697a2..8b8a570e 100644 --- a/vendor/github.com/google/s2a-go/internal/handshaker/BUILD.bazel +++ b/vendor/github.com/google/s2a-go/internal/handshaker/BUILD.bazel @@ -12,7 +12,7 @@ go_library( "//vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto", "//vendor/github.com/google/s2a-go/internal/record", 
"//vendor/github.com/google/s2a-go/internal/tokenmanager", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//credentials", "@org_golang_google_grpc//grpclog", diff --git a/vendor/github.com/google/s2a-go/internal/handshaker/service/BUILD.bazel b/vendor/github.com/google/s2a-go/internal/handshaker/service/BUILD.bazel index eb241e21..9e16378a 100644 --- a/vendor/github.com/google/s2a-go/internal/handshaker/service/BUILD.bazel +++ b/vendor/github.com/google/s2a-go/internal/handshaker/service/BUILD.bazel @@ -7,7 +7,7 @@ go_library( importpath = "github.com/google/s2a-go/internal/handshaker/service", visibility = ["//vendor/github.com/google/s2a-go:__subpackages__"], deps = [ - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//credentials", "@org_golang_google_grpc//credentials/insecure", ], diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/BUILD.bazel b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/BUILD.bazel index 558f86ab..5f003583 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/BUILD.bazel +++ b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/BUILD.bazel @@ -11,7 +11,7 @@ go_library( visibility = ["//vendor/github.com/google/s2a-go:__subpackages__"], deps = [ "//vendor/github.com/google/s2a-go/internal/proto/common_go_proto", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//status", "@org_golang_google_protobuf//reflect/protoreflect", diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/BUILD.bazel b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/BUILD.bazel index f62fb4eb..10289ca1 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/BUILD.bazel +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/BUILD.bazel @@ -13,7 +13,7 @@ go_library( "//vendor/github.com/google/s2a-go/internal/proto/common_go_proto", "//vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto", "//vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//status", "@org_golang_google_protobuf//reflect/protoreflect", diff --git a/vendor/github.com/google/s2a-go/internal/v2/BUILD.bazel b/vendor/github.com/google/s2a-go/internal/v2/BUILD.bazel index 288b3030..e7bf0f83 100644 --- a/vendor/github.com/google/s2a-go/internal/v2/BUILD.bazel +++ b/vendor/github.com/google/s2a-go/internal/v2/BUILD.bazel @@ -15,8 +15,8 @@ go_library( "//vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore", "//vendor/github.com/google/s2a-go/retry", "//vendor/github.com/google/s2a-go/stream", - "@com_github_golang_protobuf//proto:go_default_library", - "@org_golang_google_grpc//:go_default_library", + "@com_github_golang_protobuf//proto", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//credentials", "@org_golang_google_grpc//grpclog", ], diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index 34107104..433693a6 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ 
b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.12.1" + "v2": "2.12.5" } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index e16ab603..b64522df 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,33 @@ # Changelog +## [2.12.5](https://github.com/googleapis/gax-go/compare/v2.12.4...v2.12.5) (2024-06-18) + + +### Bug Fixes + +* **v2/apierror:** fix (*APIError).Error() for unwrapped Status ([#351](https://github.com/googleapis/gax-go/issues/351)) ([22c16e7](https://github.com/googleapis/gax-go/commit/22c16e7bff5402bdc4c25063771cdd01c650b500)), refs [#350](https://github.com/googleapis/gax-go/issues/350) + +## [2.12.4](https://github.com/googleapis/gax-go/compare/v2.12.3...v2.12.4) (2024-05-03) + + +### Bug Fixes + +* provide unmarshal options for streams ([#343](https://github.com/googleapis/gax-go/issues/343)) ([ddf9a90](https://github.com/googleapis/gax-go/commit/ddf9a90bf180295d49875e15cb80b2136a49dbaf)) + +## [2.12.3](https://github.com/googleapis/gax-go/compare/v2.12.2...v2.12.3) (2024-03-14) + + +### Bug Fixes + +* bump protobuf dep to v1.33 ([#333](https://github.com/googleapis/gax-go/issues/333)) ([2892b22](https://github.com/googleapis/gax-go/commit/2892b22c1ae8a70dec3448d82e634643fe6c1be2)) + +## [2.12.2](https://github.com/googleapis/gax-go/compare/v2.12.1...v2.12.2) (2024-02-23) + + +### Bug Fixes + +* **v2/callctx:** fix SetHeader race by cloning header map ([#326](https://github.com/googleapis/gax-go/issues/326)) ([534311f](https://github.com/googleapis/gax-go/commit/534311f0f163d101f30657736c0e6f860e9c39dc)) + ## [2.12.1](https://github.com/googleapis/gax-go/compare/v2.12.0...v2.12.1) (2024-02-13) diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go index d785a065..7de60773 100644 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go @@ -206,8 +206,10 @@ func (a *APIError) Error() string { // Truncate the googleapi.Error message because it dumps the Details in // an ugly way. msg = fmt.Sprintf("googleapi: Error %d: %s", a.httpErr.Code, a.httpErr.Message) - } else if a.status != nil { + } else if a.status != nil && a.err != nil { msg = a.err.Error() + } else if a.status != nil { + msg = a.status.Message() } return strings.TrimSpace(fmt.Sprintf("%s\n%s", msg, a.details)) } diff --git a/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go b/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go index 9aab3d91..f5af5c99 100644 --- a/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go +++ b/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go @@ -74,9 +74,27 @@ func SetHeaders(ctx context.Context, keyvals ...string) context.Context { h, ok := ctx.Value(headerKey).(map[string][]string) if !ok { h = make(map[string][]string) + } else { + h = cloneHeaders(h) } + for i := 0; i < len(keyvals); i = i + 2 { h[keyvals[i]] = append(h[keyvals[i]], keyvals[i+1]) } return context.WithValue(ctx, headerKey, h) } + +// cloneHeaders makes a new key-value map while reusing the value slices. +// As such, new values should be appended to the value slice, and modifying +// indexed values is not thread safe. +// +// TODO: Replace this with maps.Clone when Go 1.21 is the minimum version. 
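Because SetHeaders now clones the map before appending (see cloneHeaders just below), contexts derived from a shared parent no longer mutate each other's headers. A minimal sketch of the fixed behavior:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/googleapis/gax-go/v2/callctx"
    )

    func main() {
    	base := callctx.SetHeaders(context.Background(), "x-goog-request-params", "a=1")

    	// Deriving a child no longer mutates base's header map.
    	child := callctx.SetHeaders(base, "x-extra", "yes")

    	fmt.Println(callctx.HeadersFromContext(base))  // only x-goog-request-params
    	fmt.Println(callctx.HeadersFromContext(child)) // both headers
    }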
+func cloneHeaders(h map[string][]string) map[string][]string { + c := make(map[string][]string, len(h)) + for k, v := range h { + vc := make([]string, len(v)) + copy(vc, v) + c[k] = vc + } + return c +} diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go index 3e53729e..f5273985 100644 --- a/vendor/github.com/googleapis/gax-go/v2/header.go +++ b/vendor/github.com/googleapis/gax-go/v2/header.go @@ -163,11 +163,38 @@ func insertMetadata(ctx context.Context, keyvals ...string) metadata.MD { out = metadata.MD(make(map[string][]string)) } headers := callctx.HeadersFromContext(ctx) - for k, v := range headers { - out[k] = append(out[k], v...) + + // x-goog-api-client is a special case that we want to make sure gets merged + // into a single header. + const xGoogHeader = "x-goog-api-client" + var mergedXgoogHeader strings.Builder + + for k, vals := range headers { + if k == xGoogHeader { + // Merge all values for the x-goog-api-client header set on the ctx. + for _, v := range vals { + mergedXgoogHeader.WriteString(v) + mergedXgoogHeader.WriteRune(' ') + } + continue + } + out[k] = append(out[k], vals...) } for i := 0; i < len(keyvals); i = i + 2 { out[keyvals[i]] = append(out[keyvals[i]], keyvals[i+1]) + + if keyvals[i] == xGoogHeader { + // Merge the x-goog-api-client header values set on the ctx with any + // values passed in for it from the client. + mergedXgoogHeader.WriteString(keyvals[i+1]) + mergedXgoogHeader.WriteRune(' ') + } } + + // Add the x goog header back in, replacing the separate values that were set. + if mergedXgoogHeader.Len() > 0 { + out[xGoogHeader] = []string{mergedXgoogHeader.String()[:mergedXgoogHeader.Len()-1]} + } + return out } diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index 890d4819..4f780f46 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. -const Version = "2.12.1" +const Version = "2.12.5" diff --git a/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go b/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go index cc4486eb..9b690d40 100644 --- a/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go +++ b/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go @@ -111,7 +111,8 @@ func (s *ProtoJSONStream) Recv() (proto.Message, error) { // Initialize a new instance of the protobuf message to unmarshal the // raw data into. 
m := s.typ.New().Interface() - err := protojson.Unmarshal(raw, m) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + err := unm.Unmarshal(raw, m) return m, err } diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/BUILD.bazel b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/BUILD.bazel index 798c6df3..8266844f 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/BUILD.bazel @@ -10,5 +10,5 @@ go_library( importmap = "peridot.resf.org/vendor/github.com/grpc-ecosystem/go-grpc-middleware", importpath = "github.com/grpc-ecosystem/go-grpc-middleware", visibility = ["//visibility:public"], - deps = ["@org_golang_google_grpc//:go_default_library"], + deps = ["@org_golang_google_grpc//:grpc"], ) diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/auth/BUILD.bazel b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/auth/BUILD.bazel index 93ee9732..f05ae2f5 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/auth/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/auth/BUILD.bazel @@ -13,7 +13,7 @@ go_library( deps = [ "//vendor/github.com/grpc-ecosystem/go-grpc-middleware", "//vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//status", ], diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/BUILD.bazel b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/BUILD.bazel index 6b41a144..db6346e7 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/BUILD.bazel @@ -14,7 +14,7 @@ go_library( deps = [ "//vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils", "//vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//metadata", "@org_golang_google_grpc//status", diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/validator/BUILD.bazel b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/validator/BUILD.bazel index 81c53ca2..b2eb7f83 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/validator/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/validator/BUILD.bazel @@ -10,7 +10,7 @@ go_library( importpath = "github.com/grpc-ecosystem/go-grpc-middleware/validator", visibility = ["//visibility:public"], deps = [ - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//status", ], diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/BUILD.bazel b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/BUILD.bazel index ea4412a9..96ae7bdb 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/BUILD.bazel @@ -17,7 +17,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/prometheus/client_golang/prometheus", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//status", "@org_golang_x_net//context", diff --git 
a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go index 8d306bf5..fe28d15b 100644 --- a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go +++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go @@ -32,6 +32,7 @@ func DefaultPooledTransport() *http.Transport { IdleConnTimeout: 90 * time.Second, TLSHandshakeTimeout: 10 * time.Second, ExpectContinueTimeout: 1 * time.Second, + ForceAttemptHTTP2: true, MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1, } return transport diff --git a/vendor/github.com/inconshreveable/mousetrap/BUILD.bazel b/vendor/github.com/inconshreveable/mousetrap/BUILD.bazel index 78b80845..3e0947c4 100644 --- a/vendor/github.com/inconshreveable/mousetrap/BUILD.bazel +++ b/vendor/github.com/inconshreveable/mousetrap/BUILD.bazel @@ -5,7 +5,6 @@ go_library( srcs = [ "trap_others.go", "trap_windows.go", - "trap_windows_1.4.go", ], importmap = "peridot.resf.org/vendor/github.com/inconshreveable/mousetrap", importpath = "github.com/inconshreveable/mousetrap", diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE index 5f0d1fb6..5f920e97 100644 --- a/vendor/github.com/inconshreveable/mousetrap/LICENSE +++ b/vendor/github.com/inconshreveable/mousetrap/LICENSE @@ -1,13 +1,201 @@ -Copyright 2014 Alan Shreve + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - http://www.apache.org/licenses/LICENSE-2.0 + 1. Definitions. -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2022 Alan Shreve (@inconshreveable) + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
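Looking back at the go-cleanhttp hunk above: net/http only enables automatic HTTP/2 on a near-default Transport, so a transport with a custom DialContext, as DefaultPooledTransport builds, needs ForceAttemptHTTP2 to negotiate it. A usage sketch (the URL is illustrative; any HTTP/2-capable endpoint will do):

```go
package main

import (
	"fmt"
	"net/http"

	cleanhttp "github.com/hashicorp/go-cleanhttp"
)

func main() {
	// DefaultPooledTransport now sets ForceAttemptHTTP2, so this client
	// negotiates HTTP/2 when the server offers it despite the custom dialer.
	client := &http.Client{Transport: cleanhttp.DefaultPooledTransport()}

	resp, err := client.Get("https://www.google.com")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Proto) // typically "HTTP/2.0" after this change
}
```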
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go index 9d2d8a4b..06a91f08 100644 --- a/vendor/github.com/inconshreveable/mousetrap/trap_others.go +++ b/vendor/github.com/inconshreveable/mousetrap/trap_others.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package mousetrap diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go index 336142a5..0c568802 100644 --- a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go +++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go @@ -1,79 +1,30 @@ -// +build windows -// +build !go1.4 - package mousetrap import ( - "fmt" - "os" "syscall" "unsafe" ) -const ( - // defined by the Win32 API - th32cs_snapprocess uintptr = 0x2 -) - -var ( - kernel = syscall.MustLoadDLL("kernel32.dll") - CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot") - Process32First = kernel.MustFindProc("Process32FirstW") - Process32Next = kernel.MustFindProc("Process32NextW") -) - -// ProcessEntry32 structure defined by the Win32 API -type processEntry32 struct { - dwSize uint32 - cntUsage uint32 - th32ProcessID uint32 - th32DefaultHeapID int - th32ModuleID uint32 - cntThreads uint32 - th32ParentProcessID uint32 - pcPriClassBase int32 - dwFlags uint32 - szExeFile [syscall.MAX_PATH]uint16 -} - -func getProcessEntry(pid int) (pe *processEntry32, err error) { - snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0)) - if snapshot == uintptr(syscall.InvalidHandle) { - err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1) - return - } - defer syscall.CloseHandle(syscall.Handle(snapshot)) - - var processEntry processEntry32 - processEntry.dwSize = uint32(unsafe.Sizeof(processEntry)) - ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32First: %v", e1) - return - } - - for { - if processEntry.th32ProcessID == uint32(pid) { - pe = &processEntry - return - } - - ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32Next: %v", e1) - return - } - } -} - -func getppid() (pid int, err error) { - pe, err := getProcessEntry(os.Getpid()) +func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { + snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) if err != nil { - return + return nil, err + } + defer syscall.CloseHandle(snapshot) + var procEntry syscall.ProcessEntry32 + procEntry.Size = uint32(unsafe.Sizeof(procEntry)) + if err = syscall.Process32First(snapshot, &procEntry); err != nil { + return nil, err + } + for { + if procEntry.ProcessID == uint32(pid) { + return &procEntry, nil + } + err = syscall.Process32Next(snapshot, &procEntry) + if err != nil { + return nil, err + } } - - pid = int(pe.th32ParentProcessID) - return } // StartedByExplorer returns true if the program was invoked by the user double-clicking @@ -83,16 +34,9 @@ func getppid() (pid int, err error) { // It does not guarantee that the program was run from a terminal. 
It only can tell you // whether it was launched from explorer.exe func StartedByExplorer() bool { - ppid, err := getppid() + pe, err := getProcessEntry(syscall.Getppid()) if err != nil { return false } - - pe, err := getProcessEntry(ppid) - if err != nil { - return false - } - - name := syscall.UTF16ToString(pe.szExeFile[:]) - return name == "explorer.exe" + return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) } diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go deleted file mode 100644 index 9a28e57c..00000000 --- a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build windows -// +build go1.4 - -package mousetrap - -import ( - "os" - "syscall" - "unsafe" -) - -func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { - snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) - if err != nil { - return nil, err - } - defer syscall.CloseHandle(snapshot) - var procEntry syscall.ProcessEntry32 - procEntry.Size = uint32(unsafe.Sizeof(procEntry)) - if err = syscall.Process32First(snapshot, &procEntry); err != nil { - return nil, err - } - for { - if procEntry.ProcessID == uint32(pid) { - return &procEntry, nil - } - err = syscall.Process32Next(snapshot, &procEntry) - if err != nil { - return nil, err - } - } -} - -// StartedByExplorer returns true if the program was invoked by the user double-clicking -// on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. It only can tell you -// whether it was launched from explorer.exe -func StartedByExplorer() bool { - pe, err := getProcessEntry(os.Getppid()) - if err != nil { - return false - } - return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) -} diff --git a/vendor/github.com/kevinburke/ssh_config/.travis.yml b/vendor/github.com/kevinburke/ssh_config/.travis.yml deleted file mode 100644 index 4306f30f..00000000 --- a/vendor/github.com/kevinburke/ssh_config/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -go_import_path: github.com/kevinburke/ssh_config - -language: go - -go: - - 1.11.x - - 1.12.x - - master - -before_script: - - go get -u ./... - -script: - - make race-test diff --git a/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt b/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt index cd337940..311aeb1b 100644 --- a/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt +++ b/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt @@ -1,5 +1,9 @@ +Carlos A Becker +Dustin Spicuzza Eugene Terentev Kevin Burke Mark Nevill +Scott Lessans Sergey Lukjanov Wayne Ashley Berry +santosh653 <70637961+santosh653@users.noreply.github.com> diff --git a/vendor/github.com/kevinburke/ssh_config/CHANGELOG.md b/vendor/github.com/kevinburke/ssh_config/CHANGELOG.md new file mode 100644 index 00000000..d32a3f51 --- /dev/null +++ b/vendor/github.com/kevinburke/ssh_config/CHANGELOG.md @@ -0,0 +1,19 @@ +# Changes + +## Version 1.2 + +Previously, if a Host declaration or a value had trailing whitespace, that +whitespace would have been included as part of the value. This led to unexpected +consequences. For example: + +``` +Host example # A comment + HostName example.com # Another comment +``` + +Prior to version 1.2, the value for Host would have been "example " and the +value for HostName would have been "example.com ". Both of these are +unintuitive. 
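A sketch of the v1.2 behavior this changelog entry describes, using the library's own Decode and Get (the entry's conclusion continues below):

```go
package main

import (
	"fmt"
	"strings"

	ssh_config "github.com/kevinburke/ssh_config"
)

func main() {
	cfg, err := ssh_config.Decode(strings.NewReader(
		"Host example # A comment\n" +
			"    HostName example.com # Another comment\n"))
	if err != nil {
		panic(err)
	}
	host, _ := cfg.Get("example", "HostName")
	// With v1.2 the whitespace before the trailing comment is stripped,
	// so this prints "example.com", not "example.com ".
	fmt.Printf("%q\n", host)
}
```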
+ +Instead, we strip the trailing whitespace in the configuration, which leads to +more intuitive behavior. diff --git a/vendor/github.com/kevinburke/ssh_config/Makefile b/vendor/github.com/kevinburke/ssh_config/Makefile index a1880d18..df7ee728 100644 --- a/vendor/github.com/kevinburke/ssh_config/Makefile +++ b/vendor/github.com/kevinburke/ssh_config/Makefile @@ -19,8 +19,11 @@ race-test: lint $(BUMP_VERSION): go get -u github.com/kevinburke/bump_version +$(WRITE_MAILMAP): + go get -u github.com/kevinburke/write_mailmap + release: test | $(BUMP_VERSION) - $(BUMP_VERSION) minor config.go + $(BUMP_VERSION) --tag-prefix=v minor config.go force: ; diff --git a/vendor/github.com/kevinburke/ssh_config/README.md b/vendor/github.com/kevinburke/ssh_config/README.md index 52cc1eac..f14b2168 100644 --- a/vendor/github.com/kevinburke/ssh_config/README.md +++ b/vendor/github.com/kevinburke/ssh_config/README.md @@ -17,6 +17,14 @@ want to retrieve. port := ssh_config.Get("myhost", "Port") ``` +Certain directives can occur multiple times for a host (such as `IdentityFile`), +so you should use the `GetAll` or `GetAllStrict` directive to retrieve those +instead. + +```go +files := ssh_config.GetAll("myhost", "IdentityFile") +``` + You can also load a config file and read values from it. ```go @@ -76,6 +84,9 @@ file format. ## Donating -Donations free up time to make improvements to the library, and respond to -bug reports. You can send donations via Paypal's "Send Money" feature to -kev@inburke.com. Donations are not tax deductible in the USA. +I don't get paid to maintain this project. Donations free up time to make +improvements to the library, and respond to bug reports. You can send donations +via Paypal's "Send Money" feature to kev@inburke.com. Donations are not tax +deductible in the USA. + +You can also reach out about a consulting engagement: https://burke.services diff --git a/vendor/github.com/kevinburke/ssh_config/config.go b/vendor/github.com/kevinburke/ssh_config/config.go index 136f0c35..00d815c1 100644 --- a/vendor/github.com/kevinburke/ssh_config/config.go +++ b/vendor/github.com/kevinburke/ssh_config/config.go @@ -34,7 +34,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" osuser "os/user" "path/filepath" @@ -44,7 +43,7 @@ import ( "sync" ) -const version = "1.0" +const version = "1.2" var _ = version @@ -102,6 +101,13 @@ func findVal(c *Config, alias, key string) (string, error) { return val, nil } +func findAll(c *Config, alias, key string) ([]string, error) { + if c == nil { + return nil, nil + } + return c.GetAll(alias, key) +} + // Get finds the first value for key within a declaration that matches the // alias. Get returns the empty string if no value was found, or if IgnoreErrors // is false and we could not parse the configuration file. Use GetStrict to @@ -114,19 +120,51 @@ func Get(alias, key string) string { return DefaultUserSettings.Get(alias, key) } +// GetAll retrieves zero or more directives for key for the given alias. GetAll +// returns nil if no value was found, or if IgnoreErrors is false and we could +// not parse the configuration file. Use GetAllStrict to disambiguate the +// latter cases. +// +// In most cases you want to use Get or GetStrict, which returns a single value. +// However, a subset of ssh configuration values (IdentityFile, for example) +// allow you to specify multiple directives. +// +// The match for key is case insensitive. +// +// GetAll is a wrapper around DefaultUserSettings.GetAll. 
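The wrapper's one-line body follows in the hunk below. First, a usage sketch for multi-valued directives; the alias and paths are illustrative:

```go
package main

import (
	"fmt"
	"strings"

	ssh_config "github.com/kevinburke/ssh_config"
)

func main() {
	cfg, err := ssh_config.Decode(strings.NewReader(
		"Host myhost\n" +
			"    IdentityFile ~/.ssh/id_ed25519\n" +
			"    IdentityFile ~/.ssh/id_rsa\n"))
	if err != nil {
		panic(err)
	}
	// Get would return only the first directive; GetAll collects every one.
	files, err := cfg.GetAll("myhost", "IdentityFile")
	if err != nil {
		panic(err)
	}
	fmt.Println(files) // [~/.ssh/id_ed25519 ~/.ssh/id_rsa]
}
```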
+func GetAll(alias, key string) []string { + return DefaultUserSettings.GetAll(alias, key) +} + // GetStrict finds the first value for key within a declaration that matches the // alias. If key has a default value and no matching configuration is found, the // default will be returned. For more information on default values and the way // patterns are matched, see the manpage for ssh_config. // -// error will be non-nil if and only if a user's configuration file or the -// system configuration file could not be parsed, and u.IgnoreErrors is false. +// The returned error will be non-nil if and only if a user's configuration file +// or the system configuration file could not be parsed, and u.IgnoreErrors is +// false. // // GetStrict is a wrapper around DefaultUserSettings.GetStrict. func GetStrict(alias, key string) (string, error) { return DefaultUserSettings.GetStrict(alias, key) } +// GetAllStrict retrieves zero or more directives for key for the given alias. +// +// In most cases you want to use Get or GetStrict, which returns a single value. +// However, a subset of ssh configuration values (IdentityFile, for example) +// allow you to specify multiple directives. +// +// The returned error will be non-nil if and only if a user's configuration file +// or the system configuration file could not be parsed, and u.IgnoreErrors is +// false. +// +// GetAllStrict is a wrapper around DefaultUserSettings.GetAllStrict. +func GetAllStrict(alias, key string) ([]string, error) { + return DefaultUserSettings.GetAllStrict(alias, key) +} + // Get finds the first value for key within a declaration that matches the // alias. Get returns the empty string if no value was found, or if IgnoreErrors // is false and we could not parse the configuration file. Use GetStrict to @@ -141,6 +179,17 @@ func (u *UserSettings) Get(alias, key string) string { return val } +// GetAll retrieves zero or more directives for key for the given alias. GetAll +// returns nil if no value was found, or if IgnoreErrors is false and we could +// not parse the configuration file. Use GetStrict to disambiguate the latter +// cases. +// +// The match for key is case insensitive. +func (u *UserSettings) GetAll(alias, key string) []string { + val, _ := u.GetAllStrict(alias, key) + return val +} + // GetStrict finds the first value for key within a declaration that matches the // alias. If key has a default value and no matching configuration is found, the // default will be returned. For more information on default values and the way @@ -149,6 +198,52 @@ func (u *UserSettings) Get(alias, key string) string { // error will be non-nil if and only if a user's configuration file or the // system configuration file could not be parsed, and u.IgnoreErrors is false. func (u *UserSettings) GetStrict(alias, key string) (string, error) { + u.doLoadConfigs() + //lint:ignore S1002 I prefer it this way + if u.onceErr != nil && u.IgnoreErrors == false { + return "", u.onceErr + } + val, err := findVal(u.userConfig, alias, key) + if err != nil || val != "" { + return val, err + } + val2, err2 := findVal(u.systemConfig, alias, key) + if err2 != nil || val2 != "" { + return val2, err2 + } + return Default(key), nil +} + +// GetAllStrict retrieves zero or more directives for key for the given alias. +// If key has a default value and no matching configuration is found, the +// default will be returned. For more information on default values and the way +// patterns are matched, see the manpage for ssh_config. 
+// +// The returned error will be non-nil if and only if a user's configuration file +// or the system configuration file could not be parsed, and u.IgnoreErrors is +// false. +func (u *UserSettings) GetAllStrict(alias, key string) ([]string, error) { + u.doLoadConfigs() + //lint:ignore S1002 I prefer it this way + if u.onceErr != nil && u.IgnoreErrors == false { + return nil, u.onceErr + } + val, err := findAll(u.userConfig, alias, key) + if err != nil || val != nil { + return val, err + } + val2, err2 := findAll(u.systemConfig, alias, key) + if err2 != nil || val2 != nil { + return val2, err2 + } + // TODO: IdentityFile has multiple default values that we should return. + if def := Default(key); def != "" { + return []string{def}, nil + } + return []string{}, nil +} + +func (u *UserSettings) doLoadConfigs() { u.loadConfigs.Do(func() { // can't parse user file, that's ok. var filename string @@ -176,19 +271,6 @@ func (u *UserSettings) GetStrict(alias, key string) (string, error) { return } }) - //lint:ignore S1002 I prefer it this way - if u.onceErr != nil && u.IgnoreErrors == false { - return "", u.onceErr - } - val, err := findVal(u.userConfig, alias, key) - if err != nil || val != "" { - return val, err - } - val2, err2 := findVal(u.systemConfig, alias, key) - if err2 != nil || val2 != "" { - return val2, err2 - } - return Default(key), nil } func parseFile(filename string) (*Config, error) { @@ -196,7 +278,7 @@ func parseFile(filename string) (*Config, error) { } func parseWithDepth(filename string, depth uint8) (*Config, error) { - b, err := ioutil.ReadFile(filename) + b, err := os.ReadFile(filename) if err != nil { return nil, err } @@ -211,13 +293,19 @@ func isSystem(filename string) bool { // Decode reads r into a Config, or returns an error if r could not be parsed as // an SSH config file. func Decode(r io.Reader) (*Config, error) { - b, err := ioutil.ReadAll(r) + b, err := io.ReadAll(r) if err != nil { return nil, err } return decodeBytes(b, false, 0) } +// DecodeBytes reads b into a Config, or returns an error if r could not be +// parsed as an SSH config file. +func DecodeBytes(b []byte) (*Config, error) { + return decodeBytes(b, false, 0) +} + func decodeBytes(b []byte, system bool, depth uint8) (c *Config, err error) { defer func() { if r := recover(); r != nil { @@ -282,6 +370,42 @@ func (c *Config) Get(alias, key string) (string, error) { return "", nil } +// GetAll returns all values in the configuration that match the alias and +// contains key, or nil if none are present. +func (c *Config) GetAll(alias, key string) ([]string, error) { + lowerKey := strings.ToLower(key) + all := []string(nil) + for _, host := range c.Hosts { + if !host.Matches(alias) { + continue + } + for _, node := range host.Nodes { + switch t := node.(type) { + case *Empty: + continue + case *KV: + // "keys are case insensitive" per the spec + lkey := strings.ToLower(t.Key) + if lkey == "match" { + panic("can't handle Match directives") + } + if lkey == lowerKey { + all = append(all, t.Value) + } + case *Include: + val, _ := t.GetAll(alias, key) + if len(val) > 0 { + all = append(all, val...) + } + default: + return nil, fmt.Errorf("unknown Node type %v", t) + } + } + } + + return all, nil +} + // String returns a string representation of the Config file. func (c Config) String() string { return marshal(c).String() @@ -374,7 +498,10 @@ type Host struct { // A Node is either a key/value pair or a comment line. Nodes []Node // EOLComment is the comment (if any) terminating the Host line. 
- EOLComment string + EOLComment string + // Whitespace if any between the Host declaration and a trailing comment. + spaceBeforeComment string + hasEquals bool leadingSpace int // TODO: handle spaces vs tabs here. // The file starts with an implicit "Host *" declaration. @@ -406,7 +533,7 @@ func (h *Host) Matches(alias string) bool { // String prints h as it would appear in a config file. Minor tweaks may be // present in the whitespace in the printed file. func (h *Host) String() string { - var buf bytes.Buffer + var buf strings.Builder //lint:ignore S1002 I prefer to write it this way if h.implicit == false { buf.WriteString(strings.Repeat(" ", int(h.leadingSpace))) @@ -422,8 +549,9 @@ func (h *Host) String() string { buf.WriteString(" ") } } + buf.WriteString(h.spaceBeforeComment) if h.EOLComment != "" { - buf.WriteString(" #") + buf.WriteByte('#') buf.WriteString(h.EOLComment) } buf.WriteByte('\n') @@ -444,12 +572,14 @@ type Node interface { // KV is a line in the config file that contains a key, a value, and possibly // a comment. type KV struct { - Key string - Value string - Comment string - hasEquals bool - leadingSpace int // Space before the key. TODO handle spaces vs tabs. - position Position + Key string + Value string + // Whitespace after the value but before any comment + spaceAfterValue string + Comment string + hasEquals bool + leadingSpace int // Space before the key. TODO handle spaces vs tabs. + position Position } // Pos returns k's Position. @@ -457,8 +587,7 @@ func (k *KV) Pos() Position { return k.position } -// String prints k as it was parsed in the config file. There may be slight -// changes to the whitespace between values. +// String prints k as it was parsed in the config file. func (k *KV) String() string { if k == nil { return "" @@ -467,9 +596,9 @@ func (k *KV) String() string { if k.hasEquals { equals = " = " } - line := fmt.Sprintf("%s%s%s%s", strings.Repeat(" ", int(k.leadingSpace)), k.Key, equals, k.Value) + line := strings.Repeat(" ", int(k.leadingSpace)) + k.Key + equals + k.Value + k.spaceAfterValue if k.Comment != "" { - line += " #" + k.Comment + line += "#" + k.Comment } return line } @@ -611,6 +740,31 @@ func (inc *Include) Get(alias, key string) string { return "" } +// GetAll finds all values in the Include statement matching the alias and the +// given key. +func (inc *Include) GetAll(alias, key string) ([]string, error) { + inc.mu.Lock() + defer inc.mu.Unlock() + var vals []string + + // TODO: we search files in any order which is not correct + for i := range inc.matches { + cfg := inc.files[inc.matches[i]] + if cfg == nil { + panic("nil cfg") + } + val, err := cfg.GetAll(alias, key) + if err == nil && len(val) != 0 { + // In theory if SupportsMultiple was false for this key we could + // stop looking here. But the caller has asked us to find all + // instances of the keyword (and could use Get() if they wanted) so + // let's keep looking. + vals = append(vals, val...) + } + } + return vals, nil +} + // String prints out a string representation of this Include directive. Note // included Config files are not printed as part of this representation. 
func (inc *Include) String() string { diff --git a/vendor/github.com/kevinburke/ssh_config/parser.go b/vendor/github.com/kevinburke/ssh_config/parser.go index 36c42055..2b1e718c 100644 --- a/vendor/github.com/kevinburke/ssh_config/parser.go +++ b/vendor/github.com/kevinburke/ssh_config/parser.go @@ -3,6 +3,7 @@ package ssh_config import ( "fmt" "strings" + "unicode" ) type sshParser struct { @@ -122,11 +123,16 @@ func (p *sshParser) parseKV() sshParserStateFn { } patterns = append(patterns, pat) } + // val.val at this point could be e.g. "example.com " + hostval := strings.TrimRightFunc(val.val, unicode.IsSpace) + spaceBeforeComment := val.val[len(hostval):] + val.val = hostval p.config.Hosts = append(p.config.Hosts, &Host{ - Patterns: patterns, - Nodes: make([]Node, 0), - EOLComment: comment, - hasEquals: hasEquals, + Patterns: patterns, + Nodes: make([]Node, 0), + EOLComment: comment, + spaceBeforeComment: spaceBeforeComment, + hasEquals: hasEquals, }) return p.parseStart } @@ -144,13 +150,16 @@ func (p *sshParser) parseKV() sshParserStateFn { lastHost.Nodes = append(lastHost.Nodes, inc) return p.parseStart } + shortval := strings.TrimRightFunc(val.val, unicode.IsSpace) + spaceAfterValue := val.val[len(shortval):] kv := &KV{ - Key: key.val, - Value: val.val, - Comment: comment, - hasEquals: hasEquals, - leadingSpace: key.Position.Col - 1, - position: key.Position, + Key: key.val, + Value: shortval, + spaceAfterValue: spaceAfterValue, + Comment: comment, + hasEquals: hasEquals, + leadingSpace: key.Position.Col - 1, + position: key.Position, } lastHost.Nodes = append(lastHost.Nodes, kv) return p.parseStart diff --git a/vendor/github.com/kevinburke/ssh_config/validators.go b/vendor/github.com/kevinburke/ssh_config/validators.go index 29fab6a9..5977f909 100644 --- a/vendor/github.com/kevinburke/ssh_config/validators.go +++ b/vendor/github.com/kevinburke/ssh_config/validators.go @@ -160,3 +160,27 @@ var defaults = map[string]string{ strings.ToLower("VisualHostKey"): "no", strings.ToLower("XAuthLocation"): "/usr/X11R6/bin/xauth", } + +// these identities are used for SSH protocol 2 +var defaultProtocol2Identities = []string{ + "~/.ssh/id_dsa", + "~/.ssh/id_ecdsa", + "~/.ssh/id_ed25519", + "~/.ssh/id_rsa", +} + +// these directives support multiple items that can be collected +// across multiple files +var pluralDirectives = map[string]bool{ + "CertificateFile": true, + "IdentityFile": true, + "DynamicForward": true, + "RemoteForward": true, + "SendEnv": true, + "SetEnv": true, +} + +// SupportsMultiple reports whether a directive can be specified multiple times. 
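The helper's one-line body opens the next hunk. A sketch of how a caller might combine it with the accessors added above; the get helper is our own illustration, not part of the library:

```go
package main

import (
	"fmt"

	ssh_config "github.com/kevinburke/ssh_config"
)

// get consults SupportsMultiple to decide between the single-valued and
// multi-valued accessors for a given directive.
func get(alias, key string) []string {
	if ssh_config.SupportsMultiple(key) {
		return ssh_config.GetAll(alias, key)
	}
	return []string{ssh_config.Get(alias, key)}
}

func main() {
	// IdentityFile is one of the plural directives listed above, so this
	// path goes through GetAll and can return several values.
	fmt.Println(get("myhost", "IdentityFile"))
}
```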
+func SupportsMultiple(key string) bool { + return pluralDirectives[strings.ToLower(key)] +} diff --git a/vendor/github.com/magiconair/properties/.travis.yml b/vendor/github.com/magiconair/properties/.travis.yml deleted file mode 100644 index f07376f9..00000000 --- a/vendor/github.com/magiconair/properties/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - "1.10.x" - - "1.11.x" - - "1.12.x" - - tip diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md index 176626a1..842e8e24 100644 --- a/vendor/github.com/magiconair/properties/CHANGELOG.md +++ b/vendor/github.com/magiconair/properties/CHANGELOG.md @@ -1,8 +1,74 @@ ## Changelog +### [1.8.7](https://github.com/magiconair/properties/tree/v1.8.7) - 08 Dec 2022 + + * [PR #65](https://github.com/magiconair/properties/pull/65): Speedup Merge + + Thanks to [@AdityaVallabh](https://github.com/AdityaVallabh) for the patch. + + * [PR #66](https://github.com/magiconair/properties/pull/66): use github actions + +### [1.8.6](https://github.com/magiconair/properties/tree/v1.8.6) - 23 Feb 2022 + + * [PR #57](https://github.com/magiconair/properties/pull/57):Fix "unreachable code" lint error + + Thanks to [@ellie](https://github.com/ellie) for the patch. + + * [PR #63](https://github.com/magiconair/properties/pull/63): Make TestMustGetParsedDuration backwards compatible + + This patch ensures that the `TestMustGetParsedDuration` still works with `go1.3` to make the + author happy until it affects real users. + + Thanks to [@maage](https://github.com/maage) for the patch. + +### [1.8.5](https://github.com/magiconair/properties/tree/v1.8.5) - 24 Mar 2021 + + * [PR #55](https://github.com/magiconair/properties/pull/55): Fix: Encoding Bug in Comments + + When reading comments \ are loaded correctly, but when writing they are then + replaced by \\. This leads to wrong comments when writing and reading multiple times. + + Thanks to [@doxsch](https://github.com/doxsch) for the patch. + +### [1.8.4](https://github.com/magiconair/properties/tree/v1.8.4) - 23 Sep 2020 + + * [PR #50](https://github.com/magiconair/properties/pull/50): enhance error message for circular references + + Thanks to [@sriv](https://github.com/sriv) for the patch. + +### [1.8.3](https://github.com/magiconair/properties/tree/v1.8.3) - 14 Sep 2020 + + * [PR #49](https://github.com/magiconair/properties/pull/49): Include the key in error message causing the circular reference + + The change is include the key in the error message which is causing the circular + reference when parsing/loading the properties files. + + Thanks to [@haroon-sheikh](https://github.com/haroon-sheikh) for the patch. + +### [1.8.2](https://github.com/magiconair/properties/tree/v1.8.2) - 25 Aug 2020 + + * [PR #36](https://github.com/magiconair/properties/pull/36): Escape backslash on write + + This patch ensures that backslashes are escaped on write. Existing applications which + rely on the old behavior may need to be updated. + + Thanks to [@apesternikov](https://github.com/apesternikov) for the patch. + + * [PR #42](https://github.com/magiconair/properties/pull/42): Made Content-Type check whitespace agnostic in LoadURL() + + Thanks to [@aliras1](https://github.com/aliras1) for the patch. + + * [PR #41](https://github.com/magiconair/properties/pull/41): Make key/value separator configurable on Write() + + Thanks to [@mkjor](https://github.com/mkjor) for the patch. 
+ + * [PR #40](https://github.com/magiconair/properties/pull/40): Add method to return a sorted list of keys + + Thanks to [@mkjor](https://github.com/mkjor) for the patch. + ### [1.8.1](https://github.com/magiconair/properties/tree/v1.8.1) - 10 May 2019 - * [PR #26](https://github.com/magiconair/properties/pull/35): Close body always after request + * [PR #35](https://github.com/magiconair/properties/pull/35): Close body always after request This patch ensures that in `LoadURL` the response body is always closed. diff --git a/vendor/github.com/magiconair/properties/LICENSE b/vendor/github.com/magiconair/properties/LICENSE.md similarity index 84% rename from vendor/github.com/magiconair/properties/LICENSE rename to vendor/github.com/magiconair/properties/LICENSE.md index b387087c..79c87e3e 100644 --- a/vendor/github.com/magiconair/properties/LICENSE +++ b/vendor/github.com/magiconair/properties/LICENSE.md @@ -1,15 +1,14 @@ -goproperties - properties file decoder for Go - -Copyright (c) 2013-2018 - Frank Schroeder +Copyright (c) 2013-2020, Frank Schroeder All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: -1. Redistributions of source code must retain the above copyright notice, this + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, + + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. diff --git a/vendor/github.com/magiconair/properties/README.md b/vendor/github.com/magiconair/properties/README.md index 42ed5c37..e2edda02 100644 --- a/vendor/github.com/magiconair/properties/README.md +++ b/vendor/github.com/magiconair/properties/README.md @@ -1,6 +1,5 @@ [![](https://img.shields.io/github/tag/magiconair/properties.svg?style=flat-square&label=release)](https://github.com/magiconair/properties/releases) [![Travis CI Status](https://img.shields.io/travis/magiconair/properties.svg?branch=master&style=flat-square&label=travis)](https://travis-ci.org/magiconair/properties) -[![CircleCI Status](https://img.shields.io/circleci/project/github/magiconair/properties.svg?label=circle+ci&style=flat-square)](https://circleci.com/gh/magiconair/properties) [![License](https://img.shields.io/badge/License-BSD%202--Clause-orange.svg?style=flat-square)](https://raw.githubusercontent.com/magiconair/properties/master/LICENSE) [![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties) diff --git a/vendor/github.com/magiconair/properties/decode.go b/vendor/github.com/magiconair/properties/decode.go index 3ebf8049..8e6aa441 100644 --- a/vendor/github.com/magiconair/properties/decode.go +++ b/vendor/github.com/magiconair/properties/decode.go @@ -1,4 +1,4 @@ -// Copyright 2018 Frank Schroeder. All rights reserved. +// Copyright 2013-2022 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -48,49 +48,49 @@ import ( // // Examples: // -// // Field is ignored. -// Field int `properties:"-"` +// // Field is ignored. +// Field int `properties:"-"` // -// // Field is assigned value of 'Field'. 
-// Field int +// // Field is assigned value of 'Field'. +// Field int // -// // Field is assigned value of 'myName'. -// Field int `properties:"myName"` +// // Field is assigned value of 'myName'. +// Field int `properties:"myName"` // -// // Field is assigned value of key 'myName' and has a default -// // value 15 if the key does not exist. -// Field int `properties:"myName,default=15"` +// // Field is assigned value of key 'myName' and has a default +// // value 15 if the key does not exist. +// Field int `properties:"myName,default=15"` // -// // Field is assigned value of key 'Field' and has a default -// // value 15 if the key does not exist. -// Field int `properties:",default=15"` +// // Field is assigned value of key 'Field' and has a default +// // value 15 if the key does not exist. +// Field int `properties:",default=15"` // -// // Field is assigned value of key 'date' and the date -// // is in format 2006-01-02 -// Field time.Time `properties:"date,layout=2006-01-02"` +// // Field is assigned value of key 'date' and the date +// // is in format 2006-01-02 +// Field time.Time `properties:"date,layout=2006-01-02"` // -// // Field is assigned the non-empty and whitespace trimmed -// // values of key 'Field' split by commas. -// Field []string +// // Field is assigned the non-empty and whitespace trimmed +// // values of key 'Field' split by commas. +// Field []string // -// // Field is assigned the non-empty and whitespace trimmed -// // values of key 'Field' split by commas and has a default -// // value ["a", "b", "c"] if the key does not exist. -// Field []string `properties:",default=a;b;c"` +// // Field is assigned the non-empty and whitespace trimmed +// // values of key 'Field' split by commas and has a default +// // value ["a", "b", "c"] if the key does not exist. +// Field []string `properties:",default=a;b;c"` // -// // Field is decoded recursively with "Field." as key prefix. -// Field SomeStruct +// // Field is decoded recursively with "Field." as key prefix. +// Field SomeStruct // -// // Field is decoded recursively with "myName." as key prefix. -// Field SomeStruct `properties:"myName"` +// // Field is decoded recursively with "myName." as key prefix. +// Field SomeStruct `properties:"myName"` // -// // Field is decoded recursively with "Field." as key prefix -// // and the next dotted element of the key as map key. -// Field map[string]string +// // Field is decoded recursively with "Field." as key prefix +// // and the next dotted element of the key as map key. +// Field map[string]string // -// // Field is decoded recursively with "myName." as key prefix -// // and the next dotted element of the key as map key. -// Field map[string]string `properties:"myName"` +// // Field is decoded recursively with "myName." as key prefix +// // and the next dotted element of the key as map key. +// Field map[string]string `properties:"myName"` func (p *Properties) Decode(x interface{}) error { t, v := reflect.TypeOf(x), reflect.ValueOf(x) if t.Kind() != reflect.Ptr || v.Elem().Type().Kind() != reflect.Struct { diff --git a/vendor/github.com/magiconair/properties/doc.go b/vendor/github.com/magiconair/properties/doc.go index f8822da2..7c797931 100644 --- a/vendor/github.com/magiconair/properties/doc.go +++ b/vendor/github.com/magiconair/properties/doc.go @@ -1,4 +1,4 @@ -// Copyright 2018 Frank Schroeder. All rights reserved. +// Copyright 2013-2022 Frank Schroeder. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -13,7 +13,7 @@ // // To load a single properties file use MustLoadFile(): // -// p := properties.MustLoadFile(filename, properties.UTF8) +// p := properties.MustLoadFile(filename, properties.UTF8) // // To load multiple properties files use MustLoadFiles() // which loads the files in the given order and merges the @@ -23,25 +23,25 @@ // Filenames can contain environment variables which are expanded // before loading. // -// f1 := "/etc/myapp/myapp.conf" -// f2 := "/home/${USER}/myapp.conf" -// p := MustLoadFiles([]string{f1, f2}, properties.UTF8, true) +// f1 := "/etc/myapp/myapp.conf" +// f2 := "/home/${USER}/myapp.conf" +// p := MustLoadFiles([]string{f1, f2}, properties.UTF8, true) // // All of the different key/value delimiters ' ', ':' and '=' are // supported as well as the comment characters '!' and '#' and // multi-line values. // -// ! this is a comment -// # and so is this +// ! this is a comment +// # and so is this // -// # the following expressions are equal -// key value -// key=value -// key:value -// key = value -// key : value -// key = val\ -// ue +// # the following expressions are equal +// key value +// key=value +// key:value +// key = value +// key : value +// key = val\ +// ue // // Properties stores all comments preceding a key and provides // GetComments() and SetComments() methods to retrieve and @@ -55,62 +55,62 @@ // and malformed expressions are not allowed and cause an // error. Expansion of environment variables is supported. // -// # standard property -// key = value +// # standard property +// key = value // -// # property expansion: key2 = value -// key2 = ${key} +// # property expansion: key2 = value +// key2 = ${key} // -// # recursive expansion: key3 = value -// key3 = ${key2} +// # recursive expansion: key3 = value +// key3 = ${key2} // -// # circular reference (error) -// key = ${key} +// # circular reference (error) +// key = ${key} // -// # malformed expression (error) -// key = ${ke +// # malformed expression (error) +// key = ${ke // -// # refers to the users' home dir -// home = ${HOME} +// # refers to the users' home dir +// home = ${HOME} // -// # local key takes precedence over env var: u = foo -// USER = foo -// u = ${USER} +// # local key takes precedence over env var: u = foo +// USER = foo +// u = ${USER} // // The default property expansion format is ${key} but can be // changed by setting different pre- and postfix values on the // Properties object. // -// p := properties.NewProperties() -// p.Prefix = "#[" -// p.Postfix = "]#" +// p := properties.NewProperties() +// p.Prefix = "#[" +// p.Postfix = "]#" // // Properties provides convenience functions for getting typed // values with default values if the key does not exist or the // type conversion failed. // -// # Returns true if the value is either "1", "on", "yes" or "true" -// # Returns false for every other value and the default value if -// # the key does not exist. -// v = p.GetBool("key", false) +// # Returns true if the value is either "1", "on", "yes" or "true" +// # Returns false for every other value and the default value if +// # the key does not exist. +// v = p.GetBool("key", false) // -// # Returns the value if the key exists and the format conversion -// # was successful. Otherwise, the default value is returned. 
-// v = p.GetInt64("key", 999) -// v = p.GetUint64("key", 999) -// v = p.GetFloat64("key", 123.0) -// v = p.GetString("key", "def") -// v = p.GetDuration("key", 999) +// # Returns the value if the key exists and the format conversion +// # was successful. Otherwise, the default value is returned. +// v = p.GetInt64("key", 999) +// v = p.GetUint64("key", 999) +// v = p.GetFloat64("key", 123.0) +// v = p.GetString("key", "def") +// v = p.GetDuration("key", 999) // // As an alternative properties may be applied with the standard // library's flag implementation at any time. // -// # Standard configuration -// v = flag.Int("key", 999, "help message") -// flag.Parse() +// # Standard configuration +// v = flag.Int("key", 999, "help message") +// flag.Parse() // -// # Merge p into the flag set -// p.MustFlag(flag.CommandLine) +// # Merge p into the flag set +// p.MustFlag(flag.CommandLine) // // Properties provides several MustXXX() convenience functions // which will terminate the app if an error occurs. The behavior @@ -119,30 +119,30 @@ // of logging the error set a different ErrorHandler before // you use the Properties package. // -// properties.ErrorHandler = properties.PanicHandler +// properties.ErrorHandler = properties.PanicHandler // -// # Will panic instead of logging an error -// p := properties.MustLoadFile("config.properties") +// # Will panic instead of logging an error +// p := properties.MustLoadFile("config.properties") // // You can also provide your own ErrorHandler function. The only requirement // is that the error handler function must exit after handling the error. // -// properties.ErrorHandler = func(err error) { -// fmt.Println(err) -// os.Exit(1) -// } +// properties.ErrorHandler = func(err error) { +// fmt.Println(err) +// os.Exit(1) +// } // -// # Will write to stdout and then exit -// p := properties.MustLoadFile("config.properties") +// # Will write to stdout and then exit +// p := properties.MustLoadFile("config.properties") // // Properties can also be loaded into a struct via the `Decode` // method, e.g. // -// type S struct { -// A string `properties:"a,default=foo"` -// D time.Duration `properties:"timeout,default=5s"` -// E time.Time `properties:"expires,layout=2006-01-02,default=2015-01-01"` -// } +// type S struct { +// A string `properties:"a,default=foo"` +// D time.Duration `properties:"timeout,default=5s"` +// E time.Time `properties:"expires,layout=2006-01-02,default=2015-01-01"` +// } // // See `Decode()` method for the full documentation. // @@ -152,5 +152,4 @@ // http://en.wikipedia.org/wiki/.properties // // http://docs.oracle.com/javase/7/docs/api/java/util/Properties.html#load%28java.io.Reader%29 -// package properties diff --git a/vendor/github.com/magiconair/properties/integrate.go b/vendor/github.com/magiconair/properties/integrate.go index 74d38dc6..35d0ae97 100644 --- a/vendor/github.com/magiconair/properties/integrate.go +++ b/vendor/github.com/magiconair/properties/integrate.go @@ -1,4 +1,4 @@ -// Copyright 2018 Frank Schroeder. All rights reserved. +// Copyright 2013-2022 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -10,8 +10,9 @@ import "flag" // the respective key for flag.Flag.Name. 
// // It's use is recommended with command line arguments as in: -// flag.Parse() -// p.MustFlag(flag.CommandLine) +// +// flag.Parse() +// p.MustFlag(flag.CommandLine) func (p *Properties) MustFlag(dst *flag.FlagSet) { m := make(map[string]*flag.Flag) dst.VisitAll(func(f *flag.Flag) { diff --git a/vendor/github.com/magiconair/properties/lex.go b/vendor/github.com/magiconair/properties/lex.go index 367166d5..3d15a1f6 100644 --- a/vendor/github.com/magiconair/properties/lex.go +++ b/vendor/github.com/magiconair/properties/lex.go @@ -1,4 +1,4 @@ -// Copyright 2018 Frank Schroeder. All rights reserved. +// Copyright 2013-2022 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // @@ -128,18 +128,6 @@ func (l *lexer) acceptRun(valid string) { l.backup() } -// acceptRunUntil consumes a run of runes up to a terminator. -func (l *lexer) acceptRunUntil(term rune) { - for term != l.next() { - } - l.backup() -} - -// hasText returns true if the current parsed text is not empty. -func (l *lexer) isNotEmpty() bool { - return l.pos > l.start -} - // lineNumber reports which line we're on, based on the position of // the previous item returned by nextItem. Doing it this way // means we don't have to worry about peek double counting. diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go index ab953253..635368dc 100644 --- a/vendor/github.com/magiconair/properties/load.go +++ b/vendor/github.com/magiconair/properties/load.go @@ -1,4 +1,4 @@ -// Copyright 2018 Frank Schroeder. All rights reserved. +// Copyright 2013-2022 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -132,11 +132,12 @@ func (l *Loader) LoadURL(url string) (*Properties, error) { } ct := resp.Header.Get("Content-Type") + ct = strings.Join(strings.Fields(ct), "") var enc Encoding switch strings.ToLower(ct) { - case "text/plain", "text/plain; charset=iso-8859-1", "text/plain; charset=latin1": + case "text/plain", "text/plain;charset=iso-8859-1", "text/plain;charset=latin1": enc = ISO_8859_1 - case "", "text/plain; charset=utf-8": + case "", "text/plain;charset=utf-8": enc = UTF8 default: return nil, fmt.Errorf("properties: invalid content type %s", ct) diff --git a/vendor/github.com/magiconair/properties/parser.go b/vendor/github.com/magiconair/properties/parser.go index cdc4a803..fccfd39f 100644 --- a/vendor/github.com/magiconair/properties/parser.go +++ b/vendor/github.com/magiconair/properties/parser.go @@ -1,4 +1,4 @@ -// Copyright 2018 Frank Schroeder. All rights reserved. +// Copyright 2013-2022 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
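Stepping back to the integrate.go hunk above, which only reflows MustFlag's doc comment: a usage sketch of the flag bridging it documents. The flag name and inline properties are illustrative, and MustLoadString is assumed from the library's existing public API:

```go
package main

import (
	"flag"
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	port := flag.Int("port", 8080, "listen port")
	flag.Parse()

	// MustFlag fills in flags that were not set explicitly on the
	// command line from matching property keys.
	p := properties.MustLoadString("port = 9090\n")
	p.MustFlag(flag.CommandLine)

	fmt.Println(*port) // 9090 unless -port was passed
}
```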
@@ -59,14 +59,6 @@ func (p *parser) errorf(format string, args ...interface{}) { panic(fmt.Errorf(format, args...)) } -func (p *parser) expect(expected itemType) (token item) { - token = p.lex.nextItem() - if token.typ != expected { - p.unexpected(token) - } - return token -} - func (p *parser) expectOneOf(expected ...itemType) (token item) { token = p.lex.nextItem() for _, v := range expected { @@ -91,5 +83,4 @@ func (p *parser) recover(errp *error) { } *errp = e.(error) } - return } diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go index cb3d1a33..fb2f7b40 100644 --- a/vendor/github.com/magiconair/properties/properties.go +++ b/vendor/github.com/magiconair/properties/properties.go @@ -1,4 +1,4 @@ -// Copyright 2018 Frank Schroeder. All rights reserved. +// Copyright 2013-2022 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -8,11 +8,13 @@ package properties // BUG(frank): Write() does not allow to configure the newline character. Therefore, on Windows LF is used. import ( + "bytes" "fmt" "io" "log" "os" "regexp" + "sort" "strconv" "strings" "time" @@ -69,6 +71,9 @@ type Properties struct { // Stores the keys in order of appearance. k []string + + // WriteSeparator specifies the separator of key and value while writing the properties. + WriteSeparator string } // NewProperties creates a new Properties struct with the default @@ -111,7 +116,7 @@ func (p *Properties) Get(key string) (value string, ok bool) { // circular references and malformed expressions // so we panic if we still get an error here. if err != nil { - ErrorHandler(fmt.Errorf("%s in %q", err, key+" = "+v)) + ErrorHandler(err) } return expanded, true @@ -586,6 +591,12 @@ func (p *Properties) String() string { return s } +// Sort sorts the properties keys in alphabetical order. +// This is helpful before writing the properties. +func (p *Properties) Sort() { + sort.Strings(p.k) +} + // Write writes all unexpanded 'key = value' pairs to the given writer. // Write returns the number of bytes written and any write error encountered.
func (p *Properties) Write(w io.Writer, enc Encoding) (n int, err error) { @@ -626,7 +637,7 @@ func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n i } for _, c := range comments { - x, err = fmt.Fprintf(w, "%s%s\n", prefix, encode(c, "", enc)) + x, err = fmt.Fprintf(w, "%s%s\n", prefix, c) if err != nil { return } @@ -635,8 +646,11 @@ func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n i } } } - - x, err = fmt.Fprintf(w, "%s = %s\n", encode(key, " :", enc), encode(value, "", enc)) + sep := " = " + if p.WriteSeparator != "" { + sep = p.WriteSeparator + } + x, err = fmt.Fprintf(w, "%s%s%s\n", encode(key, " :", enc), sep, encode(value, "", enc)) if err != nil { return } @@ -686,22 +700,17 @@ func (p *Properties) Delete(key string) { // Merge merges properties, comments and keys from other *Properties into p func (p *Properties) Merge(other *Properties) { + for _, k := range other.k { + if _, ok := p.m[k]; !ok { + p.k = append(p.k, k) + } + } for k, v := range other.m { p.m[k] = v } for k, v := range other.c { p.c[k] = v } - -outer: - for _, otherKey := range other.k { - for _, key := range p.k { - if otherKey == key { - continue outer - } - } - p.k = append(p.k, otherKey) - } } // ---------------------------------------------------------------------------- @@ -753,7 +762,12 @@ func expand(s string, keys []string, prefix, postfix string, values map[string]s for _, k := range keys { if key == k { - return "", fmt.Errorf("circular reference") + var b bytes.Buffer + b.WriteString("circular reference in:\n") + for _, k1 := range keys { + fmt.Fprintf(&b, "%s=%s\n", k1, values[k1]) + } + return "", fmt.Errorf(b.String()) } } @@ -767,7 +781,6 @@ func expand(s string, keys []string, prefix, postfix string, values map[string]s } s = s[:start] + new_val + s[end+1:] } - return s, nil } // encode encodes a UTF-8 string to ISO-8859-1 and escapes some characters. @@ -820,6 +833,8 @@ func escape(r rune, special string) string { return "\\r" case '\t': return "\\t" + case '\\': + return "\\\\" default: if strings.ContainsRune(special, r) { return "\\" + string(r) diff --git a/vendor/github.com/magiconair/properties/rangecheck.go b/vendor/github.com/magiconair/properties/rangecheck.go index b013a2e5..dbd60b36 100644 --- a/vendor/github.com/magiconair/properties/rangecheck.go +++ b/vendor/github.com/magiconair/properties/rangecheck.go @@ -1,4 +1,4 @@ -// Copyright 2018 Frank Schroeder. All rights reserved. +// Copyright 2013-2022 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
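The properties.go hunks above add two writer-side features: a per-instance WriteSeparator and a Sort method (Merge also now preserves the other instance's key order, and escape additionally escapes backslashes). A minimal sketch of the new surface, using only the API visible in this diff plus NewProperties and Set from the library's public API; the keys are made up for illustration:

```go
package main

import (
	"os"

	"github.com/magiconair/properties"
)

func main() {
	p := properties.NewProperties()
	p.Set("b.key", "2") // illustrative keys, inserted out of order on purpose
	p.Set("a.key", "1")

	// New field: custom key/value separator; " = " stays the default
	// when WriteSeparator is left empty.
	p.WriteSeparator = "="

	// New method: sort keys alphabetically before writing.
	p.Sort()

	// Should print:
	//   a.key=1
	//   b.key=2
	if _, err := p.Write(os.Stdout, properties.UTF8); err != nil {
		panic(err)
	}
}
```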
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/BUILD.bazel b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/BUILD.bazel index aadcadf8..d5b71cde 100644 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/BUILD.bazel +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/BUILD.bazel @@ -10,5 +10,5 @@ go_library( importmap = "peridot.resf.org/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil", importpath = "github.com/matttproud/golang_protobuf_extensions/pbutil", visibility = ["//visibility:public"], - deps = ["@com_github_golang_protobuf//proto:go_default_library"], + deps = ["@com_github_golang_protobuf//proto"], ) diff --git a/vendor/github.com/mitchellh/go-homedir/BUILD.bazel b/vendor/github.com/mitchellh/go-homedir/BUILD.bazel deleted file mode 100644 index a6accc5d..00000000 --- a/vendor/github.com/mitchellh/go-homedir/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go-homedir", - srcs = ["homedir.go"], - importmap = "peridot.resf.org/vendor/github.com/mitchellh/go-homedir", - importpath = "github.com/mitchellh/go-homedir", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md deleted file mode 100644 index d70706d5..00000000 --- a/vendor/github.com/mitchellh/go-homedir/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# go-homedir - -This is a Go library for detecting the user's home directory without -the use of cgo, so the library can be used in cross-compilation environments. - -Usage is incredibly simple, just call `homedir.Dir()` to get the home directory -for a user, and `homedir.Expand()` to expand the `~` in a path to the home -directory. - -**Why not just use `os/user`?** The built-in `os/user` package requires -cgo on Darwin systems. This means that any Go code that uses that package -cannot cross compile. But 99% of the time the use for `os/user` is just to -retrieve the home directory, which we can do for the current user without -cgo. This library does that, enabling cross-compilation. diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go deleted file mode 100644 index 25378537..00000000 --- a/vendor/github.com/mitchellh/go-homedir/homedir.go +++ /dev/null @@ -1,167 +0,0 @@ -package homedir - -import ( - "bytes" - "errors" - "os" - "os/exec" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" -) - -// DisableCache will disable caching of the home directory. Caching is enabled -// by default. -var DisableCache bool - -var homedirCache string -var cacheLock sync.RWMutex - -// Dir returns the home directory for the executing user. -// -// This uses an OS-specific method for discovering the home directory. -// An error is returned if a home directory cannot be detected. -func Dir() (string, error) { - if !DisableCache { - cacheLock.RLock() - cached := homedirCache - cacheLock.RUnlock() - if cached != "" { - return cached, nil - } - } - - cacheLock.Lock() - defer cacheLock.Unlock() - - var result string - var err error - if runtime.GOOS == "windows" { - result, err = dirWindows() - } else { - // Unix-like system, so just assume Unix - result, err = dirUnix() - } - - if err != nil { - return "", err - } - homedirCache = result - return result, nil -} - -// Expand expands the path to include the home directory if the path -// is prefixed with `~`. 
If it isn't prefixed with `~`, the path is -// returned as-is. -func Expand(path string) (string, error) { - if len(path) == 0 { - return path, nil - } - - if path[0] != '~' { - return path, nil - } - - if len(path) > 1 && path[1] != '/' && path[1] != '\\' { - return "", errors.New("cannot expand user-specific home dir") - } - - dir, err := Dir() - if err != nil { - return "", err - } - - return filepath.Join(dir, path[1:]), nil -} - -// Reset clears the cache, forcing the next call to Dir to re-detect -// the home directory. This generally never has to be called, but can be -// useful in tests if you're modifying the home directory via the HOME -// env var or something. -func Reset() { - cacheLock.Lock() - defer cacheLock.Unlock() - homedirCache = "" -} - -func dirUnix() (string, error) { - homeEnv := "HOME" - if runtime.GOOS == "plan9" { - // On plan9, env vars are lowercase. - homeEnv = "home" - } - - // First prefer the HOME environmental variable - if home := os.Getenv(homeEnv); home != "" { - return home, nil - } - - var stdout bytes.Buffer - - // If that fails, try OS specific commands - if runtime.GOOS == "darwin" { - cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`) - cmd.Stdout = &stdout - if err := cmd.Run(); err == nil { - result := strings.TrimSpace(stdout.String()) - if result != "" { - return result, nil - } - } - } else { - cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) - cmd.Stdout = &stdout - if err := cmd.Run(); err != nil { - // If the error is ErrNotFound, we ignore it. Otherwise, return it. - if err != exec.ErrNotFound { - return "", err - } - } else { - if passwd := strings.TrimSpace(stdout.String()); passwd != "" { - // username:password:uid:gid:gecos:home:shell - passwdParts := strings.SplitN(passwd, ":", 7) - if len(passwdParts) > 5 { - return passwdParts[5], nil - } - } - } - } - - // If all else fails, try the shell - stdout.Reset() - cmd := exec.Command("sh", "-c", "cd && pwd") - cmd.Stdout = &stdout - if err := cmd.Run(); err != nil { - return "", err - } - - result := strings.TrimSpace(stdout.String()) - if result == "" { - return "", errors.New("blank output when reading home directory") - } - - return result, nil -} - -func dirWindows() (string, error) { - // First prefer the HOME environmental variable - if home := os.Getenv("HOME"); home != "" { - return home, nil - } - - // Prefer standard environment variable USERPROFILE - if home := os.Getenv("USERPROFILE"); home != "" { - return home, nil - } - - drive := os.Getenv("HOMEDRIVE") - path := os.Getenv("HOMEPATH") - home := drive + path - if drive == "" || path == "" { - return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank") - } - - return home, nil -} diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md index 1955f287..c7582349 100644 --- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -1,6 +1,29 @@ -## unreleased +## 1.5.0 -* Fix regression where `*time.Time` value would be set to empty and not be sent +* New option `IgnoreUntaggedFields` to ignore decoding to any fields + without `mapstructure` (or the configured tag name) set [GH-277] +* New option `ErrorUnset` which makes it an error if any fields + in a target struct are not set by the decoding process. [GH-225] +* New function `OrComposeDecodeHookFunc` to help compose decode hooks. 
[GH-240] +* Decoding to slice from array no longer crashes [GH-265] +* Decode nested struct pointers to map [GH-271] +* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280] +* Fix issue where fields with `,omitempty` would sometimes decode + into a map with an empty string key [GH-281] + +## 1.4.3 + +* Fix cases where `json.Number` didn't decode properly [GH-261] + +## 1.4.2 + +* Custom name matchers to support any sort of casing, formatting, etc. for + field names. [GH-250] +* Fix possible panic in ComposeDecodeHookFunc [GH-251] + +## 1.4.1 + +* Fix regression where `*time.Time` value would be set to empty and not be sent to decode hooks properly [GH-232] ## 1.4.0 diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go index 92e6f76f..3a754ca7 100644 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -62,7 +62,8 @@ func DecodeHookExec( func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { return func(f reflect.Value, t reflect.Value) (interface{}, error) { var err error - var data interface{} + data := f.Interface() + newFrom := f for _, f1 := range fs { data, err = DecodeHookExec(f1, newFrom, t) @@ -76,6 +77,28 @@ func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { } } +// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. +// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. +func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { + return func(a, b reflect.Value) (interface{}, error) { + var allErrs string + var out interface{} + var err error + + for _, f := range ff { + out, err = DecodeHookExec(f, a, b) + if err != nil { + allErrs += err.Error() + "\n" + continue + } + + return out, nil + } + + return nil, errors.New(allErrs) + } +} + // StringToSliceHookFunc returns a DecodeHookFunc that converts // string to []string by splitting on the given sep. func StringToSliceHookFunc(sep string) DecodeHookFunc { diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index 3643901f..1efb22ac 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -122,7 +122,7 @@ // field value is zero and a numeric type, the field is empty, and it won't // be encoded into the destination type. // -// type Source { +// type Source struct { // Age int `mapstructure:",omitempty"` // } // @@ -192,7 +192,7 @@ type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface // source and target types. type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) -// DecodeHookFuncRaw is a DecodeHookFunc which has complete access to both the source and target +// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target // values. type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) @@ -215,6 +215,12 @@ type DecoderConfig struct { // (extra keys). ErrorUnused bool + // If ErrorUnset is true, then it is an error for there to exist + // fields in the result that were not set in the decoding process + // (extra fields). This only applies to decoding to a struct. This + // will affect all nested structs as well. 
+ ErrorUnset bool + // ZeroFields, if set to true, will zero fields before writing them. // For example, a map will be emptied before decoded values are put in // it. If this is false, a map will be merged. @@ -258,6 +264,15 @@ type DecoderConfig struct { // The tag name that mapstructure reads for field names. This // defaults to "mapstructure" TagName string + + // IgnoreUntaggedFields ignores all struct fields without explicit + // TagName, comparable to `mapstructure:"-"` as default behaviour. + IgnoreUntaggedFields bool + + // MatchName is the function used to match the map key to the struct + // field name or tag. Defaults to `strings.EqualFold`. This can be used + // to implement case-sensitive tag values, support snake casing, etc. + MatchName func(mapKey, fieldName string) bool } // A Decoder takes a raw interface value and turns it into structured @@ -279,6 +294,11 @@ type Metadata struct { // Unused is a slice of keys that were found in the raw value but // weren't decoded since there was no matching field in the result interface Unused []string + + // Unset is a slice of field names that were found in the result interface + // but weren't set in the decoding process since there was no matching value + // in the input + Unset []string } // Decode takes an input structure and uses reflection to translate it to @@ -370,12 +390,20 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) { if config.Metadata.Unused == nil { config.Metadata.Unused = make([]string, 0) } + + if config.Metadata.Unset == nil { + config.Metadata.Unset = make([]string, 0) + } } if config.TagName == "" { config.TagName = "mapstructure" } + if config.MatchName == nil { + config.MatchName = strings.EqualFold + } + result := &Decoder{ config: config, } @@ -675,16 +703,12 @@ func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) e } case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": jn := data.(json.Number) - i, err := jn.Int64() + i, err := strconv.ParseUint(string(jn), 0, 64) if err != nil { return fmt.Errorf( "error decoding json.Number into %s: %s", name, err) } - if i < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot parse '%s', %d overflows uint", - name, i) - } - val.SetUint(uint64(i)) + val.SetUint(i) default: return fmt.Errorf( "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", @@ -901,9 +925,15 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re tagValue := f.Tag.Get(d.config.TagName) keyName := f.Name + if tagValue == "" && d.config.IgnoreUntaggedFields { + continue + } + // If Squash is set in the config, we squash the field down. squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous + v = dereferencePtrToStructIfNeeded(v, d.config.TagName) + // Determine the name of the key in the map if index := strings.Index(tagValue, ","); index != -1 { if tagValue[:index] == "-" { @@ -915,7 +945,7 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re } // If "squash" is specified in the tag, we squash the field down. - squash = !squash && strings.Index(tagValue[index+1:], "squash") != -1 + squash = squash || strings.Index(tagValue[index+1:], "squash") != -1 if squash { // When squashing, the embedded type can be a pointer to a struct. 
if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { @@ -927,7 +957,9 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) } } - keyName = tagValue[:index] + if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { + keyName = keyNameTagValue + } } else if len(tagValue) > 0 { if tagValue == "-" { continue @@ -1083,7 +1115,7 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) } // If the input value is nil, then don't allocate since empty != nil - if dataVal.IsNil() { + if dataValKind != reflect.Array && dataVal.IsNil() { return nil } @@ -1245,6 +1277,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e dataValKeysUnused[dataValKey.Interface()] = struct{}{} } + targetValKeysUnused := make(map[interface{}]struct{}) errors := make([]string, 0) // This slice will keep track of all the structs we'll be decoding. @@ -1340,7 +1373,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e continue } - if strings.EqualFold(mK, fieldName) { + if d.config.MatchName(mK, fieldName) { rawMapKey = dataValKey rawMapVal = dataVal.MapIndex(dataValKey) break @@ -1349,7 +1382,8 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e if !rawMapVal.IsValid() { // There was no matching key in the map for the value in - // the struct. Just ignore. + // the struct. Remember it for potential errors and metadata. + targetValKeysUnused[fieldName] = struct{}{} continue } } @@ -1409,6 +1443,17 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e errors = appendErrors(errors, err) } + if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { + keys := make([]string, 0, len(targetValKeysUnused)) + for rawKey := range targetValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + if len(errors) > 0 { return &Error{errors} } @@ -1423,6 +1468,14 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) } + for rawKey := range targetValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." 
+ key + } + + d.config.Metadata.Unset = append(d.config.Metadata.Unset, key) + } } return nil @@ -1460,3 +1513,28 @@ func getKind(val reflect.Value) reflect.Kind { return kind } } + +func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool { + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields + return true + } + if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside + return true + } + } + return false +} + +func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + return v + } + deref := v.Elem() + derefT := deref.Type() + if isStructTypeConvertibleToMap(derefT, true, tagName) { + return deref + } + return v +} diff --git a/vendor/github.com/pelletier/go-toml/BUILD.bazel b/vendor/github.com/pelletier/go-toml/BUILD.bazel deleted file mode 100644 index 345c8f40..00000000 --- a/vendor/github.com/pelletier/go-toml/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go-toml", - srcs = [ - "doc.go", - "keysparsing.go", - "lexer.go", - "localtime.go", - "marshal.go", - "parser.go", - "position.go", - "token.go", - "toml.go", - "tomltree_create.go", - "tomltree_write.go", - ], - importmap = "peridot.resf.org/vendor/github.com/pelletier/go-toml", - importpath = "github.com/pelletier/go-toml", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md b/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md deleted file mode 100644 index 405c911c..00000000 --- a/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md +++ /dev/null @@ -1,132 +0,0 @@ -## Contributing - -Thank you for your interest in go-toml! We appreciate you considering -contributing to go-toml! - -The main goal is the project is to provide an easy-to-use TOML -implementation for Go that gets the job done and gets out of your way – -dealing with TOML is probably not the central piece of your project. - -As the single maintainer of go-toml, time is scarce. All help, big or -small, is more than welcomed! - -### Ask questions - -Any question you may have, somebody else might have it too. Always feel -free to ask them on the [issues tracker][issues-tracker]. We will try to -answer them as clearly and quickly as possible, time permitting. - -Asking questions also helps us identify areas where the documentation needs -improvement, or new features that weren't envisioned before. Sometimes, a -seemingly innocent question leads to the fix of a bug. Don't hesitate and -ask away! - -### Improve the documentation - -The best way to share your knowledge and experience with go-toml is to -improve the documentation. Fix a typo, clarify an interface, add an -example, anything goes! - -The documentation is present in the [README][readme] and thorough the -source code. On release, it gets updated on [GoDoc][godoc]. To make a -change to the documentation, create a pull request with your proposed -changes. For simple changes like that, the easiest way to go is probably -the "Fork this project and edit the file" button on Github, displayed at -the top right of the file. Unless it's a trivial change (for example a -typo), provide a little bit of context in your pull request description or -commit message. - -### Report a bug - -Found a bug! Sorry to hear that :(. 
Help us and other track them down and -fix by reporting it. [File a new bug report][bug-report] on the [issues -tracker][issues-tracker]. The template should provide enough guidance on -what to include. When in doubt: add more details! By reducing ambiguity and -providing more information, it decreases back and forth and saves everyone -time. - -### Code changes - -Want to contribute a patch? Very happy to hear that! - -First, some high-level rules: - -* A short proposal with some POC code is better than a lengthy piece of - text with no code. Code speaks louder than words. -* No backward-incompatible patch will be accepted unless discussed. - Sometimes it's hard, and Go's lack of versioning by default does not - help, but we try not to break people's programs unless we absolutely have - to. -* If you are writing a new feature or extending an existing one, make sure - to write some documentation. -* Bug fixes need to be accompanied with regression tests. -* New code needs to be tested. -* Your commit messages need to explain why the change is needed, even if - already included in the PR description. - -It does sound like a lot, but those best practices are here to save time -overall and continuously improve the quality of the project, which is -something everyone benefits from. - -#### Get started - -The fairly standard code contribution process looks like that: - -1. [Fork the project][fork]. -2. Make your changes, commit on any branch you like. -3. [Open up a pull request][pull-request] -4. Review, potential ask for changes. -5. Merge. You're in! - -Feel free to ask for help! You can create draft pull requests to gather -some early feedback! - -#### Run the tests - -You can run tests for go-toml using Go's test tool: `go test ./...`. -When creating a pull requests, all tests will be ran on Linux on a few Go -versions (Travis CI), and on Windows using the latest Go version -(AppVeyor). - -#### Style - -Try to look around and follow the same format and structure as the rest of -the code. We enforce using `go fmt` on the whole code base. - ---- - -### Maintainers-only - -#### Merge pull request - -Checklist: - -* Passing CI. -* Does not introduce backward-incompatible changes (unless discussed). -* Has relevant doc changes. -* Has relevant unit tests. - -1. Merge using "squash and merge". -2. Make sure to edit the commit message to keep all the useful information - nice and clean. -3. Make sure the commit title is clear and contains the PR number (#123). - -#### New release - -1. Go to [releases][releases]. Click on "X commits to master since this - release". -2. Make note of all the changes. Look for backward incompatible changes, - new features, and bug fixes. -3. Pick the new version using the above and semver. -4. Create a [new release][new-release]. -5. Follow the same format as [1.1.0][release-110]. 
- -[issues-tracker]: https://github.com/pelletier/go-toml/issues -[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md -[godoc]: https://godoc.org/github.com/pelletier/go-toml -[readme]: ./README.md -[fork]: https://help.github.com/articles/fork-a-repo -[pull-request]: https://help.github.com/en/articles/creating-a-pull-request -[releases]: https://github.com/pelletier/go-toml/releases -[new-release]: https://github.com/pelletier/go-toml/releases/new -[release-110]: https://github.com/pelletier/go-toml/releases/tag/v1.1.0 diff --git a/vendor/github.com/pelletier/go-toml/Dockerfile b/vendor/github.com/pelletier/go-toml/Dockerfile deleted file mode 100644 index fffdb016..00000000 --- a/vendor/github.com/pelletier/go-toml/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM golang:1.12-alpine3.9 as builder -WORKDIR /go/src/github.com/pelletier/go-toml -COPY . . -ENV CGO_ENABLED=0 -ENV GOOS=linux -RUN go install ./... - -FROM scratch -COPY --from=builder /go/bin/tomll /usr/bin/tomll -COPY --from=builder /go/bin/tomljson /usr/bin/tomljson -COPY --from=builder /go/bin/jsontoml /usr/bin/jsontoml diff --git a/vendor/github.com/pelletier/go-toml/Makefile b/vendor/github.com/pelletier/go-toml/Makefile deleted file mode 100644 index 9e4503ae..00000000 --- a/vendor/github.com/pelletier/go-toml/Makefile +++ /dev/null @@ -1,29 +0,0 @@ -export CGO_ENABLED=0 -go := go -go.goos ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f1) -go.goarch ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f2) - -out.tools := tomll tomljson jsontoml -out.dist := $(out.tools:=_$(go.goos)_$(go.goarch).tar.xz) -sources := $(wildcard **/*.go) - - -.PHONY: -tools: $(out.tools) - -$(out.tools): $(sources) - GOOS=$(go.goos) GOARCH=$(go.goarch) $(go) build ./cmd/$@ - -.PHONY: -dist: $(out.dist) - -$(out.dist):%_$(go.goos)_$(go.goarch).tar.xz: % - if [ "$(go.goos)" = "windows" ]; then \ - tar -cJf $@ $^.exe; \ - else \ - tar -cJf $@ $^; \ - fi - -.PHONY: -clean: - rm -rf $(out.tools) $(out.dist) diff --git a/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 041cdc4a..00000000 --- a/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,5 +0,0 @@ -**Issue:** add link to pelletier/go-toml issue here - -Explanation of what this pull request does. - -More detailed description of the decisions being made and the reasons why (if the patch is non-trivial). diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md deleted file mode 100644 index 6831deb5..00000000 --- a/vendor/github.com/pelletier/go-toml/README.md +++ /dev/null @@ -1,151 +0,0 @@ -# go-toml - -Go library for the [TOML](https://github.com/mojombo/toml) format. 
- -This library supports TOML version -[v1.0.0-rc.1](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v1.0.0-rc.1.md) - -[![GoDoc](https://godoc.org/github.com/pelletier/go-toml?status.svg)](http://godoc.org/github.com/pelletier/go-toml) -[![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE) -[![Build Status](https://dev.azure.com/pelletierthomas/go-toml-ci/_apis/build/status/pelletier.go-toml?branchName=master)](https://dev.azure.com/pelletierthomas/go-toml-ci/_build/latest?definitionId=1&branchName=master) -[![codecov](https://codecov.io/gh/pelletier/go-toml/branch/master/graph/badge.svg)](https://codecov.io/gh/pelletier/go-toml) -[![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml) -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml?ref=badge_shield) - -## Features - -Go-toml provides the following features for using data parsed from TOML documents: - -* Load TOML documents from files and string data -* Easily navigate TOML structure using Tree -* Marshaling and unmarshaling to and from data structures -* Line & column position data for all parsed elements -* [Query support similar to JSON-Path](query/) -* Syntax errors contain line and column numbers - -## Import - -```go -import "github.com/pelletier/go-toml" -``` - -## Usage example - -Read a TOML document: - -```go -config, _ := toml.Load(` -[postgres] -user = "pelletier" -password = "mypassword"`) -// retrieve data directly -user := config.Get("postgres.user").(string) - -// or using an intermediate object -postgresConfig := config.Get("postgres").(*toml.Tree) -password := postgresConfig.Get("password").(string) -``` - -Or use Unmarshal: - -```go -type Postgres struct { - User string - Password string -} -type Config struct { - Postgres Postgres -} - -doc := []byte(` -[Postgres] -User = "pelletier" -Password = "mypassword"`) - -config := Config{} -toml.Unmarshal(doc, &config) -fmt.Println("user=", config.Postgres.User) -``` - -Or use a query: - -```go -// use a query to gather elements without walking the tree -q, _ := query.Compile("$..[user,password]") -results := q.Execute(config) -for ii, item := range results.Values() { - fmt.Printf("Query result %d: %v\n", ii, item) -} -``` - -## Documentation - -The documentation and additional examples are available at -[godoc.org](http://godoc.org/github.com/pelletier/go-toml). - -## Tools - -Go-toml provides two handy command line tools: - -* `tomll`: Reads TOML files and lints them. - - ``` - go install github.com/pelletier/go-toml/cmd/tomll - tomll --help - ``` -* `tomljson`: Reads a TOML file and outputs its JSON representation. - - ``` - go install github.com/pelletier/go-toml/cmd/tomljson - tomljson --help - ``` - - * `jsontoml`: Reads a JSON file and outputs a TOML representation. - - ``` - go install github.com/pelletier/go-toml/cmd/jsontoml - jsontoml --help - ``` - -### Docker image - -Those tools are also availble as a Docker image from -[dockerhub](https://hub.docker.com/r/pelletier/go-toml). For example, to -use `tomljson`: - -``` -docker run -v $PWD:/workdir pelletier/go-toml tomljson /workdir/example.toml -``` - -Only master (`latest`) and tagged versions are published to dockerhub. You -can build your own image as usual: - -``` -docker build -t go-toml . 
-``` - -## Contribute - -Feel free to report bugs and patches using GitHub's pull requests system on -[pelletier/go-toml](https://github.com/pelletier/go-toml). Any feedback would be -much appreciated! - -### Run tests - -`go test ./...` - -### Fuzzing - -The script `./fuzz.sh` is available to -run [go-fuzz](https://github.com/dvyukov/go-fuzz) on go-toml. - -## Versioning - -Go-toml follows [Semantic Versioning](http://semver.org/). The supported version -of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of -this document. The last two major versions of Go are supported -(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)). - -## License - -The MIT License (MIT). Read [LICENSE](LICENSE). diff --git a/vendor/github.com/pelletier/go-toml/azure-pipelines.yml b/vendor/github.com/pelletier/go-toml/azure-pipelines.yml deleted file mode 100644 index ff5376b0..00000000 --- a/vendor/github.com/pelletier/go-toml/azure-pipelines.yml +++ /dev/null @@ -1,230 +0,0 @@ -trigger: -- master - -stages: -- stage: fuzzit - displayName: "Run Fuzzit" - dependsOn: [] - condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master')) - jobs: - - job: submit - displayName: "Submit" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.15" - inputs: - version: "1.15" - - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" - - script: mkdir -p ${HOME}/go/src/github.com/pelletier/go-toml - - script: cp -R . ${HOME}/go/src/github.com/pelletier/go-toml - - task: Bash@3 - inputs: - filePath: './fuzzit.sh' - env: - TYPE: fuzzing - FUZZIT_API_KEY: $(FUZZIT_API_KEY) - -- stage: run_checks - displayName: "Check" - dependsOn: [] - jobs: - - job: fmt - displayName: "fmt" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.15" - inputs: - version: "1.15" - - task: Go@0 - displayName: "go fmt ./..." - inputs: - command: 'custom' - customCommand: 'fmt' - arguments: './...' - - job: coverage - displayName: "coverage" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.15" - inputs: - version: "1.15" - - task: Go@0 - displayName: "Generate coverage" - inputs: - command: 'test' - arguments: "-race -coverprofile=coverage.txt -covermode=atomic" - - task: Bash@3 - inputs: - targetType: 'inline' - script: 'bash <(curl -s https://codecov.io/bash) -t ${CODECOV_TOKEN}' - env: - CODECOV_TOKEN: $(CODECOV_TOKEN) - - job: benchmark - displayName: "benchmark" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.15" - inputs: - version: "1.15" - - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" - - task: Bash@3 - inputs: - filePath: './benchmark.sh' - arguments: "master $(Build.Repository.Uri)" - - - job: fuzzing - displayName: "fuzzing" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.15" - inputs: - version: "1.15" - - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" - - script: mkdir -p ${HOME}/go/src/github.com/pelletier/go-toml - - script: cp -R . 
${HOME}/go/src/github.com/pelletier/go-toml - - task: Bash@3 - inputs: - filePath: './fuzzit.sh' - env: - TYPE: local-regression - - - job: go_unit_tests - displayName: "unit tests" - strategy: - matrix: - linux 1.15: - goVersion: '1.15' - imageName: 'ubuntu-latest' - mac 1.15: - goVersion: '1.15' - imageName: 'macOS-latest' - windows 1.15: - goVersion: '1.15' - imageName: 'windows-latest' - linux 1.14: - goVersion: '1.14' - imageName: 'ubuntu-latest' - mac 1.14: - goVersion: '1.14' - imageName: 'macOS-latest' - windows 1.14: - goVersion: '1.14' - imageName: 'windows-latest' - pool: - vmImage: $(imageName) - steps: - - task: GoTool@0 - displayName: "Install Go $(goVersion)" - inputs: - version: $(goVersion) - - task: Go@0 - displayName: "go test ./..." - inputs: - command: 'test' - arguments: './...' -- stage: build_binaries - displayName: "Build binaries" - dependsOn: run_checks - jobs: - - job: build_binary - displayName: "Build binary" - strategy: - matrix: - linux_amd64: - GOOS: linux - GOARCH: amd64 - darwin_amd64: - GOOS: darwin - GOARCH: amd64 - windows_amd64: - GOOS: windows - GOARCH: amd64 - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go" - inputs: - version: 1.15 - - task: Bash@3 - inputs: - targetType: inline - script: "make dist" - env: - go.goos: $(GOOS) - go.goarch: $(GOARCH) - - task: CopyFiles@2 - inputs: - sourceFolder: '$(Build.SourcesDirectory)' - contents: '*.tar.xz' - TargetFolder: '$(Build.ArtifactStagingDirectory)' - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: '$(Build.ArtifactStagingDirectory)' - artifactName: binaries -- stage: build_binaries_manifest - displayName: "Build binaries manifest" - dependsOn: build_binaries - jobs: - - job: build_manifest - displayName: "Build binaries manifest" - steps: - - task: DownloadBuildArtifacts@0 - inputs: - buildType: 'current' - downloadType: 'single' - artifactName: 'binaries' - downloadPath: '$(Build.SourcesDirectory)' - - task: Bash@3 - inputs: - targetType: inline - script: "cd binaries && sha256sum --binary *.tar.xz | tee $(Build.ArtifactStagingDirectory)/sha256sums.txt" - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: '$(Build.ArtifactStagingDirectory)' - artifactName: manifest - -- stage: build_docker_image - displayName: "Build Docker image" - dependsOn: run_checks - jobs: - - job: build - displayName: "Build" - pool: - vmImage: ubuntu-latest - steps: - - task: Docker@2 - inputs: - command: 'build' - Dockerfile: 'Dockerfile' - buildContext: '.' - addPipelineData: false - -- stage: publish_docker_image - displayName: "Publish Docker image" - dependsOn: build_docker_image - condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master')) - jobs: - - job: publish - displayName: "Publish" - pool: - vmImage: ubuntu-latest - steps: - - task: Docker@2 - inputs: - containerRegistry: 'DockerHub' - repository: 'pelletier/go-toml' - command: 'buildAndPush' - Dockerfile: 'Dockerfile' - buildContext: '.' - tags: 'latest' diff --git a/vendor/github.com/pelletier/go-toml/benchmark.sh b/vendor/github.com/pelletier/go-toml/benchmark.sh deleted file mode 100644 index a69d3040..00000000 --- a/vendor/github.com/pelletier/go-toml/benchmark.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -set -ex - -reference_ref=${1:-master} -reference_git=${2:-.} - -if ! 
`hash benchstat 2>/dev/null`; then - echo "Installing benchstat" - go get golang.org/x/perf/cmd/benchstat -fi - -tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX` -ref_tempdir="${tempdir}/ref" -ref_benchmark="${ref_tempdir}/benchmark-`echo -n ${reference_ref}|tr -s '/' '-'`.txt" -local_benchmark="`pwd`/benchmark-local.txt" - -echo "=== ${reference_ref} (${ref_tempdir})" -git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null -pushd ${ref_tempdir} >/dev/null -git checkout ${reference_ref} >/dev/null 2>/dev/null -go test -bench=. -benchmem | tee ${ref_benchmark} -cd benchmark -go test -bench=. -benchmem | tee -a ${ref_benchmark} -popd >/dev/null - -echo "" -echo "=== local" -go test -bench=. -benchmem | tee ${local_benchmark} -cd benchmark -go test -bench=. -benchmem | tee -a ${local_benchmark} - -echo "" -echo "=== diff" -benchstat -delta-test=none ${ref_benchmark} ${local_benchmark} diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go deleted file mode 100644 index a1406a32..00000000 --- a/vendor/github.com/pelletier/go-toml/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Package toml is a TOML parser and manipulation library. -// -// This version supports the specification as described in -// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md -// -// Marshaling -// -// Go-toml can marshal and unmarshal TOML documents from and to data -// structures. -// -// TOML document as a tree -// -// Go-toml can operate on a TOML document as a tree. Use one of the Load* -// functions to parse TOML data and obtain a Tree instance, then one of its -// methods to manipulate the tree. -// -// JSONPath-like queries -// -// The package github.com/pelletier/go-toml/query implements a system -// similar to JSONPath to quickly retrieve elements of a TOML document using a -// single expression. See the package documentation for more information. -// -package toml diff --git a/vendor/github.com/pelletier/go-toml/example-crlf.toml b/vendor/github.com/pelletier/go-toml/example-crlf.toml deleted file mode 100644 index f45bf88b..00000000 --- a/vendor/github.com/pelletier/go-toml/example-crlf.toml +++ /dev/null @@ -1,30 +0,0 @@ -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. - [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it -score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/example.toml b/vendor/github.com/pelletier/go-toml/example.toml deleted file mode 100644 index f45bf88b..00000000 --- a/vendor/github.com/pelletier/go-toml/example.toml +++ /dev/null @@ -1,30 +0,0 @@ -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? 
- -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. - [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it -score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/fuzz.go b/vendor/github.com/pelletier/go-toml/fuzz.go deleted file mode 100644 index 14570c8d..00000000 --- a/vendor/github.com/pelletier/go-toml/fuzz.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build gofuzz - -package toml - -func Fuzz(data []byte) int { - tree, err := LoadBytes(data) - if err != nil { - if tree != nil { - panic("tree must be nil if there is an error") - } - return 0 - } - - str, err := tree.ToTomlString() - if err != nil { - if str != "" { - panic(`str must be "" if there is an error`) - } - panic(err) - } - - tree, err = Load(str) - if err != nil { - if tree != nil { - panic("tree must be nil if there is an error") - } - return 0 - } - - return 1 -} diff --git a/vendor/github.com/pelletier/go-toml/fuzz.sh b/vendor/github.com/pelletier/go-toml/fuzz.sh deleted file mode 100644 index 3204b4c4..00000000 --- a/vendor/github.com/pelletier/go-toml/fuzz.sh +++ /dev/null @@ -1,15 +0,0 @@ -#! /bin/sh -set -eu - -go get github.com/dvyukov/go-fuzz/go-fuzz -go get github.com/dvyukov/go-fuzz/go-fuzz-build - -if [ ! -e toml-fuzz.zip ]; then - go-fuzz-build github.com/pelletier/go-toml -fi - -rm -fr fuzz -mkdir -p fuzz/corpus -cp *.toml fuzz/corpus - -go-fuzz -bin=toml-fuzz.zip -workdir=fuzz diff --git a/vendor/github.com/pelletier/go-toml/fuzzit.sh b/vendor/github.com/pelletier/go-toml/fuzzit.sh deleted file mode 100644 index b575a608..00000000 --- a/vendor/github.com/pelletier/go-toml/fuzzit.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -set -xe - -# go-fuzz doesn't support modules yet, so ensure we do everything -# in the old style GOPATH way -export GO111MODULE="off" - -# install go-fuzz -go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build - -# target name can only contain lower-case letters (a-z), digits (0-9) and a dash (-) -# to add another target, make sure to create it with `fuzzit create target` -# before using `fuzzit create job` -TARGET=toml-fuzzer - -go-fuzz-build -libfuzzer -o ${TARGET}.a github.com/pelletier/go-toml -clang -fsanitize=fuzzer ${TARGET}.a -o ${TARGET} - -# install fuzzit for talking to fuzzit.dev service -# or latest version: -# https://github.com/fuzzitdev/fuzzit/releases/latest/download/fuzzit_Linux_x86_64 -wget -q -O fuzzit https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.52/fuzzit_Linux_x86_64 -chmod a+x fuzzit - -# TODO: change kkowalczyk to go-toml and create toml-fuzzer target there -./fuzzit create job --type $TYPE go-toml/${TARGET} ${TARGET} diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go deleted file mode 100644 index e091500b..00000000 --- a/vendor/github.com/pelletier/go-toml/keysparsing.go +++ /dev/null @@ -1,112 +0,0 @@ -// Parsing keys handling both bare and quoted keys. - -package toml - -import ( - "errors" - "fmt" -) - -// Convert the bare key group string to an array. -// The input supports double quotation and single quotation, -// but escape sequences are not supported. 
Lexers must unescape them beforehand. -func parseKey(key string) ([]string, error) { - runes := []rune(key) - var groups []string - - if len(key) == 0 { - return nil, errors.New("empty key") - } - - idx := 0 - for idx < len(runes) { - for ; idx < len(runes) && isSpace(runes[idx]); idx++ { - // skip leading whitespace - } - if idx >= len(runes) { - break - } - r := runes[idx] - if isValidBareChar(r) { - // parse bare key - startIdx := idx - endIdx := -1 - idx++ - for idx < len(runes) { - r = runes[idx] - if isValidBareChar(r) { - idx++ - } else if r == '.' { - endIdx = idx - break - } else if isSpace(r) { - endIdx = idx - for ; idx < len(runes) && isSpace(runes[idx]); idx++ { - // skip trailing whitespace - } - if idx < len(runes) && runes[idx] != '.' { - return nil, fmt.Errorf("invalid key character after whitespace: %c", runes[idx]) - } - break - } else { - return nil, fmt.Errorf("invalid bare key character: %c", r) - } - } - if endIdx == -1 { - endIdx = idx - } - groups = append(groups, string(runes[startIdx:endIdx])) - } else if r == '\'' { - // parse single quoted key - idx++ - startIdx := idx - for { - if idx >= len(runes) { - return nil, fmt.Errorf("unclosed single-quoted key") - } - r = runes[idx] - if r == '\'' { - groups = append(groups, string(runes[startIdx:idx])) - idx++ - break - } - idx++ - } - } else if r == '"' { - // parse double quoted key - idx++ - startIdx := idx - for { - if idx >= len(runes) { - return nil, fmt.Errorf("unclosed double-quoted key") - } - r = runes[idx] - if r == '"' { - groups = append(groups, string(runes[startIdx:idx])) - idx++ - break - } - idx++ - } - } else if r == '.' { - idx++ - if idx >= len(runes) { - return nil, fmt.Errorf("unexpected end of key") - } - r = runes[idx] - if !isValidBareChar(r) && r != '\'' && r != '"' && r != ' ' { - return nil, fmt.Errorf("expecting key part after dot") - } - } else { - return nil, fmt.Errorf("invalid key character: %c", r) - } - } - if len(groups) == 0 { - return nil, fmt.Errorf("empty key") - } - return groups, nil -} - -func isValidBareChar(r rune) bool { - return isAlphanumeric(r) || r == '-' || isDigit(r) -} diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go deleted file mode 100644 index b1886192..00000000 --- a/vendor/github.com/pelletier/go-toml/lexer.go +++ /dev/null @@ -1,807 +0,0 @@ -// TOML lexer. 
-// -// Written using the principles developed by Rob Pike in -// http://www.youtube.com/watch?v=HxaD_trXwRE - -package toml - -import ( - "bytes" - "errors" - "fmt" - "regexp" - "strconv" - "strings" -) - -var dateRegexp *regexp.Regexp - -// Define state functions -type tomlLexStateFn func() tomlLexStateFn - -// Define lexer -type tomlLexer struct { - inputIdx int - input []rune // Textual source - currentTokenStart int - currentTokenStop int - tokens []token - brackets []rune - line int - col int - endbufferLine int - endbufferCol int -} - -// Basic read operations on input - -func (l *tomlLexer) read() rune { - r := l.peek() - if r == '\n' { - l.endbufferLine++ - l.endbufferCol = 1 - } else { - l.endbufferCol++ - } - l.inputIdx++ - return r -} - -func (l *tomlLexer) next() rune { - r := l.read() - - if r != eof { - l.currentTokenStop++ - } - return r -} - -func (l *tomlLexer) ignore() { - l.currentTokenStart = l.currentTokenStop - l.line = l.endbufferLine - l.col = l.endbufferCol -} - -func (l *tomlLexer) skip() { - l.next() - l.ignore() -} - -func (l *tomlLexer) fastForward(n int) { - for i := 0; i < n; i++ { - l.next() - } -} - -func (l *tomlLexer) emitWithValue(t tokenType, value string) { - l.tokens = append(l.tokens, token{ - Position: Position{l.line, l.col}, - typ: t, - val: value, - }) - l.ignore() -} - -func (l *tomlLexer) emit(t tokenType) { - l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop])) -} - -func (l *tomlLexer) peek() rune { - if l.inputIdx >= len(l.input) { - return eof - } - return l.input[l.inputIdx] -} - -func (l *tomlLexer) peekString(size int) string { - maxIdx := len(l.input) - upperIdx := l.inputIdx + size // FIXME: potential overflow - if upperIdx > maxIdx { - upperIdx = maxIdx - } - return string(l.input[l.inputIdx:upperIdx]) -} - -func (l *tomlLexer) follow(next string) bool { - return next == l.peekString(len(next)) -} - -// Error management - -func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn { - l.tokens = append(l.tokens, token{ - Position: Position{l.line, l.col}, - typ: tokenError, - val: fmt.Sprintf(format, args...), - }) - return nil -} - -// State functions - -func (l *tomlLexer) lexVoid() tomlLexStateFn { - for { - next := l.peek() - switch next { - case '}': // after '{' - return l.lexRightCurlyBrace - case '[': - return l.lexTableKey - case '#': - return l.lexComment(l.lexVoid) - case '=': - return l.lexEqual - case '\r': - fallthrough - case '\n': - l.skip() - continue - } - - if isSpace(next) { - l.skip() - } - - if isKeyStartChar(next) { - return l.lexKey - } - - if next == eof { - l.next() - break - } - } - - l.emit(tokenEOF) - return nil -} - -func (l *tomlLexer) lexRvalue() tomlLexStateFn { - for { - next := l.peek() - switch next { - case '.': - return l.errorf("cannot start float with a dot") - case '=': - return l.lexEqual - case '[': - return l.lexLeftBracket - case ']': - return l.lexRightBracket - case '{': - return l.lexLeftCurlyBrace - case '}': - return l.lexRightCurlyBrace - case '#': - return l.lexComment(l.lexRvalue) - case '"': - return l.lexString - case '\'': - return l.lexLiteralString - case ',': - return l.lexComma - case '\r': - fallthrough - case '\n': - l.skip() - if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '[' { - return l.lexRvalue - } - return l.lexVoid - } - - if l.follow("true") { - return l.lexTrue - } - - if l.follow("false") { - return l.lexFalse - } - - if l.follow("inf") { - return l.lexInf - } - - if l.follow("nan") { - return l.lexNan - } - 
- if isSpace(next) { - l.skip() - continue - } - - if next == eof { - l.next() - break - } - - possibleDate := l.peekString(35) - dateSubmatches := dateRegexp.FindStringSubmatch(possibleDate) - if dateSubmatches != nil && dateSubmatches[0] != "" { - l.fastForward(len(dateSubmatches[0])) - if dateSubmatches[2] == "" { // no timezone information => local date - return l.lexLocalDate - } - return l.lexDate - } - - if next == '+' || next == '-' || isDigit(next) { - return l.lexNumber - } - - return l.errorf("no value can start with %c", next) - } - - l.emit(tokenEOF) - return nil -} - -func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn { - l.next() - l.emit(tokenLeftCurlyBrace) - l.brackets = append(l.brackets, '{') - return l.lexVoid -} - -func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn { - l.next() - l.emit(tokenRightCurlyBrace) - if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '{' { - return l.errorf("cannot have '}' here") - } - l.brackets = l.brackets[:len(l.brackets)-1] - return l.lexRvalue -} - -func (l *tomlLexer) lexDate() tomlLexStateFn { - l.emit(tokenDate) - return l.lexRvalue -} - -func (l *tomlLexer) lexLocalDate() tomlLexStateFn { - l.emit(tokenLocalDate) - return l.lexRvalue -} - -func (l *tomlLexer) lexTrue() tomlLexStateFn { - l.fastForward(4) - l.emit(tokenTrue) - return l.lexRvalue -} - -func (l *tomlLexer) lexFalse() tomlLexStateFn { - l.fastForward(5) - l.emit(tokenFalse) - return l.lexRvalue -} - -func (l *tomlLexer) lexInf() tomlLexStateFn { - l.fastForward(3) - l.emit(tokenInf) - return l.lexRvalue -} - -func (l *tomlLexer) lexNan() tomlLexStateFn { - l.fastForward(3) - l.emit(tokenNan) - return l.lexRvalue -} - -func (l *tomlLexer) lexEqual() tomlLexStateFn { - l.next() - l.emit(tokenEqual) - return l.lexRvalue -} - -func (l *tomlLexer) lexComma() tomlLexStateFn { - l.next() - l.emit(tokenComma) - if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '{' { - return l.lexVoid - } - return l.lexRvalue -} - -// Parse the key and emits its value without escape sequences. -// bare keys, basic string keys and literal string keys are supported. -func (l *tomlLexer) lexKey() tomlLexStateFn { - var sb strings.Builder - - for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() { - if r == '"' { - l.next() - str, err := l.lexStringAsString(`"`, false, true) - if err != nil { - return l.errorf(err.Error()) - } - sb.WriteString("\"") - sb.WriteString(str) - sb.WriteString("\"") - l.next() - continue - } else if r == '\'' { - l.next() - str, err := l.lexLiteralStringAsString(`'`, false) - if err != nil { - return l.errorf(err.Error()) - } - sb.WriteString("'") - sb.WriteString(str) - sb.WriteString("'") - l.next() - continue - } else if r == '\n' { - return l.errorf("keys cannot contain new lines") - } else if isSpace(r) { - var str strings.Builder - str.WriteString(" ") - - // skip trailing whitespace - l.next() - for r = l.peek(); isSpace(r); r = l.peek() { - str.WriteRune(r) - l.next() - } - // break loop if not a dot - if r != '.' { - break - } - str.WriteString(".") - // skip trailing whitespace after dot - l.next() - for r = l.peek(); isSpace(r); r = l.peek() { - str.WriteRune(r) - l.next() - } - sb.WriteString(str.String()) - continue - } else if r == '.' 
{ - // skip - } else if !isValidBareChar(r) { - return l.errorf("keys cannot contain %c character", r) - } - sb.WriteRune(r) - l.next() - } - l.emitWithValue(tokenKey, sb.String()) - return l.lexVoid -} - -func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn { - return func() tomlLexStateFn { - for next := l.peek(); next != '\n' && next != eof; next = l.peek() { - if next == '\r' && l.follow("\r\n") { - break - } - l.next() - } - l.ignore() - return previousState - } -} - -func (l *tomlLexer) lexLeftBracket() tomlLexStateFn { - l.next() - l.emit(tokenLeftBracket) - l.brackets = append(l.brackets, '[') - return l.lexRvalue -} - -func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) { - var sb strings.Builder - - if discardLeadingNewLine { - if l.follow("\r\n") { - l.skip() - l.skip() - } else if l.peek() == '\n' { - l.skip() - } - } - - // find end of string - for { - if l.follow(terminator) { - return sb.String(), nil - } - - next := l.peek() - if next == eof { - break - } - sb.WriteRune(l.next()) - } - - return "", errors.New("unclosed string") -} - -func (l *tomlLexer) lexLiteralString() tomlLexStateFn { - l.skip() - - // handle special case for triple-quote - terminator := "'" - discardLeadingNewLine := false - if l.follow("''") { - l.skip() - l.skip() - terminator = "'''" - discardLeadingNewLine = true - } - - str, err := l.lexLiteralStringAsString(terminator, discardLeadingNewLine) - if err != nil { - return l.errorf(err.Error()) - } - - l.emitWithValue(tokenString, str) - l.fastForward(len(terminator)) - l.ignore() - return l.lexRvalue -} - -// Lex a string and return the results as a string. -// Terminator is the substring indicating the end of the token. -// The resulting string does not include the terminator. 
-func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) { - var sb strings.Builder - - if discardLeadingNewLine { - if l.follow("\r\n") { - l.skip() - l.skip() - } else if l.peek() == '\n' { - l.skip() - } - } - - for { - if l.follow(terminator) { - return sb.String(), nil - } - - if l.follow("\\") { - l.next() - switch l.peek() { - case '\r': - fallthrough - case '\n': - fallthrough - case '\t': - fallthrough - case ' ': - // skip all whitespace chars following backslash - for strings.ContainsRune("\r\n\t ", l.peek()) { - l.next() - } - case '"': - sb.WriteString("\"") - l.next() - case 'n': - sb.WriteString("\n") - l.next() - case 'b': - sb.WriteString("\b") - l.next() - case 'f': - sb.WriteString("\f") - l.next() - case '/': - sb.WriteString("/") - l.next() - case 't': - sb.WriteString("\t") - l.next() - case 'r': - sb.WriteString("\r") - l.next() - case '\\': - sb.WriteString("\\") - l.next() - case 'u': - l.next() - var code strings.Builder - for i := 0; i < 4; i++ { - c := l.peek() - if !isHexDigit(c) { - return "", errors.New("unfinished unicode escape") - } - l.next() - code.WriteRune(c) - } - intcode, err := strconv.ParseInt(code.String(), 16, 32) - if err != nil { - return "", errors.New("invalid unicode escape: \\u" + code.String()) - } - sb.WriteRune(rune(intcode)) - case 'U': - l.next() - var code strings.Builder - for i := 0; i < 8; i++ { - c := l.peek() - if !isHexDigit(c) { - return "", errors.New("unfinished unicode escape") - } - l.next() - code.WriteRune(c) - } - intcode, err := strconv.ParseInt(code.String(), 16, 64) - if err != nil { - return "", errors.New("invalid unicode escape: \\U" + code.String()) - } - sb.WriteRune(rune(intcode)) - default: - return "", errors.New("invalid escape sequence: \\" + string(l.peek())) - } - } else { - r := l.peek() - - if 0x00 <= r && r <= 0x1F && r != '\t' && !(acceptNewLines && (r == '\n' || r == '\r')) { - return "", fmt.Errorf("unescaped control character %U", r) - } - l.next() - sb.WriteRune(r) - } - - if l.peek() == eof { - break - } - } - - return "", errors.New("unclosed string") -} - -func (l *tomlLexer) lexString() tomlLexStateFn { - l.skip() - - // handle special case for triple-quote - terminator := `"` - discardLeadingNewLine := false - acceptNewLines := false - if l.follow(`""`) { - l.skip() - l.skip() - terminator = `"""` - discardLeadingNewLine = true - acceptNewLines = true - } - - str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines) - if err != nil { - return l.errorf(err.Error()) - } - - l.emitWithValue(tokenString, str) - l.fastForward(len(terminator)) - l.ignore() - return l.lexRvalue -} - -func (l *tomlLexer) lexTableKey() tomlLexStateFn { - l.next() - - if l.peek() == '[' { - // token '[[' signifies an array of tables - l.next() - l.emit(tokenDoubleLeftBracket) - return l.lexInsideTableArrayKey - } - // vanilla table key - l.emit(tokenLeftBracket) - return l.lexInsideTableKey -} - -// Parse the key till "]]", but only bare keys are supported -func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn { - for r := l.peek(); r != eof; r = l.peek() { - switch r { - case ']': - if l.currentTokenStop > l.currentTokenStart { - l.emit(tokenKeyGroupArray) - } - l.next() - if l.peek() != ']' { - break - } - l.next() - l.emit(tokenDoubleRightBracket) - return l.lexVoid - case '[': - return l.errorf("table array key cannot contain ']'") - default: - l.next() - } - } - return l.errorf("unclosed table array key") -} - -// Parse the 
key till "]" but only bare keys are supported -func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn { - for r := l.peek(); r != eof; r = l.peek() { - switch r { - case ']': - if l.currentTokenStop > l.currentTokenStart { - l.emit(tokenKeyGroup) - } - l.next() - l.emit(tokenRightBracket) - return l.lexVoid - case '[': - return l.errorf("table key cannot contain ']'") - default: - l.next() - } - } - return l.errorf("unclosed table key") -} - -func (l *tomlLexer) lexRightBracket() tomlLexStateFn { - l.next() - l.emit(tokenRightBracket) - if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '[' { - return l.errorf("cannot have ']' here") - } - l.brackets = l.brackets[:len(l.brackets)-1] - return l.lexRvalue -} - -type validRuneFn func(r rune) bool - -func isValidHexRune(r rune) bool { - return r >= 'a' && r <= 'f' || - r >= 'A' && r <= 'F' || - r >= '0' && r <= '9' || - r == '_' -} - -func isValidOctalRune(r rune) bool { - return r >= '0' && r <= '7' || r == '_' -} - -func isValidBinaryRune(r rune) bool { - return r == '0' || r == '1' || r == '_' -} - -func (l *tomlLexer) lexNumber() tomlLexStateFn { - r := l.peek() - - if r == '0' { - follow := l.peekString(2) - if len(follow) == 2 { - var isValidRune validRuneFn - switch follow[1] { - case 'x': - isValidRune = isValidHexRune - case 'o': - isValidRune = isValidOctalRune - case 'b': - isValidRune = isValidBinaryRune - default: - if follow[1] >= 'a' && follow[1] <= 'z' || follow[1] >= 'A' && follow[1] <= 'Z' { - return l.errorf("unknown number base: %s. possible options are x (hex) o (octal) b (binary)", string(follow[1])) - } - } - - if isValidRune != nil { - l.next() - l.next() - digitSeen := false - for { - next := l.peek() - if !isValidRune(next) { - break - } - digitSeen = true - l.next() - } - - if !digitSeen { - return l.errorf("number needs at least one digit") - } - - l.emit(tokenInteger) - - return l.lexRvalue - } - } - } - - if r == '+' || r == '-' { - l.next() - if l.follow("inf") { - return l.lexInf - } - if l.follow("nan") { - return l.lexNan - } - } - - pointSeen := false - expSeen := false - digitSeen := false - for { - next := l.peek() - if next == '.' { - if pointSeen { - return l.errorf("cannot have two dots in one float") - } - l.next() - if !isDigit(l.peek()) { - return l.errorf("float cannot end with a dot") - } - pointSeen = true - } else if next == 'e' || next == 'E' { - expSeen = true - l.next() - r := l.peek() - if r == '+' || r == '-' { - l.next() - } - } else if isDigit(next) { - digitSeen = true - l.next() - } else if next == '_' { - l.next() - } else { - break - } - if pointSeen && !digitSeen { - return l.errorf("cannot start float with a dot") - } - } - - if !digitSeen { - return l.errorf("no digit in that number") - } - if pointSeen || expSeen { - l.emit(tokenFloat) - } else { - l.emit(tokenInteger) - } - return l.lexRvalue -} - -func (l *tomlLexer) run() { - for state := l.lexVoid; state != nil; { - state = state() - } -} - -func init() { - // Regexp for all date/time formats supported by TOML. 
- // Group 1: nano precision - // Group 2: timezone - // - // /!\ also matches the empty string - // - // Example matches: - // 1979-05-27T07:32:00Z - // 1979-05-27T00:32:00-07:00 - // 1979-05-27T00:32:00.999999-07:00 - // 1979-05-27 07:32:00Z - // 1979-05-27 00:32:00-07:00 - // 1979-05-27 00:32:00.999999-07:00 - // 1979-05-27T07:32:00 - // 1979-05-27T00:32:00.999999 - // 1979-05-27 07:32:00 - // 1979-05-27 00:32:00.999999 - // 1979-05-27 - // 07:32:00 - // 00:32:00.999999 - dateRegexp = regexp.MustCompile(`^(?:\d{1,4}-\d{2}-\d{2})?(?:[T ]?\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})?)?`) -} - -// Entry point -func lexToml(inputBytes []byte) []token { - runes := bytes.Runes(inputBytes) - l := &tomlLexer{ - input: runes, - tokens: make([]token, 0, 256), - line: 1, - col: 1, - endbufferLine: 1, - endbufferCol: 1, - } - l.run() - return l.tokens -} diff --git a/vendor/github.com/pelletier/go-toml/localtime.go b/vendor/github.com/pelletier/go-toml/localtime.go deleted file mode 100644 index a2149e96..00000000 --- a/vendor/github.com/pelletier/go-toml/localtime.go +++ /dev/null @@ -1,281 +0,0 @@ -// Implementation of TOML's local date/time. -// Copied over from https://github.com/googleapis/google-cloud-go/blob/master/civil/civil.go -// to avoid pulling all the Google dependencies. -// -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package civil implements types for civil time, a time-zone-independent -// representation of time that follows the rules of the proleptic -// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second -// minutes. -// -// Because they lack location information, these types do not represent unique -// moments or intervals of time. Use time.Time for that purpose. -package toml - -import ( - "fmt" - "time" -) - -// A LocalDate represents a date (year, month, day). -// -// This type does not include location information, and therefore does not -// describe a unique 24-hour timespan. -type LocalDate struct { - Year int // Year (e.g., 2014). - Month time.Month // Month of the year (January = 1, ...). - Day int // Day of the month, starting at 1. -} - -// LocalDateOf returns the LocalDate in which a time occurs in that time's location. -func LocalDateOf(t time.Time) LocalDate { - var d LocalDate - d.Year, d.Month, d.Day = t.Date() - return d -} - -// ParseLocalDate parses a string in RFC3339 full-date format and returns the date value it represents. -func ParseLocalDate(s string) (LocalDate, error) { - t, err := time.Parse("2006-01-02", s) - if err != nil { - return LocalDate{}, err - } - return LocalDateOf(t), nil -} - -// String returns the date in RFC3339 full-date format. -func (d LocalDate) String() string { - return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) -} - -// IsValid reports whether the date is valid. -func (d LocalDate) IsValid() bool { - return LocalDateOf(d.In(time.UTC)) == d -} - -// In returns the time corresponding to time 00:00:00 of the date in the location. 
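// For example, LocalDate{Year: 1979, Month: time.May, Day: 27}.In(time.UTC)
// is 1979-05-27 00:00:00 +0000 UTC.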
-// -// In is always consistent with time.Date, even when time.Date returns a time -// on a different day. For example, if loc is America/Indiana/Vincennes, then both -// time.Date(1955, time.May, 1, 0, 0, 0, 0, loc) -// and -// LocalDate{Year: 1955, Month: time.May, Day: 1}.In(loc) -// return 23:00:00 on April 30, 1955. -// -// In panics if loc is nil. -func (d LocalDate) In(loc *time.Location) time.Time { - return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc) -} - -// AddDays returns the date that is n days in the future. -// n can also be negative to go into the past. -func (d LocalDate) AddDays(n int) LocalDate { - return LocalDateOf(d.In(time.UTC).AddDate(0, 0, n)) -} - -// DaysSince returns the signed number of days between the date and s, not including the end day. -// This is the inverse operation to AddDays. -func (d LocalDate) DaysSince(s LocalDate) (days int) { - // We convert to Unix time so we do not have to worry about leap seconds: - // Unix time increases by exactly 86400 seconds per day. - deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix() - return int(deltaUnix / 86400) -} - -// Before reports whether d1 occurs before d2. -func (d1 LocalDate) Before(d2 LocalDate) bool { - if d1.Year != d2.Year { - return d1.Year < d2.Year - } - if d1.Month != d2.Month { - return d1.Month < d2.Month - } - return d1.Day < d2.Day -} - -// After reports whether d1 occurs after d2. -func (d1 LocalDate) After(d2 LocalDate) bool { - return d2.Before(d1) -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of d.String(). -func (d LocalDate) MarshalText() ([]byte, error) { - return []byte(d.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// The date is expected to be a string in a format accepted by ParseLocalDate. -func (d *LocalDate) UnmarshalText(data []byte) error { - var err error - *d, err = ParseLocalDate(string(data)) - return err -} - -// A LocalTime represents a time with nanosecond precision. -// -// This type does not include location information, and therefore does not -// describe a unique moment in time. -// -// This type exists to represent the TIME type in storage-based APIs like BigQuery. -// Most operations on Times are unlikely to be meaningful. Prefer the LocalDateTime type. -type LocalTime struct { - Hour int // The hour of the day in 24-hour format; range [0-23] - Minute int // The minute of the hour; range [0-59] - Second int // The second of the minute; range [0-59] - Nanosecond int // The nanosecond of the second; range [0-999999999] -} - -// LocalTimeOf returns the LocalTime representing the time of day in which a time occurs -// in that time's location. It ignores the date. -func LocalTimeOf(t time.Time) LocalTime { - var tm LocalTime - tm.Hour, tm.Minute, tm.Second = t.Clock() - tm.Nanosecond = t.Nanosecond() - return tm -} - -// ParseLocalTime parses a string and returns the time value it represents. -// ParseLocalTime accepts an extended form of the RFC3339 partial-time format. After -// the HH:MM:SS part of the string, an optional fractional part may appear, -// consisting of a decimal point followed by one to nine decimal digits. -// (RFC3339 admits only one digit after the decimal point). -func ParseLocalTime(s string) (LocalTime, error) { - t, err := time.Parse("15:04:05.999999999", s) - if err != nil { - return LocalTime{}, err - } - return LocalTimeOf(t), nil - } - -// String returns the time in the format described in ParseLocalTime.
If Nanoseconds -// is zero, no fractional part will be generated. Otherwise, the result will -// end with a fractional part consisting of a decimal point and nine digits. -func (t LocalTime) String() string { - s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second) - if t.Nanosecond == 0 { - return s - } - return s + fmt.Sprintf(".%09d", t.Nanosecond) -} - -// IsValid reports whether the time is valid. -func (t LocalTime) IsValid() bool { - // Construct a non-zero time. - tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC) - return LocalTimeOf(tm) == t -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of t.String(). -func (t LocalTime) MarshalText() ([]byte, error) { - return []byte(t.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// The time is expected to be a string in a format accepted by ParseLocalTime. -func (t *LocalTime) UnmarshalText(data []byte) error { - var err error - *t, err = ParseLocalTime(string(data)) - return err -} - -// A LocalDateTime represents a date and time. -// -// This type does not include location information, and therefore does not -// describe a unique moment in time. -type LocalDateTime struct { - Date LocalDate - Time LocalTime -} - -// Note: We deliberately do not embed LocalDate into LocalDateTime, to avoid promoting AddDays and Sub. - -// LocalDateTimeOf returns the LocalDateTime in which a time occurs in that time's location. -func LocalDateTimeOf(t time.Time) LocalDateTime { - return LocalDateTime{ - Date: LocalDateOf(t), - Time: LocalTimeOf(t), - } -} - -// ParseLocalDateTime parses a string and returns the LocalDateTime it represents. -// ParseLocalDateTime accepts a variant of the RFC3339 date-time format that omits -// the time offset but includes an optional fractional time, as described in -// ParseLocalTime. Informally, the accepted format is -// YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF] -// where the 'T' may be a lower-case 't'. -func ParseLocalDateTime(s string) (LocalDateTime, error) { - t, err := time.Parse("2006-01-02T15:04:05.999999999", s) - if err != nil { - t, err = time.Parse("2006-01-02t15:04:05.999999999", s) - if err != nil { - return LocalDateTime{}, err - } - } - return LocalDateTimeOf(t), nil -} - -// String returns the date and time in the format described in ParseLocalDateTime. -func (dt LocalDateTime) String() string { - return dt.Date.String() + "T" + dt.Time.String() -} - -// IsValid reports whether the datetime is valid. -func (dt LocalDateTime) IsValid() bool { - return dt.Date.IsValid() && dt.Time.IsValid() -} - -// In returns the time corresponding to the LocalDateTime in the given location. -// -// If the time is missing or ambiguous at the location, In returns the same -// result as time.Date. For example, if loc is America/Indiana/Vincennes, then -// both -// time.Date(1955, time.May, 1, 0, 30, 0, 0, loc) -// and -// LocalDateTime{ -// Date: LocalDate{Year: 1955, Month: time.May, Day: 1}, -// Time: LocalTime{Minute: 30}}.In(loc) -// return 23:30:00 on April 30, 1955. -// -// In panics if loc is nil. -func (dt LocalDateTime) In(loc *time.Location) time.Time { - return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc) -} - -// Before reports whether dt1 occurs before dt2. -func (dt1 LocalDateTime) Before(dt2 LocalDateTime) bool { - return dt1.In(time.UTC).Before(dt2.In(time.UTC)) -} - -// After reports whether dt1 occurs after dt2.
-func (dt1 LocalDateTime) After(dt2 LocalDateTime) bool { - return dt2.Before(dt1) -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of dt.String(). -func (dt LocalDateTime) MarshalText() ([]byte, error) { - return []byte(dt.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// The datetime is expected to be a string in a format accepted by ParseLocalDateTime -func (dt *LocalDateTime) UnmarshalText(data []byte) error { - var err error - *dt, err = ParseLocalDateTime(string(data)) - return err -} diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go deleted file mode 100644 index 032e0ffc..00000000 --- a/vendor/github.com/pelletier/go-toml/marshal.go +++ /dev/null @@ -1,1269 +0,0 @@ -package toml - -import ( - "bytes" - "encoding" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strconv" - "strings" - "time" -) - -const ( - tagFieldName = "toml" - tagFieldComment = "comment" - tagCommented = "commented" - tagMultiline = "multiline" - tagDefault = "default" -) - -type tomlOpts struct { - name string - nameFromTag bool - comment string - commented bool - multiline bool - include bool - omitempty bool - defaultValue string -} - -type encOpts struct { - quoteMapKeys bool - arraysOneElementPerLine bool -} - -var encOptsDefaults = encOpts{ - quoteMapKeys: false, -} - -type annotation struct { - tag string - comment string - commented string - multiline string - defaultValue string -} - -var annotationDefault = annotation{ - tag: tagFieldName, - comment: tagFieldComment, - commented: tagCommented, - multiline: tagMultiline, - defaultValue: tagDefault, -} - -type marshalOrder int - -// Orders the Encoder can write the fields to the output stream. -const ( - // Sort fields alphabetically. - OrderAlphabetical marshalOrder = iota + 1 - // Preserve the order the fields are encountered. For example, the order of fields in - // a struct. 
- OrderPreserve -) - -var timeType = reflect.TypeOf(time.Time{}) -var marshalerType = reflect.TypeOf(new(Marshaler)).Elem() -var unmarshalerType = reflect.TypeOf(new(Unmarshaler)).Elem() -var textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem() -var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() -var localDateType = reflect.TypeOf(LocalDate{}) -var localTimeType = reflect.TypeOf(LocalTime{}) -var localDateTimeType = reflect.TypeOf(LocalDateTime{}) -var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) - -// Check if the given marshal type maps to a Tree primitive -func isPrimitive(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isPrimitive(mtype.Elem()) - case reflect.Bool: - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Struct: - return isTimeType(mtype) - default: - return false - } -} - -func isTimeType(mtype reflect.Type) bool { - return mtype == timeType || mtype == localDateType || mtype == localDateTimeType || mtype == localTimeType -} - -// Check if the given marshal type maps to a Tree slice or array -func isTreeSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isTreeSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return isTree(mtype.Elem()) - default: - return false - } -} - -// Check if the given marshal type maps to a slice or array of a custom marshaler type -func isCustomMarshalerSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isCustomMarshalerSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return isCustomMarshaler(mtype.Elem()) || isCustomMarshaler(reflect.New(mtype.Elem()).Type()) - default: - return false - } -} - -// Check if the given marshal type maps to a slice or array of a text marshaler type -func isTextMarshalerSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isTextMarshalerSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return isTextMarshaler(mtype.Elem()) || isTextMarshaler(reflect.New(mtype.Elem()).Type()) - default: - return false - } -} - -// Check if the given marshal type maps to a non-Tree slice or array -func isOtherSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isOtherSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return !isTreeSequence(mtype) - default: - return false - } -} - -// Check if the given marshal type maps to a Tree -func isTree(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isTree(mtype.Elem()) - case reflect.Map: - return true - case reflect.Struct: - return !isPrimitive(mtype) - default: - return false - } -} - -func isCustomMarshaler(mtype reflect.Type) bool { - return mtype.Implements(marshalerType) -} - -func callCustomMarshaler(mval reflect.Value) ([]byte, error) { - return mval.Interface().(Marshaler).MarshalTOML() -} - -func isTextMarshaler(mtype reflect.Type) bool { - return mtype.Implements(textMarshalerType) && !isTimeType(mtype) -} - -func callTextMarshaler(mval reflect.Value) ([]byte, error) { - return mval.Interface().(encoding.TextMarshaler).MarshalText() -} - -func isCustomUnmarshaler(mtype reflect.Type) bool { - return 
mtype.Implements(unmarshalerType) -} - -func callCustomUnmarshaler(mval reflect.Value, tval interface{}) error { - return mval.Interface().(Unmarshaler).UnmarshalTOML(tval) -} - -func isTextUnmarshaler(mtype reflect.Type) bool { - return mtype.Implements(textUnmarshalerType) -} - -func callTextUnmarshaler(mval reflect.Value, text []byte) error { - return mval.Interface().(encoding.TextUnmarshaler).UnmarshalText(text) -} - -// Marshaler is the interface implemented by types that -// can marshal themselves into valid TOML. -type Marshaler interface { - MarshalTOML() ([]byte, error) -} - -// Unmarshaler is the interface implemented by types that -// can unmarshal a TOML description of themselves. -type Unmarshaler interface { - UnmarshalTOML(interface{}) error -} - -/* -Marshal returns the TOML encoding of v. Behavior is similar to the Go json -encoder, except that there is no concept of a Marshaler interface or MarshalTOML -function for sub-structs, and currently only definite types can be marshaled -(i.e. no `interface{}`). - -The following struct annotations are supported: - - toml:"Field" Overrides the field's name to output. - omitempty When set, empty values and groups are not emitted. - comment:"comment" Emits a # comment on the same line. This supports new lines. - commented:"true" Emits the value as commented. - -Note that pointers are automatically assigned the "omitempty" option, as TOML -explicitly does not handle null values (saying instead the label should be -dropped). - -Tree structural types and corresponding marshal types: - - *Tree (*)struct, (*)map[string]interface{} - []*Tree (*)[](*)struct, (*)[](*)map[string]interface{} - []interface{} (as interface{}) (*)[]primitive, (*)[]([]interface{}) - interface{} (*)primitive - -Tree primitive types and corresponding marshal types: - - uint64 uint, uint8-uint64, pointers to same - int64 int, int8-int64, pointers to same - float64 float32, float64, pointers to same - string string, pointers to same - bool bool, pointers to same - time.LocalTime time.LocalTime{}, pointers to same - -For additional flexibility, use the Encoder API. -*/ -func Marshal(v interface{}) ([]byte, error) { - return NewEncoder(nil).marshal(v) -} - -// Encoder writes TOML values to an output stream. -type Encoder struct { - w io.Writer - encOpts - annotation - line int - col int - order marshalOrder - promoteAnon bool - indentation string -} - -// NewEncoder returns a new encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: w, - encOpts: encOptsDefaults, - annotation: annotationDefault, - line: 0, - col: 1, - order: OrderAlphabetical, - indentation: " ", - } -} - -// Encode writes the TOML encoding of v to the stream. -// -// See the documentation for Marshal for details. -func (e *Encoder) Encode(v interface{}) error { - b, err := e.marshal(v) - if err != nil { - return err - } - if _, err := e.w.Write(b); err != nil { - return err - } - return nil -} - -// QuoteMapKeys sets up the encoder to encode -// maps with string type keys with quoted TOML keys. -// -// This relieves the character limitations on map keys. -func (e *Encoder) QuoteMapKeys(v bool) *Encoder { - e.quoteMapKeys = v - return e -} - -// ArraysWithOneElementPerLine sets up the encoder to encode arrays -// with more than one element on multiple lines instead of one.
-// -// For example: -// -// A = [1,2,3] -// -// Becomes -// -// A = [ -// 1, -// 2, -// 3, -// ] -func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder { - e.arraysOneElementPerLine = v - return e -} - -// Order allows changing the order in which fields will be written to the output stream. -func (e *Encoder) Order(ord marshalOrder) *Encoder { - e.order = ord - return e -} - -// Indentation allows changing the indentation used when marshaling. -func (e *Encoder) Indentation(indent string) *Encoder { - e.indentation = indent - return e -} - -// SetTagName allows changing default tag "toml" -func (e *Encoder) SetTagName(v string) *Encoder { - e.tag = v - return e -} - -// SetTagComment allows changing default tag "comment" -func (e *Encoder) SetTagComment(v string) *Encoder { - e.comment = v - return e -} - -// SetTagCommented allows changing default tag "commented" -func (e *Encoder) SetTagCommented(v string) *Encoder { - e.commented = v - return e -} - -// SetTagMultiline allows changing default tag "multiline" -func (e *Encoder) SetTagMultiline(v string) *Encoder { - e.multiline = v - return e -} - -// PromoteAnonymous allows changing how anonymous struct fields are marshaled. -// Usually, they are marshaled as if the inner exported fields were fields in -// the outer struct. However, if an anonymous struct field is given a name in -// its TOML tag, it is treated like a regular struct field with that name, -// rather than being anonymous. -// -// In case anonymous promotion is enabled, all anonymous structs are promoted -// and treated like regular struct fields. -func (e *Encoder) PromoteAnonymous(promote bool) *Encoder { - e.promoteAnon = promote - return e -} - -func (e *Encoder) marshal(v interface{}) ([]byte, error) { - // Check if indentation is valid - for _, char := range e.indentation { - if !isSpace(char) { - return []byte{}, fmt.Errorf("invalid indentation: must only contain space or tab characters") - } - } - - mtype := reflect.TypeOf(v) - if mtype == nil { - return []byte{}, errors.New("nil cannot be marshaled to TOML") - } - - switch mtype.Kind() { - case reflect.Struct, reflect.Map: - case reflect.Ptr: - if mtype.Elem().Kind() != reflect.Struct { - return []byte{}, errors.New("Only pointer to struct can be marshaled to TOML") - } - if reflect.ValueOf(v).IsNil() { - return []byte{}, errors.New("nil pointer cannot be marshaled to TOML") - } - default: - return []byte{}, errors.New("Only a struct or map can be marshaled to TOML") - } - - sval := reflect.ValueOf(v) - if isCustomMarshaler(mtype) { - return callCustomMarshaler(sval) - } - if isTextMarshaler(mtype) { - return callTextMarshaler(sval) - } - t, err := e.valueToTree(mtype, sval) - if err != nil { - return []byte{}, err - } - - var buf bytes.Buffer - _, err = t.writeToOrdered(&buf, "", "", 0, e.arraysOneElementPerLine, e.order, e.indentation, false) - - return buf.Bytes(), err -} - -// Create next tree with a position based on Encoder.line -func (e *Encoder) nextTree() *Tree { - return newTreeWithPosition(Position{Line: e.line, Col: 1}) -} - -// Convert given marshal struct or map value to toml tree -func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) { - if mtype.Kind() == reflect.Ptr { - return e.valueToTree(mtype.Elem(), mval.Elem()) - } - tval := e.nextTree() - switch mtype.Kind() { - case reflect.Struct: - switch mval.Interface().(type) { - case Tree: - reflect.ValueOf(tval).Elem().Set(mval) - default: - for i := 0; i < mtype.NumField(); i++ { - mtypef, mvalf := mtype.Field(i),
mval.Field(i) - opts := tomlOptions(mtypef, e.annotation) - if opts.include && ((mtypef.Type.Kind() != reflect.Interface && !opts.omitempty) || !isZero(mvalf)) { - val, err := e.valueToToml(mtypef.Type, mvalf) - if err != nil { - return nil, err - } - if tree, ok := val.(*Tree); ok && mtypef.Anonymous && !opts.nameFromTag && !e.promoteAnon { - e.appendTree(tval, tree) - } else { - val = e.wrapTomlValue(val, tval) - tval.SetPathWithOptions([]string{opts.name}, SetOptions{ - Comment: opts.comment, - Commented: opts.commented, - Multiline: opts.multiline, - }, val) - } - } - } - } - case reflect.Map: - keys := mval.MapKeys() - if e.order == OrderPreserve && len(keys) > 0 { - // Sorting []reflect.Value is not straightforward. - // - // OrderPreserve will produce deterministic results when strings are used - // as map keys. - typ := keys[0].Type() - kind := keys[0].Kind() - if kind == reflect.String { - ikeys := make([]string, len(keys)) - for i := range keys { - ikeys[i] = keys[i].Interface().(string) - } - sort.Strings(ikeys) - for i := range ikeys { - keys[i] = reflect.ValueOf(ikeys[i]).Convert(typ) - } - } - } - for _, key := range keys { - mvalf := mval.MapIndex(key) - if (mtype.Elem().Kind() == reflect.Ptr || mtype.Elem().Kind() == reflect.Interface) && mvalf.IsNil() { - continue - } - val, err := e.valueToToml(mtype.Elem(), mvalf) - if err != nil { - return nil, err - } - val = e.wrapTomlValue(val, tval) - if e.quoteMapKeys { - keyStr, err := tomlValueStringRepresentation(key.String(), "", "", e.order, e.arraysOneElementPerLine) - if err != nil { - return nil, err - } - tval.SetPath([]string{keyStr}, val) - } else { - tval.SetPath([]string{key.String()}, val) - } - } - } - return tval, nil -} - -// Convert given marshal slice to slice of Toml trees -func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) { - tval := make([]*Tree, mval.Len(), mval.Len()) - for i := 0; i < mval.Len(); i++ { - val, err := e.valueToTree(mtype.Elem(), mval.Index(i)) - if err != nil { - return nil, err - } - tval[i] = val - } - return tval, nil -} - -// Convert given marshal slice to slice of toml values -func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) { - tval := make([]interface{}, mval.Len(), mval.Len()) - for i := 0; i < mval.Len(); i++ { - val, err := e.valueToToml(mtype.Elem(), mval.Index(i)) - if err != nil { - return nil, err - } - tval[i] = val - } - return tval, nil -} - -// Convert given marshal value to toml value -func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { - if mtype.Kind() == reflect.Ptr { - switch { - case isCustomMarshaler(mtype): - return callCustomMarshaler(mval) - case isTextMarshaler(mtype): - b, err := callTextMarshaler(mval) - return string(b), err - default: - return e.valueToToml(mtype.Elem(), mval.Elem()) - } - } - if mtype.Kind() == reflect.Interface { - return e.valueToToml(mval.Elem().Type(), mval.Elem()) - } - switch { - case isCustomMarshaler(mtype): - return callCustomMarshaler(mval) - case isTextMarshaler(mtype): - b, err := callTextMarshaler(mval) - return string(b), err - case isTree(mtype): - return e.valueToTree(mtype, mval) - case isOtherSequence(mtype), isCustomMarshalerSequence(mtype), isTextMarshalerSequence(mtype): - return e.valueToOtherSlice(mtype, mval) - case isTreeSequence(mtype): - return e.valueToTreeSlice(mtype, mval) - default: - switch mtype.Kind() { - case reflect.Bool: - return mval.Bool(), nil - case reflect.Int,
reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) { - return fmt.Sprint(mval), nil - } - return mval.Int(), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return mval.Uint(), nil - case reflect.Float32, reflect.Float64: - return mval.Float(), nil - case reflect.String: - return mval.String(), nil - case reflect.Struct: - return mval.Interface(), nil - default: - return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind()) - } - } -} - -func (e *Encoder) appendTree(t, o *Tree) error { - for key, value := range o.values { - if _, ok := t.values[key]; ok { - continue - } - if tomlValue, ok := value.(*tomlValue); ok { - tomlValue.position.Col = t.position.Col - } - t.values[key] = value - } - return nil -} - -// Create a toml value with the current line number as the position line -func (e *Encoder) wrapTomlValue(val interface{}, parent *Tree) interface{} { - _, isTree := val.(*Tree) - _, isTreeS := val.([]*Tree) - if isTree || isTreeS { - return val - } - - ret := &tomlValue{ - value: val, - position: Position{ - e.line, - parent.position.Col, - }, - } - e.line++ - return ret -} - -// Unmarshal attempts to unmarshal the Tree into a Go struct pointed by v. -// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for -// sub-structs, and only definite types can be unmarshaled. -func (t *Tree) Unmarshal(v interface{}) error { - d := Decoder{tval: t, tagName: tagFieldName} - return d.unmarshal(v) -} - -// Marshal returns the TOML encoding of Tree. -// See Marshal() documentation for types mapping table. -func (t *Tree) Marshal() ([]byte, error) { - var buf bytes.Buffer - _, err := t.WriteTo(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// Unmarshal parses the TOML-encoded data and stores the result in the value -// pointed to by v. Behavior is similar to the Go json encoder, except that there -// is no concept of an Unmarshaler interface or UnmarshalTOML function for -// sub-structs, and currently only definite types can be unmarshaled to (i.e. no -// `interface{}`). -// -// The following struct annotations are supported: -// -// toml:"Field" Overrides the field's name to map to. -// default:"foo" Provides a default value. -// -// For default values, only fields of the following types are supported: -// * string -// * bool -// * int -// * int64 -// * float64 -// -// See Marshal() documentation for types mapping table. -func Unmarshal(data []byte, v interface{}) error { - t, err := LoadReader(bytes.NewReader(data)) - if err != nil { - return err - } - return t.Unmarshal(v) -} - -// Decoder reads and decodes TOML values from an input stream. -type Decoder struct { - r io.Reader - tval *Tree - encOpts - tagName string - strict bool - visitor visitorState -} - -// NewDecoder returns a new decoder that reads from r. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{ - r: r, - encOpts: encOptsDefaults, - tagName: tagFieldName, - } -} - -// Decode reads a TOML-encoded value from its input -// and unmarshals it into the value pointed at by v. -// -// See the documentation for Marshal for details.
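//
// A minimal usage sketch from a client package (the reader and Config type
// are illustrative, not defined in this file):
//
//	var cfg Config
//	if err := toml.NewDecoder(reader).Decode(&cfg); err != nil {
//		// handle parse or mapping errors
//	}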
-func (d *Decoder) Decode(v interface{}) error { - var err error - d.tval, err = LoadReader(d.r) - if err != nil { - return err - } - return d.unmarshal(v) -} - -// SetTagName allows changing default tag "toml" -func (d *Decoder) SetTagName(v string) *Decoder { - d.tagName = v - return d -} - -// Strict allows changing to strict decoding. Any fields that are found in the -// input data and do not have a corresponding struct member cause an error. -func (d *Decoder) Strict(strict bool) *Decoder { - d.strict = strict - return d -} - -func (d *Decoder) unmarshal(v interface{}) error { - mtype := reflect.TypeOf(v) - if mtype == nil { - return errors.New("nil cannot be unmarshaled from TOML") - } - if mtype.Kind() != reflect.Ptr { - return errors.New("only a pointer to struct or map can be unmarshaled from TOML") - } - - elem := mtype.Elem() - - switch elem.Kind() { - case reflect.Struct, reflect.Map: - case reflect.Interface: - elem = mapStringInterfaceType - default: - return errors.New("only a pointer to struct or map can be unmarshaled from TOML") - } - - if reflect.ValueOf(v).IsNil() { - return errors.New("nil pointer cannot be unmarshaled from TOML") - } - - vv := reflect.ValueOf(v).Elem() - - if d.strict { - d.visitor = newVisitorState(d.tval) - } - - sval, err := d.valueFromTree(elem, d.tval, &vv) - if err != nil { - return err - } - if err := d.visitor.validate(); err != nil { - return err - } - reflect.ValueOf(v).Elem().Set(sval) - return nil -} - -// Convert toml tree to marshal struct or map, using marshal type. When mval1 -// is non-nil, merge fields into the given value instead of allocating a new one. -func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.Value) (reflect.Value, error) { - if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval, mval1) - } - - // Check if pointer to value implements the Unmarshaler interface. 
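// If it does, the entire subtree is handed to UnmarshalTOML as a
// map[string]interface{} (via Tree.ToMap) rather than decoded reflectively.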
- if mvalPtr := reflect.New(mtype); isCustomUnmarshaler(mvalPtr.Type()) { - d.visitor.visitAll() - - if tval == nil { - return mvalPtr.Elem(), nil - } - - if err := callCustomUnmarshaler(mvalPtr, tval.ToMap()); err != nil { - return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err) - } - return mvalPtr.Elem(), nil - } - - var mval reflect.Value - switch mtype.Kind() { - case reflect.Struct: - if mval1 != nil { - mval = *mval1 - } else { - mval = reflect.New(mtype).Elem() - } - - switch mval.Interface().(type) { - case Tree: - mval.Set(reflect.ValueOf(tval).Elem()) - default: - for i := 0; i < mtype.NumField(); i++ { - mtypef := mtype.Field(i) - an := annotation{tag: d.tagName} - opts := tomlOptions(mtypef, an) - if !opts.include { - continue - } - baseKey := opts.name - keysToTry := []string{ - baseKey, - strings.ToLower(baseKey), - strings.ToTitle(baseKey), - strings.ToLower(string(baseKey[0])) + baseKey[1:], - } - - found := false - if tval != nil { - for _, key := range keysToTry { - exists := tval.HasPath([]string{key}) - if !exists { - continue - } - - d.visitor.push(key) - val := tval.GetPath([]string{key}) - fval := mval.Field(i) - mvalf, err := d.valueFromToml(mtypef.Type, val, &fval) - if err != nil { - return mval, formatError(err, tval.GetPositionPath([]string{key})) - } - mval.Field(i).Set(mvalf) - found = true - d.visitor.pop() - break - } - } - - if !found && opts.defaultValue != "" { - mvalf := mval.Field(i) - var val interface{} - var err error - switch mvalf.Kind() { - case reflect.String: - val = opts.defaultValue - case reflect.Bool: - val, err = strconv.ParseBool(opts.defaultValue) - case reflect.Uint: - val, err = strconv.ParseUint(opts.defaultValue, 10, 0) - case reflect.Uint8: - val, err = strconv.ParseUint(opts.defaultValue, 10, 8) - case reflect.Uint16: - val, err = strconv.ParseUint(opts.defaultValue, 10, 16) - case reflect.Uint32: - val, err = strconv.ParseUint(opts.defaultValue, 10, 32) - case reflect.Uint64: - val, err = strconv.ParseUint(opts.defaultValue, 10, 64) - case reflect.Int: - val, err = strconv.ParseInt(opts.defaultValue, 10, 0) - case reflect.Int8: - val, err = strconv.ParseInt(opts.defaultValue, 10, 8) - case reflect.Int16: - val, err = strconv.ParseInt(opts.defaultValue, 10, 16) - case reflect.Int32: - val, err = strconv.ParseInt(opts.defaultValue, 10, 32) - case reflect.Int64: - val, err = strconv.ParseInt(opts.defaultValue, 10, 64) - case reflect.Float32: - val, err = strconv.ParseFloat(opts.defaultValue, 32) - case reflect.Float64: - val, err = strconv.ParseFloat(opts.defaultValue, 64) - default: - return mvalf, fmt.Errorf("unsupported field type for default option") - } - - if err != nil { - return mvalf, err - } - mvalf.Set(reflect.ValueOf(val).Convert(mvalf.Type())) - } - - // save the old behavior above and try to check structs - if !found && opts.defaultValue == "" && mtypef.Type.Kind() == reflect.Struct { - tmpTval := tval - if !mtypef.Anonymous { - tmpTval = nil - } - fval := mval.Field(i) - v, err := d.valueFromTree(mtypef.Type, tmpTval, &fval) - if err != nil { - return v, err - } - mval.Field(i).Set(v) - } - } - } - case reflect.Map: - mval = reflect.MakeMap(mtype) - for _, key := range tval.Keys() { - d.visitor.push(key) - // TODO: path splits key - val := tval.GetPath([]string{key}) - mvalf, err := d.valueFromToml(mtype.Elem(), val, nil) - if err != nil { - return mval, formatError(err, tval.GetPositionPath([]string{key})) - } - mval.SetMapIndex(reflect.ValueOf(key).Convert(mtype.Key()), mvalf) - d.visitor.pop() - } - } - 
return mval, nil -} - -// Convert toml value to marshal struct/map slice, using marshal type -func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { - mval, err := makeSliceOrArray(mtype, len(tval)) - if err != nil { - return mval, err - } - - for i := 0; i < len(tval); i++ { - d.visitor.push(strconv.Itoa(i)) - val, err := d.valueFromTree(mtype.Elem(), tval[i], nil) - if err != nil { - return mval, err - } - mval.Index(i).Set(val) - d.visitor.pop() - } - return mval, nil -} - -// Convert toml value to marshal primitive slice, using marshal type -func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { - mval, err := makeSliceOrArray(mtype, len(tval)) - if err != nil { - return mval, err - } - - for i := 0; i < len(tval); i++ { - val, err := d.valueFromToml(mtype.Elem(), tval[i], nil) - if err != nil { - return mval, err - } - mval.Index(i).Set(val) - } - return mval, nil -} - -// Convert toml value to marshal primitive slice, using marshal type -func (d *Decoder) valueFromOtherSliceI(mtype reflect.Type, tval interface{}) (reflect.Value, error) { - val := reflect.ValueOf(tval) - length := val.Len() - - mval, err := makeSliceOrArray(mtype, length) - if err != nil { - return mval, err - } - - for i := 0; i < length; i++ { - val, err := d.valueFromToml(mtype.Elem(), val.Index(i).Interface(), nil) - if err != nil { - return mval, err - } - mval.Index(i).Set(val) - } - return mval, nil -} - -// Create a new slice or a new array with specified length -func makeSliceOrArray(mtype reflect.Type, tLength int) (reflect.Value, error) { - var mval reflect.Value - switch mtype.Kind() { - case reflect.Slice: - mval = reflect.MakeSlice(mtype, tLength, tLength) - case reflect.Array: - mval = reflect.New(reflect.ArrayOf(mtype.Len(), mtype.Elem())).Elem() - if tLength > mtype.Len() { - return mval, fmt.Errorf("unmarshal: TOML array length (%v) exceeds destination array length (%v)", tLength, mtype.Len()) - } - } - return mval, nil -} - -// Convert toml value to marshal value, using marshal type. When mval1 is non-nil -// and the given type is a struct value, merge fields into it. 
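//
// Dispatch summary for the conversion below: *Tree values decode into
// structs or maps, []*Tree into slices of those, []interface{} into
// primitive slices, and remaining primitives convert reflectively with
// overflow and sign checks (a string may also parse into time.Duration).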
-func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { - if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval, mval1) - } - - switch t := tval.(type) { - case *Tree: - var mval11 *reflect.Value - if mtype.Kind() == reflect.Struct { - mval11 = mval1 - } - - if isTree(mtype) { - return d.valueFromTree(mtype, t, mval11) - } - - if mtype.Kind() == reflect.Interface { - if mval1 == nil || mval1.IsNil() { - return d.valueFromTree(reflect.TypeOf(map[string]interface{}{}), t, nil) - } else { - return d.valueFromToml(mval1.Elem().Type(), t, nil) - } - } - - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval) - case []*Tree: - if isTreeSequence(mtype) { - return d.valueFromTreeSlice(mtype, t) - } - if mtype.Kind() == reflect.Interface { - if mval1 == nil || mval1.IsNil() { - return d.valueFromTreeSlice(reflect.TypeOf([]map[string]interface{}{}), t) - } else { - ival := mval1.Elem() - return d.valueFromToml(mval1.Elem().Type(), t, &ival) - } - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval) - case []interface{}: - d.visitor.visit() - if isOtherSequence(mtype) { - return d.valueFromOtherSlice(mtype, t) - } - if mtype.Kind() == reflect.Interface { - if mval1 == nil || mval1.IsNil() { - return d.valueFromOtherSlice(reflect.TypeOf([]interface{}{}), t) - } else { - ival := mval1.Elem() - return d.valueFromToml(mval1.Elem().Type(), t, &ival) - } - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval) - default: - d.visitor.visit() - // Check if pointer to value implements the encoding.TextUnmarshaler. - if mvalPtr := reflect.New(mtype); isTextUnmarshaler(mvalPtr.Type()) && !isTimeType(mtype) { - if err := d.unmarshalText(tval, mvalPtr); err != nil { - return reflect.ValueOf(nil), fmt.Errorf("unmarshal text: %v", err) - } - return mvalPtr.Elem(), nil - } - - switch mtype.Kind() { - case reflect.Bool, reflect.Struct: - val := reflect.ValueOf(tval) - - switch val.Type() { - case localDateType: - localDate := val.Interface().(LocalDate) - switch mtype { - case timeType: - return reflect.ValueOf(time.Date(localDate.Year, localDate.Month, localDate.Day, 0, 0, 0, 0, time.Local)), nil - } - case localDateTimeType: - localDateTime := val.Interface().(LocalDateTime) - switch mtype { - case timeType: - return reflect.ValueOf(time.Date( - localDateTime.Date.Year, - localDateTime.Date.Month, - localDateTime.Date.Day, - localDateTime.Time.Hour, - localDateTime.Time.Minute, - localDateTime.Time.Second, - localDateTime.Time.Nanosecond, - time.Local)), nil - } - } - - // if this passes for when mtype is reflect.Struct, tval is a time.LocalTime - if !val.Type().ConvertibleTo(mtype) { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.String: - val := reflect.ValueOf(tval) - // stupidly, int64 is convertible to string. So special case this. 
- if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - val := reflect.ValueOf(tval) - if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) && val.Kind() == reflect.String { - d, err := time.ParseDuration(val.String()) - if err != nil { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v. %s", tval, tval, mtype.String(), err) - } - return reflect.ValueOf(d), nil - } - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Convert(reflect.TypeOf(int64(0))).Int()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - - if val.Convert(reflect.TypeOf(int(1))).Int() < 0 { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowUint(val.Convert(reflect.TypeOf(uint64(0))).Uint()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Float32, reflect.Float64: - val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Convert(reflect.TypeOf(float64(0))).Float()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Interface: - if mval1 == nil || mval1.IsNil() { - return reflect.ValueOf(tval), nil - } else { - ival := mval1.Elem() - return d.valueFromToml(mval1.Elem().Type(), t, &ival) - } - case reflect.Slice, reflect.Array: - if isOtherSequence(mtype) && isOtherSequence(reflect.TypeOf(t)) { - return d.valueFromOtherSliceI(mtype, t) - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) - default: - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) - } - } -} - -func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { - var melem *reflect.Value - - if mval1 != nil && !mval1.IsNil() && (mtype.Elem().Kind() == reflect.Struct || mtype.Elem().Kind() == reflect.Interface) { - elem := mval1.Elem() - melem = &elem - } - - val, err := d.valueFromToml(mtype.Elem(), tval, melem) - if err != nil { - return reflect.ValueOf(nil), err - } - mval := reflect.New(mtype.Elem()) - mval.Elem().Set(val) - return mval, nil -} - -func (d *Decoder) unmarshalText(tval interface{}, mval reflect.Value) error { - var buf bytes.Buffer - fmt.Fprint(&buf, tval) - return callTextUnmarshaler(mval, buf.Bytes()) -} 
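For reference, a minimal sketch of how the tag handling below ties together with Unmarshal in the v1 go-toml API that this change drops from vendor (the Config type, field names, and TOML document are illustrative, not taken from this repository):

package main

import (
	"fmt"

	"github.com/pelletier/go-toml"
)

// Config is an illustrative type: "port" falls back to its default tag when
// the document omits it, and a "-" tag excludes a field from decoding.
type Config struct {
	Host     string `toml:"host"`
	Port     int    `toml:"port" default:"8080"`
	Internal string `toml:"-"`
}

func main() {
	var c Config
	doc := []byte(`host = "example.org"`)
	if err := toml.Unmarshal(doc, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Host, c.Port) // example.org 8080
}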
- -func tomlOptions(vf reflect.StructField, an annotation) tomlOpts { - tag := vf.Tag.Get(an.tag) - parse := strings.Split(tag, ",") - var comment string - if c := vf.Tag.Get(an.comment); c != "" { - comment = c - } - commented, _ := strconv.ParseBool(vf.Tag.Get(an.commented)) - multiline, _ := strconv.ParseBool(vf.Tag.Get(an.multiline)) - defaultValue := vf.Tag.Get(tagDefault) - result := tomlOpts{ - name: vf.Name, - nameFromTag: false, - comment: comment, - commented: commented, - multiline: multiline, - include: true, - omitempty: false, - defaultValue: defaultValue, - } - if parse[0] != "" { - if parse[0] == "-" && len(parse) == 1 { - result.include = false - } else { - result.name = strings.Trim(parse[0], " ") - result.nameFromTag = true - } - } - if vf.PkgPath != "" { - result.include = false - } - if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" { - result.omitempty = true - } - if vf.Type.Kind() == reflect.Ptr { - result.omitempty = true - } - return result -} - -func isZero(val reflect.Value) bool { - switch val.Type().Kind() { - case reflect.Slice, reflect.Array, reflect.Map: - return val.Len() == 0 - default: - return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface()) - } -} - -func formatError(err error, pos Position) error { - if err.Error()[0] == '(' { // Error already contains position information - return err - } - return fmt.Errorf("%s: %s", pos, err) -} - -// visitorState keeps track of which keys were unmarshaled. -type visitorState struct { - tree *Tree - path []string - keys map[string]struct{} - active bool -} - -func newVisitorState(tree *Tree) visitorState { - path, result := []string{}, map[string]struct{}{} - insertKeys(path, result, tree) - return visitorState{ - tree: tree, - path: path[:0], - keys: result, - active: true, - } -} - -func (s *visitorState) push(key string) { - if s.active { - s.path = append(s.path, key) - } -} - -func (s *visitorState) pop() { - if s.active { - s.path = s.path[:len(s.path)-1] - } -} - -func (s *visitorState) visit() { - if s.active { - delete(s.keys, strings.Join(s.path, ".")) - } -} - -func (s *visitorState) visitAll() { - if s.active { - for k := range s.keys { - if strings.HasPrefix(k, strings.Join(s.path, ".")) { - delete(s.keys, k) - } - } - } -} - -func (s *visitorState) validate() error { - if !s.active { - return nil - } - undecoded := make([]string, 0, len(s.keys)) - for key := range s.keys { - undecoded = append(undecoded, key) - } - sort.Strings(undecoded) - if len(undecoded) > 0 { - return fmt.Errorf("undecoded keys: %q", undecoded) - } - return nil -} - -func insertKeys(path []string, m map[string]struct{}, tree *Tree) { - for k, v := range tree.values { - switch node := v.(type) { - case []*Tree: - for i, item := range node { - insertKeys(append(path, k, strconv.Itoa(i)), m, item) - } - case *Tree: - insertKeys(append(path, k), m, node) - case *tomlValue: - m[strings.Join(append(path, k), ".")] = struct{}{} - } - } -} diff --git a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml deleted file mode 100644 index 792b72ed..00000000 --- a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml +++ /dev/null @@ -1,39 +0,0 @@ -title = "TOML Marshal Testing" - -[basic_lists] - floats = [12.3,45.6,78.9] - bools = [true,false,true] - dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] - ints = [8001,8001,8002] - uints = [5002,5003] - strings = ["One","Two","Three"] - -[[subdocptrs]] - name 
= "Second" - -[basic_map] - one = "one" - two = "two" - -[subdoc] - - [subdoc.second] - name = "Second" - - [subdoc.first] - name = "First" - -[basic] - uint = 5001 - bool = true - float = 123.4 - float64 = 123.456782132399 - int = 5000 - string = "Bite me" - date = 1979-05-27T07:32:00Z - -[[subdoclist]] - name = "List.First" - -[[subdoclist]] - name = "List.Second" diff --git a/vendor/github.com/pelletier/go-toml/marshal_test.toml b/vendor/github.com/pelletier/go-toml/marshal_test.toml deleted file mode 100644 index ba5e110b..00000000 --- a/vendor/github.com/pelletier/go-toml/marshal_test.toml +++ /dev/null @@ -1,39 +0,0 @@ -title = "TOML Marshal Testing" - -[basic] - bool = true - date = 1979-05-27T07:32:00Z - float = 123.4 - float64 = 123.456782132399 - int = 5000 - string = "Bite me" - uint = 5001 - -[basic_lists] - bools = [true,false,true] - dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] - floats = [12.3,45.6,78.9] - ints = [8001,8001,8002] - strings = ["One","Two","Three"] - uints = [5002,5003] - -[basic_map] - one = "one" - two = "two" - -[subdoc] - - [subdoc.first] - name = "First" - - [subdoc.second] - name = "Second" - -[[subdoclist]] - name = "List.First" - -[[subdoclist]] - name = "List.Second" - -[[subdocptrs]] - name = "Second" diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go deleted file mode 100644 index 7bf40bbd..00000000 --- a/vendor/github.com/pelletier/go-toml/parser.go +++ /dev/null @@ -1,493 +0,0 @@ -// TOML Parser. - -package toml - -import ( - "errors" - "fmt" - "math" - "reflect" - "regexp" - "strconv" - "strings" - "time" -) - -type tomlParser struct { - flowIdx int - flow []token - tree *Tree - currentTable []string - seenTableKeys []string -} - -type tomlParserStateFn func() tomlParserStateFn - -// Formats an error message based on a token and panics -func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) { - panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...)) -} - -func (p *tomlParser) run() { - for state := p.parseStart; state != nil; { - state = state() - } -} - -func (p *tomlParser) peek() *token { - if p.flowIdx >= len(p.flow) { - return nil - } - return &p.flow[p.flowIdx] -} - -func (p *tomlParser) assume(typ tokenType) { - tok := p.getToken() - if tok == nil { - p.raiseError(tok, "was expecting token %s, but token stream is empty", tok) - } - if tok.typ != typ { - p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok) - } -} - -func (p *tomlParser) getToken() *token { - tok := p.peek() - if tok == nil { - return nil - } - p.flowIdx++ - return tok -} - -func (p *tomlParser) parseStart() tomlParserStateFn { - tok := p.peek() - - // end of stream, parsing is finished - if tok == nil { - return nil - } - - switch tok.typ { - case tokenDoubleLeftBracket: - return p.parseGroupArray - case tokenLeftBracket: - return p.parseGroup - case tokenKey: - return p.parseAssign - case tokenEOF: - return nil - case tokenError: - p.raiseError(tok, "parsing error: %s", tok.String()) - default: - p.raiseError(tok, "unexpected token %s", tok.typ) - } - return nil -} - -func (p *tomlParser) parseGroupArray() tomlParserStateFn { - startToken := p.getToken() // discard the [[ - key := p.getToken() - if key.typ != tokenKeyGroupArray { - p.raiseError(key, "unexpected token %s, was expecting a table array key", key) - } - - // get or create table array element at the indicated part in the path - keys, err := parseKey(key.val) - if err != nil { - p.raiseError(key,
"invalid table array key: %s", err) - } - p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries - destTree := p.tree.GetPath(keys) - var array []*Tree - if destTree == nil { - array = make([]*Tree, 0) - } else if target, ok := destTree.([]*Tree); ok && target != nil { - array = destTree.([]*Tree) - } else { - p.raiseError(key, "key %s is already assigned and not of type table array", key) - } - p.currentTable = keys - - // add a new tree to the end of the table array - newTree := newTree() - newTree.position = startToken.Position - array = append(array, newTree) - p.tree.SetPath(p.currentTable, array) - - // remove all keys that were children of this table array - prefix := key.val + "." - found := false - for ii := 0; ii < len(p.seenTableKeys); { - tableKey := p.seenTableKeys[ii] - if strings.HasPrefix(tableKey, prefix) { - p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...) - } else { - found = (tableKey == key.val) - ii++ - } - } - - // keep this key name from use by other kinds of assignments - if !found { - p.seenTableKeys = append(p.seenTableKeys, key.val) - } - - // move to next parser state - p.assume(tokenDoubleRightBracket) - return p.parseStart -} - -func (p *tomlParser) parseGroup() tomlParserStateFn { - startToken := p.getToken() // discard the [ - key := p.getToken() - if key.typ != tokenKeyGroup { - p.raiseError(key, "unexpected token %s, was expecting a table key", key) - } - for _, item := range p.seenTableKeys { - if item == key.val { - p.raiseError(key, "duplicated tables") - } - } - - p.seenTableKeys = append(p.seenTableKeys, key.val) - keys, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid table key: %s", err) - } - if err := p.tree.createSubTree(keys, startToken.Position); err != nil { - p.raiseError(key, "%s", err) - } - destTree := p.tree.GetPath(keys) - if target, ok := destTree.(*Tree); ok && target != nil && target.inline { - p.raiseError(key, "cannot redefine an existing inline table or its sub-table: %s", - strings.Join(keys, ".")) - } - p.assume(tokenRightBracket) - p.currentTable = keys - return p.parseStart -} - -func (p *tomlParser) parseAssign() tomlParserStateFn { - key := p.getToken() - p.assume(tokenEqual) - - parsedKey, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid key: %s", err.Error()) - } - - value := p.parseRvalue() - var tableKey []string - if len(p.currentTable) > 0 { - tableKey = p.currentTable - } else { - tableKey = []string{} - } - - prefixKey := parsedKey[0 : len(parsedKey)-1] - tableKey = append(tableKey, prefixKey...)
- - // find the table to assign, looking out for arrays of tables - var targetNode *Tree - switch node := p.tree.GetPath(tableKey).(type) { - case []*Tree: - targetNode = node[len(node)-1] - case *Tree: - targetNode = node - case nil: - // create intermediate - if err := p.tree.createSubTree(tableKey, key.Position); err != nil { - p.raiseError(key, "could not create intermediate group: %s", err) - } - targetNode = p.tree.GetPath(tableKey).(*Tree) - default: - p.raiseError(key, "Unknown table type for path: %s", - strings.Join(tableKey, ".")) - } - - if targetNode.inline { - p.raiseError(key, "could not add key or sub-table to exist inline table or its sub-table : %s", - strings.Join(tableKey, ".")) - } - - // assign value to the found table - keyVal := parsedKey[len(parsedKey)-1] - localKey := []string{keyVal} - finalKey := append(tableKey, keyVal) - if targetNode.GetPath(localKey) != nil { - p.raiseError(key, "The following key was defined twice: %s", - strings.Join(finalKey, ".")) - } - var toInsert interface{} - - switch value.(type) { - case *Tree, []*Tree: - toInsert = value - default: - toInsert = &tomlValue{value: value, position: key.Position} - } - targetNode.values[keyVal] = toInsert - return p.parseStart -} - -var numberUnderscoreInvalidRegexp *regexp.Regexp -var hexNumberUnderscoreInvalidRegexp *regexp.Regexp - -func numberContainsInvalidUnderscore(value string) error { - if numberUnderscoreInvalidRegexp.MatchString(value) { - return errors.New("invalid use of _ in number") - } - return nil -} - -func hexNumberContainsInvalidUnderscore(value string) error { - if hexNumberUnderscoreInvalidRegexp.MatchString(value) { - return errors.New("invalid use of _ in hex number") - } - return nil -} - -func cleanupNumberToken(value string) string { - cleanedVal := strings.Replace(value, "_", "", -1) - return cleanedVal -} - -func (p *tomlParser) parseRvalue() interface{} { - tok := p.getToken() - if tok == nil || tok.typ == tokenEOF { - p.raiseError(tok, "expecting a value") - } - - switch tok.typ { - case tokenString: - return tok.val - case tokenTrue: - return true - case tokenFalse: - return false - case tokenInf: - if tok.val[0] == '-' { - return math.Inf(-1) - } - return math.Inf(1) - case tokenNan: - return math.NaN() - case tokenInteger: - cleanedVal := cleanupNumberToken(tok.val) - var err error - var val int64 - if len(cleanedVal) >= 3 && cleanedVal[0] == '0' { - switch cleanedVal[1] { - case 'x': - err = hexNumberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal[2:], 16, 64) - case 'o': - err = numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal[2:], 8, 64) - case 'b': - err = numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal[2:], 2, 64) - default: - panic("invalid base") // the lexer should catch this first - } - } else { - err = numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal, 10, 64) - } - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenFloat: - err := numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - cleanedVal := cleanupNumberToken(tok.val) - val, err := strconv.ParseFloat(cleanedVal, 64) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenDate: - 
layout := time.RFC3339Nano - if !strings.Contains(tok.val, "T") { - layout = strings.Replace(layout, "T", " ", 1) - } - val, err := time.ParseInLocation(layout, tok.val, time.UTC) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenLocalDate: - v := strings.Replace(tok.val, " ", "T", -1) - isDateTime := false - isTime := false - for _, c := range v { - if c == 'T' || c == 't' { - isDateTime = true - break - } - if c == ':' { - isTime = true - break - } - } - - var val interface{} - var err error - - if isDateTime { - val, err = ParseLocalDateTime(v) - } else if isTime { - val, err = ParseLocalTime(v) - } else { - val, err = ParseLocalDate(v) - } - - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenLeftBracket: - return p.parseArray() - case tokenLeftCurlyBrace: - return p.parseInlineTable() - case tokenEqual: - p.raiseError(tok, "cannot have multiple equals for the same key") - case tokenError: - p.raiseError(tok, "%s", tok) - } - - p.raiseError(tok, "never reached") - - return nil -} - -func tokenIsComma(t *token) bool { - return t != nil && t.typ == tokenComma -} - -func (p *tomlParser) parseInlineTable() *Tree { - tree := newTree() - var previous *token -Loop: - for { - follow := p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated inline table") - } - switch follow.typ { - case tokenRightCurlyBrace: - p.getToken() - break Loop - case tokenKey, tokenInteger, tokenString: - if !tokenIsComma(previous) && previous != nil { - p.raiseError(follow, "comma expected between fields in inline table") - } - key := p.getToken() - p.assume(tokenEqual) - - parsedKey, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid key: %s", err) - } - - value := p.parseRvalue() - tree.SetPath(parsedKey, value) - case tokenComma: - if tokenIsComma(previous) { - p.raiseError(follow, "need field between two commas in inline table") - } - p.getToken() - default: - p.raiseError(follow, "unexpected token type in inline table: %s", follow.String()) - } - previous = follow - } - if tokenIsComma(previous) { - p.raiseError(previous, "trailing comma at the end of inline table") - } - tree.inline = true - return tree -} - -func (p *tomlParser) parseArray() interface{} { - var array []interface{} - arrayType := reflect.TypeOf(newTree()) - for { - follow := p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated array") - } - if follow.typ == tokenRightBracket { - p.getToken() - break - } - val := p.parseRvalue() - if reflect.TypeOf(val) != arrayType { - arrayType = nil - } - array = append(array, val) - follow = p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated array") - } - if follow.typ != tokenRightBracket && follow.typ != tokenComma { - p.raiseError(follow, "missing comma") - } - if follow.typ == tokenComma { - p.getToken() - } - } - - // if the array is a mixed-type array or its length is 0, - // don't convert it to a table array - if len(array) <= 0 { - arrayType = nil - } - // An array of Trees is actually an array of inline - // tables, which is a shorthand for a table array. If the - // array was not converted from []interface{} to []*Tree, - // the two notations would not be equivalent. 
- if arrayType == reflect.TypeOf(newTree()) { - tomlArray := make([]*Tree, len(array)) - for i, v := range array { - tomlArray[i] = v.(*Tree) - } - return tomlArray - } - return array -} - -func parseToml(flow []token) *Tree { - result := newTree() - result.position = Position{1, 1} - parser := &tomlParser{ - flowIdx: 0, - flow: flow, - tree: result, - currentTable: make([]string, 0), - seenTableKeys: make([]string, 0), - } - parser.run() - return result -} - -func init() { - numberUnderscoreInvalidRegexp = regexp.MustCompile(`([^\d]_|_[^\d])|_$|^_`) - hexNumberUnderscoreInvalidRegexp = regexp.MustCompile(`(^0x_)|([^\da-f]_|_[^\da-f])|_$|^_`) -} diff --git a/vendor/github.com/pelletier/go-toml/position.go b/vendor/github.com/pelletier/go-toml/position.go deleted file mode 100644 index c17bff87..00000000 --- a/vendor/github.com/pelletier/go-toml/position.go +++ /dev/null @@ -1,29 +0,0 @@ -// Position support for go-toml - -package toml - -import ( - "fmt" -) - -// Position of a document element within a TOML document. -// -// Line and Col are both 1-indexed positions for the element's line number and -// column number, respectively. Values of zero or less will cause Invalid(), -// to return true. -type Position struct { - Line int // line within the document - Col int // column within the line -} - -// String representation of the position. -// Displays 1-indexed line and column numbers. -func (p Position) String() string { - return fmt.Sprintf("(%d, %d)", p.Line, p.Col) -} - -// Invalid returns whether or not the position is valid (i.e. with negative or -// null values) -func (p Position) Invalid() bool { - return p.Line <= 0 || p.Col <= 0 -} diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go deleted file mode 100644 index 6af4ec46..00000000 --- a/vendor/github.com/pelletier/go-toml/token.go +++ /dev/null @@ -1,134 +0,0 @@ -package toml - -import "fmt" - -// Define tokens -type tokenType int - -const ( - eof = -(iota + 1) -) - -const ( - tokenError tokenType = iota - tokenEOF - tokenComment - tokenKey - tokenString - tokenInteger - tokenTrue - tokenFalse - tokenFloat - tokenInf - tokenNan - tokenEqual - tokenLeftBracket - tokenRightBracket - tokenLeftCurlyBrace - tokenRightCurlyBrace - tokenLeftParen - tokenRightParen - tokenDoubleLeftBracket - tokenDoubleRightBracket - tokenDate - tokenLocalDate - tokenKeyGroup - tokenKeyGroupArray - tokenComma - tokenColon - tokenDollar - tokenStar - tokenQuestion - tokenDot - tokenDotDot - tokenEOL -) - -var tokenTypeNames = []string{ - "Error", - "EOF", - "Comment", - "Key", - "String", - "Integer", - "True", - "False", - "Float", - "Inf", - "NaN", - "=", - "[", - "]", - "{", - "}", - "(", - ")", - "]]", - "[[", - "LocalDate", - "LocalDate", - "KeyGroup", - "KeyGroupArray", - ",", - ":", - "$", - "*", - "?", - ".", - "..", - "EOL", -} - -type token struct { - Position - typ tokenType - val string -} - -func (tt tokenType) String() string { - idx := int(tt) - if idx < len(tokenTypeNames) { - return tokenTypeNames[idx] - } - return "Unknown" -} - -func (t token) String() string { - switch t.typ { - case tokenEOF: - return "EOF" - case tokenError: - return t.val - } - - return fmt.Sprintf("%q", t.val) -} - -func isSpace(r rune) bool { - return r == ' ' || r == '\t' -} - -func isAlphanumeric(r rune) bool { - return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || r == '_' -} - -func isKeyChar(r rune) bool { - // Keys start with the first character that isn't whitespace or [ and end - // with the last 
non-whitespace character before the equals sign. Keys - // cannot contain a # character." - return !(r == '\r' || r == '\n' || r == eof || r == '=') -} - -func isKeyStartChar(r rune) bool { - return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[') -} - -func isDigit(r rune) bool { - return '0' <= r && r <= '9' -} - -func isHexDigit(r rune) bool { - return isDigit(r) || - (r >= 'a' && r <= 'f') || - (r >= 'A' && r <= 'F') -} diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go deleted file mode 100644 index cbb89a9a..00000000 --- a/vendor/github.com/pelletier/go-toml/toml.go +++ /dev/null @@ -1,529 +0,0 @@ -package toml - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - "strings" -) - -type tomlValue struct { - value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list - comment string - commented bool - multiline bool - position Position -} - -// Tree is the result of the parsing of a TOML file. -type Tree struct { - values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree - comment string - commented bool - inline bool - position Position -} - -func newTree() *Tree { - return newTreeWithPosition(Position{}) -} - -func newTreeWithPosition(pos Position) *Tree { - return &Tree{ - values: make(map[string]interface{}), - position: pos, - } -} - -// TreeFromMap initializes a new Tree object using the given map. -func TreeFromMap(m map[string]interface{}) (*Tree, error) { - result, err := toTree(m) - if err != nil { - return nil, err - } - return result.(*Tree), nil -} - -// Position returns the position of the tree. -func (t *Tree) Position() Position { - return t.position -} - -// Has returns a boolean indicating if the given key exists. -func (t *Tree) Has(key string) bool { - if key == "" { - return false - } - return t.HasPath(strings.Split(key, ".")) -} - -// HasPath returns true if the given path of keys exists, false otherwise. -func (t *Tree) HasPath(keys []string) bool { - return t.GetPath(keys) != nil -} - -// Keys returns the keys of the toplevel tree (does not recurse). -func (t *Tree) Keys() []string { - keys := make([]string, len(t.values)) - i := 0 - for k := range t.values { - keys[i] = k - i++ - } - return keys -} - -// Get the value at key in the Tree. -// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. -// If you need to retrieve non-bare keys, use GetPath. -// Returns nil if the path does not exist in the tree. -// If keys is of length zero, the current tree is returned. -func (t *Tree) Get(key string) interface{} { - if key == "" { - return t - } - return t.GetPath(strings.Split(key, ".")) -} - -// GetPath returns the element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree is returned. 
-func (t *Tree) GetPath(keys []string) interface{} { - if len(keys) == 0 { - return t - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return nil - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return nil - } - subtree = node[len(node)-1] - default: - return nil // cannot navigate through other node types - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - return node.value - default: - return node - } -} - -// GetArray returns the value at key in the Tree. -// It returns []string, []int64, etc type if key has homogeneous lists -// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. -// Returns nil if the path does not exist in the tree. -// If keys is of length zero, the current tree is returned. -func (t *Tree) GetArray(key string) interface{} { - if key == "" { - return t - } - return t.GetArrayPath(strings.Split(key, ".")) -} - -// GetArrayPath returns the element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree is returned. -func (t *Tree) GetArrayPath(keys []string) interface{} { - if len(keys) == 0 { - return t - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return nil - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return nil - } - subtree = node[len(node)-1] - default: - return nil // cannot navigate through other node types - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - switch n := node.value.(type) { - case []interface{}: - return getArray(n) - default: - return node.value - } - default: - return node - } -} - -// if homogeneous array, then return slice type object over []interface{} -func getArray(n []interface{}) interface{} { - var s []string - var i64 []int64 - var f64 []float64 - var bl []bool - for _, value := range n { - switch v := value.(type) { - case string: - s = append(s, v) - case int64: - i64 = append(i64, v) - case float64: - f64 = append(f64, v) - case bool: - bl = append(bl, v) - default: - return n - } - } - if len(s) == len(n) { - return s - } else if len(i64) == len(n) { - return i64 - } else if len(f64) == len(n) { - return f64 - } else if len(bl) == len(n) { - return bl - } - return n -} - -// GetPosition returns the position of the given key. -func (t *Tree) GetPosition(key string) Position { - if key == "" { - return t.position - } - return t.GetPositionPath(strings.Split(key, ".")) -} - -// SetPositionPath sets the position of element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree position is set. 
-func (t *Tree) SetPositionPath(keys []string, pos Position) { - if len(keys) == 0 { - t.position = pos - return - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return - } - subtree = node[len(node)-1] - default: - return - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - node.position = pos - return - case *Tree: - node.position = pos - return - case []*Tree: - // go to most recent element - if len(node) == 0 { - return - } - node[len(node)-1].position = pos - return - } -} - -// GetPositionPath returns the element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree is returned. -func (t *Tree) GetPositionPath(keys []string) Position { - if len(keys) == 0 { - return t.position - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return Position{0, 0} - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return Position{0, 0} - } - subtree = node[len(node)-1] - default: - return Position{0, 0} - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - return node.position - case *Tree: - return node.position - case []*Tree: - // go to most recent element - if len(node) == 0 { - return Position{0, 0} - } - return node[len(node)-1].position - default: - return Position{0, 0} - } -} - -// GetDefault works like Get but with a default value -func (t *Tree) GetDefault(key string, def interface{}) interface{} { - val := t.Get(key) - if val == nil { - return def - } - return val -} - -// SetOptions arguments are supplied to the SetWithOptions and SetPathWithOptions functions to modify marshalling behaviour. -// The default values within the struct are valid default options. -type SetOptions struct { - Comment string - Commented bool - Multiline bool -} - -// SetWithOptions is the same as Set, but allows you to provide formatting -// instructions to the key, that will be used by Marshal(). -func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) { - t.SetPathWithOptions(strings.Split(key, "."), opts, value) -} - -// SetPathWithOptions is the same as SetPath, but allows you to provide -// formatting instructions to the key, that will be reused by Marshal(). 
-func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) { - subtree := t - for i, intermediateKey := range keys[:len(keys)-1] { - nextTree, exists := subtree.values[intermediateKey] - if !exists { - nextTree = newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) - subtree.values[intermediateKey] = nextTree // add new element here - } - switch node := nextTree.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - // create element if it does not exist - node = append(node, newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})) - subtree.values[intermediateKey] = node - } - subtree = node[len(node)-1] - } - } - - var toInsert interface{} - - switch v := value.(type) { - case *Tree: - v.comment = opts.Comment - v.commented = opts.Commented - toInsert = value - case []*Tree: - for i := range v { - v[i].commented = opts.Commented - } - toInsert = value - case *tomlValue: - v.comment = opts.Comment - v.commented = opts.Commented - v.multiline = opts.Multiline - toInsert = v - default: - toInsert = &tomlValue{value: value, - comment: opts.Comment, - commented: opts.Commented, - multiline: opts.Multiline, - position: Position{Line: subtree.position.Line + len(subtree.values) + 1, Col: subtree.position.Col}} - } - - subtree.values[keys[len(keys)-1]] = toInsert -} - -// Set an element in the tree. -// Key is a dot-separated path (e.g. a.b.c). -// Creates all necessary intermediate trees, if needed. -func (t *Tree) Set(key string, value interface{}) { - t.SetWithComment(key, "", false, value) -} - -// SetWithComment is the same as Set, but allows you to provide comment -// information to the key, that will be reused by Marshal(). -func (t *Tree) SetWithComment(key string, comment string, commented bool, value interface{}) { - t.SetPathWithComment(strings.Split(key, "."), comment, commented, value) -} - -// SetPath sets an element in the tree. -// Keys is an array of path elements (e.g. {"a","b","c"}). -// Creates all necessary intermediate trees, if needed. -func (t *Tree) SetPath(keys []string, value interface{}) { - t.SetPathWithComment(keys, "", false, value) -} - -// SetPathWithComment is the same as SetPath, but allows you to provide comment -// information to the key, that will be reused by Marshal(). -func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) { - t.SetPathWithOptions(keys, SetOptions{Comment: comment, Commented: commented}, value) -} - -// Delete removes a key from the tree. -// Key is a dot-separated path (e.g. a.b.c). -func (t *Tree) Delete(key string) error { - keys, err := parseKey(key) - if err != nil { - return err - } - return t.DeletePath(keys) -} - -// DeletePath removes a key from the tree. -// Keys is an array of path elements (e.g. {"a","b","c"}). -func (t *Tree) DeletePath(keys []string) error { - keyLen := len(keys) - if keyLen == 1 { - delete(t.values, keys[0]) - return nil - } - tree := t.GetPath(keys[:keyLen-1]) - item := keys[keyLen-1] - switch node := tree.(type) { - case *Tree: - delete(node.values, item) - return nil - } - return errors.New("no such key to delete") -} - -// createSubTree takes a tree and a key and create the necessary intermediate -// subtrees to create a subtree at that point. In-place. -// -// e.g. 
passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b] -// and tree[a][b][c] -// -// Returns nil on success, error object on failure -func (t *Tree) createSubTree(keys []string, pos Position) error { - subtree := t - for i, intermediateKey := range keys { - nextTree, exists := subtree.values[intermediateKey] - if !exists { - tree := newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) - tree.position = pos - tree.inline = subtree.inline - subtree.values[intermediateKey] = tree - nextTree = tree - } - - switch node := nextTree.(type) { - case []*Tree: - subtree = node[len(node)-1] - case *Tree: - subtree = node - default: - return fmt.Errorf("unknown type for path %s (%s): %T (%#v)", - strings.Join(keys, "."), intermediateKey, nextTree, nextTree) - } - } - return nil -} - -// LoadBytes creates a Tree from a []byte. -func LoadBytes(b []byte) (tree *Tree, err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = errors.New(r.(string)) - } - }() - - if len(b) >= 4 && (hasUTF32BigEndianBOM4(b) || hasUTF32LittleEndianBOM4(b)) { - b = b[4:] - } else if len(b) >= 3 && hasUTF8BOM3(b) { - b = b[3:] - } else if len(b) >= 2 && (hasUTF16BigEndianBOM2(b) || hasUTF16LittleEndianBOM2(b)) { - b = b[2:] - } - - tree = parseToml(lexToml(b)) - return -} - -func hasUTF16BigEndianBOM2(b []byte) bool { - return b[0] == 0xFE && b[1] == 0xFF -} - -func hasUTF16LittleEndianBOM2(b []byte) bool { - return b[0] == 0xFF && b[1] == 0xFE -} - -func hasUTF8BOM3(b []byte) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -func hasUTF32BigEndianBOM4(b []byte) bool { - return b[0] == 0x00 && b[1] == 0x00 && b[2] == 0xFE && b[3] == 0xFF -} - -func hasUTF32LittleEndianBOM4(b []byte) bool { - return b[0] == 0xFF && b[1] == 0xFE && b[2] == 0x00 && b[3] == 0x00 -} - -// LoadReader creates a Tree from any io.Reader. -func LoadReader(reader io.Reader) (tree *Tree, err error) { - inputBytes, err := ioutil.ReadAll(reader) - if err != nil { - return - } - tree, err = LoadBytes(inputBytes) - return -} - -// Load creates a Tree from a string. -func Load(content string) (tree *Tree, err error) { - return LoadBytes([]byte(content)) -} - -// LoadFile creates a Tree from a file. -func LoadFile(path string) (tree *Tree, err error) { - file, err := os.Open(path) - if err != nil { - return nil, err - } - defer file.Close() - return LoadReader(file) -} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go deleted file mode 100644 index 80353500..00000000 --- a/vendor/github.com/pelletier/go-toml/tomltree_create.go +++ /dev/null @@ -1,155 +0,0 @@ -package toml - -import ( - "fmt" - "reflect" - "time" -) - -var kindToType = [reflect.String + 1]reflect.Type{ - reflect.Bool: reflect.TypeOf(true), - reflect.String: reflect.TypeOf(""), - reflect.Float32: reflect.TypeOf(float64(1)), - reflect.Float64: reflect.TypeOf(float64(1)), - reflect.Int: reflect.TypeOf(int64(1)), - reflect.Int8: reflect.TypeOf(int64(1)), - reflect.Int16: reflect.TypeOf(int64(1)), - reflect.Int32: reflect.TypeOf(int64(1)), - reflect.Int64: reflect.TypeOf(int64(1)), - reflect.Uint: reflect.TypeOf(uint64(1)), - reflect.Uint8: reflect.TypeOf(uint64(1)), - reflect.Uint16: reflect.TypeOf(uint64(1)), - reflect.Uint32: reflect.TypeOf(uint64(1)), - reflect.Uint64: reflect.TypeOf(uint64(1)), -} - -// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found. 
-// supported values: -// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32 -func typeFor(k reflect.Kind) reflect.Type { - if k > 0 && int(k) < len(kindToType) { - return kindToType[k] - } - return nil -} - -func simpleValueCoercion(object interface{}) (interface{}, error) { - switch original := object.(type) { - case string, bool, int64, uint64, float64, time.Time: - return original, nil - case int: - return int64(original), nil - case int8: - return int64(original), nil - case int16: - return int64(original), nil - case int32: - return int64(original), nil - case uint: - return uint64(original), nil - case uint8: - return uint64(original), nil - case uint16: - return uint64(original), nil - case uint32: - return uint64(original), nil - case float32: - return float64(original), nil - case fmt.Stringer: - return original.String(), nil - case []interface{}: - value := reflect.ValueOf(original) - length := value.Len() - arrayValue := reflect.MakeSlice(value.Type(), 0, length) - for i := 0; i < length; i++ { - val := value.Index(i).Interface() - simpleValue, err := simpleValueCoercion(val) - if err != nil { - return nil, err - } - arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) - } - return arrayValue.Interface(), nil - default: - return nil, fmt.Errorf("cannot convert type %T to Tree", object) - } -} - -func sliceToTree(object interface{}) (interface{}, error) { - // arrays are a bit tricky, since they can represent either a - // collection of simple values, which is represented by one - // *tomlValue, or an array of tables, which is represented by an - // array of *Tree. - - // holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice - value := reflect.ValueOf(object) - insideType := value.Type().Elem() - length := value.Len() - if length > 0 { - insideType = reflect.ValueOf(value.Index(0).Interface()).Type() - } - if insideType.Kind() == reflect.Map { - // this is considered as an array of tables - tablesArray := make([]*Tree, 0, length) - for i := 0; i < length; i++ { - table := value.Index(i) - tree, err := toTree(table.Interface()) - if err != nil { - return nil, err - } - tablesArray = append(tablesArray, tree.(*Tree)) - } - return tablesArray, nil - } - - sliceType := typeFor(insideType.Kind()) - if sliceType == nil { - sliceType = insideType - } - - arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length) - - for i := 0; i < length; i++ { - val := value.Index(i).Interface() - simpleValue, err := simpleValueCoercion(val) - if err != nil { - return nil, err - } - arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) - } - return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil -} - -func toTree(object interface{}) (interface{}, error) { - value := reflect.ValueOf(object) - - if value.Kind() == reflect.Map { - values := map[string]interface{}{} - keys := value.MapKeys() - for _, key := range keys { - if key.Kind() != reflect.String { - if _, ok := key.Interface().(string); !ok { - return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind()) - } - } - - v := value.MapIndex(key) - newValue, err := toTree(v.Interface()) - if err != nil { - return nil, err - } - values[key.String()] = newValue - } - return &Tree{values: values, position: Position{}}, nil - } - - if value.Kind() == reflect.Array || value.Kind() == reflect.Slice { - return sliceToTree(object) - } - - simpleValue, 
err := simpleValueCoercion(object) - if err != nil { - return nil, err - } - return &tomlValue{value: simpleValue, position: Position{}}, nil -} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go deleted file mode 100644 index ae6dac49..00000000 --- a/vendor/github.com/pelletier/go-toml/tomltree_write.go +++ /dev/null @@ -1,517 +0,0 @@ -package toml - -import ( - "bytes" - "fmt" - "io" - "math" - "math/big" - "reflect" - "sort" - "strconv" - "strings" - "time" -) - -type valueComplexity int - -const ( - valueSimple valueComplexity = iota + 1 - valueComplex -) - -type sortNode struct { - key string - complexity valueComplexity -} - -// Encodes a string to a TOML-compliant multi-line string value -// This function is a clone of the existing encodeTomlString function, except that whitespace characters -// are preserved. Quotation marks and backslashes are also not escaped. -func encodeMultilineTomlString(value string, commented string) string { - var b bytes.Buffer - adjacentQuoteCount := 0 - - b.WriteString(commented) - for i, rr := range value { - if rr != '"' { - adjacentQuoteCount = 0 - } else { - adjacentQuoteCount++ - } - switch rr { - case '\b': - b.WriteString(`\b`) - case '\t': - b.WriteString("\t") - case '\n': - b.WriteString("\n" + commented) - case '\f': - b.WriteString(`\f`) - case '\r': - b.WriteString("\r") - case '"': - if adjacentQuoteCount >= 3 || i == len(value)-1 { - adjacentQuoteCount = 0 - b.WriteString(`\"`) - } else { - b.WriteString(`"`) - } - case '\\': - b.WriteString(`\`) - default: - intRr := uint16(rr) - if intRr < 0x001F { - b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) - } else { - b.WriteRune(rr) - } - } - } - return b.String() -} - -// Encodes a string to a TOML-compliant string value -func encodeTomlString(value string) string { - var b bytes.Buffer - - for _, rr := range value { - switch rr { - case '\b': - b.WriteString(`\b`) - case '\t': - b.WriteString(`\t`) - case '\n': - b.WriteString(`\n`) - case '\f': - b.WriteString(`\f`) - case '\r': - b.WriteString(`\r`) - case '"': - b.WriteString(`\"`) - case '\\': - b.WriteString(`\\`) - default: - intRr := uint16(rr) - if intRr < 0x001F { - b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) - } else { - b.WriteRune(rr) - } - } - } - return b.String() -} - -func tomlTreeStringRepresentation(t *Tree, ord marshalOrder) (string, error) { - var orderedVals []sortNode - switch ord { - case OrderPreserve: - orderedVals = sortByLines(t) - default: - orderedVals = sortAlphabetical(t) - } - - var values []string - for _, node := range orderedVals { - k := node.key - v := t.values[k] - - repr, err := tomlValueStringRepresentation(v, "", "", ord, false) - if err != nil { - return "", err - } - values = append(values, quoteKeyIfNeeded(k)+" = "+repr) - } - return "{ " + strings.Join(values, ", ") + " }", nil -} - -func tomlValueStringRepresentation(v interface{}, commented string, indent string, ord marshalOrder, arraysOneElementPerLine bool) (string, error) { - // this interface check is added to dereference the change made in the writeTo function. - // That change was made to allow this function to see formatting options. 
- tv, ok := v.(*tomlValue) - if ok { - v = tv.value - } else { - tv = &tomlValue{} - } - - switch value := v.(type) { - case uint64: - return strconv.FormatUint(value, 10), nil - case int64: - return strconv.FormatInt(value, 10), nil - case float64: - // Default bit length is full 64 - bits := 64 - // Float panics if nan is used - if !math.IsNaN(value) { - // if 32 bit accuracy is enough to exactly show, use 32 - _, acc := big.NewFloat(value).Float32() - if acc == big.Exact { - bits = 32 - } - } - if math.Trunc(value) == value { - return strings.ToLower(strconv.FormatFloat(value, 'f', 1, bits)), nil - } - return strings.ToLower(strconv.FormatFloat(value, 'f', -1, bits)), nil - case string: - if tv.multiline { - return "\"\"\"\n" + encodeMultilineTomlString(value, commented) + "\"\"\"", nil - } - return "\"" + encodeTomlString(value) + "\"", nil - case []byte: - b, _ := v.([]byte) - return string(b), nil - case bool: - if value { - return "true", nil - } - return "false", nil - case time.Time: - return value.Format(time.RFC3339), nil - case LocalDate: - return value.String(), nil - case LocalDateTime: - return value.String(), nil - case LocalTime: - return value.String(), nil - case *Tree: - return tomlTreeStringRepresentation(value, ord) - case nil: - return "", nil - } - - rv := reflect.ValueOf(v) - - if rv.Kind() == reflect.Slice { - var values []string - for i := 0; i < rv.Len(); i++ { - item := rv.Index(i).Interface() - itemRepr, err := tomlValueStringRepresentation(item, commented, indent, ord, arraysOneElementPerLine) - if err != nil { - return "", err - } - values = append(values, itemRepr) - } - if arraysOneElementPerLine && len(values) > 1 { - stringBuffer := bytes.Buffer{} - valueIndent := indent + ` ` // TODO: move that to a shared encoder state - - stringBuffer.WriteString("[\n") - - for _, value := range values { - stringBuffer.WriteString(valueIndent) - stringBuffer.WriteString(commented + value) - stringBuffer.WriteString(`,`) - stringBuffer.WriteString("\n") - } - - stringBuffer.WriteString(indent + commented + "]") - - return stringBuffer.String(), nil - } - return "[" + strings.Join(values, ", ") + "]", nil - } - return "", fmt.Errorf("unsupported value type %T: %v", v, v) -} - -func getTreeArrayLine(trees []*Tree) (line int) { - // get lowest line number that is not 0 - for _, tv := range trees { - if tv.position.Line < line || line == 0 { - line = tv.position.Line - } - } - return -} - -func sortByLines(t *Tree) (vals []sortNode) { - var ( - line int - lines []int - tv *Tree - tom *tomlValue - node sortNode - ) - vals = make([]sortNode, 0) - m := make(map[int]sortNode) - - for k := range t.values { - v := t.values[k] - switch v.(type) { - case *Tree: - tv = v.(*Tree) - line = tv.position.Line - node = sortNode{key: k, complexity: valueComplex} - case []*Tree: - line = getTreeArrayLine(v.([]*Tree)) - node = sortNode{key: k, complexity: valueComplex} - default: - tom = v.(*tomlValue) - line = tom.position.Line - node = sortNode{key: k, complexity: valueSimple} - } - lines = append(lines, line) - vals = append(vals, node) - m[line] = node - } - sort.Ints(lines) - - for i, line := range lines { - vals[i] = m[line] - } - - return vals -} - -func sortAlphabetical(t *Tree) (vals []sortNode) { - var ( - node sortNode - simpVals []string - compVals []string - ) - vals = make([]sortNode, 0) - m := make(map[string]sortNode) - - for k := range t.values { - v := t.values[k] - switch v.(type) { - case *Tree, []*Tree: - node = sortNode{key: k, complexity: valueComplex} - compVals = 
append(compVals, node.key) - default: - node = sortNode{key: k, complexity: valueSimple} - simpVals = append(simpVals, node.key) - } - vals = append(vals, node) - m[node.key] = node - } - - // Simples first to match previous implementation - sort.Strings(simpVals) - i := 0 - for _, key := range simpVals { - vals[i] = m[key] - i++ - } - - sort.Strings(compVals) - for _, key := range compVals { - vals[i] = m[key] - i++ - } - - return vals -} - -func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { - return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical, " ", false) -} - -func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord marshalOrder, indentString string, parentCommented bool) (int64, error) { - var orderedVals []sortNode - - switch ord { - case OrderPreserve: - orderedVals = sortByLines(t) - default: - orderedVals = sortAlphabetical(t) - } - - for _, node := range orderedVals { - switch node.complexity { - case valueComplex: - k := node.key - v := t.values[k] - - combinedKey := quoteKeyIfNeeded(k) - if keyspace != "" { - combinedKey = keyspace + "." + combinedKey - } - - switch node := v.(type) { - // node has to be of those two types given how keys are sorted above - case *Tree: - tv, ok := t.values[k].(*Tree) - if !ok { - return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) - } - if tv.comment != "" { - comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1) - start := "# " - if strings.HasPrefix(comment, "#") { - start = "" - } - writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment) - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - - var commented string - if parentCommented || t.commented || tv.commented { - commented = "# " - } - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - bytesCount, err = node.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, parentCommented || t.commented || tv.commented) - if err != nil { - return bytesCount, err - } - case []*Tree: - for _, subTree := range node { - var commented string - if parentCommented || t.commented || subTree.commented { - commented = "# " - } - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - - bytesCount, err = subTree.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, parentCommented || t.commented || subTree.commented) - if err != nil { - return bytesCount, err - } - } - } - default: // Simple - k := node.key - v, ok := t.values[k].(*tomlValue) - if !ok { - return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) - } - - var commented string - if parentCommented || t.commented || v.commented { - commented = "# " - } - repr, err := tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine) - if err != nil { - return bytesCount, err - } - - if v.comment != "" { - comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1) - start := "# " - if strings.HasPrefix(comment, "#") { - start = "" - } - 
writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment, "\n") - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - - quotedKey := quoteKeyIfNeeded(k) - writtenBytesCount, err := writeStrings(w, indent, commented, quotedKey, " = ", repr, "\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - } - } - - return bytesCount, nil -} - -// quote a key if it does not fit the bare key format (A-Za-z0-9_-) -// quoted keys use the same rules as strings -func quoteKeyIfNeeded(k string) string { - // when encoding a map with the 'quoteMapKeys' option enabled, the tree will contain - // keys that have already been quoted. - // not an ideal situation, but good enough of a stop gap. - if len(k) >= 2 && k[0] == '"' && k[len(k)-1] == '"' { - return k - } - isBare := true - for _, r := range k { - if !isValidBareChar(r) { - isBare = false - break - } - } - if isBare { - return k - } - return quoteKey(k) -} - -func quoteKey(k string) string { - return "\"" + encodeTomlString(k) + "\"" -} - -func writeStrings(w io.Writer, s ...string) (int, error) { - var n int - for i := range s { - b, err := io.WriteString(w, s[i]) - n += b - if err != nil { - return n, err - } - } - return n, nil -} - -// WriteTo encode the Tree as Toml and writes it to the writer w. -// Returns the number of bytes written in case of success, or an error if anything happened. -func (t *Tree) WriteTo(w io.Writer) (int64, error) { - return t.writeTo(w, "", "", 0, false) -} - -// ToTomlString generates a human-readable representation of the current tree. -// Output spans multiple lines, and is suitable for ingest by a TOML parser. -// If the conversion cannot be performed, ToString returns a non-nil error. -func (t *Tree) ToTomlString() (string, error) { - b, err := t.Marshal() - if err != nil { - return "", err - } - return string(b), nil -} - -// String generates a human-readable representation of the current tree. -// Alias of ToString. Present to implement the fmt.Stringer interface. -func (t *Tree) String() string { - result, _ := t.ToTomlString() - return result -} - -// ToMap recursively generates a representation of the tree using Go built-in structures. 
-// The following types are used: -// -// * bool -// * float64 -// * int64 -// * string -// * uint64 -// * time.Time -// * map[string]interface{} (where interface{} is any of this list) -// * []interface{} (where interface{} is any of this list) -func (t *Tree) ToMap() map[string]interface{} { - result := map[string]interface{}{} - - for k, v := range t.values { - switch node := v.(type) { - case []*Tree: - var array []interface{} - for _, item := range node { - array = append(array, item.ToMap()) - } - result[k] = array - case *Tree: - result[k] = node.ToMap() - case *tomlValue: - result[k] = node.value - } - } - return result -} diff --git a/vendor/github.com/pelletier/go-toml/.dockerignore b/vendor/github.com/pelletier/go-toml/v2/.dockerignore similarity index 100% rename from vendor/github.com/pelletier/go-toml/.dockerignore rename to vendor/github.com/pelletier/go-toml/v2/.dockerignore diff --git a/vendor/github.com/pelletier/go-toml/v2/.gitattributes b/vendor/github.com/pelletier/go-toml/v2/.gitattributes new file mode 100644 index 00000000..34a0a21a --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/.gitattributes @@ -0,0 +1,4 @@ +* text=auto + +benchmark/benchmark.toml text eol=lf +testdata/** text eol=lf diff --git a/vendor/github.com/pelletier/go-toml/.gitignore b/vendor/github.com/pelletier/go-toml/v2/.gitignore similarity index 89% rename from vendor/github.com/pelletier/go-toml/.gitignore rename to vendor/github.com/pelletier/go-toml/v2/.gitignore index e6ba63a5..4b7c4eda 100644 --- a/vendor/github.com/pelletier/go-toml/.gitignore +++ b/vendor/github.com/pelletier/go-toml/v2/.gitignore @@ -3,3 +3,5 @@ fuzz/ cmd/tomll/tomll cmd/tomljson/tomljson cmd/tomltestgen/tomltestgen +dist +tests/ diff --git a/vendor/github.com/pelletier/go-toml/v2/.golangci.toml b/vendor/github.com/pelletier/go-toml/v2/.golangci.toml new file mode 100644 index 00000000..067db551 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/.golangci.toml @@ -0,0 +1,84 @@ +[service] +golangci-lint-version = "1.39.0" + +[linters-settings.wsl] +allow-assign-and-anything = true + +[linters-settings.exhaustive] +default-signifies-exhaustive = true + +[linters] +disable-all = true +enable = [ + "asciicheck", + "bodyclose", + "cyclop", + "deadcode", + "depguard", + "dogsled", + "dupl", + "durationcheck", + "errcheck", + "errorlint", + "exhaustive", + # "exhaustivestruct", + "exportloopref", + "forbidigo", + # "forcetypeassert", + "funlen", + "gci", + # "gochecknoglobals", + "gochecknoinits", + "gocognit", + "goconst", + "gocritic", + "gocyclo", + "godot", + "godox", + # "goerr113", + "gofmt", + "gofumpt", + "goheader", + "goimports", + "golint", + "gomnd", + # "gomoddirectives", + "gomodguard", + "goprintffuncname", + "gosec", + "gosimple", + "govet", + # "ifshort", + "importas", + "ineffassign", + "lll", + "makezero", + "misspell", + "nakedret", + "nestif", + "nilerr", + # "nlreturn", + "noctx", + "nolintlint", + #"paralleltest", + "prealloc", + "predeclared", + "revive", + "rowserrcheck", + "sqlclosecheck", + "staticcheck", + "structcheck", + "stylecheck", + # "testpackage", + "thelper", + "tparallel", + "typecheck", + "unconvert", + "unparam", + "unused", + "varcheck", + "wastedassign", + "whitespace", + # "wrapcheck", + # "wsl" +] diff --git a/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml new file mode 100644 index 00000000..1d8b69e6 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml @@ -0,0 +1,126 @@ +before: 
+ hooks: + - go mod tidy + - go fmt ./... + - go test ./... +builds: + - id: tomll + main: ./cmd/tomll + binary: tomll + env: + - CGO_ENABLED=0 + flags: + - -trimpath + ldflags: + - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} + mod_timestamp: '{{ .CommitTimestamp }}' + targets: + - linux_amd64 + - linux_arm64 + - linux_arm + - linux_riscv64 + - windows_amd64 + - windows_arm64 + - windows_arm + - darwin_amd64 + - darwin_arm64 + - id: tomljson + main: ./cmd/tomljson + binary: tomljson + env: + - CGO_ENABLED=0 + flags: + - -trimpath + ldflags: + - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} + mod_timestamp: '{{ .CommitTimestamp }}' + targets: + - linux_amd64 + - linux_arm64 + - linux_arm + - linux_riscv64 + - windows_amd64 + - windows_arm64 + - windows_arm + - darwin_amd64 + - darwin_arm64 + - id: jsontoml + main: ./cmd/jsontoml + binary: jsontoml + env: + - CGO_ENABLED=0 + flags: + - -trimpath + ldflags: + - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} + mod_timestamp: '{{ .CommitTimestamp }}' + targets: + - linux_amd64 + - linux_arm64 + - linux_riscv64 + - linux_arm + - windows_amd64 + - windows_arm64 + - windows_arm + - darwin_amd64 + - darwin_arm64 +universal_binaries: + - id: tomll + replace: true + name_template: tomll + - id: tomljson + replace: true + name_template: tomljson + - id: jsontoml + replace: true + name_template: jsontoml +archives: +- id: jsontoml + format: tar.xz + builds: + - jsontoml + files: + - none* + name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}" +- id: tomljson + format: tar.xz + builds: + - tomljson + files: + - none* + name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}" +- id: tomll + format: tar.xz + builds: + - tomll + files: + - none* + name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}" +dockers: + - id: tools + goos: linux + goarch: amd64 + ids: + - jsontoml + - tomljson + - tomll + image_templates: + - "ghcr.io/pelletier/go-toml:latest" + - "ghcr.io/pelletier/go-toml:{{ .Tag }}" + - "ghcr.io/pelletier/go-toml:v{{ .Major }}" + skip_push: false +checksum: + name_template: 'sha256sums.txt' +snapshot: + name_template: "{{ incpatch .Version }}-next" +release: + github: + owner: pelletier + name: go-toml + draft: true + prerelease: auto + mode: replace +changelog: + use: github-native +announce: + skip: true diff --git a/vendor/github.com/pelletier/go-toml/v2/BUILD.bazel b/vendor/github.com/pelletier/go-toml/v2/BUILD.bazel new file mode 100644 index 00000000..718da4d7 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/BUILD.bazel @@ -0,0 +1,24 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go-toml", + srcs = [ + "decode.go", + "doc.go", + "errors.go", + "localtime.go", + "marshaler.go", + "strict.go", + "types.go", + "unmarshaler.go", + ], + importmap = "peridot.resf.org/vendor/github.com/pelletier/go-toml/v2", + importpath = "github.com/pelletier/go-toml/v2", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/pelletier/go-toml/v2/internal/characters", + "//vendor/github.com/pelletier/go-toml/v2/internal/danger", + "//vendor/github.com/pelletier/go-toml/v2/internal/tracker", + "//vendor/github.com/pelletier/go-toml/v2/unstable", + ], +) diff --git a/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md b/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md new file mode 100644 index 00000000..96ecf9e2 --- /dev/null +++ 
b/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md @@ -0,0 +1,193 @@ +# Contributing + +Thank you for your interest in go-toml! We appreciate you considering +a contribution to go-toml! + +The main goal of the project is to provide an easy-to-use and efficient TOML +implementation for Go that gets the job done and gets out of your way – dealing +with TOML is probably not the central piece of your project. + +As go-toml has a single maintainer, time is scarce. All help, big or small, is +more than welcome! + +## Ask questions + +Any question you may have, somebody else might have it too. Always feel free to +ask them on the [discussion board][discussions]. We will try to answer them as +clearly and quickly as possible, time permitting. + +Asking questions also helps us identify areas where the documentation needs +improvement, or new features that weren't envisioned before. Sometimes, a +seemingly innocent question leads to the fix of a bug. Don't hesitate and ask +away! + +[discussions]: https://github.com/pelletier/go-toml/discussions + +## Improve the documentation + +The best way to share your knowledge and experience with go-toml is to improve +the documentation. Fix a typo, clarify an interface, add an example, anything +goes! + +The documentation is present in the [README][readme] and throughout the source +code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. To make a change +to the documentation, create a pull request with your proposed changes. For +simple changes like that, the easiest way to go is probably the "Fork this +project and edit the file" button on GitHub, displayed at the top right of the +file. Unless it's a trivial change (for example a typo), provide a little bit of +context in your pull request description or commit message. + +## Report a bug + +Found a bug? Sorry to hear that :(. Help us and others track it down and fix it by +reporting it. [File a new bug report][bug-report] on the [issues +tracker][issues-tracker]. The template should provide enough guidance on what to +include. When in doubt: add more details! By reducing ambiguity and providing +more information, it decreases back and forth and saves everyone time. + +## Code changes + +Want to contribute a patch? Very happy to hear that! + +First, some high-level rules: + +- A short proposal with some POC code is better than a lengthy piece of text + with no code. Code speaks louder than words. That being said, bigger changes + should probably start with a [discussion][discussions]. +- No backward-incompatible patch will be accepted unless discussed. Sometimes + it's hard, but we try not to break people's programs unless we absolutely have + to. +- If you are writing a new feature or extending an existing one, make sure to + write some documentation. +- Bug fixes need to be accompanied by regression tests. +- New code needs to be tested. +- Your commit messages need to explain why the change is needed, even if already + included in the PR description. + +It does sound like a lot, but those best practices are here to save time overall +and continuously improve the quality of the project, which is something everyone +benefits from. + +### Get started + +The fairly standard code contribution process looks like this: + +1. [Fork the project][fork]. +2. Make your changes, commit on any branch you like. +3. [Open up a pull request][pull-request]. +4. Review, with potential requests for changes. +5. Merge. + +Feel free to ask for help! You can create draft pull requests to gather +some early feedback!
+ +### Run the tests + +You can run tests for go-toml using Go's test tool: `go test -race ./...`. + +During the pull request process, all tests will be run on Linux, Windows, and +macOS on the last two versions of Go. + +However, given GitHub's new policy to _not_ run Actions on pull requests until a +maintainer clicks a button, it is highly recommended that you run them locally +as you make changes. + +### Check coverage + +We use `go tool cover` to compute test coverage. Most code editors have a way to +run and display code coverage, but at the end of the day, we do this: + +``` +go test -covermode=atomic -coverprofile=coverage.out +go tool cover -func=coverage.out +``` + +and verify that the overall percentage of tested code does not go down. This is +a requirement. As a rule of thumb, all lines of code touched by your changes +should be covered. On Unix you can use `./ci.sh coverage -d v2` to check if your +code lowers the coverage. + +### Verify performance + +Go-toml aims to stay efficient. We rely on a set of scenarios executed with Go's +built-in benchmark system. Because of their noisy nature, containers provided by +GitHub Actions cannot be reliably used for benchmarking. As a result, you are +responsible for checking that your changes do not incur a performance penalty. +You can run the following to execute benchmarks: + +``` +go test ./... -bench=. -count=10 +``` + +Benchmark results should be compared against each other with +[benchstat][benchstat]. A typical flow looks like this: + +1. On the `v2` branch, run `go test ./... -bench=. -count 10` and save the output to + a file (for example `old.txt`). +2. Make some code changes. +3. Run `go test ....` again, and save the output to another file (for example + `new.txt`). +4. Run `benchstat old.txt new.txt` to check that time/op does not go up in any + test. + +On Unix you can use `./ci.sh benchmark -d v2` to verify how your code impacts +performance. + +It is highly encouraged to add the benchstat results to your pull request +description. Pull requests that lower performance will receive more scrutiny. + +[benchstat]: https://pkg.go.dev/golang.org/x/perf/cmd/benchstat + +### Style + +Try to look around and follow the same format and structure as the rest of the +code. We enforce using `go fmt` on the whole code base. + +--- + +## Maintainers-only + +### Merge pull request + +Checklist: + +- Passing CI. +- Does not introduce backward-incompatible changes (unless discussed). +- Has relevant doc changes. +- Benchstat does not show performance regression. +- Pull request is [labeled appropriately][pr-labels]. +- Title will be understandable in the changelog. + +1. Merge using "squash and merge". +2. Make sure to edit the commit message to keep all the useful information + nice and clean. +3. Make sure the commit title is clear and contains the PR number (#123). + +### New release + +1. Decide on the next version number. Use semver. Review commits since the last + version to assess. +2. Tag the release. For example: +``` +git checkout v2 +git pull +git tag v2.2.0 +git push --tags +``` +3. CI automatically builds a draft GitHub release. Review it and edit as + necessary. Look for "Other changes". That would indicate a pull request not + labeled properly. Tweak labels and pull request titles until the changelog looks + good for users. +4. Check the "create discussion" box, in the "Releases" category. +5. If the new version is an alpha or beta only, check the pre-release box.
+
+
+[issues-tracker]: https://github.com/pelletier/go-toml/issues
+[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md
+[pkg.go.dev]: https://pkg.go.dev/github.com/pelletier/go-toml
+[readme]: ./README.md
+[fork]: https://help.github.com/articles/fork-a-repo
+[pull-request]: https://help.github.com/en/articles/creating-a-pull-request
+[new-release]: https://github.com/pelletier/go-toml/releases/new
+[gh]: https://github.com/cli/cli
+[pr-labels]: https://github.com/pelletier/go-toml/blob/v2/.github/release.yml
diff --git a/vendor/github.com/pelletier/go-toml/v2/Dockerfile b/vendor/github.com/pelletier/go-toml/v2/Dockerfile
new file mode 100644
index 00000000..b9e93323
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/Dockerfile
@@ -0,0 +1,5 @@
+FROM scratch
+ENV PATH "$PATH:/bin"
+COPY tomll /bin/tomll
+COPY tomljson /bin/tomljson
+COPY jsontoml /bin/jsontoml
diff --git a/vendor/github.com/pelletier/go-toml/LICENSE b/vendor/github.com/pelletier/go-toml/v2/LICENSE
similarity index 94%
rename from vendor/github.com/pelletier/go-toml/LICENSE
rename to vendor/github.com/pelletier/go-toml/v2/LICENSE
index 583bdae6..991e2ae9 100644
--- a/vendor/github.com/pelletier/go-toml/LICENSE
+++ b/vendor/github.com/pelletier/go-toml/v2/LICENSE
@@ -1,6 +1,7 @@
 The MIT License (MIT)
 
-Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton
+go-toml v2
+Copyright (c) 2021 - 2023 Thomas Pelletier
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/github.com/pelletier/go-toml/v2/README.md b/vendor/github.com/pelletier/go-toml/v2/README.md
new file mode 100644
index 00000000..d964b25f
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/README.md
@@ -0,0 +1,576 @@
+# go-toml v2
+
+Go library for the [TOML](https://toml.io/en/) format.
+
+This library supports [TOML v1.0.0](https://toml.io/en/v1.0.0).
+
+[🐞 Bug Reports](https://github.com/pelletier/go-toml/issues)
+
+[💬 Anything else](https://github.com/pelletier/go-toml/discussions)
+
+## Documentation
+
+Full API, examples, and implementation notes are available in the Go
+documentation.
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/pelletier/go-toml/v2.svg)](https://pkg.go.dev/github.com/pelletier/go-toml/v2)
+
+## Import
+
+```go
+import "github.com/pelletier/go-toml/v2"
+```
+
+See [Modules](#Modules).
+
+## Features
+
+### Stdlib behavior
+
+As much as possible, this library is designed to behave similarly to the
+standard library's `encoding/json`.
+
+### Performance
+
+While go-toml favors usability, it is written with performance in mind. Most
+operations should not be shockingly slow. See [benchmarks](#benchmarks).
+
+### Strict mode
+
+`Decoder` can be set to "strict mode", which makes it error when some parts of
+the TOML document were not present in the target structure. This is a great way
+to check for typos. [See example in the documentation][strict].
+
+[strict]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#example-Decoder.DisallowUnknownFields
+
+### Contextualized errors
+
+When most decoding errors occur, go-toml returns [`DecodeError`][decode-err],
+which contains a human-readable contextualized version of the error. For
+example:
+
+```
+1| [server]
+2| path = 100
+ |        ~~~ cannot decode TOML integer into struct field toml_test.Server.Path of type string
+3| port = 50
+```
+
+[decode-err]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#DecodeError
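+A minimal sketch of inspecting that error programmatically (assuming `doc`
+holds the TOML bytes and `MyConfig` is the target struct; `errors` and `fmt`
+come from the standard library):
+
+```go
+var cfg MyConfig
+if err := toml.Unmarshal(doc, &cfg); err != nil {
+	var derr *toml.DecodeError
+	if errors.As(err, &derr) {
+		// String() returns the multi-line, human-readable context shown above.
+		fmt.Println(derr.String())
+		// Position() returns the 1-indexed line and column of the error.
+		row, col := derr.Position()
+		fmt.Println("error at", row, col)
+	}
+}
+```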
+
+### Local date and time support
+
+TOML supports native [local date/times][ldt]. It makes it possible to represent
+a given date, time, or date-time without relation to a timezone or offset. To
+support this use-case, go-toml provides [`LocalDate`][tld], [`LocalTime`][tlt],
+and [`LocalDateTime`][tldt]. Those types can be transformed to and from
+`time.Time`, making them convenient yet unambiguous structures for their
+respective TOML representation.
+
+[ldt]: https://toml.io/en/v1.0.0#local-date-time
+[tld]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDate
+[tlt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalTime
+[tldt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDateTime
+
+### Commented config
+
+Since TOML is often used for configuration files, go-toml can emit documents
+annotated with [comments and commented-out values][comments-example]. For
+example, it can generate the following file:
+
+```toml
+# Host IP to connect to.
+host = '127.0.0.1'
+# Port of the remote server.
+port = 4242
+
+# Encryption parameters (optional)
+# [TLS]
+# cipher = 'AEAD-AES128-GCM-SHA256'
+# version = 'TLS 1.3'
+```
+
+[comments-example]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#example-Marshal-Commented
+
+## Getting started
+
+Given the following struct, let's see how to read it and write it as TOML:
+
+```go
+type MyConfig struct {
+	Version int
+	Name    string
+	Tags    []string
+}
+```
+
+### Unmarshaling
+
+[`Unmarshal`][unmarshal] reads a TOML document and fills a Go structure with its
+content. For example:
+
+```go
+doc := `
+version = 2
+name = "go-toml"
+tags = ["go", "toml"]
+`
+
+var cfg MyConfig
+err := toml.Unmarshal([]byte(doc), &cfg)
+if err != nil {
+	panic(err)
+}
+fmt.Println("version:", cfg.Version)
+fmt.Println("name:", cfg.Name)
+fmt.Println("tags:", cfg.Tags)
+
+// Output:
+// version: 2
+// name: go-toml
+// tags: [go toml]
+```
+
+[unmarshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Unmarshal
+
+### Marshaling
+
+[`Marshal`][marshal] is the opposite of Unmarshal: it represents a Go structure
+as a TOML document:
+
+```go
+cfg := MyConfig{
+	Version: 2,
+	Name:    "go-toml",
+	Tags:    []string{"go", "toml"},
+}
+
+b, err := toml.Marshal(cfg)
+if err != nil {
+	panic(err)
+}
+fmt.Println(string(b))
+
+// Output:
+// Version = 2
+// Name = 'go-toml'
+// Tags = ['go', 'toml']
+```
+
+[marshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Marshal
+
+## Unstable API
+
+This API does not yet follow the backward compatibility guarantees of this
+library. It provides early access to features that may have rough edges or an
+API subject to change.
+
+### Parser
+
+Parser is the unstable API that allows iterative parsing of a TOML document at
+the AST level. See https://pkg.go.dev/github.com/pelletier/go-toml/v2/unstable.
+
+## Benchmarks
+
+Execution time speedup compared to other Go TOML libraries:
+<table>
+    <thead>
+        <tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
+    </thead>
+    <tbody>
+        <tr><td>Marshal/HugoFrontMatter-2</td><td>1.9x</td><td>2.2x</td></tr>
+        <tr><td>Marshal/ReferenceFile/map-2</td><td>1.7x</td><td>2.1x</td></tr>
+        <tr><td>Marshal/ReferenceFile/struct-2</td><td>2.2x</td><td>3.0x</td></tr>
+        <tr><td>Unmarshal/HugoFrontMatter-2</td><td>2.9x</td><td>2.7x</td></tr>
+        <tr><td>Unmarshal/ReferenceFile/map-2</td><td>2.6x</td><td>2.7x</td></tr>
+        <tr><td>Unmarshal/ReferenceFile/struct-2</td><td>4.6x</td><td>5.1x</td></tr>
+    </tbody>
+</table>
+<details><summary>See more</summary>
+<p>The table above has the results of the most common use-cases. The table below
+contains the results of all benchmarks, including unrealistic ones. It is
+provided for completeness.</p>
+
+<table>
+    <thead>
+        <tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
+    </thead>
+    <tbody>
+        <tr><td>Marshal/SimpleDocument/map-2</td><td>1.8x</td><td>2.7x</td></tr>
+        <tr><td>Marshal/SimpleDocument/struct-2</td><td>2.7x</td><td>3.8x</td></tr>
+        <tr><td>Unmarshal/SimpleDocument/map-2</td><td>3.8x</td><td>3.0x</td></tr>
+        <tr><td>Unmarshal/SimpleDocument/struct-2</td><td>5.6x</td><td>4.1x</td></tr>
+        <tr><td>UnmarshalDataset/example-2</td><td>3.0x</td><td>3.2x</td></tr>
+        <tr><td>UnmarshalDataset/code-2</td><td>2.3x</td><td>2.9x</td></tr>
+        <tr><td>UnmarshalDataset/twitter-2</td><td>2.6x</td><td>2.7x</td></tr>
+        <tr><td>UnmarshalDataset/citm_catalog-2</td><td>2.2x</td><td>2.3x</td></tr>
+        <tr><td>UnmarshalDataset/canada-2</td><td>1.8x</td><td>1.5x</td></tr>
+        <tr><td>UnmarshalDataset/config-2</td><td>4.1x</td><td>2.9x</td></tr>
+        <tr><td>geomean</td><td>2.7x</td><td>2.8x</td></tr>
+    </tbody>
+</table>
+<p>This table can be generated with <code>./ci.sh benchmark -a -html</code>.</p>
+</details>
+
+## Modules
+
+go-toml uses Go's standard modules system.
+
+Installation instructions:
+
+- Go ≥ 1.16: Nothing to do. Use the import in your code. The `go` command deals
+  with it automatically.
+- Go ≥ 1.13: `GO111MODULE=on go get github.com/pelletier/go-toml/v2`.
+
+In case of trouble: [Go Modules FAQ][mod-faq].
+
+[mod-faq]: https://github.com/golang/go/wiki/Modules#why-does-installing-a-tool-via-go-get-fail-with-error-cannot-find-main-module
+
+## Tools
+
+Go-toml provides three handy command line tools:
+
+ * `tomljson`: Reads a TOML file and outputs its JSON representation.
+
+   ```
+   $ go install github.com/pelletier/go-toml/v2/cmd/tomljson@latest
+   $ tomljson --help
+   ```
+
+ * `jsontoml`: Reads a JSON file and outputs a TOML representation.
+
+   ```
+   $ go install github.com/pelletier/go-toml/v2/cmd/jsontoml@latest
+   $ jsontoml --help
+   ```
+
+ * `tomll`: Lints and reformats a TOML file.
+
+   ```
+   $ go install github.com/pelletier/go-toml/v2/cmd/tomll@latest
+   $ tomll --help
+   ```
+
+### Docker image
+
+Those tools are also available as a [Docker image][docker]. For example, to use
+`tomljson`:
+
+```
+docker run -i ghcr.io/pelletier/go-toml:v2 tomljson < example.toml
+```
+
+Multiple versions are available on [ghcr.io][docker].
+
+[docker]: https://github.com/pelletier/go-toml/pkgs/container/go-toml
+
+## Migrating from v1
+
+This section describes the differences between v1 and v2, with some pointers on
+how to get the original behavior when possible.
+
+### Decoding / Unmarshal
+
+#### Automatic field name guessing
+
+When unmarshaling to a struct, if a key in the TOML document does not exactly
+match the name of a struct field or any of the `toml`-tagged fields, v1 tries
+multiple variations of the key ([code][v1-keys]).
+
+V2 instead does a case-insensitive matching, like `encoding/json`.
+
+This could impact you if you are relying on casing to differentiate two fields,
+and one of them is not using the `toml` struct tag. The recommended solution is
+to be specific about tag names for those fields using the `toml` struct tag.
+
+[v1-keys]: https://github.com/pelletier/go-toml/blob/a2e52561804c6cd9392ebf0048ca64fe4af67a43/marshal.go#L775-L781
+
+#### Ignore preexisting value in interface
+
+When decoding into a non-nil `interface{}`, go-toml v1 uses the type of the
+element in the interface to decode the object. For example:
+
+```go
+type inner struct {
+	B interface{}
+}
+type doc struct {
+	A interface{}
+}
+
+d := doc{
+	A: inner{
+		B: "Before",
+	},
+}
+
+data := `
+[A]
+B = "After"
+`
+
+toml.Unmarshal([]byte(data), &d)
+fmt.Printf("toml v1: %#v\n", d)
+
+// toml v1: main.doc{A:main.inner{B:"After"}}
+```
+
+In this case, field `A` is of type `interface{}`, containing an `inner` struct.
+V1 sees that type and uses it when decoding the object.
+
+When decoding an object into an `interface{}`, V2 instead disregards whatever
+value the `interface{}` may contain and replaces it with a
+`map[string]interface{}`. With the same data structure as above, here is what
+the result looks like:
+
+```go
+toml.Unmarshal([]byte(data), &d)
+fmt.Printf("toml v2: %#v\n", d)
+
+// toml v2: main.doc{A:map[string]interface {}{"B":"After"}}
+```
+
+This is to match `encoding/json`'s behavior. There is no way to make the v2
+decoder behave like v1.
+
+#### Values out of array bounds ignored
+
+When decoding into an array, v1 returns an error when the number of elements
+contained in the doc is greater than the length of the array. For example:
+
+```go
+type doc struct {
+	A [2]string
+}
+d := doc{}
+err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d)
+fmt.Println(err)
+
+// (1, 1): unmarshal: TOML array length (3) exceeds destination array length (2)
+```
+
+In the same situation, v2 ignores the last value:
+
+```go
+err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d)
+fmt.Println("err:", err, "d:", d)
+// err: <nil> d: {[one two]}
+```
+
+This is to match `encoding/json`'s behavior. There is no way to make the v2
+decoder behave like v1.
+
+#### Support for `toml.Unmarshaler` has been dropped
+
+This method was not widely used, was poorly defined, and added a lot of
+complexity. A similar effect can be achieved by implementing the
+`encoding.TextUnmarshaler` interface and using strings, as sketched below.
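+A minimal sketch of that replacement (the `Threshold` type and its `"low-high"`
+string format are hypothetical, purely for illustration):
+
+```go
+// Threshold used to implement v1's toml.Unmarshaler. With v2 it implements
+// encoding.TextUnmarshaler instead, and travels as a TOML string.
+type Threshold struct {
+	Low, High int
+}
+
+func (t *Threshold) UnmarshalText(text []byte) error {
+	_, err := fmt.Sscanf(string(text), "%d-%d", &t.Low, &t.High)
+	return err
+}
+
+var cfg struct{ Threshold Threshold }
+err := toml.Unmarshal([]byte(`threshold = "10-20"`), &cfg)
+fmt.Println(cfg.Threshold, err)
+
+// Output:
+// {10 20} <nil>
+```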
+
+#### Support for `default` struct tag has been dropped
+
+This feature added complexity and a poorly defined API for an effect that can
+be accomplished outside of the library.
+
+It does not seem like other format parsers in Go support that feature (the
+project referenced in the original ticket #202 has not been updated since
+2017). Given that go-toml v2 should not touch values not in the document, the
+same effect can be achieved by pre-filling the struct with defaults (libraries
+like [go-defaults][go-defaults] can help). Also, string representation is not
+well defined for all types: it creates issues like #278.
+
+The recommended replacement is pre-filling the struct before unmarshaling.
+
+[go-defaults]: https://github.com/mcuadros/go-defaults
+
+#### `toml.Tree` replacement
+
+This structure was the initial attempt at providing a document model for
+go-toml. It allows manipulating the structure of any document, encoding and
+decoding from their TOML representation. While a more robust feature was
+initially planned in go-toml v2, this has been ultimately [removed from
+scope][nodoc] of this library, with no plan to add it back at the moment. The
+closest equivalent at the moment would be to unmarshal into an `interface{}`
+and use type assertions and/or reflection to manipulate the arbitrary
+structure. However this would fall short of providing all of the TOML features
+such as adding comments and being specific about whitespace.
+
+#### `toml.Position` is not retrievable anymore
+
+The API for retrieving the position (line, column) of a specific TOML element
+does not exist anymore. This was done to minimize the amount of concepts
+introduced by the library (query path), and avoid the performance hit related
+to storing positions in the absence of a document model, for a feature that
+seemed to have little use. Errors however have gained more detailed position
+information. Position retrieval seems better fitted for a document model, which
+has been [removed from the scope][nodoc] of go-toml v2 at the moment.
+
+### Encoding / Marshal
+
+#### Default struct fields order
+
+V1 emits struct fields ordered alphabetically by default. V2 struct fields are
+emitted in the order they are defined. For example:
+
+```go
+type S struct {
+	B string
+	A string
+}
+
+data := S{
+	B: "B",
+	A: "A",
+}
+
+b, _ := tomlv1.Marshal(data)
+fmt.Println("v1:\n" + string(b))
+
+b, _ = tomlv2.Marshal(data)
+fmt.Println("v2:\n" + string(b))
+
+// Output:
+// v1:
+// A = "A"
+// B = "B"
+
+// v2:
+// B = 'B'
+// A = 'A'
+```
+
+There is no way to make the v2 encoder behave like v1. A workaround could be to
+manually sort the fields alphabetically in the struct definition, or to
+generate struct types using `reflect.StructOf`, as in the sketch below.
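+A sketch of the `reflect.StructOf` workaround, reusing the `S` type from the
+example above (the `marshalSorted` helper is hypothetical and only handles
+flat structs of exported fields):
+
+```go
+// marshalSorted copies v's fields into a dynamically built struct type whose
+// fields are declared in alphabetical order. Since the v2 encoder emits
+// fields in declaration order, the output ends up sorted like v1's.
+func marshalSorted(v interface{}) ([]byte, error) {
+	rv := reflect.ValueOf(v)
+	rt := rv.Type()
+
+	fields := make([]reflect.StructField, rt.NumField())
+	for i := range fields {
+		fields[i] = rt.Field(i)
+	}
+	sort.Slice(fields, func(i, j int) bool { return fields[i].Name < fields[j].Name })
+
+	sorted := reflect.New(reflect.StructOf(fields)).Elem()
+	for _, f := range fields {
+		sorted.FieldByName(f.Name).Set(rv.FieldByName(f.Name))
+	}
+	return toml.Marshal(sorted.Interface())
+}
+
+b, _ := marshalSorted(S{B: "B", A: "A"})
+fmt.Println(string(b))
+
+// Output:
+// A = 'A'
+// B = 'B'
+```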
+
+#### No indentation by default
+
+V1 automatically indents the content of tables by default. V2 does not.
+However, the same behavior can be obtained using
+[`Encoder.SetIndentTables`][sit]. For example:
+
+```go
+data := map[string]interface{}{
+	"table": map[string]string{
+		"key": "value",
+	},
+}
+
+b, _ := tomlv1.Marshal(data)
+fmt.Println("v1:\n" + string(b))
+
+b, _ = tomlv2.Marshal(data)
+fmt.Println("v2:\n" + string(b))
+
+buf := bytes.Buffer{}
+enc := tomlv2.NewEncoder(&buf)
+enc.SetIndentTables(true)
+enc.Encode(data)
+fmt.Println("v2 Encoder:\n" + string(buf.Bytes()))
+
+// Output:
+// v1:
+//
+// [table]
+//   key = "value"
+//
+// v2:
+// [table]
+// key = 'value'
+//
+//
+// v2 Encoder:
+// [table]
+//   key = 'value'
+```
+
+[sit]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Encoder.SetIndentTables
+
+#### Keys and strings are single quoted
+
+V1 always uses double quotes (`"`) around strings and keys that cannot be
+represented bare (unquoted). V2 uses single quotes (`'`) instead by default,
+unless a character cannot be represented, in which case it falls back to double
+quotes. As a result of this change, `Encoder.QuoteMapKeys` has been removed, as
+it is not useful anymore.
+
+There is no way to make the v2 encoder behave like v1.
+
+#### `TextMarshaler` emits as a string, not TOML
+
+Types that implement [`encoding.TextMarshaler`][tm] can emit arbitrary TOML in
+v1. The encoder would append the result to the output directly. In v2 the
+result is wrapped in a string. As a result, this interface cannot be
+implemented by the root object.
+
+There is no way to make the v2 encoder behave like v1.
+
+[tm]: https://golang.org/pkg/encoding/#TextMarshaler
+
+#### `Encoder.CompactComments` has been removed
+
+Emitting compact comments is now the default behavior of go-toml. This option
+is not necessary anymore.
+
+#### Struct tags have been merged
+
+V1 used to provide multiple struct tags: `comment`, `commented`, `multiline`,
+`toml`, and `omitempty`. To behave more like the standard library, v2 has
+merged `toml`, `multiline`, `commented`, and `omitempty`. For example:
+
+```go
+type doc struct {
+	// v1
+	F string `toml:"field" multiline:"true" omitempty:"true" commented:"true"`
+	// v2
+	F string `toml:"field,multiline,omitempty,commented"`
+}
+```
+
+As a result, the `Encoder.SetTag*` methods have been removed, as there is just
+one tag now.
+
+#### `Encoder.ArraysWithOneElementPerLine` has been renamed
+
+The new name is `Encoder.SetArraysMultiline`. The behavior should be the same.
+
+#### `Encoder.Indentation` has been renamed
+
+The new name is `Encoder.SetIndentSymbol`. The behavior should be the same.
+
+#### Embedded structs behave like stdlib
+
+V1 defaults to merging embedded struct fields into the embedding struct. This
+behavior was unexpected because it does not follow the standard library. To
+avoid breaking backward compatibility, the `Encoder.PromoteAnonymous` method
+was added to make the encoder behave correctly. Given backward compatibility
+is not a problem anymore, v2 does the right thing by default: it follows the
+behavior of `encoding/json`. `Encoder.PromoteAnonymous` has been removed.
+
+[nodoc]: https://github.com/pelletier/go-toml/discussions/506#discussioncomment-1526038
+
+### `query`
+
+go-toml v1 provided the [`go-toml/query`][query] package. It allowed running
+JSONPath-style queries on TOML files. This feature is not available in v2. For
+a replacement, check out [dasel][dasel].
+
+This package has been removed because it was essentially not supported anymore
+(last commit May 2020), increased the complexity of the code base, and more
+complete solutions exist out there.
+
+[query]: https://github.com/pelletier/go-toml/tree/f99d6bbca119636aeafcf351ee52b3d202782627/query
+[dasel]: https://github.com/TomWright/dasel
+
+## Versioning
+
+Except for parts explicitly marked otherwise, go-toml follows [Semantic
+Versioning](https://semver.org). The supported version of
+[TOML](https://github.com/toml-lang/toml) is indicated at the beginning of this
+document. The last two major versions of Go are supported (see [Go Release
+Policy](https://golang.org/doc/devel/release.html#policy)).
+
+## License
+
+The MIT License (MIT). Read [LICENSE](LICENSE).
diff --git a/vendor/github.com/pelletier/go-toml/v2/SECURITY.md b/vendor/github.com/pelletier/go-toml/v2/SECURITY.md
new file mode 100644
index 00000000..d4d554fd
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/SECURITY.md
@@ -0,0 +1,16 @@
+# Security Policy
+
+## Supported Versions
+
+| Version    | Supported          |
+| ---------- | ------------------ |
+| Latest 2.x | :white_check_mark: |
+| All 1.x    | :x:                |
+| All 0.x    | :x:                |
+
+## Reporting a Vulnerability
+
+Email a vulnerability report to `security@pelletier.codes`. Make sure to include
+as many details as possible to reproduce the vulnerability. This is a
+side-project: I will try to get back to you as quickly as possible, time
+permitting in my personal life. Providing a working patch helps very much!
diff --git a/vendor/github.com/pelletier/go-toml/v2/ci.sh b/vendor/github.com/pelletier/go-toml/v2/ci.sh
new file mode 100644
index 00000000..86217a9b
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/ci.sh
@@ -0,0 +1,284 @@
+#!/usr/bin/env bash
+
+
+stderr() {
+    echo "$@" 1>&2
+}
+
+usage() {
+    b=$(basename "$0")
+    echo $b: ERROR: "$@" 1>&2
+
+    cat 1>&2 <<EOF
+usage: $b [coverage|benchmark] [-d BRANCH] [-a [-html]] [BRANCH]
+EOF
+    exit 1
+}
+
+cover() {
+    branch="${1}"
+    dir="$(mktemp -d)"
+
+    stderr "Executing coverage for ${branch} at ${dir}"
+
+    if [ "${branch}" = "HEAD" ]; then
+        cp -r . "${dir}/"
+    else
+        git worktree add "$dir" "$branch"
+    fi
+
+    pushd "$dir"
+    go test -covermode=atomic -coverprofile=coverage.out ./...
+    go tool cover -func=coverage.out
+    echo "Coverage profile for ${branch}: ${dir}/coverage.out" >&2
+    popd
+
+    if [ "${branch}" != "HEAD" ]; then
+        git worktree remove --force "$dir"
+    fi
+}
+
+coverage() {
+    case "$1" in
+    -d)
+        shift
+        target="${1?Need to provide a target branch argument}"
+
+        output_dir="$(mktemp -d)"
+        target_out="${output_dir}/target.txt"
+        head_out="${output_dir}/head.txt"
+
+        cover "${target}" > "${target_out}"
+        cover "HEAD" > "${head_out}"
+
+        cat "${target_out}"
+        cat "${head_out}"
+
+        echo ""
+
+        target_pct="$(tail -n2 ${target_out} | head -n1 | sed -E 's/.*total.*\t([0-9.]+)%.*/\1/')"
+        head_pct="$(tail -n2 ${head_out} | head -n1 | sed -E 's/.*total.*\t([0-9.]+)%/\1/')"
+        echo "Results: ${target} ${target_pct}% HEAD ${head_pct}%"
+
+        delta_pct=$(echo "$head_pct - $target_pct" | bc -l)
+        echo "Delta: ${delta_pct}"
+
+        if [[ $delta_pct = \-* ]]; then
+            echo "Regression!";
+
+            target_diff="${output_dir}/target.diff.txt"
+            head_diff="${output_dir}/head.diff.txt"
+            cat "${target_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${target_diff}"
+            cat "${head_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${head_diff}"
+
+            diff --side-by-side --suppress-common-lines "${target_diff}" "${head_diff}"
+            return 1
+        fi
+        return 0
+        ;;
+    esac
+
+    cover "${1-HEAD}"
+}
+
+bench() {
+    branch="${1}"
+    out="${2}"
+    replace="${3}"
+    dir="$(mktemp -d)"
+
+    stderr "Executing benchmark for ${branch} at ${dir}"
+
+    if [ "${branch}" = "HEAD" ]; then
+        cp -r . 
"${dir}/" + else + git worktree add "$dir" "$branch" + fi + + pushd "$dir" + + if [ "${replace}" != "" ]; then + find ./benchmark/ -iname '*.go' -exec sed -i -E "s|github.com/pelletier/go-toml/v2|${replace}|g" {} \; + go get "${replace}" + fi + + export GOMAXPROCS=2 + go test '-bench=^Benchmark(Un)?[mM]arshal' -count=10 -run=Nothing ./... | tee "${out}" + popd + + if [ "${branch}" != "HEAD" ]; then + git worktree remove --force "$dir" + fi +} + +fmktemp() { + if mktemp --version &> /dev/null; then + # GNU + mktemp --suffix=-$1 + else + # BSD + mktemp -t $1 + fi +} + +benchstathtml() { +python3 - $1 <<'EOF' +import sys + +lines = [] +stop = False + +with open(sys.argv[1]) as f: + for line in f.readlines(): + line = line.strip() + if line == "": + stop = True + if not stop: + lines.append(line.split(',')) + +results = [] +for line in reversed(lines[2:]): + if len(line) < 8 or line[0] == "": + continue + v2 = float(line[1]) + results.append([ + line[0].replace("-32", ""), + "%.1fx" % (float(line[3])/v2), # v1 + "%.1fx" % (float(line[7])/v2), # bs + ]) +# move geomean to the end +results.append(results[0]) +del results[0] + + +def printtable(data): + print(""" + + + + + """) + + for r in data: + print(" ".format(*r)) + + print(""" +
+    </tbody>
+</table>""")
+
+
+def match(x):
+    return "ReferenceFile" in x[0] or "HugoFrontMatter" in x[0]
+
+above = [x for x in results if match(x)]
+below = [x for x in results if not match(x)]
+
+printtable(above)
+print("<details><summary>See more</summary>")
+print("""<p>The table above has the results of the most common use-cases. The table below
+contains the results of all benchmarks, including unrealistic ones. It is
+provided for completeness.</p>""")
+printtable(below)
+print('<p>This table can be generated with <code>./ci.sh benchmark -a -html</code>.</p>')
+print("</details>
") + +EOF +} + +benchmark() { + case "$1" in + -d) + shift + target="${1?Need to provide a target branch argument}" + + old=`fmktemp ${target}` + bench "${target}" "${old}" + + new=`fmktemp HEAD` + bench HEAD "${new}" + + benchstat "${old}" "${new}" + return 0 + ;; + -a) + shift + + v2stats=`fmktemp go-toml-v2` + bench HEAD "${v2stats}" "github.com/pelletier/go-toml/v2" + v1stats=`fmktemp go-toml-v1` + bench HEAD "${v1stats}" "github.com/pelletier/go-toml" + bsstats=`fmktemp bs-toml` + bench HEAD "${bsstats}" "github.com/BurntSushi/toml" + + cp "${v2stats}" go-toml-v2.txt + cp "${v1stats}" go-toml-v1.txt + cp "${bsstats}" bs-toml.txt + + if [ "$1" = "-html" ]; then + tmpcsv=`fmktemp csv` + benchstat -format csv go-toml-v2.txt go-toml-v1.txt bs-toml.txt > $tmpcsv + benchstathtml $tmpcsv + else + benchstat go-toml-v2.txt go-toml-v1.txt bs-toml.txt + fi + + rm -f go-toml-v2.txt go-toml-v1.txt bs-toml.txt + return $? + esac + + bench "${1-HEAD}" `mktemp` +} + +case "$1" in + coverage) shift; coverage $@;; + benchmark) shift; benchmark $@;; + *) usage "bad argument $1";; +esac diff --git a/vendor/github.com/pelletier/go-toml/v2/decode.go b/vendor/github.com/pelletier/go-toml/v2/decode.go new file mode 100644 index 00000000..f0ec3b17 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/decode.go @@ -0,0 +1,550 @@ +package toml + +import ( + "fmt" + "math" + "strconv" + "time" + + "github.com/pelletier/go-toml/v2/unstable" +) + +func parseInteger(b []byte) (int64, error) { + if len(b) > 2 && b[0] == '0' { + switch b[1] { + case 'x': + return parseIntHex(b) + case 'b': + return parseIntBin(b) + case 'o': + return parseIntOct(b) + default: + panic(fmt.Errorf("invalid base '%c', should have been checked by scanIntOrFloat", b[1])) + } + } + + return parseIntDec(b) +} + +func parseLocalDate(b []byte) (LocalDate, error) { + // full-date = date-fullyear "-" date-month "-" date-mday + // date-fullyear = 4DIGIT + // date-month = 2DIGIT ; 01-12 + // date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on month/year + var date LocalDate + + if len(b) != 10 || b[4] != '-' || b[7] != '-' { + return date, unstable.NewParserError(b, "dates are expected to have the format YYYY-MM-DD") + } + + var err error + + date.Year, err = parseDecimalDigits(b[0:4]) + if err != nil { + return LocalDate{}, err + } + + date.Month, err = parseDecimalDigits(b[5:7]) + if err != nil { + return LocalDate{}, err + } + + date.Day, err = parseDecimalDigits(b[8:10]) + if err != nil { + return LocalDate{}, err + } + + if !isValidDate(date.Year, date.Month, date.Day) { + return LocalDate{}, unstable.NewParserError(b, "impossible date") + } + + return date, nil +} + +func parseDecimalDigits(b []byte) (int, error) { + v := 0 + + for i, c := range b { + if c < '0' || c > '9' { + return 0, unstable.NewParserError(b[i:i+1], "expected digit (0-9)") + } + v *= 10 + v += int(c - '0') + } + + return v, nil +} + +func parseDateTime(b []byte) (time.Time, error) { + // offset-date-time = full-date time-delim full-time + // full-time = partial-time time-offset + // time-offset = "Z" / time-numoffset + // time-numoffset = ( "+" / "-" ) time-hour ":" time-minute + + dt, b, err := parseLocalDateTime(b) + if err != nil { + return time.Time{}, err + } + + var zone *time.Location + + if len(b) == 0 { + // parser should have checked that when assigning the date time node + panic("date time should have a timezone") + } + + if b[0] == 'Z' || b[0] == 'z' { + b = b[1:] + zone = time.UTC + } else { + const dateTimeByteLen = 6 + if len(b) != 
dateTimeByteLen { + return time.Time{}, unstable.NewParserError(b, "invalid date-time timezone") + } + var direction int + switch b[0] { + case '-': + direction = -1 + case '+': + direction = +1 + default: + return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset character") + } + + if b[3] != ':' { + return time.Time{}, unstable.NewParserError(b[3:4], "expected a : separator") + } + + hours, err := parseDecimalDigits(b[1:3]) + if err != nil { + return time.Time{}, err + } + if hours > 23 { + return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset hours") + } + + minutes, err := parseDecimalDigits(b[4:6]) + if err != nil { + return time.Time{}, err + } + if minutes > 59 { + return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset minutes") + } + + seconds := direction * (hours*3600 + minutes*60) + if seconds == 0 { + zone = time.UTC + } else { + zone = time.FixedZone("", seconds) + } + b = b[dateTimeByteLen:] + } + + if len(b) > 0 { + return time.Time{}, unstable.NewParserError(b, "extra bytes at the end of the timezone") + } + + t := time.Date( + dt.Year, + time.Month(dt.Month), + dt.Day, + dt.Hour, + dt.Minute, + dt.Second, + dt.Nanosecond, + zone) + + return t, nil +} + +func parseLocalDateTime(b []byte) (LocalDateTime, []byte, error) { + var dt LocalDateTime + + const localDateTimeByteMinLen = 11 + if len(b) < localDateTimeByteMinLen { + return dt, nil, unstable.NewParserError(b, "local datetimes are expected to have the format YYYY-MM-DDTHH:MM:SS[.NNNNNNNNN]") + } + + date, err := parseLocalDate(b[:10]) + if err != nil { + return dt, nil, err + } + dt.LocalDate = date + + sep := b[10] + if sep != 'T' && sep != ' ' && sep != 't' { + return dt, nil, unstable.NewParserError(b[10:11], "datetime separator is expected to be T or a space") + } + + t, rest, err := parseLocalTime(b[11:]) + if err != nil { + return dt, nil, err + } + dt.LocalTime = t + + return dt, rest, nil +} + +// parseLocalTime is a bit different because it also returns the remaining +// []byte that is didn't need. This is to allow parseDateTime to parse those +// remaining bytes as a timezone. +func parseLocalTime(b []byte) (LocalTime, []byte, error) { + var ( + nspow = [10]int{0, 1e8, 1e7, 1e6, 1e5, 1e4, 1e3, 1e2, 1e1, 1e0} + t LocalTime + ) + + // check if b matches to have expected format HH:MM:SS[.NNNNNN] + const localTimeByteLen = 8 + if len(b) < localTimeByteLen { + return t, nil, unstable.NewParserError(b, "times are expected to have the format HH:MM:SS[.NNNNNN]") + } + + var err error + + t.Hour, err = parseDecimalDigits(b[0:2]) + if err != nil { + return t, nil, err + } + + if t.Hour > 23 { + return t, nil, unstable.NewParserError(b[0:2], "hour cannot be greater 23") + } + if b[2] != ':' { + return t, nil, unstable.NewParserError(b[2:3], "expecting colon between hours and minutes") + } + + t.Minute, err = parseDecimalDigits(b[3:5]) + if err != nil { + return t, nil, err + } + if t.Minute > 59 { + return t, nil, unstable.NewParserError(b[3:5], "minutes cannot be greater 59") + } + if b[5] != ':' { + return t, nil, unstable.NewParserError(b[5:6], "expecting colon between minutes and seconds") + } + + t.Second, err = parseDecimalDigits(b[6:8]) + if err != nil { + return t, nil, err + } + + if t.Second > 60 { + return t, nil, unstable.NewParserError(b[6:8], "seconds cannot be greater 60") + } + + b = b[8:] + + if len(b) >= 1 && b[0] == '.' 
{ + frac := 0 + precision := 0 + digits := 0 + + for i, c := range b[1:] { + if !isDigit(c) { + if i == 0 { + return t, nil, unstable.NewParserError(b[0:1], "need at least one digit after fraction point") + } + break + } + digits++ + + const maxFracPrecision = 9 + if i >= maxFracPrecision { + // go-toml allows decoding fractional seconds + // beyond the supported precision of 9 + // digits. It truncates the fractional component + // to the supported precision and ignores the + // remaining digits. + // + // https://github.com/pelletier/go-toml/discussions/707 + continue + } + + frac *= 10 + frac += int(c - '0') + precision++ + } + + if precision == 0 { + return t, nil, unstable.NewParserError(b[:1], "nanoseconds need at least one digit") + } + + t.Nanosecond = frac * nspow[precision] + t.Precision = precision + + return t, b[1+digits:], nil + } + return t, b, nil +} + +//nolint:cyclop +func parseFloat(b []byte) (float64, error) { + if len(b) == 4 && (b[0] == '+' || b[0] == '-') && b[1] == 'n' && b[2] == 'a' && b[3] == 'n' { + return math.NaN(), nil + } + + cleaned, err := checkAndRemoveUnderscoresFloats(b) + if err != nil { + return 0, err + } + + if cleaned[0] == '.' { + return 0, unstable.NewParserError(b, "float cannot start with a dot") + } + + if cleaned[len(cleaned)-1] == '.' { + return 0, unstable.NewParserError(b, "float cannot end with a dot") + } + + dotAlreadySeen := false + for i, c := range cleaned { + if c == '.' { + if dotAlreadySeen { + return 0, unstable.NewParserError(b[i:i+1], "float can have at most one decimal point") + } + if !isDigit(cleaned[i-1]) { + return 0, unstable.NewParserError(b[i-1:i+1], "float decimal point must be preceded by a digit") + } + if !isDigit(cleaned[i+1]) { + return 0, unstable.NewParserError(b[i:i+2], "float decimal point must be followed by a digit") + } + dotAlreadySeen = true + } + } + + start := 0 + if cleaned[0] == '+' || cleaned[0] == '-' { + start = 1 + } + if cleaned[start] == '0' && len(cleaned) > start+1 && isDigit(cleaned[start+1]) { + return 0, unstable.NewParserError(b, "float integer part cannot have leading zeroes") + } + + f, err := strconv.ParseFloat(string(cleaned), 64) + if err != nil { + return 0, unstable.NewParserError(b, "unable to parse float: %w", err) + } + + return f, nil +} + +func parseIntHex(b []byte) (int64, error) { + cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:]) + if err != nil { + return 0, err + } + + i, err := strconv.ParseInt(string(cleaned), 16, 64) + if err != nil { + return 0, unstable.NewParserError(b, "couldn't parse hexadecimal number: %w", err) + } + + return i, nil +} + +func parseIntOct(b []byte) (int64, error) { + cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:]) + if err != nil { + return 0, err + } + + i, err := strconv.ParseInt(string(cleaned), 8, 64) + if err != nil { + return 0, unstable.NewParserError(b, "couldn't parse octal number: %w", err) + } + + return i, nil +} + +func parseIntBin(b []byte) (int64, error) { + cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:]) + if err != nil { + return 0, err + } + + i, err := strconv.ParseInt(string(cleaned), 2, 64) + if err != nil { + return 0, unstable.NewParserError(b, "couldn't parse binary number: %w", err) + } + + return i, nil +} + +func isSign(b byte) bool { + return b == '+' || b == '-' +} + +func parseIntDec(b []byte) (int64, error) { + cleaned, err := checkAndRemoveUnderscoresIntegers(b) + if err != nil { + return 0, err + } + + startIdx := 0 + + if isSign(cleaned[0]) { + startIdx++ + } + + if len(cleaned) > 
startIdx+1 && cleaned[startIdx] == '0' { + return 0, unstable.NewParserError(b, "leading zero not allowed on decimal number") + } + + i, err := strconv.ParseInt(string(cleaned), 10, 64) + if err != nil { + return 0, unstable.NewParserError(b, "couldn't parse decimal number: %w", err) + } + + return i, nil +} + +func checkAndRemoveUnderscoresIntegers(b []byte) ([]byte, error) { + start := 0 + if b[start] == '+' || b[start] == '-' { + start++ + } + + if len(b) == start { + return b, nil + } + + if b[start] == '_' { + return nil, unstable.NewParserError(b[start:start+1], "number cannot start with underscore") + } + + if b[len(b)-1] == '_' { + return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore") + } + + // fast path + i := 0 + for ; i < len(b); i++ { + if b[i] == '_' { + break + } + } + if i == len(b) { + return b, nil + } + + before := false + cleaned := make([]byte, i, len(b)) + copy(cleaned, b) + + for i++; i < len(b); i++ { + c := b[i] + if c == '_' { + if !before { + return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores") + } + before = false + } else { + before = true + cleaned = append(cleaned, c) + } + } + + return cleaned, nil +} + +func checkAndRemoveUnderscoresFloats(b []byte) ([]byte, error) { + if b[0] == '_' { + return nil, unstable.NewParserError(b[0:1], "number cannot start with underscore") + } + + if b[len(b)-1] == '_' { + return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore") + } + + // fast path + i := 0 + for ; i < len(b); i++ { + if b[i] == '_' { + break + } + } + if i == len(b) { + return b, nil + } + + before := false + cleaned := make([]byte, 0, len(b)) + + for i := 0; i < len(b); i++ { + c := b[i] + + switch c { + case '_': + if !before { + return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores") + } + if i < len(b)-1 && (b[i+1] == 'e' || b[i+1] == 'E') { + return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore before exponent") + } + before = false + case '+', '-': + // signed exponents + cleaned = append(cleaned, c) + before = false + case 'e', 'E': + if i < len(b)-1 && b[i+1] == '_' { + return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after exponent") + } + cleaned = append(cleaned, c) + case '.': + if i < len(b)-1 && b[i+1] == '_' { + return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after decimal point") + } + if i > 0 && b[i-1] == '_' { + return nil, unstable.NewParserError(b[i-1:i], "cannot have underscore before decimal point") + } + cleaned = append(cleaned, c) + default: + before = true + cleaned = append(cleaned, c) + } + } + + return cleaned, nil +} + +// isValidDate checks if a provided date is a date that exists. +func isValidDate(year int, month int, day int) bool { + return month > 0 && month < 13 && day > 0 && day <= daysIn(month, year) +} + +// daysBefore[m] counts the number of days in a non-leap year +// before month m begins. There is an entry for m=12, counting +// the number of days before January of next year (365). 
+var daysBefore = [...]int32{ + 0, + 31, + 31 + 28, + 31 + 28 + 31, + 31 + 28 + 31 + 30, + 31 + 28 + 31 + 30 + 31, + 31 + 28 + 31 + 30 + 31 + 30, + 31 + 28 + 31 + 30 + 31 + 30 + 31, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31, +} + +func daysIn(m int, year int) int { + if m == 2 && isLeap(year) { + return 29 + } + return int(daysBefore[m] - daysBefore[m-1]) +} + +func isLeap(year int) bool { + return year%4 == 0 && (year%100 != 0 || year%400 == 0) +} + +func isDigit(r byte) bool { + return r >= '0' && r <= '9' +} diff --git a/vendor/github.com/pelletier/go-toml/v2/doc.go b/vendor/github.com/pelletier/go-toml/v2/doc.go new file mode 100644 index 00000000..b7bc599b --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/doc.go @@ -0,0 +1,2 @@ +// Package toml is a library to read and write TOML documents. +package toml diff --git a/vendor/github.com/pelletier/go-toml/v2/errors.go b/vendor/github.com/pelletier/go-toml/v2/errors.go new file mode 100644 index 00000000..309733f1 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/errors.go @@ -0,0 +1,252 @@ +package toml + +import ( + "fmt" + "strconv" + "strings" + + "github.com/pelletier/go-toml/v2/internal/danger" + "github.com/pelletier/go-toml/v2/unstable" +) + +// DecodeError represents an error encountered during the parsing or decoding +// of a TOML document. +// +// In addition to the error message, it contains the position in the document +// where it happened, as well as a human-readable representation that shows +// where the error occurred in the document. +type DecodeError struct { + message string + line int + column int + key Key + + human string +} + +// StrictMissingError occurs in a TOML document that does not have a +// corresponding field in the target value. It contains all the missing fields +// in Errors. +// +// Emitted by Decoder when DisallowUnknownFields() was called. +type StrictMissingError struct { + // One error per field that could not be found. + Errors []DecodeError +} + +// Error returns the canonical string for this error. +func (s *StrictMissingError) Error() string { + return "strict mode: fields in the document are missing in the target struct" +} + +// String returns a human readable description of all errors. +func (s *StrictMissingError) String() string { + var buf strings.Builder + + for i, e := range s.Errors { + if i > 0 { + buf.WriteString("\n---\n") + } + + buf.WriteString(e.String()) + } + + return buf.String() +} + +type Key []string + +// Error returns the error message contained in the DecodeError. +func (e *DecodeError) Error() string { + return "toml: " + e.message +} + +// String returns the human-readable contextualized error. This string is multi-line. +func (e *DecodeError) String() string { + return e.human +} + +// Position returns the (line, column) pair indicating where the error +// occurred in the document. Positions are 1-indexed. +func (e *DecodeError) Position() (row int, column int) { + return e.line, e.column +} + +// Key that was being processed when the error occurred. The key is present only +// if this DecodeError is part of a StrictMissingError. +func (e *DecodeError) Key() Key { + return e.key +} + +// decodeErrorFromHighlight creates a DecodeError referencing a highlighted +// range of bytes from document. 
+// +// highlight needs to be a sub-slice of document, or this function panics. +// +// The function copies all bytes used in DecodeError, so that document and +// highlight can be freely deallocated. +// +//nolint:funlen +func wrapDecodeError(document []byte, de *unstable.ParserError) *DecodeError { + offset := danger.SubsliceOffset(document, de.Highlight) + + errMessage := de.Error() + errLine, errColumn := positionAtEnd(document[:offset]) + before, after := linesOfContext(document, de.Highlight, offset, 3) + + var buf strings.Builder + + maxLine := errLine + len(after) - 1 + lineColumnWidth := len(strconv.Itoa(maxLine)) + + // Write the lines of context strictly before the error. + for i := len(before) - 1; i > 0; i-- { + line := errLine - i + buf.WriteString(formatLineNumber(line, lineColumnWidth)) + buf.WriteString("|") + + if len(before[i]) > 0 { + buf.WriteString(" ") + buf.Write(before[i]) + } + + buf.WriteRune('\n') + } + + // Write the document line that contains the error. + + buf.WriteString(formatLineNumber(errLine, lineColumnWidth)) + buf.WriteString("| ") + + if len(before) > 0 { + buf.Write(before[0]) + } + + buf.Write(de.Highlight) + + if len(after) > 0 { + buf.Write(after[0]) + } + + buf.WriteRune('\n') + + // Write the line with the error message itself (so it does not have a line + // number). + + buf.WriteString(strings.Repeat(" ", lineColumnWidth)) + buf.WriteString("| ") + + if len(before) > 0 { + buf.WriteString(strings.Repeat(" ", len(before[0]))) + } + + buf.WriteString(strings.Repeat("~", len(de.Highlight))) + + if len(errMessage) > 0 { + buf.WriteString(" ") + buf.WriteString(errMessage) + } + + // Write the lines of context strictly after the error. + + for i := 1; i < len(after); i++ { + buf.WriteRune('\n') + line := errLine + i + buf.WriteString(formatLineNumber(line, lineColumnWidth)) + buf.WriteString("|") + + if len(after[i]) > 0 { + buf.WriteString(" ") + buf.Write(after[i]) + } + } + + return &DecodeError{ + message: errMessage, + line: errLine, + column: errColumn, + key: de.Key, + human: buf.String(), + } +} + +func formatLineNumber(line int, width int) string { + format := "%" + strconv.Itoa(width) + "d" + + return fmt.Sprintf(format, line) +} + +func linesOfContext(document []byte, highlight []byte, offset int, linesAround int) ([][]byte, [][]byte) { + return beforeLines(document, offset, linesAround), afterLines(document, highlight, offset, linesAround) +} + +func beforeLines(document []byte, offset int, linesAround int) [][]byte { + var beforeLines [][]byte + + // Walk the document backward from the highlight to find previous lines + // of context. + rest := document[:offset] +backward: + for o := len(rest) - 1; o >= 0 && len(beforeLines) <= linesAround && len(rest) > 0; { + switch { + case rest[o] == '\n': + // handle individual lines + beforeLines = append(beforeLines, rest[o+1:]) + rest = rest[:o] + o = len(rest) - 1 + case o == 0: + // add the first line only if it's non-empty + beforeLines = append(beforeLines, rest) + + break backward + default: + o-- + } + } + + return beforeLines +} + +func afterLines(document []byte, highlight []byte, offset int, linesAround int) [][]byte { + var afterLines [][]byte + + // Walk the document forward from the highlight to find the following + // lines of context. 
+ rest := document[offset+len(highlight):] +forward: + for o := 0; o < len(rest) && len(afterLines) <= linesAround; { + switch { + case rest[o] == '\n': + // handle individual lines + afterLines = append(afterLines, rest[:o]) + rest = rest[o+1:] + o = 0 + + case o == len(rest)-1: + // add last line only if it's non-empty + afterLines = append(afterLines, rest) + + break forward + default: + o++ + } + } + + return afterLines +} + +func positionAtEnd(b []byte) (row int, column int) { + row = 1 + column = 1 + + for _, c := range b { + if c == '\n' { + row++ + column = 1 + } else { + column++ + } + } + + return +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/characters/BUILD.bazel b/vendor/github.com/pelletier/go-toml/v2/internal/characters/BUILD.bazel new file mode 100644 index 00000000..e64df195 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/characters/BUILD.bazel @@ -0,0 +1,12 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "characters", + srcs = [ + "ascii.go", + "utf8.go", + ], + importmap = "peridot.resf.org/vendor/github.com/pelletier/go-toml/v2/internal/characters", + importpath = "github.com/pelletier/go-toml/v2/internal/characters", + visibility = ["//vendor/github.com/pelletier/go-toml/v2:__subpackages__"], +) diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go b/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go new file mode 100644 index 00000000..80f698db --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go @@ -0,0 +1,42 @@ +package characters + +var invalidAsciiTable = [256]bool{ + 0x00: true, + 0x01: true, + 0x02: true, + 0x03: true, + 0x04: true, + 0x05: true, + 0x06: true, + 0x07: true, + 0x08: true, + // 0x09 TAB + // 0x0A LF + 0x0B: true, + 0x0C: true, + // 0x0D CR + 0x0E: true, + 0x0F: true, + 0x10: true, + 0x11: true, + 0x12: true, + 0x13: true, + 0x14: true, + 0x15: true, + 0x16: true, + 0x17: true, + 0x18: true, + 0x19: true, + 0x1A: true, + 0x1B: true, + 0x1C: true, + 0x1D: true, + 0x1E: true, + 0x1F: true, + // 0x20 - 0x7E Printable ASCII characters + 0x7F: true, +} + +func InvalidAscii(b byte) bool { + return invalidAsciiTable[b] +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go b/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go new file mode 100644 index 00000000..db4f45ac --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go @@ -0,0 +1,199 @@ +package characters + +import ( + "unicode/utf8" +) + +type utf8Err struct { + Index int + Size int +} + +func (u utf8Err) Zero() bool { + return u.Size == 0 +} + +// Verified that a given string is only made of valid UTF-8 characters allowed +// by the TOML spec: +// +// Any Unicode character may be used except those that must be escaped: +// quotation mark, backslash, and the control characters other than tab (U+0000 +// to U+0008, U+000A to U+001F, U+007F). +// +// It is a copy of the Go 1.17 utf8.Valid implementation, tweaked to exit early +// when a character is not allowed. +// +// The returned utf8Err is Zero() if the string is valid, or contains the byte +// index and size of the invalid character. +// +// quotation mark => already checked +// backslash => already checked +// 0-0x8 => invalid +// 0x9 => tab, ok +// 0xA - 0x1F => invalid +// 0x7F => invalid +func Utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) { + // Fast path. 
Check for and skip 8 bytes of ASCII characters per iteration. + offset := 0 + for len(p) >= 8 { + // Combining two 32 bit loads allows the same code to be used + // for 32 and 64 bit platforms. + // The compiler can generate a 32bit load for first32 and second32 + // on many platforms. See test/codegen/memcombine.go. + first32 := uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 + second32 := uint32(p[4]) | uint32(p[5])<<8 | uint32(p[6])<<16 | uint32(p[7])<<24 + if (first32|second32)&0x80808080 != 0 { + // Found a non ASCII byte (>= RuneSelf). + break + } + + for i, b := range p[:8] { + if InvalidAscii(b) { + err.Index = offset + i + err.Size = 1 + return + } + } + + p = p[8:] + offset += 8 + } + n := len(p) + for i := 0; i < n; { + pi := p[i] + if pi < utf8.RuneSelf { + if InvalidAscii(pi) { + err.Index = offset + i + err.Size = 1 + return + } + i++ + continue + } + x := first[pi] + if x == xx { + // Illegal starter byte. + err.Index = offset + i + err.Size = 1 + return + } + size := int(x & 7) + if i+size > n { + // Short or invalid. + err.Index = offset + i + err.Size = n - i + return + } + accept := acceptRanges[x>>4] + if c := p[i+1]; c < accept.lo || accept.hi < c { + err.Index = offset + i + err.Size = 2 + return + } else if size == 2 { + } else if c := p[i+2]; c < locb || hicb < c { + err.Index = offset + i + err.Size = 3 + return + } else if size == 3 { + } else if c := p[i+3]; c < locb || hicb < c { + err.Index = offset + i + err.Size = 4 + return + } + i += size + } + return +} + +// Return the size of the next rune if valid, 0 otherwise. +func Utf8ValidNext(p []byte) int { + c := p[0] + + if c < utf8.RuneSelf { + if InvalidAscii(c) { + return 0 + } + return 1 + } + + x := first[c] + if x == xx { + // Illegal starter byte. + return 0 + } + size := int(x & 7) + if size > len(p) { + // Short or invalid. + return 0 + } + accept := acceptRanges[x>>4] + if c := p[1]; c < accept.lo || accept.hi < c { + return 0 + } else if size == 2 { + } else if c := p[2]; c < locb || hicb < c { + return 0 + } else if size == 3 { + } else if c := p[3]; c < locb || hicb < c { + return 0 + } + + return size +} + +// acceptRange gives the range of valid values for the second byte in a UTF-8 +// sequence. +type acceptRange struct { + lo uint8 // lowest value for second byte. + hi uint8 // highest value for second byte. +} + +// acceptRanges has size 16 to avoid bounds checks in the code that uses it. +var acceptRanges = [16]acceptRange{ + 0: {locb, hicb}, + 1: {0xA0, hicb}, + 2: {locb, 0x9F}, + 3: {0x90, hicb}, + 4: {locb, 0x8F}, +} + +// first is information about the first byte in a UTF-8 sequence. 
+var first = [256]uint8{ + // 1 2 3 4 5 6 7 8 9 A B C D E F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F + // 1 2 3 4 5 6 7 8 9 A B C D E F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF + xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF + s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF + s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF + s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF +} + +const ( + // The default lowest and highest continuation byte. + locb = 0b10000000 + hicb = 0b10111111 + + // These names of these constants are chosen to give nice alignment in the + // table below. The first nibble is an index into acceptRanges or F for + // special one-byte cases. The second nibble is the Rune length or the + // Status for the special one-byte case. + xx = 0xF1 // invalid: size 1 + as = 0xF0 // ASCII: size 1 + s1 = 0x02 // accept 0, size 2 + s2 = 0x13 // accept 1, size 3 + s3 = 0x03 // accept 0, size 3 + s4 = 0x23 // accept 2, size 3 + s5 = 0x34 // accept 3, size 4 + s6 = 0x04 // accept 0, size 4 + s7 = 0x44 // accept 4, size 4 +) diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/danger/BUILD.bazel b/vendor/github.com/pelletier/go-toml/v2/internal/danger/BUILD.bazel new file mode 100644 index 00000000..c1a461c1 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/danger/BUILD.bazel @@ -0,0 +1,12 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "danger", + srcs = [ + "danger.go", + "typeid.go", + ], + importmap = "peridot.resf.org/vendor/github.com/pelletier/go-toml/v2/internal/danger", + importpath = "github.com/pelletier/go-toml/v2/internal/danger", + visibility = ["//vendor/github.com/pelletier/go-toml/v2:__subpackages__"], +) diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go b/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go new file mode 100644 index 00000000..e38e1131 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go @@ -0,0 +1,65 @@ +package danger + +import ( + "fmt" + "reflect" + "unsafe" +) + +const maxInt = uintptr(int(^uint(0) >> 1)) + +func SubsliceOffset(data []byte, subslice []byte) int { + datap := (*reflect.SliceHeader)(unsafe.Pointer(&data)) + hlp := (*reflect.SliceHeader)(unsafe.Pointer(&subslice)) + + if hlp.Data < datap.Data { + panic(fmt.Errorf("subslice address (%d) is before data address (%d)", hlp.Data, datap.Data)) + } + offset := hlp.Data - datap.Data + + if offset > maxInt { + panic(fmt.Errorf("slice offset larger than int (%d)", offset)) + } + + intoffset := int(offset) 
+ + if intoffset > datap.Len { + panic(fmt.Errorf("slice offset (%d) is farther than data length (%d)", intoffset, datap.Len)) + } + + if intoffset+hlp.Len > datap.Len { + panic(fmt.Errorf("slice ends (%d+%d) is farther than data length (%d)", intoffset, hlp.Len, datap.Len)) + } + + return intoffset +} + +func BytesRange(start []byte, end []byte) []byte { + if start == nil || end == nil { + panic("cannot call BytesRange with nil") + } + startp := (*reflect.SliceHeader)(unsafe.Pointer(&start)) + endp := (*reflect.SliceHeader)(unsafe.Pointer(&end)) + + if startp.Data > endp.Data { + panic(fmt.Errorf("start pointer address (%d) is after end pointer address (%d)", startp.Data, endp.Data)) + } + + l := startp.Len + endLen := int(endp.Data-startp.Data) + endp.Len + if endLen > l { + l = endLen + } + + if l > startp.Cap { + panic(fmt.Errorf("range length is larger than capacity")) + } + + return start[:l] +} + +func Stride(ptr unsafe.Pointer, size uintptr, offset int) unsafe.Pointer { + // TODO: replace with unsafe.Add when Go 1.17 is released + // https://github.com/golang/go/issues/40481 + return unsafe.Pointer(uintptr(ptr) + uintptr(int(size)*offset)) +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go b/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go new file mode 100644 index 00000000..9d41c28a --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go @@ -0,0 +1,23 @@ +package danger + +import ( + "reflect" + "unsafe" +) + +// typeID is used as key in encoder and decoder caches to enable using +// the optimize runtime.mapaccess2_fast64 function instead of the more +// expensive lookup if we were to use reflect.Type as map key. +// +// typeID holds the pointer to the reflect.Type value, which is unique +// in the program. +// +// https://github.com/segmentio/encoding/blob/master/json/codec.go#L59-L61 +type TypeID unsafe.Pointer + +func MakeTypeID(t reflect.Type) TypeID { + // reflect.Type has the fields: + // typ unsafe.Pointer + // ptr unsafe.Pointer + return TypeID((*[2]unsafe.Pointer)(unsafe.Pointer(&t))[1]) +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/BUILD.bazel b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/BUILD.bazel new file mode 100644 index 00000000..a43ffa81 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/BUILD.bazel @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "tracker", + srcs = [ + "key.go", + "seen.go", + "tracker.go", + ], + importmap = "peridot.resf.org/vendor/github.com/pelletier/go-toml/v2/internal/tracker", + importpath = "github.com/pelletier/go-toml/v2/internal/tracker", + visibility = ["//vendor/github.com/pelletier/go-toml/v2:__subpackages__"], + deps = ["//vendor/github.com/pelletier/go-toml/v2/unstable"], +) diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go new file mode 100644 index 00000000..149b17f5 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go @@ -0,0 +1,48 @@ +package tracker + +import "github.com/pelletier/go-toml/v2/unstable" + +// KeyTracker is a tracker that keeps track of the current Key as the AST is +// walked. +type KeyTracker struct { + k []string +} + +// UpdateTable sets the state of the tracker with the AST table node. 
+func (t *KeyTracker) UpdateTable(node *unstable.Node) { + t.reset() + t.Push(node) +} + +// UpdateArrayTable sets the state of the tracker with the AST array table node. +func (t *KeyTracker) UpdateArrayTable(node *unstable.Node) { + t.reset() + t.Push(node) +} + +// Push the given key on the stack. +func (t *KeyTracker) Push(node *unstable.Node) { + it := node.Key() + for it.Next() { + t.k = append(t.k, string(it.Node().Data)) + } +} + +// Pop key from stack. +func (t *KeyTracker) Pop(node *unstable.Node) { + it := node.Key() + for it.Next() { + t.k = t.k[:len(t.k)-1] + } +} + +// Key returns the current key +func (t *KeyTracker) Key() []string { + k := make([]string, len(t.k)) + copy(k, t.k) + return k +} + +func (t *KeyTracker) reset() { + t.k = t.k[:0] +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go new file mode 100644 index 00000000..76df2d5b --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go @@ -0,0 +1,358 @@ +package tracker + +import ( + "bytes" + "fmt" + "sync" + + "github.com/pelletier/go-toml/v2/unstable" +) + +type keyKind uint8 + +const ( + invalidKind keyKind = iota + valueKind + tableKind + arrayTableKind +) + +func (k keyKind) String() string { + switch k { + case invalidKind: + return "invalid" + case valueKind: + return "value" + case tableKind: + return "table" + case arrayTableKind: + return "array table" + } + panic("missing keyKind string mapping") +} + +// SeenTracker tracks which keys have been seen with which TOML type to flag +// duplicates and mismatches according to the spec. +// +// Each node in the visited tree is represented by an entry. Each entry has an +// identifier, which is provided by a counter. Entries are stored in the array +// entries. As new nodes are discovered (referenced for the first time in the +// TOML document), entries are created and appended to the array. An entry +// points to its parent using its id. +// +// To find whether a given key (sequence of []byte) has already been visited, +// the entries are linearly searched, looking for one with the right name and +// parent id. +// +// Given that all keys appear in the document after their parent, it is +// guaranteed that all descendants of a node are stored after the node, this +// speeds up the search process. +// +// When encountering [[array tables]], the descendants of that node are removed +// to allow that branch of the tree to be "rediscovered". To maintain the +// invariant above, the deletion process needs to keep the order of entries. +// This results in more copies in that case. +type SeenTracker struct { + entries []entry + currentIdx int +} + +var pool = sync.Pool{ + New: func() interface{} { + return &SeenTracker{} + }, +} + +func (s *SeenTracker) reset() { + // Always contains a root element at index 0. + s.currentIdx = 0 + if len(s.entries) == 0 { + s.entries = make([]entry, 1, 2) + } else { + s.entries = s.entries[:1] + } + s.entries[0].child = -1 + s.entries[0].next = -1 +} + +type entry struct { + // Use -1 to indicate no child or no sibling. + child int + next int + + name []byte + kind keyKind + explicit bool + kv bool +} + +// Find the index of the child of parentIdx with key k. Returns -1 if +// it does not exist. 
+func (s *SeenTracker) find(parentIdx int, k []byte) int { + for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next { + if bytes.Equal(s.entries[i].name, k) { + return i + } + } + return -1 +} + +// Remove all descendants of node at position idx. +func (s *SeenTracker) clear(idx int) { + if idx >= len(s.entries) { + return + } + + for i := s.entries[idx].child; i >= 0; { + next := s.entries[i].next + n := s.entries[0].next + s.entries[0].next = i + s.entries[i].next = n + s.entries[i].name = nil + s.clear(i) + i = next + } + + s.entries[idx].child = -1 +} + +func (s *SeenTracker) create(parentIdx int, name []byte, kind keyKind, explicit bool, kv bool) int { + e := entry{ + child: -1, + next: s.entries[parentIdx].child, + + name: name, + kind: kind, + explicit: explicit, + kv: kv, + } + var idx int + if s.entries[0].next >= 0 { + idx = s.entries[0].next + s.entries[0].next = s.entries[idx].next + s.entries[idx] = e + } else { + idx = len(s.entries) + s.entries = append(s.entries, e) + } + + s.entries[parentIdx].child = idx + + return idx +} + +func (s *SeenTracker) setExplicitFlag(parentIdx int) { + for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next { + if s.entries[i].kv { + s.entries[i].explicit = true + s.entries[i].kv = false + } + s.setExplicitFlag(i) + } +} + +// CheckExpression takes a top-level node and checks that it does not contain +// keys that have been seen in previous calls, and validates that types are +// consistent. It returns true if it is the first time this node's key is seen. +// Useful to clear array tables on first use. +func (s *SeenTracker) CheckExpression(node *unstable.Node) (bool, error) { + if s.entries == nil { + s.reset() + } + switch node.Kind { + case unstable.KeyValue: + return s.checkKeyValue(node) + case unstable.Table: + return s.checkTable(node) + case unstable.ArrayTable: + return s.checkArrayTable(node) + default: + panic(fmt.Errorf("this should not be a top level node type: %s", node.Kind)) + } +} + +func (s *SeenTracker) checkTable(node *unstable.Node) (bool, error) { + if s.currentIdx >= 0 { + s.setExplicitFlag(s.currentIdx) + } + + it := node.Key() + + parentIdx := 0 + + // This code is duplicated in checkArrayTable. This is because factoring + // it in a function requires to copy the iterator, or allocate it to the + // heap, which is not cheap. 
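+	// The loop walks every part of the key except the last one, creating
+	// implicit table entries as needed; the last part is resolved separately
+	// after the loop, because only it marks the table as explicitly defined.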
+	for it.Next() {
+		if it.IsLast() {
+			break
+		}
+
+		k := it.Node().Data
+
+		idx := s.find(parentIdx, k)
+
+		if idx < 0 {
+			idx = s.create(parentIdx, k, tableKind, false, false)
+		} else {
+			entry := s.entries[idx]
+			if entry.kind == valueKind {
+				return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind)
+			}
+		}
+		parentIdx = idx
+	}
+
+	k := it.Node().Data
+	idx := s.find(parentIdx, k)
+
+	first := false
+	if idx >= 0 {
+		kind := s.entries[idx].kind
+		if kind != tableKind {
+			return false, fmt.Errorf("toml: key %s should be a table, not a %s", string(k), kind)
+		}
+		if s.entries[idx].explicit {
+			return false, fmt.Errorf("toml: table %s already exists", string(k))
+		}
+		s.entries[idx].explicit = true
+	} else {
+		idx = s.create(parentIdx, k, tableKind, true, false)
+		first = true
+	}
+
+	s.currentIdx = idx
+
+	return first, nil
+}
+
+func (s *SeenTracker) checkArrayTable(node *unstable.Node) (bool, error) {
+	if s.currentIdx >= 0 {
+		s.setExplicitFlag(s.currentIdx)
+	}
+
+	it := node.Key()
+
+	parentIdx := 0
+
+	for it.Next() {
+		if it.IsLast() {
+			break
+		}
+
+		k := it.Node().Data
+
+		idx := s.find(parentIdx, k)
+
+		if idx < 0 {
+			idx = s.create(parentIdx, k, tableKind, false, false)
+		} else {
+			entry := s.entries[idx]
+			if entry.kind == valueKind {
+				return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind)
+			}
+		}
+
+		parentIdx = idx
+	}
+
+	k := it.Node().Data
+	idx := s.find(parentIdx, k)
+
+	firstTime := idx < 0
+	if firstTime {
+		idx = s.create(parentIdx, k, arrayTableKind, true, false)
+	} else {
+		kind := s.entries[idx].kind
+		if kind != arrayTableKind {
+			return false, fmt.Errorf("toml: key %s already exists as a %s, but should be an array table", string(k), kind)
+		}
+		s.clear(idx)
+	}
+
+	s.currentIdx = idx
+
+	return firstTime, nil
+}
+
+func (s *SeenTracker) checkKeyValue(node *unstable.Node) (bool, error) {
+	parentIdx := s.currentIdx
+	it := node.Key()
+
+	for it.Next() {
+		k := it.Node().Data
+
+		idx := s.find(parentIdx, k)
+
+		if idx < 0 {
+			idx = s.create(parentIdx, k, tableKind, false, true)
+		} else {
+			entry := s.entries[idx]
+			if it.IsLast() {
+				return false, fmt.Errorf("toml: key %s is already defined", string(k))
+			} else if entry.kind != tableKind {
+				return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind)
+			} else if entry.explicit {
+				return false, fmt.Errorf("toml: cannot redefine table %s that has already been explicitly defined", string(k))
+			}
+		}
+
+		parentIdx = idx
+	}
+
+	s.entries[parentIdx].kind = valueKind
+
+	value := node.Value()
+
+	switch value.Kind {
+	case unstable.InlineTable:
+		return s.checkInlineTable(value)
+	case unstable.Array:
+		return s.checkArray(value)
+	}
+
+	return false, nil
+}
+
+func (s *SeenTracker) checkArray(node *unstable.Node) (first bool, err error) {
+	it := node.Children()
+	for it.Next() {
+		n := it.Node()
+		switch n.Kind {
+		case unstable.InlineTable:
+			first, err = s.checkInlineTable(n)
+			if err != nil {
+				return false, err
+			}
+		case unstable.Array:
+			first, err = s.checkArray(n)
+			if err != nil {
+				return false, err
+			}
+		}
+	}
+	return first, nil
+}
+
+func (s *SeenTracker) checkInlineTable(node *unstable.Node) (first bool, err error) {
+	s = pool.Get().(*SeenTracker)
+	s.reset()
+
+	it := node.Children()
+	for it.Next() {
+		n := it.Node()
+		first, err = s.checkKeyValue(n)
+		if err != nil {
+			return false, err
+		}
+	}
+
+	// As inline tables are self-contained, the tracker does not
+	// need to retain the
details of what they contain. The + // keyValue element that creates the inline table is kept to + // mark the presence of the inline table and prevent + // redefinition of its keys: check* functions cannot walk into + // a value. + pool.Put(s) + return first, nil +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go new file mode 100644 index 00000000..bf031739 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go @@ -0,0 +1 @@ +package tracker diff --git a/vendor/github.com/pelletier/go-toml/v2/localtime.go b/vendor/github.com/pelletier/go-toml/v2/localtime.go new file mode 100644 index 00000000..a856bfdb --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/localtime.go @@ -0,0 +1,122 @@ +package toml + +import ( + "fmt" + "strings" + "time" + + "github.com/pelletier/go-toml/v2/unstable" +) + +// LocalDate represents a calendar day in no specific timezone. +type LocalDate struct { + Year int + Month int + Day int +} + +// AsTime converts d into a specific time instance at midnight in zone. +func (d LocalDate) AsTime(zone *time.Location) time.Time { + return time.Date(d.Year, time.Month(d.Month), d.Day, 0, 0, 0, 0, zone) +} + +// String returns RFC 3339 representation of d. +func (d LocalDate) String() string { + return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) +} + +// MarshalText returns RFC 3339 representation of d. +func (d LocalDate) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText parses b using RFC 3339 to fill d. +func (d *LocalDate) UnmarshalText(b []byte) error { + res, err := parseLocalDate(b) + if err != nil { + return err + } + *d = res + return nil +} + +// LocalTime represents a time of day of no specific day in no specific +// timezone. +type LocalTime struct { + Hour int // Hour of the day: [0; 24[ + Minute int // Minute of the hour: [0; 60[ + Second int // Second of the minute: [0; 60[ + Nanosecond int // Nanoseconds within the second: [0, 1000000000[ + Precision int // Number of digits to display for Nanosecond. +} + +// String returns RFC 3339 representation of d. +// If d.Nanosecond and d.Precision are zero, the time won't have a nanosecond +// component. If d.Nanosecond > 0 but d.Precision = 0, then the minimum number +// of digits for nanoseconds is provided. +func (d LocalTime) String() string { + s := fmt.Sprintf("%02d:%02d:%02d", d.Hour, d.Minute, d.Second) + + if d.Precision > 0 { + s += fmt.Sprintf(".%09d", d.Nanosecond)[:d.Precision+1] + } else if d.Nanosecond > 0 { + // Nanoseconds are specified, but precision is not provided. Use the + // minimum. + s += strings.Trim(fmt.Sprintf(".%09d", d.Nanosecond), "0") + } + + return s +} + +// MarshalText returns RFC 3339 representation of d. +func (d LocalTime) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText parses b using RFC 3339 to fill d. +func (d *LocalTime) UnmarshalText(b []byte) error { + res, left, err := parseLocalTime(b) + if err == nil && len(left) != 0 { + err = unstable.NewParserError(left, "extra characters") + } + if err != nil { + return err + } + *d = res + return nil +} + +// LocalDateTime represents a time of a specific day in no specific timezone. +type LocalDateTime struct { + LocalDate + LocalTime +} + +// AsTime converts d into a specific time instance in zone. 
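+//
+// For example (a sketch; the field values are illustrative):
+//
+//	dt := LocalDateTime{
+//		LocalDate{Year: 2024, Month: 5, Day: 1},
+//		LocalTime{Hour: 12, Minute: 30},
+//	}
+//	t := dt.AsTime(time.UTC) // 2024-05-01 12:30:00 +0000 UTC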
+func (d LocalDateTime) AsTime(zone *time.Location) time.Time {
+	return time.Date(d.Year, time.Month(d.Month), d.Day, d.Hour, d.Minute, d.Second, d.Nanosecond, zone)
+}
+
+// String returns RFC 3339 representation of d.
+func (d LocalDateTime) String() string {
+	return d.LocalDate.String() + "T" + d.LocalTime.String()
+}
+
+// MarshalText returns RFC 3339 representation of d.
+func (d LocalDateTime) MarshalText() ([]byte, error) {
+	return []byte(d.String()), nil
+}
+
+// UnmarshalText parses data using RFC 3339 to fill d.
+func (d *LocalDateTime) UnmarshalText(data []byte) error {
+	res, left, err := parseLocalDateTime(data)
+	if err == nil && len(left) != 0 {
+		err = unstable.NewParserError(left, "extra characters")
+	}
+	if err != nil {
+		return err
+	}
+
+	*d = res
+	return nil
+}
diff --git a/vendor/github.com/pelletier/go-toml/v2/marshaler.go b/vendor/github.com/pelletier/go-toml/v2/marshaler.go
new file mode 100644
index 00000000..7f4e20c1
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/marshaler.go
@@ -0,0 +1,1121 @@
+package toml
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/json"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+	"unicode"
+
+	"github.com/pelletier/go-toml/v2/internal/characters"
+)
+
+// Marshal serializes a Go value as a TOML document.
+//
+// It is a shortcut for Encoder.Encode() with the default options.
+func Marshal(v interface{}) ([]byte, error) {
+	var buf bytes.Buffer
+	enc := NewEncoder(&buf)
+
+	err := enc.Encode(v)
+	if err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}
+
+// Encoder writes a TOML document to an output stream.
+type Encoder struct {
+	// output
+	w io.Writer
+
+	// global settings
+	tablesInline       bool
+	arraysMultiline    bool
+	indentSymbol       string
+	indentTables       bool
+	marshalJsonNumbers bool
+}
+
+// NewEncoder returns a new Encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		w:            w,
+		indentSymbol: "  ",
+	}
+}
+
+// SetTablesInline forces the encoder to emit all tables inline.
+//
+// This behavior can be controlled on an individual struct field basis with the
+// inline tag:
+//
+//	MyField `toml:",inline"`
+func (enc *Encoder) SetTablesInline(inline bool) *Encoder {
+	enc.tablesInline = inline
+	return enc
+}
+
+// SetArraysMultiline forces the encoder to emit all arrays with one element
+// per line.
+//
+// This behavior can be controlled on an individual struct field basis with the
+// multiline tag:
+//
+//	MyField `multiline:"true"`
+func (enc *Encoder) SetArraysMultiline(multiline bool) *Encoder {
+	enc.arraysMultiline = multiline
+	return enc
+}
+
+// SetIndentSymbol defines the string that should be used for indentation. The
+// provided string is repeated for each indentation level. Defaults to two
+// spaces.
+func (enc *Encoder) SetIndentSymbol(s string) *Encoder {
+	enc.indentSymbol = s
+	return enc
+}
+
+// SetIndentTables forces the encoder to indent tables and array tables.
+func (enc *Encoder) SetIndentTables(indent bool) *Encoder {
+	enc.indentTables = indent
+	return enc
+}
+
+// SetMarshalJsonNumbers forces the encoder to serialize `json.Number` as a
+// float or integer instead of relying on TextMarshaler to emit a string.
+//
+// *Unstable:* This method does not follow the compatibility guarantees of
+// semver. It can be changed or removed without a new major version being
+// issued.
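+//
+// A short sketch of the intended use (the map value is illustrative):
+//
+//	var buf bytes.Buffer
+//	enc := NewEncoder(&buf).SetMarshalJsonNumbers(true)
+//	_ = enc.Encode(map[string]json.Number{"n": "1.5"}) // emits n = 1.5 rather than n = '1.5'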
+func (enc *Encoder) SetMarshalJsonNumbers(indent bool) *Encoder { + enc.marshalJsonNumbers = indent + return enc +} + +// Encode writes a TOML representation of v to the stream. +// +// If v cannot be represented to TOML it returns an error. +// +// # Encoding rules +// +// A top level slice containing only maps or structs is encoded as [[table +// array]]. +// +// All slices not matching rule 1 are encoded as [array]. As a result, any map +// or struct they contain is encoded as an {inline table}. +// +// Nil interfaces and nil pointers are not supported. +// +// Keys in key-values always have one part. +// +// Intermediate tables are always printed. +// +// By default, strings are encoded as literal string, unless they contain either +// a newline character or a single quote. In that case they are emitted as +// quoted strings. +// +// Unsigned integers larger than math.MaxInt64 cannot be encoded. Doing so +// results in an error. This rule exists because the TOML specification only +// requires parsers to support at least the 64 bits integer range. Allowing +// larger numbers would create non-standard TOML documents, which may not be +// readable (at best) by other implementations. To encode such numbers, a +// solution is a custom type that implements encoding.TextMarshaler. +// +// When encoding structs, fields are encoded in order of definition, with their +// exact name. +// +// Tables and array tables are separated by empty lines. However, consecutive +// subtables definitions are not. For example: +// +// [top1] +// +// [top2] +// [top2.child1] +// +// [[array]] +// +// [[array]] +// [array.child2] +// +// # Struct tags +// +// The encoding of each public struct field can be customized by the format +// string in the "toml" key of the struct field's tag. This follows +// encoding/json's convention. The format string starts with the name of the +// field, optionally followed by a comma-separated list of options. The name may +// be empty in order to provide options without overriding the default name. +// +// The "multiline" option emits strings as quoted multi-line TOML strings. It +// has no effect on fields that would not be encoded as strings. +// +// The "inline" option turns fields that would be emitted as tables into inline +// tables instead. It has no effect on other fields. +// +// The "omitempty" option prevents empty values or groups from being emitted. +// +// The "commented" option prefixes the value and all its children with a comment +// symbol. +// +// In addition to the "toml" tag struct tag, a "comment" tag can be used to emit +// a TOML comment before the value being annotated. Comments are ignored inside +// inline tables. For array tables, the comment is only present before the first +// element of the array. +func (enc *Encoder) Encode(v interface{}) error { + var ( + b []byte + ctx encoderCtx + ) + + ctx.inline = enc.tablesInline + + if v == nil { + return fmt.Errorf("toml: cannot encode a nil interface") + } + + b, err := enc.encode(b, ctx, reflect.ValueOf(v)) + if err != nil { + return err + } + + _, err = enc.w.Write(b) + if err != nil { + return fmt.Errorf("toml: cannot write: %w", err) + } + + return nil +} + +type valueOptions struct { + multiline bool + omitempty bool + commented bool + comment string +} + +type encoderCtx struct { + // Current top-level key. + parentKey []string + + // Key that should be used for a KV. 
+ key string + // Extra flag to account for the empty string + hasKey bool + + // Set to true to indicate that the encoder is inside a KV, so that all + // tables need to be inlined. + insideKv bool + + // Set to true to skip the first table header in an array table. + skipTableHeader bool + + // Should the next table be encoded as inline + inline bool + + // Indentation level + indent int + + // Prefix the current value with a comment. + commented bool + + // Options coming from struct tags + options valueOptions +} + +func (ctx *encoderCtx) shiftKey() { + if ctx.hasKey { + ctx.parentKey = append(ctx.parentKey, ctx.key) + ctx.clearKey() + } +} + +func (ctx *encoderCtx) setKey(k string) { + ctx.key = k + ctx.hasKey = true +} + +func (ctx *encoderCtx) clearKey() { + ctx.key = "" + ctx.hasKey = false +} + +func (ctx *encoderCtx) isRoot() bool { + return len(ctx.parentKey) == 0 && !ctx.hasKey +} + +func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + i := v.Interface() + + switch x := i.(type) { + case time.Time: + if x.Nanosecond() > 0 { + return x.AppendFormat(b, time.RFC3339Nano), nil + } + return x.AppendFormat(b, time.RFC3339), nil + case LocalTime: + return append(b, x.String()...), nil + case LocalDate: + return append(b, x.String()...), nil + case LocalDateTime: + return append(b, x.String()...), nil + case json.Number: + if enc.marshalJsonNumbers { + if x == "" { /// Useful zero value. + return append(b, "0"...), nil + } else if v, err := x.Int64(); err == nil { + return enc.encode(b, ctx, reflect.ValueOf(v)) + } else if f, err := x.Float64(); err == nil { + return enc.encode(b, ctx, reflect.ValueOf(f)) + } else { + return nil, fmt.Errorf("toml: unable to convert %q to int64 or float64", x) + } + } + } + + hasTextMarshaler := v.Type().Implements(textMarshalerType) + if hasTextMarshaler || (v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) { + if !hasTextMarshaler { + v = v.Addr() + } + + if ctx.isRoot() { + return nil, fmt.Errorf("toml: type %s implementing the TextMarshaler interface cannot be a root element", v.Type()) + } + + text, err := v.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return nil, err + } + + b = enc.encodeString(b, string(text), ctx.options) + + return b, nil + } + + switch v.Kind() { + // containers + case reflect.Map: + return enc.encodeMap(b, ctx, v) + case reflect.Struct: + return enc.encodeStruct(b, ctx, v) + case reflect.Slice, reflect.Array: + return enc.encodeSlice(b, ctx, v) + case reflect.Interface: + if v.IsNil() { + return nil, fmt.Errorf("toml: encoding a nil interface is not supported") + } + + return enc.encode(b, ctx, v.Elem()) + case reflect.Ptr: + if v.IsNil() { + return enc.encode(b, ctx, reflect.Zero(v.Type().Elem())) + } + + return enc.encode(b, ctx, v.Elem()) + + // values + case reflect.String: + b = enc.encodeString(b, v.String(), ctx.options) + case reflect.Float32: + f := v.Float() + + if math.IsNaN(f) { + b = append(b, "nan"...) + } else if f > math.MaxFloat32 { + b = append(b, "inf"...) + } else if f < -math.MaxFloat32 { + b = append(b, "-inf"...) + } else if math.Trunc(f) == f { + b = strconv.AppendFloat(b, f, 'f', 1, 32) + } else { + b = strconv.AppendFloat(b, f, 'f', -1, 32) + } + case reflect.Float64: + f := v.Float() + if math.IsNaN(f) { + b = append(b, "nan"...) + } else if f > math.MaxFloat64 { + b = append(b, "inf"...) + } else if f < -math.MaxFloat64 { + b = append(b, "-inf"...) 
+ } else if math.Trunc(f) == f { + b = strconv.AppendFloat(b, f, 'f', 1, 64) + } else { + b = strconv.AppendFloat(b, f, 'f', -1, 64) + } + case reflect.Bool: + if v.Bool() { + b = append(b, "true"...) + } else { + b = append(b, "false"...) + } + case reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uint: + x := v.Uint() + if x > uint64(math.MaxInt64) { + return nil, fmt.Errorf("toml: not encoding uint (%d) greater than max int64 (%d)", x, int64(math.MaxInt64)) + } + b = strconv.AppendUint(b, x, 10) + case reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8, reflect.Int: + b = strconv.AppendInt(b, v.Int(), 10) + default: + return nil, fmt.Errorf("toml: cannot encode value of type %s", v.Kind()) + } + + return b, nil +} + +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Ptr, reflect.Interface, reflect.Map: + return v.IsNil() + default: + return false + } +} + +func shouldOmitEmpty(options valueOptions, v reflect.Value) bool { + return options.omitempty && isEmptyValue(v) +} + +func (enc *Encoder) encodeKv(b []byte, ctx encoderCtx, options valueOptions, v reflect.Value) ([]byte, error) { + var err error + + if !ctx.inline { + b = enc.encodeComment(ctx.indent, options.comment, b) + b = enc.commented(ctx.commented, b) + b = enc.indent(ctx.indent, b) + } + + b = enc.encodeKey(b, ctx.key) + b = append(b, " = "...) + + // create a copy of the context because the value of a KV shouldn't + // modify the global context. + subctx := ctx + subctx.insideKv = true + subctx.shiftKey() + subctx.options = options + + b, err = enc.encode(b, subctx, v) + if err != nil { + return nil, err + } + + return b, nil +} + +func (enc *Encoder) commented(commented bool, b []byte) []byte { + if commented { + return append(b, "# "...) + } + return b +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Struct: + return isEmptyStruct(v) + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func isEmptyStruct(v reflect.Value) bool { + // TODO: merge with walkStruct and cache. + typ := v.Type() + for i := 0; i < typ.NumField(); i++ { + fieldType := typ.Field(i) + + // only consider exported fields + if fieldType.PkgPath != "" { + continue + } + + tag := fieldType.Tag.Get("toml") + + // special field name to skip field + if tag == "-" { + continue + } + + f := v.Field(i) + + if !isEmptyValue(f) { + return false + } + } + + return true +} + +const literalQuote = '\'' + +func (enc *Encoder) encodeString(b []byte, v string, options valueOptions) []byte { + if needsQuoting(v) { + return enc.encodeQuotedString(options.multiline, b, v) + } + + return enc.encodeLiteralString(b, v) +} + +func needsQuoting(v string) bool { + // TODO: vectorize + for _, b := range []byte(v) { + if b == '\'' || b == '\r' || b == '\n' || characters.InvalidAscii(b) { + return true + } + } + return false +} + +// caller should have checked that the string does not contain new lines or ' . +func (enc *Encoder) encodeLiteralString(b []byte, v string) []byte { + b = append(b, literalQuote) + b = append(b, v...) 
+ b = append(b, literalQuote) + + return b +} + +func (enc *Encoder) encodeQuotedString(multiline bool, b []byte, v string) []byte { + stringQuote := `"` + + if multiline { + stringQuote = `"""` + } + + b = append(b, stringQuote...) + if multiline { + b = append(b, '\n') + } + + const ( + hextable = "0123456789ABCDEF" + // U+0000 to U+0008, U+000A to U+001F, U+007F + nul = 0x0 + bs = 0x8 + lf = 0xa + us = 0x1f + del = 0x7f + ) + + for _, r := range []byte(v) { + switch r { + case '\\': + b = append(b, `\\`...) + case '"': + b = append(b, `\"`...) + case '\b': + b = append(b, `\b`...) + case '\f': + b = append(b, `\f`...) + case '\n': + if multiline { + b = append(b, r) + } else { + b = append(b, `\n`...) + } + case '\r': + b = append(b, `\r`...) + case '\t': + b = append(b, `\t`...) + default: + switch { + case r >= nul && r <= bs, r >= lf && r <= us, r == del: + b = append(b, `\u00`...) + b = append(b, hextable[r>>4]) + b = append(b, hextable[r&0x0f]) + default: + b = append(b, r) + } + } + } + + b = append(b, stringQuote...) + + return b +} + +// caller should have checked that the string is in A-Z / a-z / 0-9 / - / _ . +func (enc *Encoder) encodeUnquotedKey(b []byte, v string) []byte { + return append(b, v...) +} + +func (enc *Encoder) encodeTableHeader(ctx encoderCtx, b []byte) ([]byte, error) { + if len(ctx.parentKey) == 0 { + return b, nil + } + + b = enc.encodeComment(ctx.indent, ctx.options.comment, b) + + b = enc.commented(ctx.commented, b) + + b = enc.indent(ctx.indent, b) + + b = append(b, '[') + + b = enc.encodeKey(b, ctx.parentKey[0]) + + for _, k := range ctx.parentKey[1:] { + b = append(b, '.') + b = enc.encodeKey(b, k) + } + + b = append(b, "]\n"...) + + return b, nil +} + +//nolint:cyclop +func (enc *Encoder) encodeKey(b []byte, k string) []byte { + needsQuotation := false + cannotUseLiteral := false + + if len(k) == 0 { + return append(b, "''"...) 
+ } + + for _, c := range k { + if (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || c == '-' || c == '_' { + continue + } + + if c == literalQuote { + cannotUseLiteral = true + } + + needsQuotation = true + } + + if needsQuotation && needsQuoting(k) { + cannotUseLiteral = true + } + + switch { + case cannotUseLiteral: + return enc.encodeQuotedString(false, b, k) + case needsQuotation: + return enc.encodeLiteralString(b, k) + default: + return enc.encodeUnquotedKey(b, k) + } +} + +func (enc *Encoder) keyToString(k reflect.Value) (string, error) { + keyType := k.Type() + switch { + case keyType.Kind() == reflect.String: + return k.String(), nil + + case keyType.Implements(textMarshalerType): + keyB, err := k.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return "", fmt.Errorf("toml: error marshalling key %v from text: %w", k, err) + } + return string(keyB), nil + } + return "", fmt.Errorf("toml: type %s is not supported as a map key", keyType.Kind()) +} + +func (enc *Encoder) encodeMap(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + var ( + t table + emptyValueOptions valueOptions + ) + + iter := v.MapRange() + for iter.Next() { + v := iter.Value() + + if isNil(v) { + continue + } + + k, err := enc.keyToString(iter.Key()) + if err != nil { + return nil, err + } + + if willConvertToTableOrArrayTable(ctx, v) { + t.pushTable(k, v, emptyValueOptions) + } else { + t.pushKV(k, v, emptyValueOptions) + } + } + + sortEntriesByKey(t.kvs) + sortEntriesByKey(t.tables) + + return enc.encodeTable(b, ctx, t) +} + +func sortEntriesByKey(e []entry) { + sort.Slice(e, func(i, j int) bool { + return e[i].Key < e[j].Key + }) +} + +type entry struct { + Key string + Value reflect.Value + Options valueOptions +} + +type table struct { + kvs []entry + tables []entry +} + +func (t *table) pushKV(k string, v reflect.Value, options valueOptions) { + for _, e := range t.kvs { + if e.Key == k { + return + } + } + + t.kvs = append(t.kvs, entry{Key: k, Value: v, Options: options}) +} + +func (t *table) pushTable(k string, v reflect.Value, options valueOptions) { + for _, e := range t.tables { + if e.Key == k { + return + } + } + t.tables = append(t.tables, entry{Key: k, Value: v, Options: options}) +} + +func walkStruct(ctx encoderCtx, t *table, v reflect.Value) { + // TODO: cache this + typ := v.Type() + for i := 0; i < typ.NumField(); i++ { + fieldType := typ.Field(i) + + // only consider exported fields + if fieldType.PkgPath != "" { + continue + } + + tag := fieldType.Tag.Get("toml") + + // special field name to skip field + if tag == "-" { + continue + } + + k, opts := parseTag(tag) + if !isValidName(k) { + k = "" + } + + f := v.Field(i) + + if k == "" { + if fieldType.Anonymous { + if fieldType.Type.Kind() == reflect.Struct { + walkStruct(ctx, t, f) + } else if fieldType.Type.Kind() == reflect.Pointer && !f.IsNil() && f.Elem().Kind() == reflect.Struct { + walkStruct(ctx, t, f.Elem()) + } + continue + } else { + k = fieldType.Name + } + } + + if isNil(f) { + continue + } + + options := valueOptions{ + multiline: opts.multiline, + omitempty: opts.omitempty, + commented: opts.commented, + comment: fieldType.Tag.Get("comment"), + } + + if opts.inline || !willConvertToTableOrArrayTable(ctx, f) { + t.pushKV(k, f, options) + } else { + t.pushTable(k, f, options) + } + } +} + +func (enc *Encoder) encodeStruct(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + var t table + + walkStruct(ctx, &t, v) + + return enc.encodeTable(b, ctx, t) +} + +func 
(enc *Encoder) encodeComment(indent int, comment string, b []byte) []byte { + for len(comment) > 0 { + var line string + idx := strings.IndexByte(comment, '\n') + if idx >= 0 { + line = comment[:idx] + comment = comment[idx+1:] + } else { + line = comment + comment = "" + } + b = enc.indent(indent, b) + b = append(b, "# "...) + b = append(b, line...) + b = append(b, '\n') + } + return b +} + +func isValidName(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:;<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + case !unicode.IsLetter(c) && !unicode.IsDigit(c): + return false + } + } + return true +} + +type tagOptions struct { + multiline bool + inline bool + omitempty bool + commented bool +} + +func parseTag(tag string) (string, tagOptions) { + opts := tagOptions{} + + idx := strings.Index(tag, ",") + if idx == -1 { + return tag, opts + } + + raw := tag[idx+1:] + tag = string(tag[:idx]) + for raw != "" { + var o string + i := strings.Index(raw, ",") + if i >= 0 { + o, raw = raw[:i], raw[i+1:] + } else { + o, raw = raw, "" + } + switch o { + case "multiline": + opts.multiline = true + case "inline": + opts.inline = true + case "omitempty": + opts.omitempty = true + case "commented": + opts.commented = true + } + } + + return tag, opts +} + +func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, error) { + var err error + + ctx.shiftKey() + + if ctx.insideKv || (ctx.inline && !ctx.isRoot()) { + return enc.encodeTableInline(b, ctx, t) + } + + if !ctx.skipTableHeader { + b, err = enc.encodeTableHeader(ctx, b) + if err != nil { + return nil, err + } + + if enc.indentTables && len(ctx.parentKey) > 0 { + ctx.indent++ + } + } + ctx.skipTableHeader = false + + hasNonEmptyKV := false + for _, kv := range t.kvs { + if shouldOmitEmpty(kv.Options, kv.Value) { + continue + } + hasNonEmptyKV = true + + ctx.setKey(kv.Key) + ctx2 := ctx + ctx2.commented = kv.Options.commented || ctx2.commented + + b, err = enc.encodeKv(b, ctx2, kv.Options, kv.Value) + if err != nil { + return nil, err + } + + b = append(b, '\n') + } + + first := true + for _, table := range t.tables { + if shouldOmitEmpty(table.Options, table.Value) { + continue + } + if first { + first = false + if hasNonEmptyKV { + b = append(b, '\n') + } + } else { + b = append(b, "\n"...) + } + + ctx.setKey(table.Key) + + ctx.options = table.Options + ctx2 := ctx + ctx2.commented = ctx2.commented || ctx.options.commented + + b, err = enc.encode(b, ctx2, table.Value) + if err != nil { + return nil, err + } + } + + return b, nil +} + +func (enc *Encoder) encodeTableInline(b []byte, ctx encoderCtx, t table) ([]byte, error) { + var err error + + b = append(b, '{') + + first := true + for _, kv := range t.kvs { + if shouldOmitEmpty(kv.Options, kv.Value) { + continue + } + + if first { + first = false + } else { + b = append(b, `, `...) + } + + ctx.setKey(kv.Key) + + b, err = enc.encodeKv(b, ctx, kv.Options, kv.Value) + if err != nil { + return nil, err + } + } + + if len(t.tables) > 0 { + panic("inline table cannot contain nested tables, only key-values") + } + + b = append(b, "}"...) 
+ + return b, nil +} + +func willConvertToTable(ctx encoderCtx, v reflect.Value) bool { + if !v.IsValid() { + return false + } + if v.Type() == timeType || v.Type().Implements(textMarshalerType) || (v.Kind() != reflect.Ptr && v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) { + return false + } + + t := v.Type() + switch t.Kind() { + case reflect.Map, reflect.Struct: + return !ctx.inline + case reflect.Interface: + return willConvertToTable(ctx, v.Elem()) + case reflect.Ptr: + if v.IsNil() { + return false + } + + return willConvertToTable(ctx, v.Elem()) + default: + return false + } +} + +func willConvertToTableOrArrayTable(ctx encoderCtx, v reflect.Value) bool { + if ctx.insideKv { + return false + } + t := v.Type() + + if t.Kind() == reflect.Interface { + return willConvertToTableOrArrayTable(ctx, v.Elem()) + } + + if t.Kind() == reflect.Slice || t.Kind() == reflect.Array { + if v.Len() == 0 { + // An empty slice should be a kv = []. + return false + } + + for i := 0; i < v.Len(); i++ { + t := willConvertToTable(ctx, v.Index(i)) + + if !t { + return false + } + } + + return true + } + + return willConvertToTable(ctx, v) +} + +func (enc *Encoder) encodeSlice(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + if v.Len() == 0 { + b = append(b, "[]"...) + + return b, nil + } + + if willConvertToTableOrArrayTable(ctx, v) { + return enc.encodeSliceAsArrayTable(b, ctx, v) + } + + return enc.encodeSliceAsArray(b, ctx, v) +} + +// caller should have checked that v is a slice that only contains values that +// encode into tables. +func (enc *Encoder) encodeSliceAsArrayTable(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + ctx.shiftKey() + + scratch := make([]byte, 0, 64) + + scratch = enc.commented(ctx.commented, scratch) + + if enc.indentTables { + scratch = enc.indent(ctx.indent, scratch) + } + + scratch = append(scratch, "[["...) + + for i, k := range ctx.parentKey { + if i > 0 { + scratch = append(scratch, '.') + } + + scratch = enc.encodeKey(scratch, k) + } + + scratch = append(scratch, "]]\n"...) + ctx.skipTableHeader = true + + b = enc.encodeComment(ctx.indent, ctx.options.comment, b) + + if enc.indentTables { + ctx.indent++ + } + + for i := 0; i < v.Len(); i++ { + if i != 0 { + b = append(b, "\n"...) + } + + b = append(b, scratch...) + + var err error + b, err = enc.encode(b, ctx, v.Index(i)) + if err != nil { + return nil, err + } + } + + return b, nil +} + +func (enc *Encoder) encodeSliceAsArray(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + multiline := ctx.options.multiline || enc.arraysMultiline + separator := ", " + + b = append(b, '[') + + subCtx := ctx + subCtx.options = valueOptions{} + + if multiline { + separator = ",\n" + + b = append(b, '\n') + + subCtx.indent++ + } + + var err error + first := true + + for i := 0; i < v.Len(); i++ { + if first { + first = false + } else { + b = append(b, separator...) + } + + if multiline { + b = enc.indent(subCtx.indent, b) + } + + b, err = enc.encode(b, subCtx, v.Index(i)) + if err != nil { + return nil, err + } + } + + if multiline { + b = append(b, '\n') + b = enc.indent(ctx.indent, b) + } + + b = append(b, ']') + + return b, nil +} + +func (enc *Encoder) indent(level int, b []byte) []byte { + for i := 0; i < level; i++ { + b = append(b, enc.indentSymbol...) 
+ } + + return b +} diff --git a/vendor/github.com/pelletier/go-toml/v2/strict.go b/vendor/github.com/pelletier/go-toml/v2/strict.go new file mode 100644 index 00000000..802e7e4d --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/strict.go @@ -0,0 +1,107 @@ +package toml + +import ( + "github.com/pelletier/go-toml/v2/internal/danger" + "github.com/pelletier/go-toml/v2/internal/tracker" + "github.com/pelletier/go-toml/v2/unstable" +) + +type strict struct { + Enabled bool + + // Tracks the current key being processed. + key tracker.KeyTracker + + missing []unstable.ParserError +} + +func (s *strict) EnterTable(node *unstable.Node) { + if !s.Enabled { + return + } + + s.key.UpdateTable(node) +} + +func (s *strict) EnterArrayTable(node *unstable.Node) { + if !s.Enabled { + return + } + + s.key.UpdateArrayTable(node) +} + +func (s *strict) EnterKeyValue(node *unstable.Node) { + if !s.Enabled { + return + } + + s.key.Push(node) +} + +func (s *strict) ExitKeyValue(node *unstable.Node) { + if !s.Enabled { + return + } + + s.key.Pop(node) +} + +func (s *strict) MissingTable(node *unstable.Node) { + if !s.Enabled { + return + } + + s.missing = append(s.missing, unstable.ParserError{ + Highlight: keyLocation(node), + Message: "missing table", + Key: s.key.Key(), + }) +} + +func (s *strict) MissingField(node *unstable.Node) { + if !s.Enabled { + return + } + + s.missing = append(s.missing, unstable.ParserError{ + Highlight: keyLocation(node), + Message: "missing field", + Key: s.key.Key(), + }) +} + +func (s *strict) Error(doc []byte) error { + if !s.Enabled || len(s.missing) == 0 { + return nil + } + + err := &StrictMissingError{ + Errors: make([]DecodeError, 0, len(s.missing)), + } + + for _, derr := range s.missing { + derr := derr + err.Errors = append(err.Errors, *wrapDecodeError(doc, &derr)) + } + + return err +} + +func keyLocation(node *unstable.Node) []byte { + k := node.Key() + + hasOne := k.Next() + if !hasOne { + panic("should not be called with empty key") + } + + start := k.Node().Data + end := k.Node().Data + + for k.Next() { + end = k.Node().Data + } + + return danger.BytesRange(start, end) +} diff --git a/vendor/github.com/pelletier/go-toml/v2/toml.abnf b/vendor/github.com/pelletier/go-toml/v2/toml.abnf new file mode 100644 index 00000000..473f3749 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/toml.abnf @@ -0,0 +1,243 @@ +;; This document describes TOML's syntax, using the ABNF format (defined in +;; RFC 5234 -- https://www.ietf.org/rfc/rfc5234.txt). +;; +;; All valid TOML documents will match this description, however certain +;; invalid documents would need to be rejected as per the semantics described +;; in the supporting text description. + +;; It is possible to try this grammar interactively, using instaparse. +;; http://instaparse.mojombo.com/ +;; +;; To do so, in the lower right, click on Options and change `:input-format` to +;; ':abnf'. Then paste this entire ABNF document into the grammar entry box +;; (above the options). Then you can type or paste a sample TOML document into +;; the beige box on the left. Tada! 
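+;;
+;; As a small worked example, the line `a.b = 1 # note` matches
+;; expression =/ ws keyval ws [ comment ], with keyval = key keyval-sep val,
+;; key = dotted-key = simple-key dot-sep simple-key, and val = integer.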
+ +;; Overall Structure + +toml = expression *( newline expression ) + +expression = ws [ comment ] +expression =/ ws keyval ws [ comment ] +expression =/ ws table ws [ comment ] + +;; Whitespace + +ws = *wschar +wschar = %x20 ; Space +wschar =/ %x09 ; Horizontal tab + +;; Newline + +newline = %x0A ; LF +newline =/ %x0D.0A ; CRLF + +;; Comment + +comment-start-symbol = %x23 ; # +non-ascii = %x80-D7FF / %xE000-10FFFF +non-eol = %x09 / %x20-7F / non-ascii + +comment = comment-start-symbol *non-eol + +;; Key-Value pairs + +keyval = key keyval-sep val + +key = simple-key / dotted-key +simple-key = quoted-key / unquoted-key + +unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ +quoted-key = basic-string / literal-string +dotted-key = simple-key 1*( dot-sep simple-key ) + +dot-sep = ws %x2E ws ; . Period +keyval-sep = ws %x3D ws ; = + +val = string / boolean / array / inline-table / date-time / float / integer + +;; String + +string = ml-basic-string / basic-string / ml-literal-string / literal-string + +;; Basic String + +basic-string = quotation-mark *basic-char quotation-mark + +quotation-mark = %x22 ; " + +basic-char = basic-unescaped / escaped +basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii +escaped = escape escape-seq-char + +escape = %x5C ; \ +escape-seq-char = %x22 ; " quotation mark U+0022 +escape-seq-char =/ %x5C ; \ reverse solidus U+005C +escape-seq-char =/ %x62 ; b backspace U+0008 +escape-seq-char =/ %x66 ; f form feed U+000C +escape-seq-char =/ %x6E ; n line feed U+000A +escape-seq-char =/ %x72 ; r carriage return U+000D +escape-seq-char =/ %x74 ; t tab U+0009 +escape-seq-char =/ %x75 4HEXDIG ; uXXXX U+XXXX +escape-seq-char =/ %x55 8HEXDIG ; UXXXXXXXX U+XXXXXXXX + +;; Multiline Basic String + +ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body + ml-basic-string-delim +ml-basic-string-delim = 3quotation-mark +ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ] + +mlb-content = mlb-char / newline / mlb-escaped-nl +mlb-char = mlb-unescaped / escaped +mlb-quotes = 1*2quotation-mark +mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii +mlb-escaped-nl = escape ws newline *( wschar / newline ) + +;; Literal String + +literal-string = apostrophe *literal-char apostrophe + +apostrophe = %x27 ; ' apostrophe + +literal-char = %x09 / %x20-26 / %x28-7E / non-ascii + +;; Multiline Literal String + +ml-literal-string = ml-literal-string-delim [ newline ] ml-literal-body + ml-literal-string-delim +ml-literal-string-delim = 3apostrophe +ml-literal-body = *mll-content *( mll-quotes 1*mll-content ) [ mll-quotes ] + +mll-content = mll-char / newline +mll-char = %x09 / %x20-26 / %x28-7E / non-ascii +mll-quotes = 1*2apostrophe + +;; Integer + +integer = dec-int / hex-int / oct-int / bin-int + +minus = %x2D ; - +plus = %x2B ; + +underscore = %x5F ; _ +digit1-9 = %x31-39 ; 1-9 +digit0-7 = %x30-37 ; 0-7 +digit0-1 = %x30-31 ; 0-1 + +hex-prefix = %x30.78 ; 0x +oct-prefix = %x30.6F ; 0o +bin-prefix = %x30.62 ; 0b + +dec-int = [ minus / plus ] unsigned-dec-int +unsigned-dec-int = DIGIT / digit1-9 1*( DIGIT / underscore DIGIT ) + +hex-int = hex-prefix HEXDIG *( HEXDIG / underscore HEXDIG ) +oct-int = oct-prefix digit0-7 *( digit0-7 / underscore digit0-7 ) +bin-int = bin-prefix digit0-1 *( digit0-1 / underscore digit0-1 ) + +;; Float + +float = float-int-part ( exp / frac [ exp ] ) +float =/ special-float + +float-int-part = dec-int +frac = decimal-point zero-prefixable-int +decimal-point = %x2E ; . 
+zero-prefixable-int = DIGIT *( DIGIT / underscore DIGIT ) + +exp = "e" float-exp-part +float-exp-part = [ minus / plus ] zero-prefixable-int + +special-float = [ minus / plus ] ( inf / nan ) +inf = %x69.6e.66 ; inf +nan = %x6e.61.6e ; nan + +;; Boolean + +boolean = true / false + +true = %x74.72.75.65 ; true +false = %x66.61.6C.73.65 ; false + +;; Date and Time (as defined in RFC 3339) + +date-time = offset-date-time / local-date-time / local-date / local-time + +date-fullyear = 4DIGIT +date-month = 2DIGIT ; 01-12 +date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on month/year +time-delim = "T" / %x20 ; T, t, or space +time-hour = 2DIGIT ; 00-23 +time-minute = 2DIGIT ; 00-59 +time-second = 2DIGIT ; 00-58, 00-59, 00-60 based on leap second rules +time-secfrac = "." 1*DIGIT +time-numoffset = ( "+" / "-" ) time-hour ":" time-minute +time-offset = "Z" / time-numoffset + +partial-time = time-hour ":" time-minute ":" time-second [ time-secfrac ] +full-date = date-fullyear "-" date-month "-" date-mday +full-time = partial-time time-offset + +;; Offset Date-Time + +offset-date-time = full-date time-delim full-time + +;; Local Date-Time + +local-date-time = full-date time-delim partial-time + +;; Local Date + +local-date = full-date + +;; Local Time + +local-time = partial-time + +;; Array + +array = array-open [ array-values ] ws-comment-newline array-close + +array-open = %x5B ; [ +array-close = %x5D ; ] + +array-values = ws-comment-newline val ws-comment-newline array-sep array-values +array-values =/ ws-comment-newline val ws-comment-newline [ array-sep ] + +array-sep = %x2C ; , Comma + +ws-comment-newline = *( wschar / [ comment ] newline ) + +;; Table + +table = std-table / array-table + +;; Standard Table + +std-table = std-table-open key std-table-close + +std-table-open = %x5B ws ; [ Left square bracket +std-table-close = ws %x5D ; ] Right square bracket + +;; Inline Table + +inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close + +inline-table-open = %x7B ws ; { +inline-table-close = ws %x7D ; } +inline-table-sep = ws %x2C ws ; , Comma + +inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ] + +;; Array Table + +array-table = array-table-open key array-table-close + +array-table-open = %x5B.5B ws ; [[ Double left square bracket +array-table-close = ws %x5D.5D ; ]] Double right square bracket + +;; Built-in ABNF terms, reproduced here for clarity + +ALPHA = %x41-5A / %x61-7A ; A-Z / a-z +DIGIT = %x30-39 ; 0-9 +HEXDIG = DIGIT / "A" / "B" / "C" / "D" / "E" / "F" diff --git a/vendor/github.com/pelletier/go-toml/v2/types.go b/vendor/github.com/pelletier/go-toml/v2/types.go new file mode 100644 index 00000000..3c6b8fe5 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/types.go @@ -0,0 +1,14 @@ +package toml + +import ( + "encoding" + "reflect" + "time" +) + +var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() +var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() +var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}(nil)) +var sliceInterfaceType = reflect.TypeOf([]interface{}(nil)) +var stringType = reflect.TypeOf("") diff --git a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go new file mode 100644 index 00000000..98231bae --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go @@ -0,0 +1,1311 @@ +package toml + +import ( + "encoding" + 
"errors" + "fmt" + "io" + "io/ioutil" + "math" + "reflect" + "strings" + "sync/atomic" + "time" + + "github.com/pelletier/go-toml/v2/internal/danger" + "github.com/pelletier/go-toml/v2/internal/tracker" + "github.com/pelletier/go-toml/v2/unstable" +) + +// Unmarshal deserializes a TOML document into a Go value. +// +// It is a shortcut for Decoder.Decode() with the default options. +func Unmarshal(data []byte, v interface{}) error { + p := unstable.Parser{} + p.Reset(data) + d := decoder{p: &p} + + return d.FromParser(v) +} + +// Decoder reads and decode a TOML document from an input stream. +type Decoder struct { + // input + r io.Reader + + // global settings + strict bool + + // toggles unmarshaler interface + unmarshalerInterface bool +} + +// NewDecoder creates a new Decoder that will read from r. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r} +} + +// DisallowUnknownFields causes the Decoder to return an error when the +// destination is a struct and the input contains a key that does not match a +// non-ignored field. +// +// In that case, the Decoder returns a StrictMissingError that can be used to +// retrieve the individual errors as well as generate a human readable +// description of the missing fields. +func (d *Decoder) DisallowUnknownFields() *Decoder { + d.strict = true + return d +} + +// EnableUnmarshalerInterface allows to enable unmarshaler interface. +// +// With this feature enabled, types implementing the unstable/Unmarshaler +// interface can be decoded from any structure of the document. It allows types +// that don't have a straightfoward TOML representation to provide their own +// decoding logic. +// +// Currently, types can only decode from a single value. Tables and array tables +// are not supported. +// +// *Unstable:* This method does not follow the compatibility guarantees of +// semver. It can be changed or removed without a new major version being +// issued. +func (d *Decoder) EnableUnmarshalerInterface() *Decoder { + d.unmarshalerInterface = true + return d +} + +// Decode the whole content of r into v. +// +// By default, values in the document that don't exist in the target Go value +// are ignored. See Decoder.DisallowUnknownFields() to change this behavior. +// +// When a TOML local date, time, or date-time is decoded into a time.Time, its +// value is represented in time.Local timezone. Otherwise the appropriate Local* +// structure is used. For time values, precision up to the nanosecond is +// supported by truncating extra digits. +// +// Empty tables decoded in an interface{} create an empty initialized +// map[string]interface{}. +// +// Types implementing the encoding.TextUnmarshaler interface are decoded from a +// TOML string. +// +// When decoding a number, go-toml will return an error if the number is out of +// bounds for the target type (which includes negative numbers when decoding +// into an unsigned int). +// +// If an error occurs while decoding the content of the document, this function +// returns a toml.DecodeError, providing context about the issue. When using +// strict mode and a field is missing, a `toml.StrictMissingError` is +// returned. In any other case, this function returns a standard Go error. 
+//
+// # Type mapping
+//
+// List of supported TOML types and their associated accepted Go types:
+//
+//	String           -> string
+//	Integer          -> uint*, int*, depending on size
+//	Float            -> float*, depending on size
+//	Boolean          -> bool
+//	Offset Date-Time -> time.Time
+//	Local Date-Time  -> LocalDateTime, time.Time
+//	Local Date       -> LocalDate, time.Time
+//	Local Time       -> LocalTime, time.Time
+//	Array            -> slice and array, depending on element types
+//	Table            -> map and struct
+//	Inline Table     -> same as Table
+//	Array of Tables  -> same as Array and Table
+func (d *Decoder) Decode(v interface{}) error {
+	b, err := ioutil.ReadAll(d.r)
+	if err != nil {
+		return fmt.Errorf("toml: %w", err)
+	}
+
+	p := unstable.Parser{}
+	p.Reset(b)
+	dec := decoder{
+		p: &p,
+		strict: strict{
+			Enabled: d.strict,
+		},
+		unmarshalerInterface: d.unmarshalerInterface,
+	}
+
+	return dec.FromParser(v)
+}
+
+type decoder struct {
+	// Which parser instance is in use for this decoding session.
+	p *unstable.Parser
+
+	// Flag indicating that the current expression is stashed.
+	// If set to true, calling nextExpr will not actually pull a new expression
+	// but turn off the flag instead.
+	stashedExpr bool
+
+	// Skip expressions until a table is found. This is set to true when a
+	// table could not be created (missing field in map), so all KV expressions
+	// need to be skipped.
+	skipUntilTable bool
+
+	// Flag indicating that the current array/slice table should be cleared
+	// because it is the first encounter of an array table.
+	clearArrayTable bool
+
+	// Tracks position in Go arrays.
+	// This is used when decoding [[array tables]] into Go arrays. Given that
+	// array tables are separate TOML expressions, we need to keep track of
+	// where we are at in the Go array, as we can't just introspect its size.
+	arrayIndexes map[reflect.Value]int
+
+	// Tracks keys that have been seen, with which type.
+	seen tracker.SeenTracker
+
+	// Strict mode
+	strict strict
+
+	// Flag that enables/disables unmarshaler interface.
+	unmarshalerInterface bool
+
+	// Current context for the error.
+ errorContext *errorContext +} + +type errorContext struct { + Struct reflect.Type + Field []int +} + +func (d *decoder) typeMismatchError(toml string, target reflect.Type) error { + return fmt.Errorf("toml: %s", d.typeMismatchString(toml, target)) +} + +func (d *decoder) typeMismatchString(toml string, target reflect.Type) string { + if d.errorContext != nil && d.errorContext.Struct != nil { + ctx := d.errorContext + f := ctx.Struct.FieldByIndex(ctx.Field) + return fmt.Sprintf("cannot decode TOML %s into struct field %s.%s of type %s", toml, ctx.Struct, f.Name, f.Type) + } + return fmt.Sprintf("cannot decode TOML %s into a Go value of type %s", toml, target) +} + +func (d *decoder) expr() *unstable.Node { + return d.p.Expression() +} + +func (d *decoder) nextExpr() bool { + if d.stashedExpr { + d.stashedExpr = false + return true + } + return d.p.NextExpression() +} + +func (d *decoder) stashExpr() { + d.stashedExpr = true +} + +func (d *decoder) arrayIndex(shouldAppend bool, v reflect.Value) int { + if d.arrayIndexes == nil { + d.arrayIndexes = make(map[reflect.Value]int, 1) + } + + idx, ok := d.arrayIndexes[v] + + if !ok { + d.arrayIndexes[v] = 0 + } else if shouldAppend { + idx++ + d.arrayIndexes[v] = idx + } + + return idx +} + +func (d *decoder) FromParser(v interface{}) error { + r := reflect.ValueOf(v) + if r.Kind() != reflect.Ptr { + return fmt.Errorf("toml: decoding can only be performed into a pointer, not %s", r.Kind()) + } + + if r.IsNil() { + return fmt.Errorf("toml: decoding pointer target cannot be nil") + } + + r = r.Elem() + if r.Kind() == reflect.Interface && r.IsNil() { + newMap := map[string]interface{}{} + r.Set(reflect.ValueOf(newMap)) + } + + err := d.fromParser(r) + if err == nil { + return d.strict.Error(d.p.Data()) + } + + var e *unstable.ParserError + if errors.As(err, &e) { + return wrapDecodeError(d.p.Data(), e) + } + + return err +} + +func (d *decoder) fromParser(root reflect.Value) error { + for d.nextExpr() { + err := d.handleRootExpression(d.expr(), root) + if err != nil { + return err + } + } + + return d.p.Error() +} + +/* +Rules for the unmarshal code: + +- The stack is used to keep track of which values need to be set where. +- handle* functions <=> switch on a given unstable.Kind. +- unmarshalX* functions need to unmarshal a node of kind X. +- An "object" is either a struct or a map. 
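+- Handlers return a new reflect.Value when the value they were handed had to
+  be replaced (for example a map created on first use); the caller is
+  responsible for storing it (see handleRootExpression calling v.Set).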
+*/
+
+func (d *decoder) handleRootExpression(expr *unstable.Node, v reflect.Value) error {
+	var x reflect.Value
+	var err error
+	var first bool // used to clear array tables on first use
+
+	if !(d.skipUntilTable && expr.Kind == unstable.KeyValue) {
+		first, err = d.seen.CheckExpression(expr)
+		if err != nil {
+			return err
+		}
+	}
+
+	switch expr.Kind {
+	case unstable.KeyValue:
+		if d.skipUntilTable {
+			return nil
+		}
+		x, err = d.handleKeyValue(expr, v)
+	case unstable.Table:
+		d.skipUntilTable = false
+		d.strict.EnterTable(expr)
+		x, err = d.handleTable(expr.Key(), v)
+	case unstable.ArrayTable:
+		d.skipUntilTable = false
+		d.strict.EnterArrayTable(expr)
+		d.clearArrayTable = first
+		x, err = d.handleArrayTable(expr.Key(), v)
+	default:
+		panic(fmt.Errorf("parser should not permit expression of kind %s at document root", expr.Kind))
+	}
+
+	if d.skipUntilTable {
+		if expr.Kind == unstable.Table || expr.Kind == unstable.ArrayTable {
+			d.strict.MissingTable(expr)
+		}
+	} else if err == nil && x.IsValid() {
+		v.Set(x)
+	}
+
+	return err
+}
+
+func (d *decoder) handleArrayTable(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
+	if key.Next() {
+		return d.handleArrayTablePart(key, v)
+	}
+	return d.handleKeyValues(v)
+}
+
+func (d *decoder) handleArrayTableCollectionLast(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
+	switch v.Kind() {
+	case reflect.Interface:
+		elem := v.Elem()
+		if !elem.IsValid() {
+			elem = reflect.New(sliceInterfaceType).Elem()
+			elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16))
+		} else if elem.Kind() == reflect.Slice {
+			if elem.Type() != sliceInterfaceType {
+				elem = reflect.New(sliceInterfaceType).Elem()
+				elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16))
+			} else if !elem.CanSet() {
+				nelem := reflect.New(sliceInterfaceType).Elem()
+				nelem.Set(reflect.MakeSlice(sliceInterfaceType, elem.Len(), elem.Cap()))
+				reflect.Copy(nelem, elem)
+				elem = nelem
+			}
+			if d.clearArrayTable && elem.Len() > 0 {
+				elem.SetLen(0)
+				d.clearArrayTable = false
+			}
+		}
+		return d.handleArrayTableCollectionLast(key, elem)
+	case reflect.Ptr:
+		elem := v.Elem()
+		if !elem.IsValid() {
+			ptr := reflect.New(v.Type().Elem())
+			v.Set(ptr)
+			elem = ptr.Elem()
+		}
+
+		elem, err := d.handleArrayTableCollectionLast(key, elem)
+		if err != nil {
+			return reflect.Value{}, err
+		}
+		v.Elem().Set(elem)
+
+		return v, nil
+	case reflect.Slice:
+		if d.clearArrayTable && v.Len() > 0 {
+			v.SetLen(0)
+			d.clearArrayTable = false
+		}
+		elemType := v.Type().Elem()
+		var elem reflect.Value
+		if elemType.Kind() == reflect.Interface {
+			elem = makeMapStringInterface()
+		} else {
+			elem = reflect.New(elemType).Elem()
+		}
+		elem2, err := d.handleArrayTable(key, elem)
+		if err != nil {
+			return reflect.Value{}, err
+		}
+		if elem2.IsValid() {
+			elem = elem2
+		}
+		return reflect.Append(v, elem), nil
+	case reflect.Array:
+		idx := d.arrayIndex(true, v)
+		if idx >= v.Len() {
+			return v, fmt.Errorf("%s at position %d", d.typeMismatchError("array table", v.Type()), idx)
+		}
+		elem := v.Index(idx)
+		_, err := d.handleArrayTable(key, elem)
+		return v, err
+	default:
+		return reflect.Value{}, d.typeMismatchError("array table", v.Type())
+	}
+}
+
+// When parsing an array table expression, each part of the key needs to be
+// evaluated like a normal key, but if it returns a collection, it also needs
+// to point to the last element of the collection. If it is the last part of
+// the key, it needs to create a new element at the end instead.
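+//
+// For example, while handling [[a.b]]: the part "a" resolves to the last
+// element of the existing collection under "a", while the final part "b"
+// appends a new element to the collection under "b".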
+func (d *decoder) handleArrayTableCollection(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { + if key.IsLast() { + return d.handleArrayTableCollectionLast(key, v) + } + + switch v.Kind() { + case reflect.Ptr: + elem := v.Elem() + if !elem.IsValid() { + ptr := reflect.New(v.Type().Elem()) + v.Set(ptr) + elem = ptr.Elem() + } + + elem, err := d.handleArrayTableCollection(key, elem) + if err != nil { + return reflect.Value{}, err + } + if elem.IsValid() { + v.Elem().Set(elem) + } + + return v, nil + case reflect.Slice: + elem := v.Index(v.Len() - 1) + x, err := d.handleArrayTable(key, elem) + if err != nil || d.skipUntilTable { + return reflect.Value{}, err + } + if x.IsValid() { + elem.Set(x) + } + + return v, err + case reflect.Array: + idx := d.arrayIndex(false, v) + if idx >= v.Len() { + return v, fmt.Errorf("%s at position %d", d.typeMismatchError("array table", v.Type()), idx) + } + elem := v.Index(idx) + _, err := d.handleArrayTable(key, elem) + return v, err + } + + return d.handleArrayTable(key, v) +} + +func (d *decoder) handleKeyPart(key unstable.Iterator, v reflect.Value, nextFn handlerFn, makeFn valueMakerFn) (reflect.Value, error) { + var rv reflect.Value + + // First, dispatch over v to make sure it is a valid object. + // There is no guarantee over what it could be. + switch v.Kind() { + case reflect.Ptr: + elem := v.Elem() + if !elem.IsValid() { + v.Set(reflect.New(v.Type().Elem())) + } + elem = v.Elem() + return d.handleKeyPart(key, elem, nextFn, makeFn) + case reflect.Map: + vt := v.Type() + + // Create the key for the map element. Convert to key type. + mk, err := d.keyFromData(vt.Key(), key.Node().Data) + if err != nil { + return reflect.Value{}, err + } + + // If the map does not exist, create it. + if v.IsNil() { + vt := v.Type() + v = reflect.MakeMap(vt) + rv = v + } + + mv := v.MapIndex(mk) + set := false + if !mv.IsValid() { + // If there is no value in the map, create a new one according to + // the map type. If the element type is interface, create either a + // map[string]interface{} or a []interface{} depending on whether + // this is the last part of the array table key. 
+ + t := vt.Elem() + if t.Kind() == reflect.Interface { + mv = makeFn() + } else { + mv = reflect.New(t).Elem() + } + set = true + } else if mv.Kind() == reflect.Interface { + mv = mv.Elem() + if !mv.IsValid() { + mv = makeFn() + } + set = true + } else if !mv.CanAddr() { + vt := v.Type() + t := vt.Elem() + oldmv := mv + mv = reflect.New(t).Elem() + mv.Set(oldmv) + set = true + } + + x, err := nextFn(key, mv) + if err != nil { + return reflect.Value{}, err + } + + if x.IsValid() { + mv = x + set = true + } + + if set { + v.SetMapIndex(mk, mv) + } + case reflect.Struct: + path, found := structFieldPath(v, string(key.Node().Data)) + if !found { + d.skipUntilTable = true + return reflect.Value{}, nil + } + + if d.errorContext == nil { + d.errorContext = new(errorContext) + } + t := v.Type() + d.errorContext.Struct = t + d.errorContext.Field = path + + f := fieldByIndex(v, path) + x, err := nextFn(key, f) + if err != nil || d.skipUntilTable { + return reflect.Value{}, err + } + if x.IsValid() { + f.Set(x) + } + d.errorContext.Field = nil + d.errorContext.Struct = nil + case reflect.Interface: + if v.Elem().IsValid() { + v = v.Elem() + } else { + v = makeMapStringInterface() + } + + x, err := d.handleKeyPart(key, v, nextFn, makeFn) + if err != nil { + return reflect.Value{}, err + } + if x.IsValid() { + v = x + } + rv = v + default: + panic(fmt.Errorf("unhandled part: %s", v.Kind())) + } + + return rv, nil +} + +// HandleArrayTablePart navigates the Go structure v using the given key. It is +// only used for the prefix (non-last) parts of an array-table. When +// encountering a collection, it should go to the last element. +func (d *decoder) handleArrayTablePart(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { + var makeFn valueMakerFn + if key.IsLast() { + makeFn = makeSliceInterface + } else { + makeFn = makeMapStringInterface + } + return d.handleKeyPart(key, v, d.handleArrayTableCollection, makeFn) +} + +// HandleTable returns a reference when it has checked the next expression but +// cannot handle it. +func (d *decoder) handleTable(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { + if v.Kind() == reflect.Slice { + if v.Len() == 0 { + return reflect.Value{}, unstable.NewParserError(key.Node().Data, "cannot store a table in a slice") + } + elem := v.Index(v.Len() - 1) + x, err := d.handleTable(key, elem) + if err != nil { + return reflect.Value{}, err + } + if x.IsValid() { + elem.Set(x) + } + return reflect.Value{}, nil + } + if key.Next() { + // Still scoping the key + return d.handleTablePart(key, v) + } + // Done scoping the key. + // Now handle all the key-value expressions in this table. + return d.handleKeyValues(v) +} + +// Handle key-value expressions until the end of the document or the next +// non-key-value expression. +func (d *decoder) handleKeyValues(v reflect.Value) (reflect.Value, error) { + var rv reflect.Value + for d.nextExpr() { + expr := d.expr() + if expr.Kind != unstable.KeyValue { + // Stash the expression so that fromParser can just loop and use + // the right handler. + // We could just recurse ourselves here, but at least this gives a + // chance to pop the stack a bit.
+ d.stashExpr() + break + } + + _, err := d.seen.CheckExpression(expr) + if err != nil { + return reflect.Value{}, err + } + + x, err := d.handleKeyValue(expr, v) + if err != nil { + return reflect.Value{}, err + } + if x.IsValid() { + v = x + rv = x + } + } + return rv, nil +} + +type ( + handlerFn func(key unstable.Iterator, v reflect.Value) (reflect.Value, error) + valueMakerFn func() reflect.Value +) + +func makeMapStringInterface() reflect.Value { + return reflect.MakeMap(mapStringInterfaceType) +} + +func makeSliceInterface() reflect.Value { + return reflect.MakeSlice(sliceInterfaceType, 0, 16) +} + +func (d *decoder) handleTablePart(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { + return d.handleKeyPart(key, v, d.handleTable, makeMapStringInterface) +} + +func (d *decoder) tryTextUnmarshaler(node *unstable.Node, v reflect.Value) (bool, error) { + // Special case for time, because we allow unmarshaling to it from + // different kinds of AST nodes. + if v.Type() == timeType { + return false, nil + } + + if v.CanAddr() && v.Addr().Type().Implements(textUnmarshalerType) { + err := v.Addr().Interface().(encoding.TextUnmarshaler).UnmarshalText(node.Data) + if err != nil { + return false, unstable.NewParserError(d.p.Raw(node.Raw), "%w", err) + } + + return true, nil + } + + return false, nil +} + +func (d *decoder) handleValue(value *unstable.Node, v reflect.Value) error { + for v.Kind() == reflect.Ptr { + v = initAndDereferencePointer(v) + } + + if d.unmarshalerInterface { + if v.CanAddr() && v.Addr().CanInterface() { + if outi, ok := v.Addr().Interface().(unstable.Unmarshaler); ok { + return outi.UnmarshalTOML(value) + } + } + } + + ok, err := d.tryTextUnmarshaler(value, v) + if ok || err != nil { + return err + } + + switch value.Kind { + case unstable.String: + return d.unmarshalString(value, v) + case unstable.Integer: + return d.unmarshalInteger(value, v) + case unstable.Float: + return d.unmarshalFloat(value, v) + case unstable.Bool: + return d.unmarshalBool(value, v) + case unstable.DateTime: + return d.unmarshalDateTime(value, v) + case unstable.LocalDate: + return d.unmarshalLocalDate(value, v) + case unstable.LocalTime: + return d.unmarshalLocalTime(value, v) + case unstable.LocalDateTime: + return d.unmarshalLocalDateTime(value, v) + case unstable.InlineTable: + return d.unmarshalInlineTable(value, v) + case unstable.Array: + return d.unmarshalArray(value, v) + default: + panic(fmt.Errorf("handleValue not implemented for %s", value.Kind)) + } +} + +func (d *decoder) unmarshalArray(array *unstable.Node, v reflect.Value) error { + switch v.Kind() { + case reflect.Slice: + if v.IsNil() { + v.Set(reflect.MakeSlice(v.Type(), 0, 16)) + } else { + v.SetLen(0) + } + case reflect.Array: + // arrays are always initialized + case reflect.Interface: + elem := v.Elem() + if !elem.IsValid() { + elem = reflect.New(sliceInterfaceType).Elem() + elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16)) + } else if elem.Kind() == reflect.Slice { + if elem.Type() != sliceInterfaceType { + elem = reflect.New(sliceInterfaceType).Elem() + elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16)) + } else if !elem.CanSet() { + nelem := reflect.New(sliceInterfaceType).Elem() + nelem.Set(reflect.MakeSlice(sliceInterfaceType, elem.Len(), elem.Cap())) + reflect.Copy(nelem, elem) + elem = nelem + } + } + err := d.unmarshalArray(array, elem) + if err != nil { + return err + } + v.Set(elem) + return nil + default: + // TODO: use newDecodeError, but first the parser needs to fill + // array.Data.
+ return d.typeMismatchError("array", v.Type()) + } + + elemType := v.Type().Elem() + + it := array.Children() + idx := 0 + for it.Next() { + n := it.Node() + + // TODO: optimize + if v.Kind() == reflect.Slice { + elem := reflect.New(elemType).Elem() + + err := d.handleValue(n, elem) + if err != nil { + return err + } + + v.Set(reflect.Append(v, elem)) + } else { // array + if idx >= v.Len() { + return nil + } + elem := v.Index(idx) + err := d.handleValue(n, elem) + if err != nil { + return err + } + idx++ + } + } + + return nil +} + +func (d *decoder) unmarshalInlineTable(itable *unstable.Node, v reflect.Value) error { + // Make sure v is an initialized object. + switch v.Kind() { + case reflect.Map: + if v.IsNil() { + v.Set(reflect.MakeMap(v.Type())) + } + case reflect.Struct: + // structs are always initialized. + case reflect.Interface: + elem := v.Elem() + if !elem.IsValid() { + elem = makeMapStringInterface() + v.Set(elem) + } + return d.unmarshalInlineTable(itable, elem) + default: + return unstable.NewParserError(d.p.Raw(itable.Raw), "cannot store inline table in Go type %s", v.Kind()) + } + + it := itable.Children() + for it.Next() { + n := it.Node() + + x, err := d.handleKeyValue(n, v) + if err != nil { + return err + } + if x.IsValid() { + v = x + } + } + + return nil +} + +func (d *decoder) unmarshalDateTime(value *unstable.Node, v reflect.Value) error { + dt, err := parseDateTime(value.Data) + if err != nil { + return err + } + + v.Set(reflect.ValueOf(dt)) + return nil +} + +func (d *decoder) unmarshalLocalDate(value *unstable.Node, v reflect.Value) error { + ld, err := parseLocalDate(value.Data) + if err != nil { + return err + } + + if v.Type() == timeType { + cast := ld.AsTime(time.Local) + v.Set(reflect.ValueOf(cast)) + return nil + } + + v.Set(reflect.ValueOf(ld)) + + return nil +} + +func (d *decoder) unmarshalLocalTime(value *unstable.Node, v reflect.Value) error { + lt, rest, err := parseLocalTime(value.Data) + if err != nil { + return err + } + + if len(rest) > 0 { + return unstable.NewParserError(rest, "extra characters at the end of a local time") + } + + v.Set(reflect.ValueOf(lt)) + return nil +} + +func (d *decoder) unmarshalLocalDateTime(value *unstable.Node, v reflect.Value) error { + ldt, rest, err := parseLocalDateTime(value.Data) + if err != nil { + return err + } + + if len(rest) > 0 { + return unstable.NewParserError(rest, "extra characters at the end of a local date time") + } + + if v.Type() == timeType { + cast := ldt.AsTime(time.Local) + + v.Set(reflect.ValueOf(cast)) + return nil + } + + v.Set(reflect.ValueOf(ldt)) + + return nil +} + +func (d *decoder) unmarshalBool(value *unstable.Node, v reflect.Value) error { + b := value.Data[0] == 't' + + switch v.Kind() { + case reflect.Bool: + v.SetBool(b) + case reflect.Interface: + v.Set(reflect.ValueOf(b)) + default: + return unstable.NewParserError(value.Data, "boolean cannot be assigned to %s", v.Kind()) + } + + return nil +} + +func (d *decoder) unmarshalFloat(value *unstable.Node, v reflect.Value) error { + f, err := parseFloat(value.Data) + if err != nil { + return err + } + + switch v.Kind() { + case reflect.Float64: + v.SetFloat(f) + case reflect.Float32: + if f > math.MaxFloat32 { + return unstable.NewParserError(value.Data, "number %f does not fit in a float32", f) + } + v.SetFloat(f) + case reflect.Interface: + v.Set(reflect.ValueOf(f)) + default: + return unstable.NewParserError(value.Data, "float cannot be assigned to %s", v.Kind()) + } + + return nil +} + +const ( + maxInt = int64(^uint(0) >> 1) + minInt
= -maxInt - 1 +) + +// Maximum value of uint for decoding. Currently the decoder parses the integer +// into an int64. As a result, on architectures where uint is 64 bits, the +// effective maximum uint we can decode is the maximum of int64. On +// architectures where uint is 32 bits, the maximum value we can decode is +// lower: the maximum of uint32. I didn't find a way to figure out this value at +// compile time, so it is computed during initialization. +var maxUint int64 = math.MaxInt64 + +func init() { + m := uint64(^uint(0)) + if m < uint64(maxUint) { + maxUint = int64(m) + } +} + +func (d *decoder) unmarshalInteger(value *unstable.Node, v reflect.Value) error { + kind := v.Kind() + if kind == reflect.Float32 || kind == reflect.Float64 { + return d.unmarshalFloat(value, v) + } + + i, err := parseInteger(value.Data) + if err != nil { + return err + } + + var r reflect.Value + + switch kind { + case reflect.Int64: + v.SetInt(i) + return nil + case reflect.Int32: + if i < math.MinInt32 || i > math.MaxInt32 { + return fmt.Errorf("toml: number %d does not fit in an int32", i) + } + + r = reflect.ValueOf(int32(i)) + case reflect.Int16: + if i < math.MinInt16 || i > math.MaxInt16 { + return fmt.Errorf("toml: number %d does not fit in an int16", i) + } + + r = reflect.ValueOf(int16(i)) + case reflect.Int8: + if i < math.MinInt8 || i > math.MaxInt8 { + return fmt.Errorf("toml: number %d does not fit in an int8", i) + } + + r = reflect.ValueOf(int8(i)) + case reflect.Int: + if i < minInt || i > maxInt { + return fmt.Errorf("toml: number %d does not fit in an int", i) + } + + r = reflect.ValueOf(int(i)) + case reflect.Uint64: + if i < 0 { + return fmt.Errorf("toml: negative number %d does not fit in a uint64", i) + } + + r = reflect.ValueOf(uint64(i)) + case reflect.Uint32: + if i < 0 || i > math.MaxUint32 { + return fmt.Errorf("toml: number %d does not fit in a uint32", i) + } + + r = reflect.ValueOf(uint32(i)) + case reflect.Uint16: + if i < 0 || i > math.MaxUint16 { + return fmt.Errorf("toml: number %d does not fit in a uint16", i) + } + + r = reflect.ValueOf(uint16(i)) + case reflect.Uint8: + if i < 0 || i > math.MaxUint8 { + return fmt.Errorf("toml: number %d does not fit in a uint8", i) + } + + r = reflect.ValueOf(uint8(i)) + case reflect.Uint: + if i < 0 || i > maxUint { + return fmt.Errorf("toml: number %d does not fit in a uint", i) + } + + r = reflect.ValueOf(uint(i)) + case reflect.Interface: + r = reflect.ValueOf(i) + default: + return unstable.NewParserError(d.p.Raw(value.Raw), d.typeMismatchString("integer", v.Type())) + } + + if !r.Type().AssignableTo(v.Type()) { + r = r.Convert(v.Type()) + } + + v.Set(r) + + return nil +} + +func (d *decoder) unmarshalString(value *unstable.Node, v reflect.Value) error { + switch v.Kind() { + case reflect.String: + v.SetString(string(value.Data)) + case reflect.Interface: + v.Set(reflect.ValueOf(string(value.Data))) + default: + return unstable.NewParserError(d.p.Raw(value.Raw), d.typeMismatchString("string", v.Type())) + } + + return nil +} + +func (d *decoder) handleKeyValue(expr *unstable.Node, v reflect.Value) (reflect.Value, error) { + d.strict.EnterKeyValue(expr) + + v, err := d.handleKeyValueInner(expr.Key(), expr.Value(), v) + if d.skipUntilTable { + d.strict.MissingField(expr) + d.skipUntilTable = false + } + + d.strict.ExitKeyValue(expr) + + return v, err +} + +func (d *decoder) handleKeyValueInner(key unstable.Iterator, value *unstable.Node, v reflect.Value) (reflect.Value, error) { + if
key.Next() { + // Still scoping the key + return d.handleKeyValuePart(key, value, v) + } + // Done scoping the key. + // v is whatever Go value we need to fill. + return reflect.Value{}, d.handleValue(value, v) +} + +func (d *decoder) keyFromData(keyType reflect.Type, data []byte) (reflect.Value, error) { + switch { + case stringType.AssignableTo(keyType): + return reflect.ValueOf(string(data)), nil + + case stringType.ConvertibleTo(keyType): + return reflect.ValueOf(string(data)).Convert(keyType), nil + + case keyType.Implements(textUnmarshalerType): + mk := reflect.New(keyType.Elem()) + if err := mk.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return reflect.Value{}, fmt.Errorf("toml: error unmarshalling key type %s from text: %w", keyType, err) + } + return mk, nil + + case reflect.PtrTo(keyType).Implements(textUnmarshalerType): + mk := reflect.New(keyType) + if err := mk.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return reflect.Value{}, fmt.Errorf("toml: error unmarshalling key type %s from text: %w", keyType, err) + } + return mk.Elem(), nil + } + return reflect.Value{}, fmt.Errorf("toml: cannot convert map key of type %s to expected type %s", stringType, keyType) +} + +func (d *decoder) handleKeyValuePart(key unstable.Iterator, value *unstable.Node, v reflect.Value) (reflect.Value, error) { + // contains the replacement for v + var rv reflect.Value + + // First, dispatch over v to make sure it is a valid object. + // There is no guarantee over what it could be. + switch v.Kind() { + case reflect.Map: + vt := v.Type() + + mk, err := d.keyFromData(vt.Key(), key.Node().Data) + if err != nil { + return reflect.Value{}, err + } + + // If the map does not exist, create it. + if v.IsNil() { + v = reflect.MakeMap(vt) + rv = v + } + + mv := v.MapIndex(mk) + set := false + if !mv.IsValid() || key.IsLast() { + set = true + mv = reflect.New(v.Type().Elem()).Elem() + } + + nv, err := d.handleKeyValueInner(key, value, mv) + if err != nil { + return reflect.Value{}, err + } + if nv.IsValid() { + mv = nv + set = true + } + + if set { + v.SetMapIndex(mk, mv) + } + case reflect.Struct: + path, found := structFieldPath(v, string(key.Node().Data)) + if !found { + d.skipUntilTable = true + break + } + + if d.errorContext == nil { + d.errorContext = new(errorContext) + } + t := v.Type() + d.errorContext.Struct = t + d.errorContext.Field = path + + f := fieldByIndex(v, path) + + if !f.CanAddr() { + // If the field is not addressable, need to take a slower path and + // make a copy of the struct itself to a new location. + nvp := reflect.New(v.Type()) + nvp.Elem().Set(v) + v = nvp.Elem() + _, err := d.handleKeyValuePart(key, value, v) + if err != nil { + return reflect.Value{}, err + } + return nvp.Elem(), nil + } + x, err := d.handleKeyValueInner(key, value, f) + if err != nil { + return reflect.Value{}, err + } + + if x.IsValid() { + f.Set(x) + } + d.errorContext.Struct = nil + d.errorContext.Field = nil + case reflect.Interface: + v = v.Elem() + + // Following encoding/json: when decoding an object into an + // interface{}, it needs to always hold a + // map[string]interface{}. This keeps the types + // consistent whether or not a previous value was set.
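+ // For example, decoding k = {a = 1} into an interface{} always + // stores a map[string]interface{} at "k", never a more specific + // map type.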
+ if !v.IsValid() || v.Type() != mapStringInterfaceType { + v = makeMapStringInterface() + } + + x, err := d.handleKeyValuePart(key, value, v) + if err != nil { + return reflect.Value{}, err + } + if x.IsValid() { + v = x + } + rv = v + case reflect.Ptr: + elem := v.Elem() + if !elem.IsValid() { + ptr := reflect.New(v.Type().Elem()) + v.Set(ptr) + rv = v + elem = ptr.Elem() + } + + elem2, err := d.handleKeyValuePart(key, value, elem) + if err != nil { + return reflect.Value{}, err + } + if elem2.IsValid() { + elem = elem2 + } + v.Elem().Set(elem) + default: + return reflect.Value{}, fmt.Errorf("unhandled kv part: %s", v.Kind()) + } + + return rv, nil +} + +func initAndDereferencePointer(v reflect.Value) reflect.Value { + var elem reflect.Value + if v.IsNil() { + ptr := reflect.New(v.Type().Elem()) + v.Set(ptr) + } + elem = v.Elem() + return elem +} + +// Same as reflect.Value.FieldByIndex, but creates pointers if needed. +func fieldByIndex(v reflect.Value, path []int) reflect.Value { + for _, x := range path { + v = v.Field(x) + + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + } + return v +} + +type fieldPathsMap = map[string][]int + +var globalFieldPathsCache atomic.Value // map[danger.TypeID]fieldPathsMap + +func structFieldPath(v reflect.Value, name string) ([]int, bool) { + t := v.Type() + + cache, _ := globalFieldPathsCache.Load().(map[danger.TypeID]fieldPathsMap) + fieldPaths, ok := cache[danger.MakeTypeID(t)] + + if !ok { + fieldPaths = map[string][]int{} + + forEachField(t, nil, func(name string, path []int) { + fieldPaths[name] = path + // extra copy for the case-insensitive match + fieldPaths[strings.ToLower(name)] = path + }) + + newCache := make(map[danger.TypeID]fieldPathsMap, len(cache)+1) + newCache[danger.MakeTypeID(t)] = fieldPaths + for k, v := range cache { + newCache[k] = v + } + globalFieldPathsCache.Store(newCache) + } + + path, ok := fieldPaths[name] + if !ok { + path, ok = fieldPaths[strings.ToLower(name)] + } + return path, ok +} + +func forEachField(t reflect.Type, path []int, do func(name string, path []int)) { + n := t.NumField() + for i := 0; i < n; i++ { + f := t.Field(i) + + if !f.Anonymous && f.PkgPath != "" { + // only consider exported fields. 
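+ // (anonymous fields are kept so embedded structs can still be + // flattened below)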
+ continue + } + + fieldPath := append(path, i) + fieldPath = fieldPath[:len(fieldPath):len(fieldPath)] + + name := f.Tag.Get("toml") + if name == "-" { + continue + } + + if i := strings.IndexByte(name, ','); i >= 0 { + name = name[:i] + } + + if f.Anonymous && name == "" { + t2 := f.Type + if t2.Kind() == reflect.Ptr { + t2 = t2.Elem() + } + + if t2.Kind() == reflect.Struct { + forEachField(t2, fieldPath, do) + } + continue + } + + if name == "" { + name = f.Name + } + + do(name, fieldPath) + } +} diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/BUILD.bazel b/vendor/github.com/pelletier/go-toml/v2/unstable/BUILD.bazel new file mode 100644 index 00000000..6b98cef7 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/unstable/BUILD.bazel @@ -0,0 +1,21 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "unstable", + srcs = [ + "ast.go", + "builder.go", + "doc.go", + "kind.go", + "parser.go", + "scanner.go", + "unmarshaler.go", + ], + importmap = "peridot.resf.org/vendor/github.com/pelletier/go-toml/v2/unstable", + importpath = "github.com/pelletier/go-toml/v2/unstable", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/pelletier/go-toml/v2/internal/characters", + "//vendor/github.com/pelletier/go-toml/v2/internal/danger", + ], +) diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go b/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go new file mode 100644 index 00000000..f526bf2c --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go @@ -0,0 +1,136 @@ +package unstable + +import ( + "fmt" + "unsafe" + + "github.com/pelletier/go-toml/v2/internal/danger" +) + +// Iterator over a sequence of nodes. +// +// Starts uninitialized; you need to call Next() first. +// +// For example: +// +// it := n.Children() +// for it.Next() { +// n := it.Node() +// // do something with n +// } +type Iterator struct { + started bool + node *Node +} + +// Next moves the iterator forward and returns true if it points to a +// node, false otherwise. +func (c *Iterator) Next() bool { + if !c.started { + c.started = true + } else if c.node.Valid() { + c.node = c.node.Next() + } + return c.node.Valid() +} + +// IsLast returns true if the current node of the iterator is the last +// one. Subsequent calls to Next() will return false. +func (c *Iterator) IsLast() bool { + return c.node.next == 0 +} + +// Node returns a pointer to the node pointed at by the iterator. +func (c *Iterator) Node() *Node { + return c.node +} + +// Node in a TOML expression AST. +// +// Depending on Kind, its sequence of children should be interpreted +// differently. +// +// - Array have one child per element in the array. +// - InlineTable have one child per key-value in the table (each of kind +// KeyValue). +// - KeyValue have at least two children. The first one is the value. The rest +// make a potentially dotted key. +// - Table and ArrayTable's children represent a dotted key (same as +// KeyValue, but without the first node being the value). +// +// When relevant, Raw describes the range of bytes this node is referring to in +// the input document. Use Parser.Raw() to retrieve the actual bytes. +type Node struct { + Kind Kind + Raw Range // Raw bytes from the input. + Data []byte // Node value (either allocated or referencing the input). + + // References to other nodes, as offsets in the backing array + // from this node. References can go backward, so those can be + // negative.
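+ // Next and Child turn these offsets back into pointers by striding + // over the flat backing array of nodes.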
+ next int // 0 if last element + child int // 0 if no child +} + +// Range of bytes in the document. +type Range struct { + Offset uint32 + Length uint32 +} + +// Next returns a pointer to the next node, or nil if there is no next node. +func (n *Node) Next() *Node { + if n.next == 0 { + return nil + } + ptr := unsafe.Pointer(n) + size := unsafe.Sizeof(Node{}) + return (*Node)(danger.Stride(ptr, size, n.next)) +} + +// Child returns a pointer to the first child node of this node. Other children +// can be accessed by calling Next on the first child. Returns nil if this Node +// has no child. +func (n *Node) Child() *Node { + if n.child == 0 { + return nil + } + ptr := unsafe.Pointer(n) + size := unsafe.Sizeof(Node{}) + return (*Node)(danger.Stride(ptr, size, n.child)) +} + +// Valid returns true if the node is not nil. +func (n *Node) Valid() bool { + return n != nil +} + +// Key returns the child nodes making up the key of a supported node. Panics +// otherwise. They are guaranteed to all be of Kind Key. A simple key +// would return just one element. +func (n *Node) Key() Iterator { + switch n.Kind { + case KeyValue: + value := n.Child() + if !value.Valid() { + panic(fmt.Errorf("KeyValue should have at least two children")) + } + return Iterator{node: value.Next()} + case Table, ArrayTable: + return Iterator{node: n.Child()} + default: + panic(fmt.Errorf("Key() is not supported on a %s", n.Kind)) + } +} + +// Value returns a pointer to the value node of a KeyValue. +// Guaranteed to be non-nil. Panics if not called on a KeyValue node, +// or if the Children are malformed. +func (n *Node) Value() *Node { + return n.Child() +} + +// Children returns an iterator over a node's children. +func (n *Node) Children() Iterator { + return Iterator{node: n.Child()} +} diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go b/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go new file mode 100644 index 00000000..9538e30d --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go @@ -0,0 +1,71 @@ +package unstable + +// root contains a full AST. +// +// It is immutable once constructed with Builder. +type root struct { + nodes []Node +} + +// Iterator over the top level nodes.
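+// The iterator starts uninitialized: call Next() before Node().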
+func (r *root) Iterator() Iterator { + it := Iterator{} + if len(r.nodes) > 0 { + it.node = &r.nodes[0] + } + return it +} + +func (r *root) at(idx reference) *Node { + return &r.nodes[idx] +} + +type reference int + +const invalidReference reference = -1 + +func (r reference) Valid() bool { + return r != invalidReference +} + +type builder struct { + tree root + lastIdx int +} + +func (b *builder) Tree() *root { + return &b.tree +} + +func (b *builder) NodeAt(ref reference) *Node { + return b.tree.at(ref) +} + +func (b *builder) Reset() { + b.tree.nodes = b.tree.nodes[:0] + b.lastIdx = 0 +} + +func (b *builder) Push(n Node) reference { + b.lastIdx = len(b.tree.nodes) + b.tree.nodes = append(b.tree.nodes, n) + return reference(b.lastIdx) +} + +func (b *builder) PushAndChain(n Node) reference { + newIdx := len(b.tree.nodes) + b.tree.nodes = append(b.tree.nodes, n) + if b.lastIdx >= 0 { + b.tree.nodes[b.lastIdx].next = newIdx - b.lastIdx + } + b.lastIdx = newIdx + return reference(b.lastIdx) +} + +func (b *builder) AttachChild(parent reference, child reference) { + b.tree.nodes[parent].child = int(child) - int(parent) +} + +func (b *builder) Chain(from reference, to reference) { + b.tree.nodes[from].next = int(to) - int(from) +} diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go b/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go new file mode 100644 index 00000000..7ff26c53 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go @@ -0,0 +1,3 @@ +// Package unstable provides APIs that do not meet the backward compatibility +// guarantees yet. +package unstable diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go b/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go new file mode 100644 index 00000000..ff9df1be --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go @@ -0,0 +1,71 @@ +package unstable + +import "fmt" + +// Kind represents the type of TOML structure contained in a given Node. +type Kind int + +const ( + // Meta + Invalid Kind = iota + Comment + Key + + // Top level structures + Table + ArrayTable + KeyValue + + // Containers values + Array + InlineTable + + // Values + String + Bool + Float + Integer + LocalDate + LocalTime + LocalDateTime + DateTime +) + +// String implementation of fmt.Stringer. +func (k Kind) String() string { + switch k { + case Invalid: + return "Invalid" + case Comment: + return "Comment" + case Key: + return "Key" + case Table: + return "Table" + case ArrayTable: + return "ArrayTable" + case KeyValue: + return "KeyValue" + case Array: + return "Array" + case InlineTable: + return "InlineTable" + case String: + return "String" + case Bool: + return "Bool" + case Float: + return "Float" + case Integer: + return "Integer" + case LocalDate: + return "LocalDate" + case LocalTime: + return "LocalTime" + case LocalDateTime: + return "LocalDateTime" + case DateTime: + return "DateTime" + } + panic(fmt.Errorf("Kind.String() not implemented for '%d'", k)) +} diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go b/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go new file mode 100644 index 00000000..50358a44 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go @@ -0,0 +1,1245 @@ +package unstable + +import ( + "bytes" + "fmt" + "unicode" + + "github.com/pelletier/go-toml/v2/internal/characters" + "github.com/pelletier/go-toml/v2/internal/danger" +) + +// ParserError describes an error relative to the content of the document. 
+// +// It cannot outlive the instance of Parser it refers to, and may cause panics +// if the parser is reset. +type ParserError struct { + Highlight []byte + Message string + Key []string // optional +} + +// Error is the implementation of the error interface. +func (e *ParserError) Error() string { + return e.Message +} + +// NewParserError is a convenience function to create a ParserError. +// +// Warning: Highlight needs to be a subslice of Parser.data, so only slices +// returned by Parser.Raw are valid candidates. +func NewParserError(highlight []byte, format string, args ...interface{}) error { + return &ParserError{ + Highlight: highlight, + Message: fmt.Errorf(format, args...).Error(), + } +} + +// Parser scans over a TOML-encoded document and generates an iterative AST. +// +// To prime the Parser, first reset it with the contents of a TOML document. +// Then, process all top-level expressions sequentially. See Example. +// +// Don't forget to check Error() after you're done parsing. +// +// Each top-level expression needs to be fully processed before calling +// NextExpression() again. Otherwise, calls to various Node methods may panic if +// the parser has moved on to the next expression. +// +// For performance reasons, go-toml doesn't make a copy of the input bytes given +// to the parser. Make sure to copy all the bytes you need to outlive the slice +// given to the parser. +type Parser struct { + data []byte + builder builder + ref reference + left []byte + err error + first bool + + KeepComments bool +} + +// Data returns the slice provided to the last call to Reset. +func (p *Parser) Data() []byte { + return p.data +} + +// Range returns a range description that corresponds to a given slice of the +// input. If the argument is not a subslice of the parser input, this function +// panics. +func (p *Parser) Range(b []byte) Range { + return Range{ + Offset: uint32(danger.SubsliceOffset(p.data, b)), + Length: uint32(len(b)), + } +} + +// Raw returns the slice corresponding to the bytes in the given range. +func (p *Parser) Raw(raw Range) []byte { + return p.data[raw.Offset : raw.Offset+raw.Length] +} + +// Reset brings the parser to its initial state for a given input. It wipes and +// reuses internal storage to reduce allocations. +func (p *Parser) Reset(b []byte) { + p.builder.Reset() + p.ref = invalidReference + p.data = b + p.left = b + p.err = nil + p.first = true +} + +// NextExpression parses the next top-level expression. If an expression was +// successfully parsed, it returns true. If the parser is at the end of the +// document or an error occurred, it returns false. +// +// Retrieve the parsed expression with Expression(). +func (p *Parser) NextExpression() bool { + if len(p.left) == 0 || p.err != nil { + return false + } + + p.builder.Reset() + p.ref = invalidReference + + for { + if len(p.left) == 0 || p.err != nil { + return false + } + + if !p.first { + p.left, p.err = p.parseNewline(p.left) + } + + if len(p.left) == 0 || p.err != nil { + return false + } + + p.ref, p.left, p.err = p.parseExpression(p.left) + + if p.err != nil { + return false + } + + p.first = false + + if p.ref.Valid() { + return true + } + } +} + +// Expression returns a pointer to the node representing the last successfully +// parsed expression. +func (p *Parser) Expression() *Node { + return p.builder.NodeAt(p.ref) +} + +// Error returns any error that has occurred during parsing. +func (p *Parser) Error() error { + return p.err +} + +// Position describes a position in the input.
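+// Offset and Column are expressed in bytes, not runes.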
+type Position struct { + // Number of bytes from the beginning of the input. + Offset int + // Line number, starting at 1. + Line int + // Column number, starting at 1. + Column int +} + +// Shape describes the position of a range in the input. +type Shape struct { + Start Position + End Position +} + +func (p *Parser) position(b []byte) Position { + offset := danger.SubsliceOffset(p.data, b) + + lead := p.data[:offset] + + return Position{ + Offset: offset, + Line: bytes.Count(lead, []byte{'\n'}) + 1, + Column: len(lead) - bytes.LastIndex(lead, []byte{'\n'}), + } +} + +// Shape returns the shape of the given range in the input. Will +// panic if the range is not a subslice of the input. +func (p *Parser) Shape(r Range) Shape { + raw := p.Raw(r) + return Shape{ + Start: p.position(raw), + End: p.position(raw[r.Length:]), + } +} + +func (p *Parser) parseNewline(b []byte) ([]byte, error) { + if b[0] == '\n' { + return b[1:], nil + } + + if b[0] == '\r' { + _, rest, err := scanWindowsNewline(b) + return rest, err + } + + return nil, NewParserError(b[0:1], "expected newline but got %#U", b[0]) +} + +func (p *Parser) parseComment(b []byte) (reference, []byte, error) { + ref := invalidReference + data, rest, err := scanComment(b) + if p.KeepComments && err == nil { + ref = p.builder.Push(Node{ + Kind: Comment, + Raw: p.Range(data), + Data: data, + }) + } + return ref, rest, err +} + +func (p *Parser) parseExpression(b []byte) (reference, []byte, error) { + // expression = ws [ comment ] + // expression =/ ws keyval ws [ comment ] + // expression =/ ws table ws [ comment ] + ref := invalidReference + + b = p.parseWhitespace(b) + + if len(b) == 0 { + return ref, b, nil + } + + if b[0] == '#' { + ref, rest, err := p.parseComment(b) + return ref, rest, err + } + + if b[0] == '\n' || b[0] == '\r' { + return ref, b, nil + } + + var err error + if b[0] == '[' { + ref, b, err = p.parseTable(b) + } else { + ref, b, err = p.parseKeyval(b) + } + + if err != nil { + return ref, nil, err + } + + b = p.parseWhitespace(b) + + if len(b) > 0 && b[0] == '#' { + cref, rest, err := p.parseComment(b) + if cref != invalidReference { + p.builder.Chain(ref, cref) + } + return ref, rest, err + } + + return ref, b, nil +} + +func (p *Parser) parseTable(b []byte) (reference, []byte, error) { + // table = std-table / array-table + if len(b) > 1 && b[1] == '[' { + return p.parseArrayTable(b) + } + + return p.parseStdTable(b) +} + +func (p *Parser) parseArrayTable(b []byte) (reference, []byte, error) { + // array-table = array-table-open key array-table-close + // array-table-open = %x5B.5B ws ; [[ Double left square bracket + // array-table-close = ws %x5D.5D ; ]] Double right square bracket + ref := p.builder.Push(Node{ + Kind: ArrayTable, + }) + + b = b[2:] + b = p.parseWhitespace(b) + + k, b, err := p.parseKey(b) + if err != nil { + return ref, nil, err + } + + p.builder.AttachChild(ref, k) + b = p.parseWhitespace(b) + + b, err = expect(']', b) + if err != nil { + return ref, nil, err + } + + b, err = expect(']', b) + + return ref, b, err +} + +func (p *Parser) parseStdTable(b []byte) (reference, []byte, error) { + // std-table = std-table-open key std-table-close + // std-table-open = %x5B ws ; [ Left square bracket + // std-table-close = ws %x5D ; ] Right square bracket + ref := p.builder.Push(Node{ + Kind: Table, + }) + + b = b[1:] + b = p.parseWhitespace(b) + + key, b, err := p.parseKey(b) + if err != nil { + return ref, nil, err + } + + p.builder.AttachChild(ref, key) + + b = p.parseWhitespace(b) + + b, err = 
expect(']', b) + + return ref, b, err +} + +func (p *Parser) parseKeyval(b []byte) (reference, []byte, error) { + // keyval = key keyval-sep val + ref := p.builder.Push(Node{ + Kind: KeyValue, + }) + + key, b, err := p.parseKey(b) + if err != nil { + return invalidReference, nil, err + } + + // keyval-sep = ws %x3D ws ; = + + b = p.parseWhitespace(b) + + if len(b) == 0 { + return invalidReference, nil, NewParserError(b, "expected = after a key, but the document ends there") + } + + b, err = expect('=', b) + if err != nil { + return invalidReference, nil, err + } + + b = p.parseWhitespace(b) + + valRef, b, err := p.parseVal(b) + if err != nil { + return ref, b, err + } + + p.builder.Chain(valRef, key) + p.builder.AttachChild(ref, valRef) + + return ref, b, err +} + +//nolint:cyclop,funlen +func (p *Parser) parseVal(b []byte) (reference, []byte, error) { + // val = string / boolean / array / inline-table / date-time / float / integer + ref := invalidReference + + if len(b) == 0 { + return ref, nil, NewParserError(b, "expected value, not eof") + } + + var err error + c := b[0] + + switch c { + case '"': + var raw []byte + var v []byte + if scanFollowsMultilineBasicStringDelimiter(b) { + raw, v, b, err = p.parseMultilineBasicString(b) + } else { + raw, v, b, err = p.parseBasicString(b) + } + + if err == nil { + ref = p.builder.Push(Node{ + Kind: String, + Raw: p.Range(raw), + Data: v, + }) + } + + return ref, b, err + case '\'': + var raw []byte + var v []byte + if scanFollowsMultilineLiteralStringDelimiter(b) { + raw, v, b, err = p.parseMultilineLiteralString(b) + } else { + raw, v, b, err = p.parseLiteralString(b) + } + + if err == nil { + ref = p.builder.Push(Node{ + Kind: String, + Raw: p.Range(raw), + Data: v, + }) + } + + return ref, b, err + case 't': + if !scanFollowsTrue(b) { + return ref, nil, NewParserError(atmost(b, 4), "expected 'true'") + } + + ref = p.builder.Push(Node{ + Kind: Bool, + Data: b[:4], + }) + + return ref, b[4:], nil + case 'f': + if !scanFollowsFalse(b) { + return ref, nil, NewParserError(atmost(b, 5), "expected 'false'") + } + + ref = p.builder.Push(Node{ + Kind: Bool, + Data: b[:5], + }) + + return ref, b[5:], nil + case '[': + return p.parseValArray(b) + case '{': + return p.parseInlineTable(b) + default: + return p.parseIntOrFloatOrDateTime(b) + } +} + +func atmost(b []byte, n int) []byte { + if n >= len(b) { + return b + } + + return b[:n] +} + +func (p *Parser) parseLiteralString(b []byte) ([]byte, []byte, []byte, error) { + v, rest, err := scanLiteralString(b) + if err != nil { + return nil, nil, nil, err + } + + return v, v[1 : len(v)-1], rest, nil +} + +func (p *Parser) parseInlineTable(b []byte) (reference, []byte, error) { + // inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close + // inline-table-open = %x7B ws ; { + // inline-table-close = ws %x7D ; } + // inline-table-sep = ws %x2C ws ; , Comma + // inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ] + parent := p.builder.Push(Node{ + Kind: InlineTable, + Raw: p.Range(b[:1]), + }) + + first := true + + var child reference + + b = b[1:] + + var err error + + for len(b) > 0 { + previousB := b + b = p.parseWhitespace(b) + + if len(b) == 0 { + return parent, nil, NewParserError(previousB[:1], "inline table is incomplete") + } + + if b[0] == '}' { + break + } + + if !first { + b, err = expect(',', b) + if err != nil { + return parent, nil, err + } + b = p.parseWhitespace(b) + } + + var kv reference + + kv, b, err = p.parseKeyval(b) + if err != nil { + return 
parent, nil, err + } + + if first { + p.builder.AttachChild(parent, kv) + } else { + p.builder.Chain(child, kv) + } + child = kv + + first = false + } + + rest, err := expect('}', b) + + return parent, rest, err +} + +//nolint:funlen,cyclop +func (p *Parser) parseValArray(b []byte) (reference, []byte, error) { + // array = array-open [ array-values ] ws-comment-newline array-close + // array-open = %x5B ; [ + // array-close = %x5D ; ] + // array-values = ws-comment-newline val ws-comment-newline array-sep array-values + // array-values =/ ws-comment-newline val ws-comment-newline [ array-sep ] + // array-sep = %x2C ; , Comma + // ws-comment-newline = *( wschar / [ comment ] newline ) + arrayStart := b + b = b[1:] + + parent := p.builder.Push(Node{ + Kind: Array, + }) + + // First indicates whether the parser is looking for the first element + // (non-comment) of the array. + first := true + + lastChild := invalidReference + + addChild := func(valueRef reference) { + if lastChild == invalidReference { + p.builder.AttachChild(parent, valueRef) + } else { + p.builder.Chain(lastChild, valueRef) + } + lastChild = valueRef + } + + var err error + for len(b) > 0 { + cref := invalidReference + cref, b, err = p.parseOptionalWhitespaceCommentNewline(b) + if err != nil { + return parent, nil, err + } + + if cref != invalidReference { + addChild(cref) + } + + if len(b) == 0 { + return parent, nil, NewParserError(arrayStart[:1], "array is incomplete") + } + + if b[0] == ']' { + break + } + + if b[0] == ',' { + if first { + return parent, nil, NewParserError(b[0:1], "array cannot start with comma") + } + b = b[1:] + + cref, b, err = p.parseOptionalWhitespaceCommentNewline(b) + if err != nil { + return parent, nil, err + } + if cref != invalidReference { + addChild(cref) + } + } else if !first { + return parent, nil, NewParserError(b[0:1], "array elements must be separated by commas") + } + + // TOML allows trailing commas in arrays. 
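+ // For example, [1, 2,] is equivalent to [1, 2].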
+ if len(b) > 0 && b[0] == ']' { + break + } + + var valueRef reference + valueRef, b, err = p.parseVal(b) + if err != nil { + return parent, nil, err + } + + addChild(valueRef) + + cref, b, err = p.parseOptionalWhitespaceCommentNewline(b) + if err != nil { + return parent, nil, err + } + if cref != invalidReference { + addChild(cref) + } + + first = false + } + + rest, err := expect(']', b) + + return parent, rest, err +} + +func (p *Parser) parseOptionalWhitespaceCommentNewline(b []byte) (reference, []byte, error) { + rootCommentRef := invalidReference + latestCommentRef := invalidReference + + addComment := func(ref reference) { + if rootCommentRef == invalidReference { + rootCommentRef = ref + } else if latestCommentRef == invalidReference { + p.builder.AttachChild(rootCommentRef, ref) + latestCommentRef = ref + } else { + p.builder.Chain(latestCommentRef, ref) + latestCommentRef = ref + } + } + + for len(b) > 0 { + var err error + b = p.parseWhitespace(b) + + if len(b) > 0 && b[0] == '#' { + var ref reference + ref, b, err = p.parseComment(b) + if err != nil { + return invalidReference, nil, err + } + if ref != invalidReference { + addComment(ref) + } + } + + if len(b) == 0 { + break + } + + if b[0] == '\n' || b[0] == '\r' { + b, err = p.parseNewline(b) + if err != nil { + return invalidReference, nil, err + } + } else { + break + } + } + + return rootCommentRef, b, nil +} + +func (p *Parser) parseMultilineLiteralString(b []byte) ([]byte, []byte, []byte, error) { + token, rest, err := scanMultilineLiteralString(b) + if err != nil { + return nil, nil, nil, err + } + + i := 3 + + // skip the immediate new line + if token[i] == '\n' { + i++ + } else if token[i] == '\r' && token[i+1] == '\n' { + i += 2 + } + + return token, token[i : len(token)-3], rest, err +} + +//nolint:funlen,gocognit,cyclop +func (p *Parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, error) { + // ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body + // ml-basic-string-delim + // ml-basic-string-delim = 3quotation-mark + // ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ] + // + // mlb-content = mlb-char / newline / mlb-escaped-nl + // mlb-char = mlb-unescaped / escaped + // mlb-quotes = 1*2quotation-mark + // mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii + // mlb-escaped-nl = escape ws newline *( wschar / newline ) + token, escaped, rest, err := scanMultilineBasicString(b) + if err != nil { + return nil, nil, nil, err + } + + i := 3 + + // skip the immediate new line + if token[i] == '\n' { + i++ + } else if token[i] == '\r' && token[i+1] == '\n' { + i += 2 + } + + // fast path + startIdx := i + endIdx := len(token) - len(`"""`) + + if !escaped { + str := token[startIdx:endIdx] + verr := characters.Utf8TomlValidAlreadyEscaped(str) + if verr.Zero() { + return token, str, rest, nil + } + return nil, nil, nil, NewParserError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") + } + + var builder bytes.Buffer + + // The scanner ensures that the token starts and ends with quotes and that + // escapes are balanced. + for i < len(token)-3 { + c := token[i] + + //nolint:nestif + if c == '\\' { + // When the last non-whitespace character on a line is an unescaped \, + // it will be trimmed along with all whitespace (including newlines) up + // to the next non-whitespace character or closing delimiter. 
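+ // For example, "line one \" followed on the next line by "line two" + // decodes to "line one line two".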
+ + isLastNonWhitespaceOnLine := false + j := 1 + findEOLLoop: + for ; j < len(token)-3-i; j++ { + switch token[i+j] { + case ' ', '\t': + continue + case '\r': + if token[i+j+1] == '\n' { + continue + } + case '\n': + isLastNonWhitespaceOnLine = true + } + break findEOLLoop + } + if isLastNonWhitespaceOnLine { + i += j + for ; i < len(token)-3; i++ { + c := token[i] + if !(c == '\n' || c == '\r' || c == ' ' || c == '\t') { + i-- + break + } + } + i++ + continue + } + + // handle escaping + i++ + c = token[i] + + switch c { + case '"', '\\': + builder.WriteByte(c) + case 'b': + builder.WriteByte('\b') + case 'f': + builder.WriteByte('\f') + case 'n': + builder.WriteByte('\n') + case 'r': + builder.WriteByte('\r') + case 't': + builder.WriteByte('\t') + case 'e': + builder.WriteByte(0x1B) + case 'u': + x, err := hexToRune(atmost(token[i+1:], 4), 4) + if err != nil { + return nil, nil, nil, err + } + builder.WriteRune(x) + i += 4 + case 'U': + x, err := hexToRune(atmost(token[i+1:], 8), 8) + if err != nil { + return nil, nil, nil, err + } + + builder.WriteRune(x) + i += 8 + default: + return nil, nil, nil, NewParserError(token[i:i+1], "invalid escaped character %#U", c) + } + i++ + } else { + size := characters.Utf8ValidNext(token[i:]) + if size == 0 { + return nil, nil, nil, NewParserError(token[i:i+1], "invalid character %#U", c) + } + builder.Write(token[i : i+size]) + i += size + } + } + + return token, builder.Bytes(), rest, nil +} + +func (p *Parser) parseKey(b []byte) (reference, []byte, error) { + // key = simple-key / dotted-key + // simple-key = quoted-key / unquoted-key + // + // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ + // quoted-key = basic-string / literal-string + // dotted-key = simple-key 1*( dot-sep simple-key ) + // + // dot-sep = ws %x2E ws ; . Period + raw, key, b, err := p.parseSimpleKey(b) + if err != nil { + return invalidReference, nil, err + } + + ref := p.builder.Push(Node{ + Kind: Key, + Raw: p.Range(raw), + Data: key, + }) + + for { + b = p.parseWhitespace(b) + if len(b) > 0 && b[0] == '.' 
{ + b = p.parseWhitespace(b[1:]) + + raw, key, b, err = p.parseSimpleKey(b) + if err != nil { + return ref, nil, err + } + + p.builder.PushAndChain(Node{ + Kind: Key, + Raw: p.Range(raw), + Data: key, + }) + } else { + break + } + } + + return ref, b, nil +} + +func (p *Parser) parseSimpleKey(b []byte) (raw, key, rest []byte, err error) { + if len(b) == 0 { + return nil, nil, nil, NewParserError(b, "expected key but found none") + } + + // simple-key = quoted-key / unquoted-key + // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ + // quoted-key = basic-string / literal-string + switch { + case b[0] == '\'': + return p.parseLiteralString(b) + case b[0] == '"': + return p.parseBasicString(b) + case isUnquotedKeyChar(b[0]): + key, rest = scanUnquotedKey(b) + return key, key, rest, nil + default: + return nil, nil, nil, NewParserError(b[0:1], "invalid character at start of key: %c", b[0]) + } +} + +//nolint:funlen,cyclop +func (p *Parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) { + // basic-string = quotation-mark *basic-char quotation-mark + // quotation-mark = %x22 ; " + // basic-char = basic-unescaped / escaped + // basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii + // escaped = escape escape-seq-char + // escape-seq-char = %x22 ; " quotation mark U+0022 + // escape-seq-char =/ %x5C ; \ reverse solidus U+005C + // escape-seq-char =/ %x62 ; b backspace U+0008 + // escape-seq-char =/ %x66 ; f form feed U+000C + // escape-seq-char =/ %x6E ; n line feed U+000A + // escape-seq-char =/ %x72 ; r carriage return U+000D + // escape-seq-char =/ %x74 ; t tab U+0009 + // escape-seq-char =/ %x75 4HEXDIG ; uXXXX U+XXXX + // escape-seq-char =/ %x55 8HEXDIG ; UXXXXXXXX U+XXXXXXXX + token, escaped, rest, err := scanBasicString(b) + if err != nil { + return nil, nil, nil, err + } + + startIdx := len(`"`) + endIdx := len(token) - len(`"`) + + // Fast path. If there is no escape sequence, the string should just be + // an UTF-8 encoded string, which is the same as Go. In that case, + // validate the string and return a direct reference to the buffer. + if !escaped { + str := token[startIdx:endIdx] + verr := characters.Utf8TomlValidAlreadyEscaped(str) + if verr.Zero() { + return token, str, rest, nil + } + return nil, nil, nil, NewParserError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") + } + + i := startIdx + + var builder bytes.Buffer + + // The scanner ensures that the token starts and ends with quotes and that + // escapes are balanced. 
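+ // The loop below rewrites escape sequences such as \t and \u00E9 + // into their UTF-8 byte encoding.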
+ for i < len(token)-1 { + c := token[i] + if c == '\\' { + i++ + c = token[i] + + switch c { + case '"', '\\': + builder.WriteByte(c) + case 'b': + builder.WriteByte('\b') + case 'f': + builder.WriteByte('\f') + case 'n': + builder.WriteByte('\n') + case 'r': + builder.WriteByte('\r') + case 't': + builder.WriteByte('\t') + case 'e': + builder.WriteByte(0x1B) + case 'u': + x, err := hexToRune(token[i+1:len(token)-1], 4) + if err != nil { + return nil, nil, nil, err + } + + builder.WriteRune(x) + i += 4 + case 'U': + x, err := hexToRune(token[i+1:len(token)-1], 8) + if err != nil { + return nil, nil, nil, err + } + + builder.WriteRune(x) + i += 8 + default: + return nil, nil, nil, NewParserError(token[i:i+1], "invalid escaped character %#U", c) + } + i++ + } else { + size := characters.Utf8ValidNext(token[i:]) + if size == 0 { + return nil, nil, nil, NewParserError(token[i:i+1], "invalid character %#U", c) + } + builder.Write(token[i : i+size]) + i += size + } + } + + return token, builder.Bytes(), rest, nil +} + +func hexToRune(b []byte, length int) (rune, error) { + if len(b) < length { + return -1, NewParserError(b, "unicode point needs %d characters, not %d", length, len(b)) + } + b = b[:length] + + var r uint32 + for i, c := range b { + d := uint32(0) + switch { + case '0' <= c && c <= '9': + d = uint32(c - '0') + case 'a' <= c && c <= 'f': + d = uint32(c - 'a' + 10) + case 'A' <= c && c <= 'F': + d = uint32(c - 'A' + 10) + default: + return -1, NewParserError(b[i:i+1], "non-hex character") + } + r = r*16 + d + } + + if r > unicode.MaxRune || 0xD800 <= r && r < 0xE000 { + return -1, NewParserError(b, "escape sequence is invalid Unicode code point") + } + + return rune(r), nil +} + +func (p *Parser) parseWhitespace(b []byte) []byte { + // ws = *wschar + // wschar = %x20 ; Space + // wschar =/ %x09 ; Horizontal tab + _, rest := scanWhitespace(b) + + return rest +} + +//nolint:cyclop +func (p *Parser) parseIntOrFloatOrDateTime(b []byte) (reference, []byte, error) { + switch b[0] { + case 'i': + if !scanFollowsInf(b) { + return invalidReference, nil, NewParserError(atmost(b, 3), "expected 'inf'") + } + + return p.builder.Push(Node{ + Kind: Float, + Data: b[:3], + Raw: p.Range(b[:3]), + }), b[3:], nil + case 'n': + if !scanFollowsNan(b) { + return invalidReference, nil, NewParserError(atmost(b, 3), "expected 'nan'") + } + + return p.builder.Push(Node{ + Kind: Float, + Data: b[:3], + Raw: p.Range(b[:3]), + }), b[3:], nil + case '+', '-': + return p.scanIntOrFloat(b) + } + + if len(b) < 3 { + return p.scanIntOrFloat(b) + } + + s := 5 + if len(b) < s { + s = len(b) + } + + for idx, c := range b[:s] { + if isDigit(c) { + continue + } + + if idx == 2 && c == ':' || (idx == 4 && c == '-') { + return p.scanDateTime(b) + } + + break + } + + return p.scanIntOrFloat(b) +} + +func (p *Parser) scanDateTime(b []byte) (reference, []byte, error) { + // scans for contiguous characters in [0-9T:Z.+-], and up to one space if + // followed by a digit.
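+ // Examples: 1979-05-27 (LocalDate), 07:32:00 (LocalTime), + // 1979-05-27T07:32:00 (LocalDateTime), 1979-05-27T07:32:00Z (DateTime).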
+ hasDate := false + hasTime := false + hasTz := false + seenSpace := false + + i := 0 +byteLoop: + for ; i < len(b); i++ { + c := b[i] + + switch { + case isDigit(c): + case c == '-': + hasDate = true + const minOffsetOfTz = 8 + if i >= minOffsetOfTz { + hasTz = true + } + case c == 'T' || c == 't' || c == ':' || c == '.': + hasTime = true + case c == '+' || c == '-' || c == 'Z' || c == 'z': + hasTz = true + case c == ' ': + if !seenSpace && i+1 < len(b) && isDigit(b[i+1]) { + i += 2 + // Avoid reaching past the end of the document in case the time + // is malformed. See TestIssue585. + if i >= len(b) { + i-- + } + seenSpace = true + hasTime = true + } else { + break byteLoop + } + default: + break byteLoop + } + } + + var kind Kind + + if hasTime { + if hasDate { + if hasTz { + kind = DateTime + } else { + kind = LocalDateTime + } + } else { + kind = LocalTime + } + } else { + kind = LocalDate + } + + return p.builder.Push(Node{ + Kind: kind, + Data: b[:i], + }), b[i:], nil +} + +//nolint:funlen,gocognit,cyclop +func (p *Parser) scanIntOrFloat(b []byte) (reference, []byte, error) { + i := 0 + + if len(b) > 2 && b[0] == '0' && b[1] != '.' && b[1] != 'e' && b[1] != 'E' { + var isValidRune validRuneFn + + switch b[1] { + case 'x': + isValidRune = isValidHexRune + case 'o': + isValidRune = isValidOctalRune + case 'b': + isValidRune = isValidBinaryRune + default: + i++ + } + + if isValidRune != nil { + i += 2 + for ; i < len(b); i++ { + if !isValidRune(b[i]) { + break + } + } + } + + return p.builder.Push(Node{ + Kind: Integer, + Data: b[:i], + Raw: p.Range(b[:i]), + }), b[i:], nil + } + + isFloat := false + + for ; i < len(b); i++ { + c := b[i] + + if c >= '0' && c <= '9' || c == '+' || c == '-' || c == '_' { + continue + } + + if c == '.' || c == 'e' || c == 'E' { + isFloat = true + + continue + } + + if c == 'i' { + if scanFollowsInf(b[i:]) { + return p.builder.Push(Node{ + Kind: Float, + Data: b[:i+3], + Raw: p.Range(b[:i+3]), + }), b[i+3:], nil + } + + return invalidReference, nil, NewParserError(b[i:i+1], "unexpected character 'i' while scanning for a number") + } + + if c == 'n' { + if scanFollowsNan(b[i:]) { + return p.builder.Push(Node{ + Kind: Float, + Data: b[:i+3], + Raw: p.Range(b[:i+3]), + }), b[i+3:], nil + } + + return invalidReference, nil, NewParserError(b[i:i+1], "unexpected character 'n' while scanning for a number") + } + + break + } + + if i == 0 { + return invalidReference, b, NewParserError(b, "incomplete number") + } + + kind := Integer + + if isFloat { + kind = Float + } + + return p.builder.Push(Node{ + Kind: kind, + Data: b[:i], + Raw: p.Range(b[:i]), + }), b[i:], nil +} + +func isDigit(r byte) bool { + return r >= '0' && r <= '9' +} + +type validRuneFn func(r byte) bool + +func isValidHexRune(r byte) bool { + return r >= 'a' && r <= 'f' || + r >= 'A' && r <= 'F' || + r >= '0' && r <= '9' || + r == '_' +} + +func isValidOctalRune(r byte) bool { + return r >= '0' && r <= '7' || r == '_' +} + +func isValidBinaryRune(r byte) bool { + return r == '0' || r == '1' || r == '_' +} + +func expect(x byte, b []byte) ([]byte, error) { + if len(b) == 0 { + return nil, NewParserError(b, "expected character %c but the document ended here", x) + } + + if b[0] != x { + return nil, NewParserError(b[0:1], "expected character %c", x) + } + + return b[1:], nil +} diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go b/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go new file mode 100644 index 00000000..0512181d --- /dev/null +++ 
b/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go @@ -0,0 +1,270 @@ +package unstable + +import "github.com/pelletier/go-toml/v2/internal/characters" + +func scanFollows(b []byte, pattern string) bool { + n := len(pattern) + + return len(b) >= n && string(b[:n]) == pattern +} + +func scanFollowsMultilineBasicStringDelimiter(b []byte) bool { + return scanFollows(b, `"""`) +} + +func scanFollowsMultilineLiteralStringDelimiter(b []byte) bool { + return scanFollows(b, `'''`) +} + +func scanFollowsTrue(b []byte) bool { + return scanFollows(b, `true`) +} + +func scanFollowsFalse(b []byte) bool { + return scanFollows(b, `false`) +} + +func scanFollowsInf(b []byte) bool { + return scanFollows(b, `inf`) +} + +func scanFollowsNan(b []byte) bool { + return scanFollows(b, `nan`) +} + +func scanUnquotedKey(b []byte) ([]byte, []byte) { + // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ + for i := 0; i < len(b); i++ { + if !isUnquotedKeyChar(b[i]) { + return b[:i], b[i:] + } + } + + return b, b[len(b):] +} + +func isUnquotedKeyChar(r byte) bool { + return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '-' || r == '_' +} + +func scanLiteralString(b []byte) ([]byte, []byte, error) { + // literal-string = apostrophe *literal-char apostrophe + // apostrophe = %x27 ; ' apostrophe + // literal-char = %x09 / %x20-26 / %x28-7E / non-ascii + for i := 1; i < len(b); { + switch b[i] { + case '\'': + return b[:i+1], b[i+1:], nil + case '\n', '\r': + return nil, nil, NewParserError(b[i:i+1], "literal strings cannot have new lines") + } + size := characters.Utf8ValidNext(b[i:]) + if size == 0 { + return nil, nil, NewParserError(b[i:i+1], "invalid character") + } + i += size + } + + return nil, nil, NewParserError(b[len(b):], "unterminated literal string") +} + +func scanMultilineLiteralString(b []byte) ([]byte, []byte, error) { + // ml-literal-string = ml-literal-string-delim [ newline ] ml-literal-body + // ml-literal-string-delim + // ml-literal-string-delim = 3apostrophe + // ml-literal-body = *mll-content *( mll-quotes 1*mll-content ) [ mll-quotes ] + // + // mll-content = mll-char / newline + // mll-char = %x09 / %x20-26 / %x28-7E / non-ascii + // mll-quotes = 1*2apostrophe + for i := 3; i < len(b); { + switch b[i] { + case '\'': + if scanFollowsMultilineLiteralStringDelimiter(b[i:]) { + i += 3 + + // At that point we found 3 apostrophe, and i is the + // index of the byte after the third one. The scanner + // needs to be eager, because there can be an extra 2 + // apostrophe that can be accepted at the end of the + // string. 
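+ // For example, in '''a''''' the value is "a''": up to two extra + // apostrophes are accepted before the closing delimiter.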
+ + if i >= len(b) || b[i] != '\'' { + return b[:i], b[i:], nil + } + i++ + + if i >= len(b) || b[i] != '\'' { + return b[:i], b[i:], nil + } + i++ + + if i < len(b) && b[i] == '\'' { + return nil, nil, NewParserError(b[i-3:i+1], "''' not allowed in multiline literal string") + } + + return b[:i], b[i:], nil + } + case '\r': + if len(b) < i+2 { + return nil, nil, NewParserError(b[len(b):], `need a \n after \r`) + } + if b[i+1] != '\n' { + return nil, nil, NewParserError(b[i:i+2], `need a \n after \r`) + } + i += 2 // skip the \n + continue + } + size := characters.Utf8ValidNext(b[i:]) + if size == 0 { + return nil, nil, NewParserError(b[i:i+1], "invalid character") + } + i += size + } + + return nil, nil, NewParserError(b[len(b):], `multiline literal string not terminated by '''`) +} + +func scanWindowsNewline(b []byte) ([]byte, []byte, error) { + const lenCRLF = 2 + if len(b) < lenCRLF { + return nil, nil, NewParserError(b, "windows new line expected") + } + + if b[1] != '\n' { + return nil, nil, NewParserError(b, `windows new line should be \r\n`) + } + + return b[:lenCRLF], b[lenCRLF:], nil +} + +func scanWhitespace(b []byte) ([]byte, []byte) { + for i := 0; i < len(b); i++ { + switch b[i] { + case ' ', '\t': + continue + default: + return b[:i], b[i:] + } + } + + return b, b[len(b):] +} + +func scanComment(b []byte) ([]byte, []byte, error) { + // comment-start-symbol = %x23 ; # + // non-ascii = %x80-D7FF / %xE000-10FFFF + // non-eol = %x09 / %x20-7F / non-ascii + // + // comment = comment-start-symbol *non-eol + + for i := 1; i < len(b); { + if b[i] == '\n' { + return b[:i], b[i:], nil + } + if b[i] == '\r' { + if i+1 < len(b) && b[i+1] == '\n' { + return b[:i+1], b[i+1:], nil + } + return nil, nil, NewParserError(b[i:i+1], "invalid character in comment") + } + size := characters.Utf8ValidNext(b[i:]) + if size == 0 { + return nil, nil, NewParserError(b[i:i+1], "invalid character in comment") + } + + i += size + } + + return b, b[len(b):], nil +} + +func scanBasicString(b []byte) ([]byte, bool, []byte, error) { + // basic-string = quotation-mark *basic-char quotation-mark + // quotation-mark = %x22 ; " + // basic-char = basic-unescaped / escaped + // basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii + // escaped = escape escape-seq-char + escaped := false + i := 1 + + for ; i < len(b); i++ { + switch b[i] { + case '"': + return b[:i+1], escaped, b[i+1:], nil + case '\n', '\r': + return nil, escaped, nil, NewParserError(b[i:i+1], "basic strings cannot have new lines") + case '\\': + if len(b) < i+2 { + return nil, escaped, nil, NewParserError(b[i:i+1], "need a character after \\") + } + escaped = true + i++ // skip the next character + } + } + + return nil, escaped, nil, NewParserError(b[len(b):], `basic string not terminated by "`) +} + +func scanMultilineBasicString(b []byte) ([]byte, bool, []byte, error) { + // ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body + // ml-basic-string-delim + // ml-basic-string-delim = 3quotation-mark + // ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ] + // + // mlb-content = mlb-char / newline / mlb-escaped-nl + // mlb-char = mlb-unescaped / escaped + // mlb-quotes = 1*2quotation-mark + // mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii + // mlb-escaped-nl = escape ws newline *( wschar / newline ) + + escaped := false + i := 3 + + for ; i < len(b); i++ { + switch b[i] { + case '"': + if scanFollowsMultilineBasicStringDelimiter(b[i:]) { + i += 3 + + // At that point we found 3 
quotation marks, and i is the
+				// index of the byte after the third one. The scanner
+				// needs to be eager, because up to 2 extra
+				// quotation marks can be accepted at the end of the
+				// string.
+
+				if i >= len(b) || b[i] != '"' {
+					return b[:i], escaped, b[i:], nil
+				}
+				i++
+
+				if i >= len(b) || b[i] != '"' {
+					return b[:i], escaped, b[i:], nil
+				}
+				i++
+
+				if i < len(b) && b[i] == '"' {
+					return nil, escaped, nil, NewParserError(b[i-3:i+1], `""" not allowed in multiline basic string`)
+				}
+
+				return b[:i], escaped, b[i:], nil
+			}
+		case '\\':
+			if len(b) < i+2 {
+				return nil, escaped, nil, NewParserError(b[len(b):], "need a character after \\")
+			}
+			escaped = true
+			i++ // skip the next character
+		case '\r':
+			if len(b) < i+2 {
+				return nil, escaped, nil, NewParserError(b[len(b):], `need a \n after \r`)
+			}
+			if b[i+1] != '\n' {
+				return nil, escaped, nil, NewParserError(b[i:i+2], `need a \n after \r`)
+			}
+			i++ // skip the \n
+		}
+	}
+
+	return nil, escaped, nil, NewParserError(b[len(b):], `multiline basic string not terminated by """`)
+}
diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go
new file mode 100644
index 00000000..00cfd6de
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go
@@ -0,0 +1,7 @@
+package unstable
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a TOML document.
+type Unmarshaler interface {
+	UnmarshalTOML(value *Node) error
+}
diff --git a/vendor/github.com/pjbgf/sha1cd/BUILD.bazel b/vendor/github.com/pjbgf/sha1cd/BUILD.bazel
new file mode 100644
index 00000000..83cea746
--- /dev/null
+++ b/vendor/github.com/pjbgf/sha1cd/BUILD.bazel
@@ -0,0 +1,20 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "sha1cd",
+    srcs = [
+        "detection.go",
+        "sha1cd.go",
+        "sha1cdblock_amd64.go",
+        "sha1cdblock_amd64.s",
+        "sha1cdblock_generic.go",
+        "sha1cdblock_noasm.go",
+    ],
+    importmap = "peridot.resf.org/vendor/github.com/pjbgf/sha1cd",
+    importpath = "github.com/pjbgf/sha1cd",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//vendor/github.com/pjbgf/sha1cd/internal",
+        "//vendor/github.com/pjbgf/sha1cd/ubc",
+    ],
+)
diff --git a/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm b/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm
new file mode 100644
index 00000000..99761296
--- /dev/null
+++ b/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm
@@ -0,0 +1,23 @@
+FROM golang:1.20@sha256:2edf6aab2d57644f3fe7407132a0d1770846867465a39c2083770cf62734b05d
+
+ENV GOOS=linux
+ENV GOARCH=arm
+ENV CGO_ENABLED=1
+ENV CC=arm-linux-gnueabihf-gcc
+ENV PATH="/go/bin/${GOOS}_${GOARCH}:${PATH}"
+ENV PKG_CONFIG_PATH=/usr/lib/arm-linux-gnueabihf/pkgconfig
+
+RUN dpkg --add-architecture armhf \
+    && apt update \
+    && apt install -y --no-install-recommends \
+        upx \
+        gcc-arm-linux-gnueabihf \
+        libc6-dev-armhf-cross \
+        pkg-config \
+    && rm -rf /var/lib/apt/lists/*
+
+COPY . /src/workdir
+
+WORKDIR /src/workdir
+
+RUN go build ./...
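The `unstable.Unmarshaler` interface vendored a few hunks above is go-toml v2's hook for decoding a raw parse-tree `Node` yourself. Below is a minimal sketch, assuming the v2 decoder dispatches to `UnmarshalTOML` for types that implement it; the `Level` type, its accepted values, and the treatment of `value.Data` as unquoted string bytes are illustrative assumptions, not part of the vendored API:

```golang
package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2"
	"github.com/pelletier/go-toml/v2/unstable"
)

// Level is a hypothetical type that decodes from a bare TOML string.
type Level int

func (l *Level) UnmarshalTOML(value *unstable.Node) error {
	// Assumes a plain basic string whose contents need no unescaping.
	switch string(value.Data) {
	case "low":
		*l = 1
	case "high":
		*l = 2
	default:
		return fmt.Errorf("unsupported level %q", value.Data)
	}
	return nil
}

func main() {
	var cfg struct{ Verbosity Level }
	if err := toml.Unmarshal([]byte(`Verbosity = "high"`), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Verbosity) // 2
}
```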
diff --git a/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm64 b/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm64 new file mode 100644 index 00000000..66bd0947 --- /dev/null +++ b/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm64 @@ -0,0 +1,23 @@ +FROM golang:1.20@sha256:2edf6aab2d57644f3fe7407132a0d1770846867465a39c2083770cf62734b05d + +ENV GOOS=linux +ENV GOARCH=arm64 +ENV CGO_ENABLED=1 +ENV CC=aarch64-linux-gnu-gcc +ENV PATH="/go/bin/${GOOS}_${GOARCH}:${PATH}" +ENV PKG_CONFIG_PATH=/usr/lib/aarch64-linux-gnu/pkgconfig + +# install build & runtime dependencies +RUN dpkg --add-architecture arm64 \ + && apt update \ + && apt install -y --no-install-recommends \ + gcc-aarch64-linux-gnu \ + libc6-dev-arm64-cross \ + pkg-config \ + && rm -rf /var/lib/apt/lists/* + +COPY . /src/workdir + +WORKDIR /src/workdir + +RUN go build ./... diff --git a/vendor/github.com/pjbgf/sha1cd/LICENSE b/vendor/github.com/pjbgf/sha1cd/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/pjbgf/sha1cd/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/pjbgf/sha1cd/Makefile b/vendor/github.com/pjbgf/sha1cd/Makefile new file mode 100644 index 00000000..b24f2cba --- /dev/null +++ b/vendor/github.com/pjbgf/sha1cd/Makefile @@ -0,0 +1,40 @@ +FUZZ_TIME ?= 1m + +export CGO_ENABLED := 1 + +.PHONY: test +test: + go test ./... + +.PHONY: bench +bench: + go test -benchmem -run=^$$ -bench ^Benchmark ./... + +.PHONY: fuzz +fuzz: + go test -tags gofuzz -fuzz=. -fuzztime=$(FUZZ_TIME) ./test/ + +# Cross build project in arm/v7. +build-arm: + docker build -t sha1cd-arm -f Dockerfile.arm . + docker run --rm sha1cd-arm + +# Cross build project in arm64. +build-arm64: + docker build -t sha1cd-arm64 -f Dockerfile.arm64 . + docker run --rm sha1cd-arm64 + +# Build with cgo disabled. +build-nocgo: + CGO_ENABLED=0 go build ./cgo + +# Run cross-compilation to assure supported architectures. 
+cross-build: build-arm build-arm64 build-nocgo
+
+generate:
+	go run sha1cdblock_amd64_asm.go -out sha1cdblock_amd64.s
+	sed -i 's;&\samd64;&\n// +build !noasm,gc,amd64;g' sha1cdblock_amd64.s
+
+verify: generate
+	git diff --exit-code
+	go vet ./...
diff --git a/vendor/github.com/pjbgf/sha1cd/README.md b/vendor/github.com/pjbgf/sha1cd/README.md
new file mode 100644
index 00000000..378cf78c
--- /dev/null
+++ b/vendor/github.com/pjbgf/sha1cd/README.md
@@ -0,0 +1,58 @@
+# sha1cd
+
+A Go implementation of SHA1 with counter-cryptanalysis, which detects
+collision attacks.
+
+The `cgo/lib` code is a carbon copy of the [original code], based on
+the award-winning [white paper] by Marc Stevens.
+
+The Go implementation is largely based on Go's generic sha1.
+At present no SIMD optimisations have been implemented.
+
+## Usage
+
+`sha1cd` can be used as a drop-in replacement for `crypto/sha1`.
+Note that `Sum` also reports whether a collision was detected:
+
+```golang
+import (
+	"encoding/hex"
+	"fmt"
+
+	"github.com/pjbgf/sha1cd"
+)
+
+func test() {
+	data := []byte("data to be sha1 hashed")
+	h, _ := sha1cd.Sum(data)
+	fmt.Printf("hash: %q\n", hex.EncodeToString(h[:]))
+}
+```
+
+To obtain information as to whether a collision was found, use
+`CollisionResistantSum` on a hash created with `New`:
+
+```golang
+import (
+	"encoding/hex"
+	"fmt"
+
+	"github.com/pjbgf/sha1cd"
+)
+
+func test() {
+	data := []byte("data to be sha1 hashed")
+	d := sha1cd.New().(sha1cd.CollisionResistantHash)
+	d.Write(data)
+	h, col := d.CollisionResistantSum(nil)
+	if col {
+		fmt.Println("collision found!")
+	}
+	fmt.Printf("hash: %q", hex.EncodeToString(h))
+}
+```
+
+Note that when a collision attempt is detected, the algorithm
+automatically avoids it by extending SHA1 to 240 steps instead of 80.
+Therefore, inputs that contain the unavoidable bit conditions will
+yield a different hash from `sha1cd` than from `crypto/sha1`. Valid
+inputs will have matching outputs.
+
+## References
+- https://shattered.io/
+- https://github.com/cr-marcstevens/sha1collisiondetection
+- https://csrc.nist.gov/Projects/Cryptographic-Algorithm-Validation-Program/Secure-Hashing#shavs
+
+## Use of the Original Implementation
+- https://github.com/git/git/commit/28dc98e343ca4eb370a29ceec4c19beac9b5c01e
+- https://github.com/libgit2/libgit2/pull/4136
+
+[original code]: https://github.com/cr-marcstevens/sha1collisiondetection
+[white paper]: https://marc-stevens.nl/research/papers/C13-S.pdf
diff --git a/vendor/github.com/pjbgf/sha1cd/detection.go b/vendor/github.com/pjbgf/sha1cd/detection.go
new file mode 100644
index 00000000..a1458748
--- /dev/null
+++ b/vendor/github.com/pjbgf/sha1cd/detection.go
@@ -0,0 +1,11 @@
+package sha1cd
+
+import "hash"
+
+// CollisionResistantHash is a hash.Hash that additionally reports whether
+// the input triggered a SHA-1 collision attempt.
+type CollisionResistantHash interface {
+	// CollisionResistantSum extends Sum by returning an additional boolean
+	// which indicates whether a collision was found during the hashing process.
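+	// Like hash.Hash's Sum, the checksum is appended to b and the
+	// combined slice is returned.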
+	CollisionResistantSum(b []byte) ([]byte, bool)
+
+	hash.Hash
+}
diff --git a/vendor/github.com/pjbgf/sha1cd/internal/BUILD.bazel b/vendor/github.com/pjbgf/sha1cd/internal/BUILD.bazel
new file mode 100644
index 00000000..d5a6cf4f
--- /dev/null
+++ b/vendor/github.com/pjbgf/sha1cd/internal/BUILD.bazel
@@ -0,0 +1,9 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "internal",
+    srcs = ["const.go"],
+    importmap = "peridot.resf.org/vendor/github.com/pjbgf/sha1cd/internal",
+    importpath = "github.com/pjbgf/sha1cd/internal",
+    visibility = ["//vendor/github.com/pjbgf/sha1cd:__subpackages__"],
+)
diff --git a/vendor/github.com/pjbgf/sha1cd/internal/const.go b/vendor/github.com/pjbgf/sha1cd/internal/const.go
new file mode 100644
index 00000000..944a131d
--- /dev/null
+++ b/vendor/github.com/pjbgf/sha1cd/internal/const.go
@@ -0,0 +1,42 @@
+package shared
+
+const (
+	// Constants for the SHA-1 hash function.
+	K0 = 0x5A827999
+	K1 = 0x6ED9EBA1
+	K2 = 0x8F1BBCDC
+	K3 = 0xCA62C1D6
+
+	// Initial values for the buffer variables: h0, h1, h2, h3, h4.
+	Init0 = 0x67452301
+	Init1 = 0xEFCDAB89
+	Init2 = 0x98BADCFE
+	Init3 = 0x10325476
+	Init4 = 0xC3D2E1F0
+
+	// Initial values for the temporary variables (ihvtmp0, ihvtmp1, ihvtmp2, ihvtmp3, ihvtmp4) during the SHA recompression step.
+	InitTmp0 = 0xD5
+	InitTmp1 = 0x394
+	InitTmp2 = 0x8152A8
+	InitTmp3 = 0x0
+	InitTmp4 = 0xA7ECE0
+
+	// SHA1 contains two buffers, each made of five 32-bit words.
+	WordBuffers = 5
+
+	// The output of SHA1 is 20 bytes (160 bits).
+	Size = 20
+
+	// Rounds represents the number of steps required to process each chunk.
+	Rounds = 80
+
+	// SHA1 processes the input data in chunks. Each chunk contains 64 bytes.
+	Chunk = 64
+
+	// The number of pre-step compression states to store.
+	// Currently there are 3 pre-step compression states required: 0, 58, 65.
+	PreStepState = 3
+
+	Magic = "shacd\x01"
+	MarshaledSize = len(Magic) + 5*4 + Chunk + 8
+)
diff --git a/vendor/github.com/pjbgf/sha1cd/sha1cd.go b/vendor/github.com/pjbgf/sha1cd/sha1cd.go
new file mode 100644
index 00000000..a69e480e
--- /dev/null
+++ b/vendor/github.com/pjbgf/sha1cd/sha1cd.go
@@ -0,0 +1,227 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sha1cd implements collision detection based on the whitepaper
+// Counter-cryptanalysis from Marc Stevens. The original ubc implementation
+// was done by Marc Stevens and Dan Shumow, and can be found at:
+// https://github.com/cr-marcstevens/sha1collisiondetection
+package sha1cd
+
+// This SHA1 implementation is based on Go's generic SHA1.
+// Original: https://github.com/golang/go/blob/master/src/crypto/sha1/sha1.go
+
+import (
+	"crypto"
+	"encoding/binary"
+	"errors"
+	"hash"
+
+	shared "github.com/pjbgf/sha1cd/internal"
+)
+
+func init() {
+	crypto.RegisterHash(crypto.SHA1, New)
+}
+
+// The size of a SHA-1 checksum in bytes.
+const Size = shared.Size
+
+// The blocksize of SHA-1 in bytes.
+const BlockSize = shared.Chunk
+
+// digest represents the partial evaluation of a checksum.
+type digest struct {
+	h   [shared.WordBuffers]uint32
+	x   [shared.Chunk]byte
+	nx  int
+	len uint64
+
+	// col defines whether a collision has been found.
+	col       bool
+	blockFunc func(dig *digest, p []byte)
+}
+
+func (d *digest) MarshalBinary() ([]byte, error) {
+	b := make([]byte, 0, shared.MarshaledSize)
+	b = append(b, shared.Magic...)
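+	// The five 32-bit state words follow the magic header in big-endian
+	// order, then the buffered partial block (zero-padded to Chunk bytes),
+	// and finally the running byte count.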
+ b = appendUint32(b, d.h[0]) + b = appendUint32(b, d.h[1]) + b = appendUint32(b, d.h[2]) + b = appendUint32(b, d.h[3]) + b = appendUint32(b, d.h[4]) + b = append(b, d.x[:d.nx]...) + b = b[:len(b)+len(d.x)-d.nx] // already zero + b = appendUint64(b, d.len) + return b, nil +} + +func appendUint32(b []byte, v uint32) []byte { + return append(b, + byte(v>>24), + byte(v>>16), + byte(v>>8), + byte(v), + ) +} + +func appendUint64(b []byte, v uint64) []byte { + return append(b, + byte(v>>56), + byte(v>>48), + byte(v>>40), + byte(v>>32), + byte(v>>24), + byte(v>>16), + byte(v>>8), + byte(v), + ) +} + +func (d *digest) UnmarshalBinary(b []byte) error { + if len(b) < len(shared.Magic) || string(b[:len(shared.Magic)]) != shared.Magic { + return errors.New("crypto/sha1: invalid hash state identifier") + } + if len(b) != shared.MarshaledSize { + return errors.New("crypto/sha1: invalid hash state size") + } + b = b[len(shared.Magic):] + b, d.h[0] = consumeUint32(b) + b, d.h[1] = consumeUint32(b) + b, d.h[2] = consumeUint32(b) + b, d.h[3] = consumeUint32(b) + b, d.h[4] = consumeUint32(b) + b = b[copy(d.x[:], b):] + b, d.len = consumeUint64(b) + d.nx = int(d.len % shared.Chunk) + return nil +} + +func consumeUint64(b []byte) ([]byte, uint64) { + _ = b[7] + x := uint64(b[7]) | uint64(b[6])<<8 | uint64(b[shared.WordBuffers])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 + return b[8:], x +} + +func consumeUint32(b []byte) ([]byte, uint32) { + _ = b[3] + x := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 + return b[4:], x +} + +func (d *digest) Reset() { + d.h[0] = shared.Init0 + d.h[1] = shared.Init1 + d.h[2] = shared.Init2 + d.h[3] = shared.Init3 + d.h[4] = shared.Init4 + d.nx = 0 + d.len = 0 + + d.col = false +} + +// New returns a new hash.Hash computing the SHA1 checksum. The Hash also +// implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler to +// marshal and unmarshal the internal state of the hash. +func New() hash.Hash { + d := new(digest) + + d.blockFunc = block + d.Reset() + return d +} + +// NewGeneric is equivalent to New but uses the Go generic implementation, +// avoiding any processor-specific optimizations. +func NewGeneric() hash.Hash { + d := new(digest) + + d.blockFunc = blockGeneric + d.Reset() + return d +} + +func (d *digest) Size() int { return Size } + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Write(p []byte) (nn int, err error) { + if len(p) == 0 { + return + } + + nn = len(p) + d.len += uint64(nn) + if d.nx > 0 { + n := copy(d.x[d.nx:], p) + d.nx += n + if d.nx == shared.Chunk { + d.blockFunc(d, d.x[:]) + d.nx = 0 + } + p = p[n:] + } + if len(p) >= shared.Chunk { + n := len(p) &^ (shared.Chunk - 1) + d.blockFunc(d, p[:n]) + p = p[n:] + } + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +func (d *digest) Sum(in []byte) []byte { + // Make a copy of d so that caller can keep writing and summing. + d0 := *d + hash := d0.checkSum() + return append(in, hash[:]...) +} + +func (d *digest) checkSum() [Size]byte { + len := d.len + // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. + var tmp [64]byte + tmp[0] = 0x80 + if len%64 < 56 { + d.Write(tmp[0 : 56-len%64]) + } else { + d.Write(tmp[0 : 64+56-len%64]) + } + + // Length in bits. 
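+	// d.len counts bytes written; SHA-1's padding encodes the message
+	// length in bits, hence the shift by 3.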
+	len <<= 3
+	binary.BigEndian.PutUint64(tmp[:], len)
+	d.Write(tmp[0:8])
+
+	if d.nx != 0 {
+		panic("d.nx != 0")
+	}
+
+	var digest [Size]byte
+
+	binary.BigEndian.PutUint32(digest[0:], d.h[0])
+	binary.BigEndian.PutUint32(digest[4:], d.h[1])
+	binary.BigEndian.PutUint32(digest[8:], d.h[2])
+	binary.BigEndian.PutUint32(digest[12:], d.h[3])
+	binary.BigEndian.PutUint32(digest[16:], d.h[4])
+
+	return digest
+}
+
+// Sum returns the SHA-1 checksum of the data, along with a boolean
+// indicating whether a collision was detected while hashing.
+func Sum(data []byte) ([Size]byte, bool) {
+	d := New().(*digest)
+	d.Write(data)
+	return d.checkSum(), d.col
+}
+
+func (d *digest) CollisionResistantSum(in []byte) ([]byte, bool) {
+	// Make a copy of d so that caller can keep writing and summing.
+	d0 := *d
+	hash := d0.checkSum()
+	return append(in, hash[:]...), d0.col
+}
diff --git a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.go b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.go
new file mode 100644
index 00000000..95e08308
--- /dev/null
+++ b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.go
@@ -0,0 +1,50 @@
+//go:build !noasm && gc && amd64
+// +build !noasm,gc,amd64
+
+package sha1cd
+
+import (
+	"math"
+	"unsafe"
+
+	shared "github.com/pjbgf/sha1cd/internal"
+)
+
+type sliceHeader struct {
+	base uintptr
+	len  int
+	cap  int
+}
+
+// blockAMD64 hashes the message p into the current state in dig.
+// Both m1 and cs are used to store intermediate results which are used by the collision detection logic.
+//
+//go:noescape
+func blockAMD64(dig *digest, p sliceHeader, m1 []uint32, cs [][5]uint32)
+
+func block(dig *digest, p []byte) {
+	m1 := [shared.Rounds]uint32{}
+	cs := [shared.PreStepState][shared.WordBuffers]uint32{}
+
+	for len(p) >= shared.Chunk {
+		// Only send a block to be processed, as the collision detection
+		// works on a block-by-block basis.
+		ips := sliceHeader{
+			base: uintptr(unsafe.Pointer(&p[0])),
+			len:  int(math.Min(float64(len(p)), float64(shared.Chunk))),
+			cap:  shared.Chunk,
+		}
+
+		blockAMD64(dig, ips, m1[:], cs[:])
+
+		col := checkCollision(m1, cs, dig.h)
+		if col {
+			dig.col = true
+
+			blockAMD64(dig, ips, m1[:], cs[:])
+			blockAMD64(dig, ips, m1[:], cs[:])
+		}
+
+		p = p[shared.Chunk:]
+	}
+}
diff --git a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.s b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.s
new file mode 100644
index 00000000..86f9821c
--- /dev/null
+++ b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.s
@@ -0,0 +1,2274 @@
+// Code generated by command: go run sha1cdblock_amd64_asm.go -out sha1cdblock_amd64.s. DO NOT EDIT.
+
+//go:build !noasm && gc && amd64
+// +build !noasm,gc,amd64
+
+#include "textflag.h"
+
+// func blockAMD64(dig *digest, p []byte, m1 []uint32, cs [][5]uint32)
+TEXT ·blockAMD64(SB), NOSPLIT, $64-80
+	MOVQ dig+0(FP), R8
+	MOVQ p_base+8(FP), DI
+	MOVQ p_len+16(FP), DX
+	SHRQ $+6, DX
+	SHLQ $+6, DX
+	LEAQ (DI)(DX*1), SI
+
+	// Load h0, h1, h2, h3, h4.
+	MOVL (R8), AX
+	MOVL 4(R8), BX
+	MOVL 8(R8), CX
+	MOVL 12(R8), DX
+	MOVL 16(R8), BP
+
+	// len(p) >= chunk
+	CMPQ DI, SI
+	JEQ  end
+
+loop:
+	// Initialize registers a, b, c, d, e.
+ MOVL AX, R10 + MOVL BX, R11 + MOVL CX, R12 + MOVL DX, R13 + MOVL BP, R14 + + // ROUND1 (steps 0-15) + // Load cs + MOVQ cs_base+56(FP), R8 + MOVL R10, (R8) + MOVL R11, 4(R8) + MOVL R12, 8(R8) + MOVL R13, 12(R8) + MOVL R14, 16(R8) + + // ROUND1(0) + // LOAD + MOVL (DI), R9 + BSWAPL R9 + MOVL R9, (SP) + + // FUNC1 + MOVL R13, R15 + XORL R12, R15 + ANDL R11, R15 + XORL R13, R15 + + // MIX + ROLL $+30, R11 + ADDL R15, R14 + MOVL R10, R8 + ROLL $+5, R8 + LEAL 1518500249(R14)(R9*1), R14 + ADDL R8, R14 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL (SP), R9 + MOVL R9, (R8) + + // ROUND1(1) + // LOAD + MOVL 4(DI), R9 + BSWAPL R9 + MOVL R9, 4(SP) + + // FUNC1 + MOVL R12, R15 + XORL R11, R15 + ANDL R10, R15 + XORL R12, R15 + + // MIX + ROLL $+30, R10 + ADDL R15, R13 + MOVL R14, R8 + ROLL $+5, R8 + LEAL 1518500249(R13)(R9*1), R13 + ADDL R8, R13 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 4(SP), R9 + MOVL R9, 4(R8) + + // ROUND1(2) + // LOAD + MOVL 8(DI), R9 + BSWAPL R9 + MOVL R9, 8(SP) + + // FUNC1 + MOVL R11, R15 + XORL R10, R15 + ANDL R14, R15 + XORL R11, R15 + + // MIX + ROLL $+30, R14 + ADDL R15, R12 + MOVL R13, R8 + ROLL $+5, R8 + LEAL 1518500249(R12)(R9*1), R12 + ADDL R8, R12 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 8(SP), R9 + MOVL R9, 8(R8) + + // ROUND1(3) + // LOAD + MOVL 12(DI), R9 + BSWAPL R9 + MOVL R9, 12(SP) + + // FUNC1 + MOVL R10, R15 + XORL R14, R15 + ANDL R13, R15 + XORL R10, R15 + + // MIX + ROLL $+30, R13 + ADDL R15, R11 + MOVL R12, R8 + ROLL $+5, R8 + LEAL 1518500249(R11)(R9*1), R11 + ADDL R8, R11 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 12(SP), R9 + MOVL R9, 12(R8) + + // ROUND1(4) + // LOAD + MOVL 16(DI), R9 + BSWAPL R9 + MOVL R9, 16(SP) + + // FUNC1 + MOVL R14, R15 + XORL R13, R15 + ANDL R12, R15 + XORL R14, R15 + + // MIX + ROLL $+30, R12 + ADDL R15, R10 + MOVL R11, R8 + ROLL $+5, R8 + LEAL 1518500249(R10)(R9*1), R10 + ADDL R8, R10 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 16(SP), R9 + MOVL R9, 16(R8) + + // ROUND1(5) + // LOAD + MOVL 20(DI), R9 + BSWAPL R9 + MOVL R9, 20(SP) + + // FUNC1 + MOVL R13, R15 + XORL R12, R15 + ANDL R11, R15 + XORL R13, R15 + + // MIX + ROLL $+30, R11 + ADDL R15, R14 + MOVL R10, R8 + ROLL $+5, R8 + LEAL 1518500249(R14)(R9*1), R14 + ADDL R8, R14 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 20(SP), R9 + MOVL R9, 20(R8) + + // ROUND1(6) + // LOAD + MOVL 24(DI), R9 + BSWAPL R9 + MOVL R9, 24(SP) + + // FUNC1 + MOVL R12, R15 + XORL R11, R15 + ANDL R10, R15 + XORL R12, R15 + + // MIX + ROLL $+30, R10 + ADDL R15, R13 + MOVL R14, R8 + ROLL $+5, R8 + LEAL 1518500249(R13)(R9*1), R13 + ADDL R8, R13 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 24(SP), R9 + MOVL R9, 24(R8) + + // ROUND1(7) + // LOAD + MOVL 28(DI), R9 + BSWAPL R9 + MOVL R9, 28(SP) + + // FUNC1 + MOVL R11, R15 + XORL R10, R15 + ANDL R14, R15 + XORL R11, R15 + + // MIX + ROLL $+30, R14 + ADDL R15, R12 + MOVL R13, R8 + ROLL $+5, R8 + LEAL 1518500249(R12)(R9*1), R12 + ADDL R8, R12 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 28(SP), R9 + MOVL R9, 28(R8) + + // ROUND1(8) + // LOAD + MOVL 32(DI), R9 + BSWAPL R9 + MOVL R9, 32(SP) + + // FUNC1 + MOVL R10, R15 + XORL R14, R15 + ANDL R13, R15 + XORL R10, R15 + + // MIX + ROLL $+30, R13 + ADDL R15, R11 + MOVL R12, R8 + ROLL $+5, R8 + LEAL 1518500249(R11)(R9*1), R11 + ADDL R8, R11 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 32(SP), R9 + MOVL R9, 32(R8) + + // ROUND1(9) + // LOAD + MOVL 36(DI), R9 + BSWAPL R9 + MOVL R9, 36(SP) + + // FUNC1 + MOVL R14, R15 + XORL R13, R15 + ANDL R12, R15 + XORL R14, R15 + + // MIX + ROLL 
$+30, R12 + ADDL R15, R10 + MOVL R11, R8 + ROLL $+5, R8 + LEAL 1518500249(R10)(R9*1), R10 + ADDL R8, R10 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 36(SP), R9 + MOVL R9, 36(R8) + + // ROUND1(10) + // LOAD + MOVL 40(DI), R9 + BSWAPL R9 + MOVL R9, 40(SP) + + // FUNC1 + MOVL R13, R15 + XORL R12, R15 + ANDL R11, R15 + XORL R13, R15 + + // MIX + ROLL $+30, R11 + ADDL R15, R14 + MOVL R10, R8 + ROLL $+5, R8 + LEAL 1518500249(R14)(R9*1), R14 + ADDL R8, R14 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 40(SP), R9 + MOVL R9, 40(R8) + + // ROUND1(11) + // LOAD + MOVL 44(DI), R9 + BSWAPL R9 + MOVL R9, 44(SP) + + // FUNC1 + MOVL R12, R15 + XORL R11, R15 + ANDL R10, R15 + XORL R12, R15 + + // MIX + ROLL $+30, R10 + ADDL R15, R13 + MOVL R14, R8 + ROLL $+5, R8 + LEAL 1518500249(R13)(R9*1), R13 + ADDL R8, R13 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 44(SP), R9 + MOVL R9, 44(R8) + + // ROUND1(12) + // LOAD + MOVL 48(DI), R9 + BSWAPL R9 + MOVL R9, 48(SP) + + // FUNC1 + MOVL R11, R15 + XORL R10, R15 + ANDL R14, R15 + XORL R11, R15 + + // MIX + ROLL $+30, R14 + ADDL R15, R12 + MOVL R13, R8 + ROLL $+5, R8 + LEAL 1518500249(R12)(R9*1), R12 + ADDL R8, R12 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 48(SP), R9 + MOVL R9, 48(R8) + + // ROUND1(13) + // LOAD + MOVL 52(DI), R9 + BSWAPL R9 + MOVL R9, 52(SP) + + // FUNC1 + MOVL R10, R15 + XORL R14, R15 + ANDL R13, R15 + XORL R10, R15 + + // MIX + ROLL $+30, R13 + ADDL R15, R11 + MOVL R12, R8 + ROLL $+5, R8 + LEAL 1518500249(R11)(R9*1), R11 + ADDL R8, R11 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 52(SP), R9 + MOVL R9, 52(R8) + + // ROUND1(14) + // LOAD + MOVL 56(DI), R9 + BSWAPL R9 + MOVL R9, 56(SP) + + // FUNC1 + MOVL R14, R15 + XORL R13, R15 + ANDL R12, R15 + XORL R14, R15 + + // MIX + ROLL $+30, R12 + ADDL R15, R10 + MOVL R11, R8 + ROLL $+5, R8 + LEAL 1518500249(R10)(R9*1), R10 + ADDL R8, R10 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 56(SP), R9 + MOVL R9, 56(R8) + + // ROUND1(15) + // LOAD + MOVL 60(DI), R9 + BSWAPL R9 + MOVL R9, 60(SP) + + // FUNC1 + MOVL R13, R15 + XORL R12, R15 + ANDL R11, R15 + XORL R13, R15 + + // MIX + ROLL $+30, R11 + ADDL R15, R14 + MOVL R10, R8 + ROLL $+5, R8 + LEAL 1518500249(R14)(R9*1), R14 + ADDL R8, R14 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 60(SP), R9 + MOVL R9, 60(R8) + + // ROUND1x (steps 16-19) - same as ROUND1 but with no data load. 
+ // ROUND1x(16) + // SHUFFLE + MOVL (SP), R9 + XORL 52(SP), R9 + XORL 32(SP), R9 + XORL 8(SP), R9 + ROLL $+1, R9 + MOVL R9, (SP) + + // FUNC1 + MOVL R12, R15 + XORL R11, R15 + ANDL R10, R15 + XORL R12, R15 + + // MIX + ROLL $+30, R10 + ADDL R15, R13 + MOVL R14, R8 + ROLL $+5, R8 + LEAL 1518500249(R13)(R9*1), R13 + ADDL R8, R13 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL (SP), R9 + MOVL R9, 64(R8) + + // ROUND1x(17) + // SHUFFLE + MOVL 4(SP), R9 + XORL 56(SP), R9 + XORL 36(SP), R9 + XORL 12(SP), R9 + ROLL $+1, R9 + MOVL R9, 4(SP) + + // FUNC1 + MOVL R11, R15 + XORL R10, R15 + ANDL R14, R15 + XORL R11, R15 + + // MIX + ROLL $+30, R14 + ADDL R15, R12 + MOVL R13, R8 + ROLL $+5, R8 + LEAL 1518500249(R12)(R9*1), R12 + ADDL R8, R12 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 4(SP), R9 + MOVL R9, 68(R8) + + // ROUND1x(18) + // SHUFFLE + MOVL 8(SP), R9 + XORL 60(SP), R9 + XORL 40(SP), R9 + XORL 16(SP), R9 + ROLL $+1, R9 + MOVL R9, 8(SP) + + // FUNC1 + MOVL R10, R15 + XORL R14, R15 + ANDL R13, R15 + XORL R10, R15 + + // MIX + ROLL $+30, R13 + ADDL R15, R11 + MOVL R12, R8 + ROLL $+5, R8 + LEAL 1518500249(R11)(R9*1), R11 + ADDL R8, R11 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 8(SP), R9 + MOVL R9, 72(R8) + + // ROUND1x(19) + // SHUFFLE + MOVL 12(SP), R9 + XORL (SP), R9 + XORL 44(SP), R9 + XORL 20(SP), R9 + ROLL $+1, R9 + MOVL R9, 12(SP) + + // FUNC1 + MOVL R14, R15 + XORL R13, R15 + ANDL R12, R15 + XORL R14, R15 + + // MIX + ROLL $+30, R12 + ADDL R15, R10 + MOVL R11, R8 + ROLL $+5, R8 + LEAL 1518500249(R10)(R9*1), R10 + ADDL R8, R10 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 12(SP), R9 + MOVL R9, 76(R8) + + // ROUND2 (steps 20-39) + // ROUND2(20) + // SHUFFLE + MOVL 16(SP), R9 + XORL 4(SP), R9 + XORL 48(SP), R9 + XORL 24(SP), R9 + ROLL $+1, R9 + MOVL R9, 16(SP) + + // FUNC2 + MOVL R11, R15 + XORL R12, R15 + XORL R13, R15 + + // MIX + ROLL $+30, R11 + ADDL R15, R14 + MOVL R10, R8 + ROLL $+5, R8 + LEAL 1859775393(R14)(R9*1), R14 + ADDL R8, R14 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 16(SP), R9 + MOVL R9, 80(R8) + + // ROUND2(21) + // SHUFFLE + MOVL 20(SP), R9 + XORL 8(SP), R9 + XORL 52(SP), R9 + XORL 28(SP), R9 + ROLL $+1, R9 + MOVL R9, 20(SP) + + // FUNC2 + MOVL R10, R15 + XORL R11, R15 + XORL R12, R15 + + // MIX + ROLL $+30, R10 + ADDL R15, R13 + MOVL R14, R8 + ROLL $+5, R8 + LEAL 1859775393(R13)(R9*1), R13 + ADDL R8, R13 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 20(SP), R9 + MOVL R9, 84(R8) + + // ROUND2(22) + // SHUFFLE + MOVL 24(SP), R9 + XORL 12(SP), R9 + XORL 56(SP), R9 + XORL 32(SP), R9 + ROLL $+1, R9 + MOVL R9, 24(SP) + + // FUNC2 + MOVL R14, R15 + XORL R10, R15 + XORL R11, R15 + + // MIX + ROLL $+30, R14 + ADDL R15, R12 + MOVL R13, R8 + ROLL $+5, R8 + LEAL 1859775393(R12)(R9*1), R12 + ADDL R8, R12 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 24(SP), R9 + MOVL R9, 88(R8) + + // ROUND2(23) + // SHUFFLE + MOVL 28(SP), R9 + XORL 16(SP), R9 + XORL 60(SP), R9 + XORL 36(SP), R9 + ROLL $+1, R9 + MOVL R9, 28(SP) + + // FUNC2 + MOVL R13, R15 + XORL R14, R15 + XORL R10, R15 + + // MIX + ROLL $+30, R13 + ADDL R15, R11 + MOVL R12, R8 + ROLL $+5, R8 + LEAL 1859775393(R11)(R9*1), R11 + ADDL R8, R11 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 28(SP), R9 + MOVL R9, 92(R8) + + // ROUND2(24) + // SHUFFLE + MOVL 32(SP), R9 + XORL 20(SP), R9 + XORL (SP), R9 + XORL 40(SP), R9 + ROLL $+1, R9 + MOVL R9, 32(SP) + + // FUNC2 + MOVL R12, R15 + XORL R13, R15 + XORL R14, R15 + + // MIX + ROLL $+30, R12 + ADDL R15, R10 + MOVL R11, R8 + ROLL $+5, R8 + LEAL 1859775393(R10)(R9*1), R10 
+ ADDL R8, R10 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 32(SP), R9 + MOVL R9, 96(R8) + + // ROUND2(25) + // SHUFFLE + MOVL 36(SP), R9 + XORL 24(SP), R9 + XORL 4(SP), R9 + XORL 44(SP), R9 + ROLL $+1, R9 + MOVL R9, 36(SP) + + // FUNC2 + MOVL R11, R15 + XORL R12, R15 + XORL R13, R15 + + // MIX + ROLL $+30, R11 + ADDL R15, R14 + MOVL R10, R8 + ROLL $+5, R8 + LEAL 1859775393(R14)(R9*1), R14 + ADDL R8, R14 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 36(SP), R9 + MOVL R9, 100(R8) + + // ROUND2(26) + // SHUFFLE + MOVL 40(SP), R9 + XORL 28(SP), R9 + XORL 8(SP), R9 + XORL 48(SP), R9 + ROLL $+1, R9 + MOVL R9, 40(SP) + + // FUNC2 + MOVL R10, R15 + XORL R11, R15 + XORL R12, R15 + + // MIX + ROLL $+30, R10 + ADDL R15, R13 + MOVL R14, R8 + ROLL $+5, R8 + LEAL 1859775393(R13)(R9*1), R13 + ADDL R8, R13 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 40(SP), R9 + MOVL R9, 104(R8) + + // ROUND2(27) + // SHUFFLE + MOVL 44(SP), R9 + XORL 32(SP), R9 + XORL 12(SP), R9 + XORL 52(SP), R9 + ROLL $+1, R9 + MOVL R9, 44(SP) + + // FUNC2 + MOVL R14, R15 + XORL R10, R15 + XORL R11, R15 + + // MIX + ROLL $+30, R14 + ADDL R15, R12 + MOVL R13, R8 + ROLL $+5, R8 + LEAL 1859775393(R12)(R9*1), R12 + ADDL R8, R12 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 44(SP), R9 + MOVL R9, 108(R8) + + // ROUND2(28) + // SHUFFLE + MOVL 48(SP), R9 + XORL 36(SP), R9 + XORL 16(SP), R9 + XORL 56(SP), R9 + ROLL $+1, R9 + MOVL R9, 48(SP) + + // FUNC2 + MOVL R13, R15 + XORL R14, R15 + XORL R10, R15 + + // MIX + ROLL $+30, R13 + ADDL R15, R11 + MOVL R12, R8 + ROLL $+5, R8 + LEAL 1859775393(R11)(R9*1), R11 + ADDL R8, R11 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 48(SP), R9 + MOVL R9, 112(R8) + + // ROUND2(29) + // SHUFFLE + MOVL 52(SP), R9 + XORL 40(SP), R9 + XORL 20(SP), R9 + XORL 60(SP), R9 + ROLL $+1, R9 + MOVL R9, 52(SP) + + // FUNC2 + MOVL R12, R15 + XORL R13, R15 + XORL R14, R15 + + // MIX + ROLL $+30, R12 + ADDL R15, R10 + MOVL R11, R8 + ROLL $+5, R8 + LEAL 1859775393(R10)(R9*1), R10 + ADDL R8, R10 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 52(SP), R9 + MOVL R9, 116(R8) + + // ROUND2(30) + // SHUFFLE + MOVL 56(SP), R9 + XORL 44(SP), R9 + XORL 24(SP), R9 + XORL (SP), R9 + ROLL $+1, R9 + MOVL R9, 56(SP) + + // FUNC2 + MOVL R11, R15 + XORL R12, R15 + XORL R13, R15 + + // MIX + ROLL $+30, R11 + ADDL R15, R14 + MOVL R10, R8 + ROLL $+5, R8 + LEAL 1859775393(R14)(R9*1), R14 + ADDL R8, R14 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 56(SP), R9 + MOVL R9, 120(R8) + + // ROUND2(31) + // SHUFFLE + MOVL 60(SP), R9 + XORL 48(SP), R9 + XORL 28(SP), R9 + XORL 4(SP), R9 + ROLL $+1, R9 + MOVL R9, 60(SP) + + // FUNC2 + MOVL R10, R15 + XORL R11, R15 + XORL R12, R15 + + // MIX + ROLL $+30, R10 + ADDL R15, R13 + MOVL R14, R8 + ROLL $+5, R8 + LEAL 1859775393(R13)(R9*1), R13 + ADDL R8, R13 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 60(SP), R9 + MOVL R9, 124(R8) + + // ROUND2(32) + // SHUFFLE + MOVL (SP), R9 + XORL 52(SP), R9 + XORL 32(SP), R9 + XORL 8(SP), R9 + ROLL $+1, R9 + MOVL R9, (SP) + + // FUNC2 + MOVL R14, R15 + XORL R10, R15 + XORL R11, R15 + + // MIX + ROLL $+30, R14 + ADDL R15, R12 + MOVL R13, R8 + ROLL $+5, R8 + LEAL 1859775393(R12)(R9*1), R12 + ADDL R8, R12 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL (SP), R9 + MOVL R9, 128(R8) + + // ROUND2(33) + // SHUFFLE + MOVL 4(SP), R9 + XORL 56(SP), R9 + XORL 36(SP), R9 + XORL 12(SP), R9 + ROLL $+1, R9 + MOVL R9, 4(SP) + + // FUNC2 + MOVL R13, R15 + XORL R14, R15 + XORL R10, R15 + + // MIX + ROLL $+30, R13 + ADDL R15, R11 + MOVL R12, R8 + ROLL $+5, R8 + LEAL 
1859775393(R11)(R9*1), R11 + ADDL R8, R11 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 4(SP), R9 + MOVL R9, 132(R8) + + // ROUND2(34) + // SHUFFLE + MOVL 8(SP), R9 + XORL 60(SP), R9 + XORL 40(SP), R9 + XORL 16(SP), R9 + ROLL $+1, R9 + MOVL R9, 8(SP) + + // FUNC2 + MOVL R12, R15 + XORL R13, R15 + XORL R14, R15 + + // MIX + ROLL $+30, R12 + ADDL R15, R10 + MOVL R11, R8 + ROLL $+5, R8 + LEAL 1859775393(R10)(R9*1), R10 + ADDL R8, R10 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 8(SP), R9 + MOVL R9, 136(R8) + + // ROUND2(35) + // SHUFFLE + MOVL 12(SP), R9 + XORL (SP), R9 + XORL 44(SP), R9 + XORL 20(SP), R9 + ROLL $+1, R9 + MOVL R9, 12(SP) + + // FUNC2 + MOVL R11, R15 + XORL R12, R15 + XORL R13, R15 + + // MIX + ROLL $+30, R11 + ADDL R15, R14 + MOVL R10, R8 + ROLL $+5, R8 + LEAL 1859775393(R14)(R9*1), R14 + ADDL R8, R14 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 12(SP), R9 + MOVL R9, 140(R8) + + // ROUND2(36) + // SHUFFLE + MOVL 16(SP), R9 + XORL 4(SP), R9 + XORL 48(SP), R9 + XORL 24(SP), R9 + ROLL $+1, R9 + MOVL R9, 16(SP) + + // FUNC2 + MOVL R10, R15 + XORL R11, R15 + XORL R12, R15 + + // MIX + ROLL $+30, R10 + ADDL R15, R13 + MOVL R14, R8 + ROLL $+5, R8 + LEAL 1859775393(R13)(R9*1), R13 + ADDL R8, R13 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 16(SP), R9 + MOVL R9, 144(R8) + + // ROUND2(37) + // SHUFFLE + MOVL 20(SP), R9 + XORL 8(SP), R9 + XORL 52(SP), R9 + XORL 28(SP), R9 + ROLL $+1, R9 + MOVL R9, 20(SP) + + // FUNC2 + MOVL R14, R15 + XORL R10, R15 + XORL R11, R15 + + // MIX + ROLL $+30, R14 + ADDL R15, R12 + MOVL R13, R8 + ROLL $+5, R8 + LEAL 1859775393(R12)(R9*1), R12 + ADDL R8, R12 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 20(SP), R9 + MOVL R9, 148(R8) + + // ROUND2(38) + // SHUFFLE + MOVL 24(SP), R9 + XORL 12(SP), R9 + XORL 56(SP), R9 + XORL 32(SP), R9 + ROLL $+1, R9 + MOVL R9, 24(SP) + + // FUNC2 + MOVL R13, R15 + XORL R14, R15 + XORL R10, R15 + + // MIX + ROLL $+30, R13 + ADDL R15, R11 + MOVL R12, R8 + ROLL $+5, R8 + LEAL 1859775393(R11)(R9*1), R11 + ADDL R8, R11 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 24(SP), R9 + MOVL R9, 152(R8) + + // ROUND2(39) + // SHUFFLE + MOVL 28(SP), R9 + XORL 16(SP), R9 + XORL 60(SP), R9 + XORL 36(SP), R9 + ROLL $+1, R9 + MOVL R9, 28(SP) + + // FUNC2 + MOVL R12, R15 + XORL R13, R15 + XORL R14, R15 + + // MIX + ROLL $+30, R12 + ADDL R15, R10 + MOVL R11, R8 + ROLL $+5, R8 + LEAL 1859775393(R10)(R9*1), R10 + ADDL R8, R10 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 28(SP), R9 + MOVL R9, 156(R8) + + // ROUND3 (steps 40-59) + // ROUND3(40) + // SHUFFLE + MOVL 32(SP), R9 + XORL 20(SP), R9 + XORL (SP), R9 + XORL 40(SP), R9 + ROLL $+1, R9 + MOVL R9, 32(SP) + + // FUNC3 + MOVL R11, R8 + ORL R12, R8 + ANDL R13, R8 + MOVL R11, R15 + ANDL R12, R15 + ORL R8, R15 + + // MIX + ROLL $+30, R11 + ADDL R15, R14 + MOVL R10, R8 + ROLL $+5, R8 + LEAL 2400959708(R14)(R9*1), R14 + ADDL R8, R14 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 32(SP), R9 + MOVL R9, 160(R8) + + // ROUND3(41) + // SHUFFLE + MOVL 36(SP), R9 + XORL 24(SP), R9 + XORL 4(SP), R9 + XORL 44(SP), R9 + ROLL $+1, R9 + MOVL R9, 36(SP) + + // FUNC3 + MOVL R10, R8 + ORL R11, R8 + ANDL R12, R8 + MOVL R10, R15 + ANDL R11, R15 + ORL R8, R15 + + // MIX + ROLL $+30, R10 + ADDL R15, R13 + MOVL R14, R8 + ROLL $+5, R8 + LEAL 2400959708(R13)(R9*1), R13 + ADDL R8, R13 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 36(SP), R9 + MOVL R9, 164(R8) + + // ROUND3(42) + // SHUFFLE + MOVL 40(SP), R9 + XORL 28(SP), R9 + XORL 8(SP), R9 + XORL 48(SP), R9 + ROLL $+1, R9 + MOVL R9, 40(SP) + + // FUNC3 + 
MOVL R14, R8 + ORL R10, R8 + ANDL R11, R8 + MOVL R14, R15 + ANDL R10, R15 + ORL R8, R15 + + // MIX + ROLL $+30, R14 + ADDL R15, R12 + MOVL R13, R8 + ROLL $+5, R8 + LEAL 2400959708(R12)(R9*1), R12 + ADDL R8, R12 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 40(SP), R9 + MOVL R9, 168(R8) + + // ROUND3(43) + // SHUFFLE + MOVL 44(SP), R9 + XORL 32(SP), R9 + XORL 12(SP), R9 + XORL 52(SP), R9 + ROLL $+1, R9 + MOVL R9, 44(SP) + + // FUNC3 + MOVL R13, R8 + ORL R14, R8 + ANDL R10, R8 + MOVL R13, R15 + ANDL R14, R15 + ORL R8, R15 + + // MIX + ROLL $+30, R13 + ADDL R15, R11 + MOVL R12, R8 + ROLL $+5, R8 + LEAL 2400959708(R11)(R9*1), R11 + ADDL R8, R11 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 44(SP), R9 + MOVL R9, 172(R8) + + // ROUND3(44) + // SHUFFLE + MOVL 48(SP), R9 + XORL 36(SP), R9 + XORL 16(SP), R9 + XORL 56(SP), R9 + ROLL $+1, R9 + MOVL R9, 48(SP) + + // FUNC3 + MOVL R12, R8 + ORL R13, R8 + ANDL R14, R8 + MOVL R12, R15 + ANDL R13, R15 + ORL R8, R15 + + // MIX + ROLL $+30, R12 + ADDL R15, R10 + MOVL R11, R8 + ROLL $+5, R8 + LEAL 2400959708(R10)(R9*1), R10 + ADDL R8, R10 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 48(SP), R9 + MOVL R9, 176(R8) + + // ROUND3(45) + // SHUFFLE + MOVL 52(SP), R9 + XORL 40(SP), R9 + XORL 20(SP), R9 + XORL 60(SP), R9 + ROLL $+1, R9 + MOVL R9, 52(SP) + + // FUNC3 + MOVL R11, R8 + ORL R12, R8 + ANDL R13, R8 + MOVL R11, R15 + ANDL R12, R15 + ORL R8, R15 + + // MIX + ROLL $+30, R11 + ADDL R15, R14 + MOVL R10, R8 + ROLL $+5, R8 + LEAL 2400959708(R14)(R9*1), R14 + ADDL R8, R14 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 52(SP), R9 + MOVL R9, 180(R8) + + // ROUND3(46) + // SHUFFLE + MOVL 56(SP), R9 + XORL 44(SP), R9 + XORL 24(SP), R9 + XORL (SP), R9 + ROLL $+1, R9 + MOVL R9, 56(SP) + + // FUNC3 + MOVL R10, R8 + ORL R11, R8 + ANDL R12, R8 + MOVL R10, R15 + ANDL R11, R15 + ORL R8, R15 + + // MIX + ROLL $+30, R10 + ADDL R15, R13 + MOVL R14, R8 + ROLL $+5, R8 + LEAL 2400959708(R13)(R9*1), R13 + ADDL R8, R13 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 56(SP), R9 + MOVL R9, 184(R8) + + // ROUND3(47) + // SHUFFLE + MOVL 60(SP), R9 + XORL 48(SP), R9 + XORL 28(SP), R9 + XORL 4(SP), R9 + ROLL $+1, R9 + MOVL R9, 60(SP) + + // FUNC3 + MOVL R14, R8 + ORL R10, R8 + ANDL R11, R8 + MOVL R14, R15 + ANDL R10, R15 + ORL R8, R15 + + // MIX + ROLL $+30, R14 + ADDL R15, R12 + MOVL R13, R8 + ROLL $+5, R8 + LEAL 2400959708(R12)(R9*1), R12 + ADDL R8, R12 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 60(SP), R9 + MOVL R9, 188(R8) + + // ROUND3(48) + // SHUFFLE + MOVL (SP), R9 + XORL 52(SP), R9 + XORL 32(SP), R9 + XORL 8(SP), R9 + ROLL $+1, R9 + MOVL R9, (SP) + + // FUNC3 + MOVL R13, R8 + ORL R14, R8 + ANDL R10, R8 + MOVL R13, R15 + ANDL R14, R15 + ORL R8, R15 + + // MIX + ROLL $+30, R13 + ADDL R15, R11 + MOVL R12, R8 + ROLL $+5, R8 + LEAL 2400959708(R11)(R9*1), R11 + ADDL R8, R11 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL (SP), R9 + MOVL R9, 192(R8) + + // ROUND3(49) + // SHUFFLE + MOVL 4(SP), R9 + XORL 56(SP), R9 + XORL 36(SP), R9 + XORL 12(SP), R9 + ROLL $+1, R9 + MOVL R9, 4(SP) + + // FUNC3 + MOVL R12, R8 + ORL R13, R8 + ANDL R14, R8 + MOVL R12, R15 + ANDL R13, R15 + ORL R8, R15 + + // MIX + ROLL $+30, R12 + ADDL R15, R10 + MOVL R11, R8 + ROLL $+5, R8 + LEAL 2400959708(R10)(R9*1), R10 + ADDL R8, R10 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 4(SP), R9 + MOVL R9, 196(R8) + + // ROUND3(50) + // SHUFFLE + MOVL 8(SP), R9 + XORL 60(SP), R9 + XORL 40(SP), R9 + XORL 16(SP), R9 + ROLL $+1, R9 + MOVL R9, 8(SP) + + // FUNC3 + MOVL R11, R8 + ORL R12, R8 + ANDL R13, R8 + MOVL 
R11, R15 + ANDL R12, R15 + ORL R8, R15 + + // MIX + ROLL $+30, R11 + ADDL R15, R14 + MOVL R10, R8 + ROLL $+5, R8 + LEAL 2400959708(R14)(R9*1), R14 + ADDL R8, R14 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 8(SP), R9 + MOVL R9, 200(R8) + + // ROUND3(51) + // SHUFFLE + MOVL 12(SP), R9 + XORL (SP), R9 + XORL 44(SP), R9 + XORL 20(SP), R9 + ROLL $+1, R9 + MOVL R9, 12(SP) + + // FUNC3 + MOVL R10, R8 + ORL R11, R8 + ANDL R12, R8 + MOVL R10, R15 + ANDL R11, R15 + ORL R8, R15 + + // MIX + ROLL $+30, R10 + ADDL R15, R13 + MOVL R14, R8 + ROLL $+5, R8 + LEAL 2400959708(R13)(R9*1), R13 + ADDL R8, R13 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 12(SP), R9 + MOVL R9, 204(R8) + + // ROUND3(52) + // SHUFFLE + MOVL 16(SP), R9 + XORL 4(SP), R9 + XORL 48(SP), R9 + XORL 24(SP), R9 + ROLL $+1, R9 + MOVL R9, 16(SP) + + // FUNC3 + MOVL R14, R8 + ORL R10, R8 + ANDL R11, R8 + MOVL R14, R15 + ANDL R10, R15 + ORL R8, R15 + + // MIX + ROLL $+30, R14 + ADDL R15, R12 + MOVL R13, R8 + ROLL $+5, R8 + LEAL 2400959708(R12)(R9*1), R12 + ADDL R8, R12 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 16(SP), R9 + MOVL R9, 208(R8) + + // ROUND3(53) + // SHUFFLE + MOVL 20(SP), R9 + XORL 8(SP), R9 + XORL 52(SP), R9 + XORL 28(SP), R9 + ROLL $+1, R9 + MOVL R9, 20(SP) + + // FUNC3 + MOVL R13, R8 + ORL R14, R8 + ANDL R10, R8 + MOVL R13, R15 + ANDL R14, R15 + ORL R8, R15 + + // MIX + ROLL $+30, R13 + ADDL R15, R11 + MOVL R12, R8 + ROLL $+5, R8 + LEAL 2400959708(R11)(R9*1), R11 + ADDL R8, R11 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 20(SP), R9 + MOVL R9, 212(R8) + + // ROUND3(54) + // SHUFFLE + MOVL 24(SP), R9 + XORL 12(SP), R9 + XORL 56(SP), R9 + XORL 32(SP), R9 + ROLL $+1, R9 + MOVL R9, 24(SP) + + // FUNC3 + MOVL R12, R8 + ORL R13, R8 + ANDL R14, R8 + MOVL R12, R15 + ANDL R13, R15 + ORL R8, R15 + + // MIX + ROLL $+30, R12 + ADDL R15, R10 + MOVL R11, R8 + ROLL $+5, R8 + LEAL 2400959708(R10)(R9*1), R10 + ADDL R8, R10 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 24(SP), R9 + MOVL R9, 216(R8) + + // ROUND3(55) + // SHUFFLE + MOVL 28(SP), R9 + XORL 16(SP), R9 + XORL 60(SP), R9 + XORL 36(SP), R9 + ROLL $+1, R9 + MOVL R9, 28(SP) + + // FUNC3 + MOVL R11, R8 + ORL R12, R8 + ANDL R13, R8 + MOVL R11, R15 + ANDL R12, R15 + ORL R8, R15 + + // MIX + ROLL $+30, R11 + ADDL R15, R14 + MOVL R10, R8 + ROLL $+5, R8 + LEAL 2400959708(R14)(R9*1), R14 + ADDL R8, R14 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 28(SP), R9 + MOVL R9, 220(R8) + + // ROUND3(56) + // SHUFFLE + MOVL 32(SP), R9 + XORL 20(SP), R9 + XORL (SP), R9 + XORL 40(SP), R9 + ROLL $+1, R9 + MOVL R9, 32(SP) + + // FUNC3 + MOVL R10, R8 + ORL R11, R8 + ANDL R12, R8 + MOVL R10, R15 + ANDL R11, R15 + ORL R8, R15 + + // MIX + ROLL $+30, R10 + ADDL R15, R13 + MOVL R14, R8 + ROLL $+5, R8 + LEAL 2400959708(R13)(R9*1), R13 + ADDL R8, R13 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 32(SP), R9 + MOVL R9, 224(R8) + + // ROUND3(57) + // SHUFFLE + MOVL 36(SP), R9 + XORL 24(SP), R9 + XORL 4(SP), R9 + XORL 44(SP), R9 + ROLL $+1, R9 + MOVL R9, 36(SP) + + // FUNC3 + MOVL R14, R8 + ORL R10, R8 + ANDL R11, R8 + MOVL R14, R15 + ANDL R10, R15 + ORL R8, R15 + + // MIX + ROLL $+30, R14 + ADDL R15, R12 + MOVL R13, R8 + ROLL $+5, R8 + LEAL 2400959708(R12)(R9*1), R12 + ADDL R8, R12 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 36(SP), R9 + MOVL R9, 228(R8) + + // Load cs + MOVQ cs_base+56(FP), R8 + MOVL R12, 20(R8) + MOVL R13, 24(R8) + MOVL R14, 28(R8) + MOVL R10, 32(R8) + MOVL R11, 36(R8) + + // ROUND3(58) + // SHUFFLE + MOVL 40(SP), R9 + XORL 28(SP), R9 + XORL 8(SP), R9 + XORL 48(SP), R9 + 
ROLL $+1, R9 + MOVL R9, 40(SP) + + // FUNC3 + MOVL R13, R8 + ORL R14, R8 + ANDL R10, R8 + MOVL R13, R15 + ANDL R14, R15 + ORL R8, R15 + + // MIX + ROLL $+30, R13 + ADDL R15, R11 + MOVL R12, R8 + ROLL $+5, R8 + LEAL 2400959708(R11)(R9*1), R11 + ADDL R8, R11 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 40(SP), R9 + MOVL R9, 232(R8) + + // ROUND3(59) + // SHUFFLE + MOVL 44(SP), R9 + XORL 32(SP), R9 + XORL 12(SP), R9 + XORL 52(SP), R9 + ROLL $+1, R9 + MOVL R9, 44(SP) + + // FUNC3 + MOVL R12, R8 + ORL R13, R8 + ANDL R14, R8 + MOVL R12, R15 + ANDL R13, R15 + ORL R8, R15 + + // MIX + ROLL $+30, R12 + ADDL R15, R10 + MOVL R11, R8 + ROLL $+5, R8 + LEAL 2400959708(R10)(R9*1), R10 + ADDL R8, R10 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 44(SP), R9 + MOVL R9, 236(R8) + + // ROUND4 (steps 60-79) + // ROUND4(60) + // SHUFFLE + MOVL 48(SP), R9 + XORL 36(SP), R9 + XORL 16(SP), R9 + XORL 56(SP), R9 + ROLL $+1, R9 + MOVL R9, 48(SP) + + // FUNC2 + MOVL R11, R15 + XORL R12, R15 + XORL R13, R15 + + // MIX + ROLL $+30, R11 + ADDL R15, R14 + MOVL R10, R8 + ROLL $+5, R8 + LEAL 3395469782(R14)(R9*1), R14 + ADDL R8, R14 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 48(SP), R9 + MOVL R9, 240(R8) + + // ROUND4(61) + // SHUFFLE + MOVL 52(SP), R9 + XORL 40(SP), R9 + XORL 20(SP), R9 + XORL 60(SP), R9 + ROLL $+1, R9 + MOVL R9, 52(SP) + + // FUNC2 + MOVL R10, R15 + XORL R11, R15 + XORL R12, R15 + + // MIX + ROLL $+30, R10 + ADDL R15, R13 + MOVL R14, R8 + ROLL $+5, R8 + LEAL 3395469782(R13)(R9*1), R13 + ADDL R8, R13 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 52(SP), R9 + MOVL R9, 244(R8) + + // ROUND4(62) + // SHUFFLE + MOVL 56(SP), R9 + XORL 44(SP), R9 + XORL 24(SP), R9 + XORL (SP), R9 + ROLL $+1, R9 + MOVL R9, 56(SP) + + // FUNC2 + MOVL R14, R15 + XORL R10, R15 + XORL R11, R15 + + // MIX + ROLL $+30, R14 + ADDL R15, R12 + MOVL R13, R8 + ROLL $+5, R8 + LEAL 3395469782(R12)(R9*1), R12 + ADDL R8, R12 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 56(SP), R9 + MOVL R9, 248(R8) + + // ROUND4(63) + // SHUFFLE + MOVL 60(SP), R9 + XORL 48(SP), R9 + XORL 28(SP), R9 + XORL 4(SP), R9 + ROLL $+1, R9 + MOVL R9, 60(SP) + + // FUNC2 + MOVL R13, R15 + XORL R14, R15 + XORL R10, R15 + + // MIX + ROLL $+30, R13 + ADDL R15, R11 + MOVL R12, R8 + ROLL $+5, R8 + LEAL 3395469782(R11)(R9*1), R11 + ADDL R8, R11 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 60(SP), R9 + MOVL R9, 252(R8) + + // ROUND4(64) + // SHUFFLE + MOVL (SP), R9 + XORL 52(SP), R9 + XORL 32(SP), R9 + XORL 8(SP), R9 + ROLL $+1, R9 + MOVL R9, (SP) + + // FUNC2 + MOVL R12, R15 + XORL R13, R15 + XORL R14, R15 + + // MIX + ROLL $+30, R12 + ADDL R15, R10 + MOVL R11, R8 + ROLL $+5, R8 + LEAL 3395469782(R10)(R9*1), R10 + ADDL R8, R10 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL (SP), R9 + MOVL R9, 256(R8) + + // Load cs + MOVQ cs_base+56(FP), R8 + MOVL R10, 40(R8) + MOVL R11, 44(R8) + MOVL R12, 48(R8) + MOVL R13, 52(R8) + MOVL R14, 56(R8) + + // ROUND4(65) + // SHUFFLE + MOVL 4(SP), R9 + XORL 56(SP), R9 + XORL 36(SP), R9 + XORL 12(SP), R9 + ROLL $+1, R9 + MOVL R9, 4(SP) + + // FUNC2 + MOVL R11, R15 + XORL R12, R15 + XORL R13, R15 + + // MIX + ROLL $+30, R11 + ADDL R15, R14 + MOVL R10, R8 + ROLL $+5, R8 + LEAL 3395469782(R14)(R9*1), R14 + ADDL R8, R14 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 4(SP), R9 + MOVL R9, 260(R8) + + // ROUND4(66) + // SHUFFLE + MOVL 8(SP), R9 + XORL 60(SP), R9 + XORL 40(SP), R9 + XORL 16(SP), R9 + ROLL $+1, R9 + MOVL R9, 8(SP) + + // FUNC2 + MOVL R10, R15 + XORL R11, R15 + XORL R12, R15 + + // MIX + ROLL $+30, R10 + ADDL R15, R13 + 
MOVL R14, R8 + ROLL $+5, R8 + LEAL 3395469782(R13)(R9*1), R13 + ADDL R8, R13 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 8(SP), R9 + MOVL R9, 264(R8) + + // ROUND4(67) + // SHUFFLE + MOVL 12(SP), R9 + XORL (SP), R9 + XORL 44(SP), R9 + XORL 20(SP), R9 + ROLL $+1, R9 + MOVL R9, 12(SP) + + // FUNC2 + MOVL R14, R15 + XORL R10, R15 + XORL R11, R15 + + // MIX + ROLL $+30, R14 + ADDL R15, R12 + MOVL R13, R8 + ROLL $+5, R8 + LEAL 3395469782(R12)(R9*1), R12 + ADDL R8, R12 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 12(SP), R9 + MOVL R9, 268(R8) + + // ROUND4(68) + // SHUFFLE + MOVL 16(SP), R9 + XORL 4(SP), R9 + XORL 48(SP), R9 + XORL 24(SP), R9 + ROLL $+1, R9 + MOVL R9, 16(SP) + + // FUNC2 + MOVL R13, R15 + XORL R14, R15 + XORL R10, R15 + + // MIX + ROLL $+30, R13 + ADDL R15, R11 + MOVL R12, R8 + ROLL $+5, R8 + LEAL 3395469782(R11)(R9*1), R11 + ADDL R8, R11 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 16(SP), R9 + MOVL R9, 272(R8) + + // ROUND4(69) + // SHUFFLE + MOVL 20(SP), R9 + XORL 8(SP), R9 + XORL 52(SP), R9 + XORL 28(SP), R9 + ROLL $+1, R9 + MOVL R9, 20(SP) + + // FUNC2 + MOVL R12, R15 + XORL R13, R15 + XORL R14, R15 + + // MIX + ROLL $+30, R12 + ADDL R15, R10 + MOVL R11, R8 + ROLL $+5, R8 + LEAL 3395469782(R10)(R9*1), R10 + ADDL R8, R10 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 20(SP), R9 + MOVL R9, 276(R8) + + // ROUND4(70) + // SHUFFLE + MOVL 24(SP), R9 + XORL 12(SP), R9 + XORL 56(SP), R9 + XORL 32(SP), R9 + ROLL $+1, R9 + MOVL R9, 24(SP) + + // FUNC2 + MOVL R11, R15 + XORL R12, R15 + XORL R13, R15 + + // MIX + ROLL $+30, R11 + ADDL R15, R14 + MOVL R10, R8 + ROLL $+5, R8 + LEAL 3395469782(R14)(R9*1), R14 + ADDL R8, R14 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 24(SP), R9 + MOVL R9, 280(R8) + + // ROUND4(71) + // SHUFFLE + MOVL 28(SP), R9 + XORL 16(SP), R9 + XORL 60(SP), R9 + XORL 36(SP), R9 + ROLL $+1, R9 + MOVL R9, 28(SP) + + // FUNC2 + MOVL R10, R15 + XORL R11, R15 + XORL R12, R15 + + // MIX + ROLL $+30, R10 + ADDL R15, R13 + MOVL R14, R8 + ROLL $+5, R8 + LEAL 3395469782(R13)(R9*1), R13 + ADDL R8, R13 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 28(SP), R9 + MOVL R9, 284(R8) + + // ROUND4(72) + // SHUFFLE + MOVL 32(SP), R9 + XORL 20(SP), R9 + XORL (SP), R9 + XORL 40(SP), R9 + ROLL $+1, R9 + MOVL R9, 32(SP) + + // FUNC2 + MOVL R14, R15 + XORL R10, R15 + XORL R11, R15 + + // MIX + ROLL $+30, R14 + ADDL R15, R12 + MOVL R13, R8 + ROLL $+5, R8 + LEAL 3395469782(R12)(R9*1), R12 + ADDL R8, R12 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 32(SP), R9 + MOVL R9, 288(R8) + + // ROUND4(73) + // SHUFFLE + MOVL 36(SP), R9 + XORL 24(SP), R9 + XORL 4(SP), R9 + XORL 44(SP), R9 + ROLL $+1, R9 + MOVL R9, 36(SP) + + // FUNC2 + MOVL R13, R15 + XORL R14, R15 + XORL R10, R15 + + // MIX + ROLL $+30, R13 + ADDL R15, R11 + MOVL R12, R8 + ROLL $+5, R8 + LEAL 3395469782(R11)(R9*1), R11 + ADDL R8, R11 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 36(SP), R9 + MOVL R9, 292(R8) + + // ROUND4(74) + // SHUFFLE + MOVL 40(SP), R9 + XORL 28(SP), R9 + XORL 8(SP), R9 + XORL 48(SP), R9 + ROLL $+1, R9 + MOVL R9, 40(SP) + + // FUNC2 + MOVL R12, R15 + XORL R13, R15 + XORL R14, R15 + + // MIX + ROLL $+30, R12 + ADDL R15, R10 + MOVL R11, R8 + ROLL $+5, R8 + LEAL 3395469782(R10)(R9*1), R10 + ADDL R8, R10 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 40(SP), R9 + MOVL R9, 296(R8) + + // ROUND4(75) + // SHUFFLE + MOVL 44(SP), R9 + XORL 32(SP), R9 + XORL 12(SP), R9 + XORL 52(SP), R9 + ROLL $+1, R9 + MOVL R9, 44(SP) + + // FUNC2 + MOVL R11, R15 + XORL R12, R15 + XORL R13, R15 + + // MIX + ROLL $+30, R11 
+ ADDL R15, R14 + MOVL R10, R8 + ROLL $+5, R8 + LEAL 3395469782(R14)(R9*1), R14 + ADDL R8, R14 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 44(SP), R9 + MOVL R9, 300(R8) + + // ROUND4(76) + // SHUFFLE + MOVL 48(SP), R9 + XORL 36(SP), R9 + XORL 16(SP), R9 + XORL 56(SP), R9 + ROLL $+1, R9 + MOVL R9, 48(SP) + + // FUNC2 + MOVL R10, R15 + XORL R11, R15 + XORL R12, R15 + + // MIX + ROLL $+30, R10 + ADDL R15, R13 + MOVL R14, R8 + ROLL $+5, R8 + LEAL 3395469782(R13)(R9*1), R13 + ADDL R8, R13 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 48(SP), R9 + MOVL R9, 304(R8) + + // ROUND4(77) + // SHUFFLE + MOVL 52(SP), R9 + XORL 40(SP), R9 + XORL 20(SP), R9 + XORL 60(SP), R9 + ROLL $+1, R9 + MOVL R9, 52(SP) + + // FUNC2 + MOVL R14, R15 + XORL R10, R15 + XORL R11, R15 + + // MIX + ROLL $+30, R14 + ADDL R15, R12 + MOVL R13, R8 + ROLL $+5, R8 + LEAL 3395469782(R12)(R9*1), R12 + ADDL R8, R12 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 52(SP), R9 + MOVL R9, 308(R8) + + // ROUND4(78) + // SHUFFLE + MOVL 56(SP), R9 + XORL 44(SP), R9 + XORL 24(SP), R9 + XORL (SP), R9 + ROLL $+1, R9 + MOVL R9, 56(SP) + + // FUNC2 + MOVL R13, R15 + XORL R14, R15 + XORL R10, R15 + + // MIX + ROLL $+30, R13 + ADDL R15, R11 + MOVL R12, R8 + ROLL $+5, R8 + LEAL 3395469782(R11)(R9*1), R11 + ADDL R8, R11 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 56(SP), R9 + MOVL R9, 312(R8) + + // ROUND4(79) + // SHUFFLE + MOVL 60(SP), R9 + XORL 48(SP), R9 + XORL 28(SP), R9 + XORL 4(SP), R9 + ROLL $+1, R9 + MOVL R9, 60(SP) + + // FUNC2 + MOVL R12, R15 + XORL R13, R15 + XORL R14, R15 + + // MIX + ROLL $+30, R12 + ADDL R15, R10 + MOVL R11, R8 + ROLL $+5, R8 + LEAL 3395469782(R10)(R9*1), R10 + ADDL R8, R10 + + // Load m1 + MOVQ m1_base+32(FP), R8 + MOVL 60(SP), R9 + MOVL R9, 316(R8) + + // Add registers to temp hash. + ADDL R10, AX + ADDL R11, BX + ADDL R12, CX + ADDL R13, DX + ADDL R14, BP + ADDQ $+64, DI + CMPQ DI, SI + JB loop + +end: + MOVQ dig+0(FP), SI + MOVL AX, (SI) + MOVL BX, 4(SI) + MOVL CX, 8(SI) + MOVL DX, 12(SI) + MOVL BP, 16(SI) + RET diff --git a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_generic.go b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_generic.go new file mode 100644 index 00000000..ba8b96e8 --- /dev/null +++ b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_generic.go @@ -0,0 +1,268 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Originally from: https://github.com/golang/go/blob/master/src/crypto/sha1/sha1block.go +// It has been modified to support collision detection. + +package sha1cd + +import ( + "fmt" + "math/bits" + + shared "github.com/pjbgf/sha1cd/internal" + "github.com/pjbgf/sha1cd/ubc" +) + +// blockGeneric is a portable, pure Go version of the SHA-1 block step. +// It is used by sha1cdblock_noasm.go and tests. +func blockGeneric(dig *digest, p []byte) { + var w [16]uint32 + + // cs stores the pre-step compression state for only the steps required for the + // collision detection, which are 0, 58 and 65. + // Refer to ubc/const.go for more details. + cs := [shared.PreStepState][shared.WordBuffers]uint32{} + + h0, h1, h2, h3, h4 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4] + for len(p) >= shared.Chunk { + m1 := [shared.Rounds]uint32{} + hi := 1 + + // Collision attacks are thwarted by hashing a detected near-collision block 3 times.
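+ // (Concretely, the hi counter and the rehash label below implement this: a block that trips the detector is recompressed twice more before its state is accepted.)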
+ // Think of it as extending SHA-1 from 80-steps to 240-steps for such blocks: + // The best collision attacks against SHA-1 have complexity about 2^60, + // thus for 240-steps an immediate lower-bound for the best cryptanalytic attacks would be 2^180. + // An attacker would be better off using a generic birthday search of complexity 2^80. + rehash: + a, b, c, d, e := h0, h1, h2, h3, h4 + + // Each of the four 20-iteration rounds + // differs only in the computation of f and + // the choice of K (K0, K1, etc). + i := 0 + + // Store pre-step compression state for the collision detection. + cs[0] = [shared.WordBuffers]uint32{a, b, c, d, e} + + for ; i < 16; i++ { + // load step + j := i * 4 + w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3]) + + f := b&c | (^b)&d + t := bits.RotateLeft32(a, 5) + f + e + w[i&0xf] + shared.K0 + a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d + + // Store compression state for the collision detection. + m1[i] = w[i&0xf] + } + for ; i < 20; i++ { + tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf] + w[i&0xf] = tmp<<1 | tmp>>(32-1) + + f := b&c | (^b)&d + t := bits.RotateLeft32(a, 5) + f + e + w[i&0xf] + shared.K0 + a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d + + // Store compression state for the collision detection. + m1[i] = w[i&0xf] + } + for ; i < 40; i++ { + tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf] + w[i&0xf] = tmp<<1 | tmp>>(32-1) + + f := b ^ c ^ d + t := bits.RotateLeft32(a, 5) + f + e + w[i&0xf] + shared.K1 + a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d + + // Store compression state for the collision detection. + m1[i] = w[i&0xf] + } + for ; i < 60; i++ { + if i == 58 { + // Store pre-step compression state for the collision detection. + cs[1] = [shared.WordBuffers]uint32{a, b, c, d, e} + } + + tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf] + w[i&0xf] = tmp<<1 | tmp>>(32-1) + + f := ((b | c) & d) | (b & c) + t := bits.RotateLeft32(a, 5) + f + e + w[i&0xf] + shared.K2 + a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d + + // Store compression state for the collision detection. + m1[i] = w[i&0xf] + } + for ; i < 80; i++ { + if i == 65 { + // Store pre-step compression state for the collision detection. + cs[2] = [shared.WordBuffers]uint32{a, b, c, d, e} + } + + tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf] + w[i&0xf] = tmp<<1 | tmp>>(32-1) + + f := b ^ c ^ d + t := bits.RotateLeft32(a, 5) + f + e + w[i&0xf] + shared.K3 + a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d + + // Store compression state for the collision detection. 
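+ // (m1 retains the fully expanded 80-word message schedule; hasCollided later derives the alternate message m2 = m1 ^ dm from it.)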
+ m1[i] = w[i&0xf] + } + + h0 += a + h1 += b + h2 += c + h3 += d + h4 += e + + if hi == 2 { + hi++ + goto rehash + } + + if hi == 1 { + col := checkCollision(m1, cs, [shared.WordBuffers]uint32{h0, h1, h2, h3, h4}) + if col { + dig.col = true + hi++ + goto rehash + } + } + + p = p[shared.Chunk:] + } + + dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4] = h0, h1, h2, h3, h4 +} + +func checkCollision( + m1 [shared.Rounds]uint32, + cs [shared.PreStepState][shared.WordBuffers]uint32, + state [shared.WordBuffers]uint32) bool { + + if mask := ubc.CalculateDvMask(m1); mask != 0 { + dvs := ubc.SHA1_dvs() + + for i := 0; dvs[i].DvType != 0; i++ { + if (mask & ((uint32)(1) << uint32(dvs[i].MaskB))) != 0 { + var csState [shared.WordBuffers]uint32 + switch dvs[i].TestT { + case 58: + csState = cs[1] + case 65: + csState = cs[2] + case 0: + csState = cs[0] + default: + panic(fmt.Sprintf("dvs data is trying to use a testT that isn't available: %d", dvs[i].TestT)) + } + + col := hasCollided( + dvs[i].TestT, // testT is the step number to recompress from. + // m2 is a secondary message created by XORing m1 with + // ubc's Dm prior to the SHA recompression step. + m1, dvs[i].Dm, + csState, + state) + + if col { + return true + } + } + } + } + return false +} + +func hasCollided(step uint32, m1, dm [shared.Rounds]uint32, + state [shared.WordBuffers]uint32, h [shared.WordBuffers]uint32) bool { + // Intermediate Hash Value. + ihv := [shared.WordBuffers]uint32{} + + a, b, c, d, e := state[0], state[1], state[2], state[3], state[4] + + // Walk backwards from the current step to undo the previous compression. + // The existing collision detection does not have dvs with a TestT higher than 65, + // so the start value of i is set accordingly. + for i := uint32(64); i >= 60; i-- { + a, b, c, d, e = b, c, d, e, a + if step > i { + b = bits.RotateLeft32(b, -30) + f := b ^ c ^ d + e -= bits.RotateLeft32(a, 5) + f + shared.K3 + (m1[i] ^ dm[i]) // m2 = m1 ^ dm. + } + } + for i := uint32(59); i >= 40; i-- { + a, b, c, d, e = b, c, d, e, a + if step > i { + b = bits.RotateLeft32(b, -30) + f := ((b | c) & d) | (b & c) + e -= bits.RotateLeft32(a, 5) + f + shared.K2 + (m1[i] ^ dm[i]) + } + } + for i := uint32(39); i >= 20; i-- { + a, b, c, d, e = b, c, d, e, a + if step > i { + b = bits.RotateLeft32(b, -30) + f := b ^ c ^ d + e -= bits.RotateLeft32(a, 5) + f + shared.K1 + (m1[i] ^ dm[i]) + } + } + for i := uint32(20); i > 0; i-- { + j := i - 1 + a, b, c, d, e = b, c, d, e, a + if step > j { + b = bits.RotateLeft32(b, -30) // undo the rotate left + f := b&c | (^b)&d + // subtract from e + e -= bits.RotateLeft32(a, 5) + f + shared.K0 + (m1[j] ^ dm[j]) + } + } + + ihv[0] = a + ihv[1] = b + ihv[2] = c + ihv[3] = d + ihv[4] = e + a = state[0] + b = state[1] + c = state[2] + d = state[3] + e = state[4] + + // Recompress blocks based on the current step. + // The existing collision detection does not have dvs below 58, so they have been removed + // from the source code. If new dvs are added which target rounds below 40, that logic + // will need to be re-added here.
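+ // Put differently: the backward loops above reconstruct, from the stored pre-step state at step TestT, the initial state that the alternate message m2 = m1 ^ dm would have started from; + // the forward loops below replay steps TestT..79 with m2 and add that reconstructed state back in. A final match against h means m2 compresses to the same digest as m1, the signature of a near-collision block.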
+ for i := uint32(40); i < 60; i++ { + if step <= i { + f := ((b | c) & d) | (b & c) + t := bits.RotateLeft32(a, 5) + f + e + shared.K2 + (m1[i] ^ dm[i]) + a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d + } + } + for i := uint32(60); i < 80; i++ { + if step <= i { + f := b ^ c ^ d + t := bits.RotateLeft32(a, 5) + f + e + shared.K3 + (m1[i] ^ dm[i]) + a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d + } + } + + ihv[0] += a + ihv[1] += b + ihv[2] += c + ihv[3] += d + ihv[4] += e + + if ((ihv[0] ^ h[0]) | (ihv[1] ^ h[1]) | + (ihv[2] ^ h[2]) | (ihv[3] ^ h[3]) | (ihv[4] ^ h[4])) == 0 { + return true + } + + return false +} diff --git a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_noasm.go b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_noasm.go new file mode 100644 index 00000000..15bae5a7 --- /dev/null +++ b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_noasm.go @@ -0,0 +1,8 @@ +//go:build !amd64 || noasm || !gc +// +build !amd64 noasm !gc + +package sha1cd + +func block(dig *digest, p []byte) { + blockGeneric(dig, p) +} diff --git a/vendor/github.com/pjbgf/sha1cd/ubc/BUILD.bazel b/vendor/github.com/pjbgf/sha1cd/ubc/BUILD.bazel new file mode 100644 index 00000000..b431ea94 --- /dev/null +++ b/vendor/github.com/pjbgf/sha1cd/ubc/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "ubc", + srcs = [ + "check.go", + "const.go", + "doc.go", + ], + importmap = "peridot.resf.org/vendor/github.com/pjbgf/sha1cd/ubc", + importpath = "github.com/pjbgf/sha1cd/ubc", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/pjbgf/sha1cd/ubc/check.go b/vendor/github.com/pjbgf/sha1cd/ubc/check.go new file mode 100644 index 00000000..167a5558 --- /dev/null +++ b/vendor/github.com/pjbgf/sha1cd/ubc/check.go @@ -0,0 +1,368 @@ +// Based on the C implementation from Marc Stevens and Dan Shumow. +// https://github.com/cr-marcstevens/sha1collisiondetection + +package ubc + +type DvInfo struct { + // DvType, DvK and DvB define the DV: I(K,B) or II(K,B) (see the paper). + // https://marc-stevens.nl/research/papers/C13-S.pdf + DvType uint32 + DvK uint32 + DvB uint32 + + // TestT is the step to do the recompression from for collision detection. + TestT uint32 + + // MaskI and MaskB define the bit to check for each DV in the dvmask returned by ubc_check. + MaskI uint32 + MaskB uint32 + + // Dm is the expanded message block XOR-difference defined by the DV. + Dm [80]uint32 +} + +// CalculateDvMask takes as input an expanded message block and verifies the unavoidable bitconditions +// for all listed DVs. It returns a dvmask where each bit belonging to a DV is set if all +// unavoidable bitconditions for that DV have been met. +// Thus, one needs to do the recompression check for each DV that has its bit set. 
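+// +// Illustrative use, mirroring checkCollision in sha1cdblock_generic.go (a sketch; the actual check lives there): +// +//	mask := CalculateDvMask(W) +//	dvs := SHA1_dvs() +//	for i := 0; dvs[i].DvType != 0; i++ { +//		if mask&(uint32(1)<<dvs[i].MaskB) != 0 { +//			// recompress from step dvs[i].TestT using m2 = W ^ dvs[i].Dm +//		} +//	}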
+func CalculateDvMask(W [80]uint32) uint32 { + mask := uint32(0xFFFFFFFF) + mask &= (((((W[44] ^ W[45]) >> 29) & 1) - 1) | ^(DV_I_48_0_bit | DV_I_51_0_bit | DV_I_52_0_bit | DV_II_45_0_bit | DV_II_46_0_bit | DV_II_50_0_bit | DV_II_51_0_bit)) + mask &= (((((W[49] ^ W[50]) >> 29) & 1) - 1) | ^(DV_I_46_0_bit | DV_II_45_0_bit | DV_II_50_0_bit | DV_II_51_0_bit | DV_II_55_0_bit | DV_II_56_0_bit)) + mask &= (((((W[48] ^ W[49]) >> 29) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_52_0_bit | DV_II_49_0_bit | DV_II_50_0_bit | DV_II_54_0_bit | DV_II_55_0_bit)) + mask &= ((((W[47] ^ (W[50] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_51_0_bit | DV_II_56_0_bit)) + mask &= (((((W[47] ^ W[48]) >> 29) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_51_0_bit | DV_II_48_0_bit | DV_II_49_0_bit | DV_II_53_0_bit | DV_II_54_0_bit)) + mask &= (((((W[46] >> 4) ^ (W[49] >> 29)) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit | DV_II_50_0_bit | DV_II_55_0_bit)) + mask &= (((((W[46] ^ W[47]) >> 29) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_50_0_bit | DV_II_47_0_bit | DV_II_48_0_bit | DV_II_52_0_bit | DV_II_53_0_bit)) + mask &= (((((W[45] >> 4) ^ (W[48] >> 29)) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit | DV_II_49_0_bit | DV_II_54_0_bit)) + mask &= (((((W[45] ^ W[46]) >> 29) & 1) - 1) | ^(DV_I_49_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_47_0_bit | DV_II_51_0_bit | DV_II_52_0_bit)) + mask &= (((((W[44] >> 4) ^ (W[47] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit | DV_II_48_0_bit | DV_II_53_0_bit)) + mask &= (((((W[43] >> 4) ^ (W[46] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit | DV_II_47_0_bit | DV_II_52_0_bit)) + mask &= (((((W[43] ^ W[44]) >> 29) & 1) - 1) | ^(DV_I_47_0_bit | DV_I_50_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_49_0_bit | DV_II_50_0_bit)) + mask &= (((((W[42] >> 4) ^ (W[45] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_51_0_bit)) + mask &= (((((W[41] >> 4) ^ (W[44] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_50_0_bit)) + mask &= (((((W[40] ^ W[41]) >> 29) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_47_0_bit | DV_I_48_0_bit | DV_II_46_0_bit | DV_II_47_0_bit | DV_II_56_0_bit)) + mask &= (((((W[54] ^ W[55]) >> 29) & 1) - 1) | ^(DV_I_51_0_bit | DV_II_47_0_bit | DV_II_50_0_bit | DV_II_55_0_bit | DV_II_56_0_bit)) + mask &= (((((W[53] ^ W[54]) >> 29) & 1) - 1) | ^(DV_I_50_0_bit | DV_II_46_0_bit | DV_II_49_0_bit | DV_II_54_0_bit | DV_II_55_0_bit)) + mask &= (((((W[52] ^ W[53]) >> 29) & 1) - 1) | ^(DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit | DV_II_53_0_bit | DV_II_54_0_bit)) + mask &= ((((W[50] ^ (W[53] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_50_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_48_0_bit | DV_II_54_0_bit)) + mask &= (((((W[50] ^ W[51]) >> 29) & 1) - 1) | ^(DV_I_47_0_bit | DV_II_46_0_bit | DV_II_51_0_bit | DV_II_52_0_bit | DV_II_56_0_bit)) + mask &= ((((W[49] ^ (W[52] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_47_0_bit | DV_II_53_0_bit)) + mask &= ((((W[48] ^ (W[51] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_52_0_bit)) + mask &= (((((W[42] ^ W[43]) >> 29) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_49_0_bit | DV_I_50_0_bit | DV_II_48_0_bit | DV_II_49_0_bit)) + mask &= (((((W[41] ^ W[42]) >> 29) & 1) 
- 1) | ^(DV_I_45_0_bit | DV_I_48_0_bit | DV_I_49_0_bit | DV_II_47_0_bit | DV_II_48_0_bit)) + mask &= (((((W[40] >> 4) ^ (W[43] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_50_0_bit | DV_II_49_0_bit | DV_II_56_0_bit)) + mask &= (((((W[39] >> 4) ^ (W[42] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_49_0_bit | DV_II_48_0_bit | DV_II_55_0_bit)) + + if (mask & (DV_I_44_0_bit | DV_I_48_0_bit | DV_II_47_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) != 0 { + mask &= (((((W[38] >> 4) ^ (W[41] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_48_0_bit | DV_II_47_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) + } + mask &= (((((W[37] >> 4) ^ (W[40] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_47_0_bit | DV_II_46_0_bit | DV_II_53_0_bit | DV_II_55_0_bit)) + if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_51_0_bit | DV_II_56_0_bit)) != 0 { + mask &= (((((W[55] ^ W[56]) >> 29) & 1) - 1) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_51_0_bit | DV_II_56_0_bit)) + } + if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_50_0_bit | DV_II_56_0_bit)) != 0 { + mask &= ((((W[52] ^ (W[55] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_50_0_bit | DV_II_56_0_bit)) + } + if (mask & (DV_I_51_0_bit | DV_II_47_0_bit | DV_II_49_0_bit | DV_II_55_0_bit)) != 0 { + mask &= ((((W[51] ^ (W[54] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_51_0_bit | DV_II_47_0_bit | DV_II_49_0_bit | DV_II_55_0_bit)) + } + if (mask & (DV_I_48_0_bit | DV_II_47_0_bit | DV_II_52_0_bit | DV_II_53_0_bit)) != 0 { + mask &= (((((W[51] ^ W[52]) >> 29) & 1) - 1) | ^(DV_I_48_0_bit | DV_II_47_0_bit | DV_II_52_0_bit | DV_II_53_0_bit)) + } + if (mask & (DV_I_46_0_bit | DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit)) != 0 { + mask &= (((((W[36] >> 4) ^ (W[40] >> 29)) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit)) + } + if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_49_0_bit)) != 0 { + mask &= ((0 - (((W[53] ^ W[56]) >> 29) & 1)) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_49_0_bit)) + } + if (mask & (DV_I_50_0_bit | DV_II_46_0_bit | DV_II_47_0_bit)) != 0 { + mask &= ((0 - (((W[51] ^ W[54]) >> 29) & 1)) | ^(DV_I_50_0_bit | DV_II_46_0_bit | DV_II_47_0_bit)) + } + if (mask & (DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit)) != 0 { + mask &= ((0 - (((W[50] ^ W[52]) >> 29) & 1)) | ^(DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit)) + } + if (mask & (DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit)) != 0 { + mask &= ((0 - (((W[49] ^ W[51]) >> 29) & 1)) | ^(DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit)) + } + if (mask & (DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit)) != 0 { + mask &= ((0 - (((W[48] ^ W[50]) >> 29) & 1)) | ^(DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit)) + } + if (mask & (DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit)) != 0 { + mask &= ((0 - (((W[47] ^ W[49]) >> 29) & 1)) | ^(DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit)) + } + if (mask & (DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit)) != 0 { + mask &= ((0 - (((W[46] ^ W[48]) >> 29) & 1)) | ^(DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit)) + } + mask &= ((((W[45] ^ W[47]) & (1 << 6)) - (1 << 6)) | ^(DV_I_47_2_bit | DV_I_49_2_bit | DV_I_51_2_bit)) + if (mask & (DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit)) != 0 { + mask &= ((0 - (((W[45] ^ W[47]) >> 29) & 1)) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit)) + } + mask &= (((((W[44] ^ W[46]) >> 6) & 1) - 1) | ^(DV_I_46_2_bit | DV_I_48_2_bit | DV_I_50_2_bit)) + if (mask & (DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit)) != 0 { + mask &= ((0 - (((W[44] ^ W[46]) >> 29) & 1)) | 
^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit)) + } + mask &= ((0 - ((W[41] ^ (W[42] >> 5)) & (1 << 1))) | ^(DV_I_48_2_bit | DV_II_46_2_bit | DV_II_51_2_bit)) + mask &= ((0 - ((W[40] ^ (W[41] >> 5)) & (1 << 1))) | ^(DV_I_47_2_bit | DV_I_51_2_bit | DV_II_50_2_bit)) + if (mask & (DV_I_44_0_bit | DV_I_46_0_bit | DV_II_56_0_bit)) != 0 { + mask &= ((0 - (((W[40] ^ W[42]) >> 4) & 1)) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_II_56_0_bit)) + } + mask &= ((0 - ((W[39] ^ (W[40] >> 5)) & (1 << 1))) | ^(DV_I_46_2_bit | DV_I_50_2_bit | DV_II_49_2_bit)) + if (mask & (DV_I_43_0_bit | DV_I_45_0_bit | DV_II_55_0_bit)) != 0 { + mask &= ((0 - (((W[39] ^ W[41]) >> 4) & 1)) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_II_55_0_bit)) + } + if (mask & (DV_I_44_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) != 0 { + mask &= ((0 - (((W[38] ^ W[40]) >> 4) & 1)) | ^(DV_I_44_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) + } + if (mask & (DV_I_43_0_bit | DV_II_53_0_bit | DV_II_55_0_bit)) != 0 { + mask &= ((0 - (((W[37] ^ W[39]) >> 4) & 1)) | ^(DV_I_43_0_bit | DV_II_53_0_bit | DV_II_55_0_bit)) + } + mask &= ((0 - ((W[36] ^ (W[37] >> 5)) & (1 << 1))) | ^(DV_I_47_2_bit | DV_I_50_2_bit | DV_II_46_2_bit)) + if (mask & (DV_I_45_0_bit | DV_I_48_0_bit | DV_II_47_0_bit)) != 0 { + mask &= (((((W[35] >> 4) ^ (W[39] >> 29)) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_48_0_bit | DV_II_47_0_bit)) + } + if (mask & (DV_I_48_0_bit | DV_II_48_0_bit)) != 0 { + mask &= ((0 - ((W[63] ^ (W[64] >> 5)) & (1 << 0))) | ^(DV_I_48_0_bit | DV_II_48_0_bit)) + } + if (mask & (DV_I_45_0_bit | DV_II_45_0_bit)) != 0 { + mask &= ((0 - ((W[63] ^ (W[64] >> 5)) & (1 << 1))) | ^(DV_I_45_0_bit | DV_II_45_0_bit)) + } + if (mask & (DV_I_47_0_bit | DV_II_47_0_bit)) != 0 { + mask &= ((0 - ((W[62] ^ (W[63] >> 5)) & (1 << 0))) | ^(DV_I_47_0_bit | DV_II_47_0_bit)) + } + if (mask & (DV_I_46_0_bit | DV_II_46_0_bit)) != 0 { + mask &= ((0 - ((W[61] ^ (W[62] >> 5)) & (1 << 0))) | ^(DV_I_46_0_bit | DV_II_46_0_bit)) + } + mask &= ((0 - ((W[61] ^ (W[62] >> 5)) & (1 << 2))) | ^(DV_I_46_2_bit | DV_II_46_2_bit)) + if (mask & (DV_I_45_0_bit | DV_II_45_0_bit)) != 0 { + mask &= ((0 - ((W[60] ^ (W[61] >> 5)) & (1 << 0))) | ^(DV_I_45_0_bit | DV_II_45_0_bit)) + } + if (mask & (DV_II_51_0_bit | DV_II_54_0_bit)) != 0 { + mask &= (((((W[58] ^ W[59]) >> 29) & 1) - 1) | ^(DV_II_51_0_bit | DV_II_54_0_bit)) + } + if (mask & (DV_II_50_0_bit | DV_II_53_0_bit)) != 0 { + mask &= (((((W[57] ^ W[58]) >> 29) & 1) - 1) | ^(DV_II_50_0_bit | DV_II_53_0_bit)) + } + if (mask & (DV_II_52_0_bit | DV_II_54_0_bit)) != 0 { + mask &= ((((W[56] ^ (W[59] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_52_0_bit | DV_II_54_0_bit)) + } + if (mask & (DV_II_51_0_bit | DV_II_52_0_bit)) != 0 { + mask &= ((0 - (((W[56] ^ W[59]) >> 29) & 1)) | ^(DV_II_51_0_bit | DV_II_52_0_bit)) + } + if (mask & (DV_II_49_0_bit | DV_II_52_0_bit)) != 0 { + mask &= (((((W[56] ^ W[57]) >> 29) & 1) - 1) | ^(DV_II_49_0_bit | DV_II_52_0_bit)) + } + if (mask & (DV_II_51_0_bit | DV_II_53_0_bit)) != 0 { + mask &= ((((W[55] ^ (W[58] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_51_0_bit | DV_II_53_0_bit)) + } + if (mask & (DV_II_50_0_bit | DV_II_52_0_bit)) != 0 { + mask &= ((((W[54] ^ (W[57] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_50_0_bit | DV_II_52_0_bit)) + } + if (mask & (DV_II_49_0_bit | DV_II_51_0_bit)) != 0 { + mask &= ((((W[53] ^ (W[56] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_49_0_bit | DV_II_51_0_bit)) + } + mask &= ((((W[51] ^ (W[50] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_50_2_bit | DV_II_46_2_bit)) + mask &= ((((W[48] ^ W[50]) & (1 << 6)) - (1 << 6)) | 
^(DV_I_50_2_bit | DV_II_46_2_bit)) + if (mask & (DV_I_51_0_bit | DV_I_52_0_bit)) != 0 { + mask &= ((0 - (((W[48] ^ W[55]) >> 29) & 1)) | ^(DV_I_51_0_bit | DV_I_52_0_bit)) + } + mask &= ((((W[47] ^ W[49]) & (1 << 6)) - (1 << 6)) | ^(DV_I_49_2_bit | DV_I_51_2_bit)) + mask &= ((((W[48] ^ (W[47] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_47_2_bit | DV_II_51_2_bit)) + mask &= ((((W[46] ^ W[48]) & (1 << 6)) - (1 << 6)) | ^(DV_I_48_2_bit | DV_I_50_2_bit)) + mask &= ((((W[47] ^ (W[46] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_46_2_bit | DV_II_50_2_bit)) + mask &= ((0 - ((W[44] ^ (W[45] >> 5)) & (1 << 1))) | ^(DV_I_51_2_bit | DV_II_49_2_bit)) + mask &= ((((W[43] ^ W[45]) & (1 << 6)) - (1 << 6)) | ^(DV_I_47_2_bit | DV_I_49_2_bit)) + mask &= (((((W[42] ^ W[44]) >> 6) & 1) - 1) | ^(DV_I_46_2_bit | DV_I_48_2_bit)) + mask &= ((((W[43] ^ (W[42] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_II_46_2_bit | DV_II_51_2_bit)) + mask &= ((((W[42] ^ (W[41] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_51_2_bit | DV_II_50_2_bit)) + mask &= ((((W[41] ^ (W[40] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_50_2_bit | DV_II_49_2_bit)) + if (mask & (DV_I_52_0_bit | DV_II_51_0_bit)) != 0 { + mask &= ((((W[39] ^ (W[43] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_52_0_bit | DV_II_51_0_bit)) + } + if (mask & (DV_I_51_0_bit | DV_II_50_0_bit)) != 0 { + mask &= ((((W[38] ^ (W[42] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_51_0_bit | DV_II_50_0_bit)) + } + if (mask & (DV_I_48_2_bit | DV_I_51_2_bit)) != 0 { + mask &= ((0 - ((W[37] ^ (W[38] >> 5)) & (1 << 1))) | ^(DV_I_48_2_bit | DV_I_51_2_bit)) + } + if (mask & (DV_I_50_0_bit | DV_II_49_0_bit)) != 0 { + mask &= ((((W[37] ^ (W[41] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_50_0_bit | DV_II_49_0_bit)) + } + if (mask & (DV_II_52_0_bit | DV_II_54_0_bit)) != 0 { + mask &= ((0 - ((W[36] ^ W[38]) & (1 << 4))) | ^(DV_II_52_0_bit | DV_II_54_0_bit)) + } + mask &= ((0 - ((W[35] ^ (W[36] >> 5)) & (1 << 1))) | ^(DV_I_46_2_bit | DV_I_49_2_bit)) + if (mask & (DV_I_51_0_bit | DV_II_47_0_bit)) != 0 { + mask &= ((((W[35] ^ (W[39] >> 25)) & (1 << 3)) - (1 << 3)) | ^(DV_I_51_0_bit | DV_II_47_0_bit)) + } + + if mask != 0 { + if (mask & DV_I_43_0_bit) != 0 { + if not((W[61]^(W[62]>>5))&(1<<1)) != 0 || + not(not((W[59]^(W[63]>>25))&(1<<5))) != 0 || + not((W[58]^(W[63]>>30))&(1<<0)) != 0 { + mask &= ^DV_I_43_0_bit + } + } + if (mask & DV_I_44_0_bit) != 0 { + if not((W[62]^(W[63]>>5))&(1<<1)) != 0 || + not(not((W[60]^(W[64]>>25))&(1<<5))) != 0 || + not((W[59]^(W[64]>>30))&(1<<0)) != 0 { + mask &= ^DV_I_44_0_bit + } + } + if (mask & DV_I_46_2_bit) != 0 { + mask &= ((^((W[40] ^ W[42]) >> 2)) | ^DV_I_46_2_bit) + } + if (mask & DV_I_47_2_bit) != 0 { + if not((W[62]^(W[63]>>5))&(1<<2)) != 0 || + not(not((W[41]^W[43])&(1<<6))) != 0 { + mask &= ^DV_I_47_2_bit + } + } + if (mask & DV_I_48_2_bit) != 0 { + if not((W[63]^(W[64]>>5))&(1<<2)) != 0 || + not(not((W[48]^(W[49]<<5))&(1<<6))) != 0 { + mask &= ^DV_I_48_2_bit + } + } + if (mask & DV_I_49_2_bit) != 0 { + if not(not((W[49]^(W[50]<<5))&(1<<6))) != 0 || + not((W[42]^W[50])&(1<<1)) != 0 || + not(not((W[39]^(W[40]<<5))&(1<<6))) != 0 || + not((W[38]^W[40])&(1<<1)) != 0 { + mask &= ^DV_I_49_2_bit + } + } + if (mask & DV_I_50_0_bit) != 0 { + mask &= (((W[36] ^ W[37]) << 7) | ^DV_I_50_0_bit) + } + if (mask & DV_I_50_2_bit) != 0 { + mask &= (((W[43] ^ W[51]) << 11) | ^DV_I_50_2_bit) + } + if (mask & DV_I_51_0_bit) != 0 { + mask &= (((W[37] ^ W[38]) << 9) | ^DV_I_51_0_bit) + } + if (mask & DV_I_51_2_bit) != 0 { + if not(not((W[51]^(W[52]<<5))&(1<<6))) != 0 || + not(not((W[49]^W[51])&(1<<6))) != 0 
|| + not(not((W[37]^(W[37]>>5))&(1<<1))) != 0 || + not(not((W[35]^(W[39]>>25))&(1<<5))) != 0 { + mask &= ^DV_I_51_2_bit + } + } + if (mask & DV_I_52_0_bit) != 0 { + mask &= (((W[38] ^ W[39]) << 11) | ^DV_I_52_0_bit) + } + if (mask & DV_II_46_2_bit) != 0 { + mask &= (((W[47] ^ W[51]) << 17) | ^DV_II_46_2_bit) + } + if (mask & DV_II_48_0_bit) != 0 { + if not(not((W[36]^(W[40]>>25))&(1<<3))) != 0 || + not((W[35]^(W[40]<<2))&(1<<30)) != 0 { + mask &= ^DV_II_48_0_bit + } + } + if (mask & DV_II_49_0_bit) != 0 { + if not(not((W[37]^(W[41]>>25))&(1<<3))) != 0 || + not((W[36]^(W[41]<<2))&(1<<30)) != 0 { + mask &= ^DV_II_49_0_bit + } + } + if (mask & DV_II_49_2_bit) != 0 { + if not(not((W[53]^(W[54]<<5))&(1<<6))) != 0 || + not(not((W[51]^W[53])&(1<<6))) != 0 || + not((W[50]^W[54])&(1<<1)) != 0 || + not(not((W[45]^(W[46]<<5))&(1<<6))) != 0 || + not(not((W[37]^(W[41]>>25))&(1<<5))) != 0 || + not((W[36]^(W[41]>>30))&(1<<0)) != 0 { + mask &= ^DV_II_49_2_bit + } + } + if (mask & DV_II_50_0_bit) != 0 { + if not((W[55]^W[58])&(1<<29)) != 0 || + not(not((W[38]^(W[42]>>25))&(1<<3))) != 0 || + not((W[37]^(W[42]<<2))&(1<<30)) != 0 { + mask &= ^DV_II_50_0_bit + } + } + if (mask & DV_II_50_2_bit) != 0 { + if not(not((W[54]^(W[55]<<5))&(1<<6))) != 0 || + not(not((W[52]^W[54])&(1<<6))) != 0 || + not((W[51]^W[55])&(1<<1)) != 0 || + not((W[45]^W[47])&(1<<1)) != 0 || + not(not((W[38]^(W[42]>>25))&(1<<5))) != 0 || + not((W[37]^(W[42]>>30))&(1<<0)) != 0 { + mask &= ^DV_II_50_2_bit + } + } + if (mask & DV_II_51_0_bit) != 0 { + if not(not((W[39]^(W[43]>>25))&(1<<3))) != 0 || + not((W[38]^(W[43]<<2))&(1<<30)) != 0 { + mask &= ^DV_II_51_0_bit + } + } + if (mask & DV_II_51_2_bit) != 0 { + if not(not((W[55]^(W[56]<<5))&(1<<6))) != 0 || + not(not((W[53]^W[55])&(1<<6))) != 0 || + not((W[52]^W[56])&(1<<1)) != 0 || + not((W[46]^W[48])&(1<<1)) != 0 || + not(not((W[39]^(W[43]>>25))&(1<<5))) != 0 || + not((W[38]^(W[43]>>30))&(1<<0)) != 0 { + mask &= ^DV_II_51_2_bit + } + } + if (mask & DV_II_52_0_bit) != 0 { + if not(not((W[59]^W[60])&(1<<29))) != 0 || + not(not((W[40]^(W[44]>>25))&(1<<3))) != 0 || + not(not((W[40]^(W[44]>>25))&(1<<4))) != 0 || + not((W[39]^(W[44]<<2))&(1<<30)) != 0 { + mask &= ^DV_II_52_0_bit + } + } + if (mask & DV_II_53_0_bit) != 0 { + if not((W[58]^W[61])&(1<<29)) != 0 || + not(not((W[57]^(W[61]>>25))&(1<<4))) != 0 || + not(not((W[41]^(W[45]>>25))&(1<<3))) != 0 || + not(not((W[41]^(W[45]>>25))&(1<<4))) != 0 { + mask &= ^DV_II_53_0_bit + } + } + if (mask & DV_II_54_0_bit) != 0 { + if not(not((W[58]^(W[62]>>25))&(1<<4))) != 0 || + not(not((W[42]^(W[46]>>25))&(1<<3))) != 0 || + not(not((W[42]^(W[46]>>25))&(1<<4))) != 0 { + mask &= ^DV_II_54_0_bit + } + } + if (mask & DV_II_55_0_bit) != 0 { + if not(not((W[59]^(W[63]>>25))&(1<<4))) != 0 || + not(not((W[57]^(W[59]>>25))&(1<<4))) != 0 || + not(not((W[43]^(W[47]>>25))&(1<<3))) != 0 || + not(not((W[43]^(W[47]>>25))&(1<<4))) != 0 { + mask &= ^DV_II_55_0_bit + } + } + if (mask & DV_II_56_0_bit) != 0 { + if not(not((W[60]^(W[64]>>25))&(1<<4))) != 0 || + not(not((W[44]^(W[48]>>25))&(1<<3))) != 0 || + not(not((W[44]^(W[48]>>25))&(1<<4))) != 0 { + mask &= ^DV_II_56_0_bit + } + } + } + + return mask +} + +func not(x uint32) uint32 { + if x == 0 { + return 1 + } + + return 0 +} + +func SHA1_dvs() []DvInfo { + return sha1_dvs +} diff --git a/vendor/github.com/pjbgf/sha1cd/ubc/const.go b/vendor/github.com/pjbgf/sha1cd/ubc/const.go new file mode 100644 index 00000000..eac14f46 --- /dev/null +++ b/vendor/github.com/pjbgf/sha1cd/ubc/const.go @@ -0,0 +1,624 @@ +// Based on the C 
implementation from Marc Stevens and Dan Shumow. +// https://github.com/cr-marcstevens/sha1collisiondetection + +package ubc + +const ( + CheckSize = 80 + + DV_I_43_0_bit = (uint32)(1 << 0) + DV_I_44_0_bit = (uint32)(1 << 1) + DV_I_45_0_bit = (uint32)(1 << 2) + DV_I_46_0_bit = (uint32)(1 << 3) + DV_I_46_2_bit = (uint32)(1 << 4) + DV_I_47_0_bit = (uint32)(1 << 5) + DV_I_47_2_bit = (uint32)(1 << 6) + DV_I_48_0_bit = (uint32)(1 << 7) + DV_I_48_2_bit = (uint32)(1 << 8) + DV_I_49_0_bit = (uint32)(1 << 9) + DV_I_49_2_bit = (uint32)(1 << 10) + DV_I_50_0_bit = (uint32)(1 << 11) + DV_I_50_2_bit = (uint32)(1 << 12) + DV_I_51_0_bit = (uint32)(1 << 13) + DV_I_51_2_bit = (uint32)(1 << 14) + DV_I_52_0_bit = (uint32)(1 << 15) + DV_II_45_0_bit = (uint32)(1 << 16) + DV_II_46_0_bit = (uint32)(1 << 17) + DV_II_46_2_bit = (uint32)(1 << 18) + DV_II_47_0_bit = (uint32)(1 << 19) + DV_II_48_0_bit = (uint32)(1 << 20) + DV_II_49_0_bit = (uint32)(1 << 21) + DV_II_49_2_bit = (uint32)(1 << 22) + DV_II_50_0_bit = (uint32)(1 << 23) + DV_II_50_2_bit = (uint32)(1 << 24) + DV_II_51_0_bit = (uint32)(1 << 25) + DV_II_51_2_bit = (uint32)(1 << 26) + DV_II_52_0_bit = (uint32)(1 << 27) + DV_II_53_0_bit = (uint32)(1 << 28) + DV_II_54_0_bit = (uint32)(1 << 29) + DV_II_55_0_bit = (uint32)(1 << 30) + DV_II_56_0_bit = (uint32)(1 << 31) +) + +// sha1_dvs contains a list of SHA-1 Disturbance Vectors (DV) which defines the +// unavoidable bit conditions when a collision attack is in progress. +var sha1_dvs = []DvInfo{ + { + DvType: 1, DvK: 43, DvB: 0, TestT: 58, MaskI: 0, MaskB: 0, + Dm: [CheckSize]uint32{ + 0x08000000, 0x9800000c, 0xd8000010, 0x08000010, 0xb8000010, 0x98000000, 0x60000000, + 0x00000008, 0xc0000000, 0x90000014, 0x10000010, 0xb8000014, 0x28000000, 0x20000010, + 0x48000000, 0x08000018, 0x60000000, 0x90000010, 0xf0000010, 0x90000008, 0xc0000000, + 0x90000010, 0xf0000010, 0xb0000008, 0x40000000, 0x90000000, 0xf0000010, 0x90000018, + 0x60000000, 0x90000010, 0x90000010, 0x90000000, 0x80000000, 0x00000010, 0xa0000000, + 0x20000000, 0xa0000000, 0x20000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010, + 0x20000000, 0x00000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000040, + 0x40000002, 0x80000004, 0x80000080, 0x80000006, 0x00000049, 0x00000103, 0x80000009, + 0x80000012, 0x80000202, 0x00000018, 0x00000164, 0x00000408, 0x800000e6, 0x8000004c, + 0x00000803, 0x80000161, 0x80000599}, + }, { + DvType: 1, DvK: 44, DvB: 0, TestT: 58, MaskI: 0, MaskB: 1, + Dm: [CheckSize]uint32{ + 0xb4000008, 0x08000000, 0x9800000c, 0xd8000010, 0x08000010, 0xb8000010, 0x98000000, + 0x60000000, 0x00000008, 0xc0000000, 0x90000014, 0x10000010, 0xb8000014, 0x28000000, + 0x20000010, 0x48000000, 0x08000018, 0x60000000, 0x90000010, 0xf0000010, 0x90000008, + 0xc0000000, 0x90000010, 0xf0000010, 0xb0000008, 0x40000000, 0x90000000, 0xf0000010, + 0x90000018, 0x60000000, 0x90000010, 0x90000010, 0x90000000, 0x80000000, 0x00000010, + 0xa0000000, 0x20000000, 0xa0000000, 0x20000010, 0x00000000, 0x20000010, 0x20000000, + 0x00000010, 0x20000000, 0x00000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002, + 0x40000040, 0x40000002, 0x80000004, 0x80000080, 0x80000006, 0x00000049, 0x00000103, + 0x80000009, 0x80000012, 
0x80000202, 0x00000018, 0x00000164, 0x00000408, 0x800000e6, + 0x8000004c, 0x00000803, 0x80000161}, + }, + { + DvType: 1, DvK: 45, DvB: 0, TestT: 58, MaskI: 0, MaskB: 2, + Dm: [CheckSize]uint32{ + 0xf4000014, 0xb4000008, 0x08000000, 0x9800000c, 0xd8000010, 0x08000010, 0xb8000010, + 0x98000000, 0x60000000, 0x00000008, 0xc0000000, 0x90000014, 0x10000010, 0xb8000014, + 0x28000000, 0x20000010, 0x48000000, 0x08000018, 0x60000000, 0x90000010, 0xf0000010, + 0x90000008, 0xc0000000, 0x90000010, 0xf0000010, 0xb0000008, 0x40000000, 0x90000000, + 0xf0000010, 0x90000018, 0x60000000, 0x90000010, 0x90000010, 0x90000000, 0x80000000, + 0x00000010, 0xa0000000, 0x20000000, 0xa0000000, 0x20000010, 0x00000000, 0x20000010, + 0x20000000, 0x00000010, 0x20000000, 0x00000010, 0xa0000000, 0x00000000, 0x20000000, + 0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001, + 0x40000002, 0x40000040, 0x40000002, 0x80000004, 0x80000080, 0x80000006, 0x00000049, + 0x00000103, 0x80000009, 0x80000012, 0x80000202, 0x00000018, 0x00000164, 0x00000408, + 0x800000e6, 0x8000004c, 0x00000803}, + }, + { + DvType: 1, DvK: 46, DvB: 0, TestT: 58, MaskI: 0, MaskB: 3, + Dm: [CheckSize]uint32{ + 0x2c000010, 0xf4000014, 0xb4000008, 0x08000000, 0x9800000c, 0xd8000010, 0x08000010, + 0xb8000010, 0x98000000, 0x60000000, 0x00000008, 0xc0000000, 0x90000014, 0x10000010, + 0xb8000014, 0x28000000, 0x20000010, 0x48000000, 0x08000018, 0x60000000, 0x90000010, + 0xf0000010, 0x90000008, 0xc0000000, 0x90000010, 0xf0000010, 0xb0000008, 0x40000000, + 0x90000000, 0xf0000010, 0x90000018, 0x60000000, 0x90000010, 0x90000010, 0x90000000, + 0x80000000, 0x00000010, 0xa0000000, 0x20000000, 0xa0000000, 0x20000010, 0x00000000, + 0x20000010, 0x20000000, 0x00000010, 0x20000000, 0x00000010, 0xa0000000, 0x00000000, + 0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020, + 0x00000001, 0x40000002, 0x40000040, 0x40000002, 0x80000004, 0x80000080, 0x80000006, + 0x00000049, 0x00000103, 0x80000009, 0x80000012, 0x80000202, 0x00000018, 0x00000164, + 0x00000408, 0x800000e6, 0x8000004c}, + }, + { + DvType: 1, DvK: 46, DvB: 2, TestT: 58, MaskI: 0, MaskB: 4, + Dm: [CheckSize]uint32{ + 0xb0000040, 0xd0000053, 0xd0000022, 0x20000000, 0x60000032, 0x60000043, + 0x20000040, 0xe0000042, 0x60000002, 0x80000001, 0x00000020, 0x00000003, + 0x40000052, 0x40000040, 0xe0000052, 0xa0000000, 0x80000040, 0x20000001, + 0x20000060, 0x80000001, 0x40000042, 0xc0000043, 0x40000022, 0x00000003, + 0x40000042, 0xc0000043, 0xc0000022, 0x00000001, 0x40000002, 0xc0000043, + 0x40000062, 0x80000001, 0x40000042, 0x40000042, 0x40000002, 0x00000002, + 0x00000040, 0x80000002, 0x80000000, 0x80000002, 0x80000040, 0x00000000, + 0x80000040, 0x80000000, 0x00000040, 0x80000000, 0x00000040, 0x80000002, + 0x00000000, 0x80000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000004, 0x00000080, 0x00000004, 0x00000009, 0x00000101, + 0x00000009, 0x00000012, 0x00000202, 0x0000001a, 0x00000124, 0x0000040c, + 0x00000026, 0x0000004a, 0x0000080a, 0x00000060, 0x00000590, 0x00001020, + 0x0000039a, 0x00000132}, + }, + { + DvType: 1, DvK: 47, DvB: 0, TestT: 58, MaskI: 0, MaskB: 5, + Dm: [CheckSize]uint32{ + 0xc8000010, 0x2c000010, 0xf4000014, 0xb4000008, 0x08000000, 0x9800000c, + 0xd8000010, 0x08000010, 0xb8000010, 
0x98000000, 0x60000000, 0x00000008, + 0xc0000000, 0x90000014, 0x10000010, 0xb8000014, 0x28000000, 0x20000010, + 0x48000000, 0x08000018, 0x60000000, 0x90000010, 0xf0000010, 0x90000008, + 0xc0000000, 0x90000010, 0xf0000010, 0xb0000008, 0x40000000, 0x90000000, + 0xf0000010, 0x90000018, 0x60000000, 0x90000010, 0x90000010, 0x90000000, + 0x80000000, 0x00000010, 0xa0000000, 0x20000000, 0xa0000000, 0x20000010, + 0x00000000, 0x20000010, 0x20000000, 0x00000010, 0x20000000, 0x00000010, + 0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002, + 0x40000040, 0x40000002, 0x80000004, 0x80000080, 0x80000006, 0x00000049, + 0x00000103, 0x80000009, 0x80000012, 0x80000202, 0x00000018, 0x00000164, + 0x00000408, 0x800000e6}, + }, + { + DvType: 1, DvK: 47, DvB: 2, TestT: 58, MaskI: 0, MaskB: 6, + Dm: [CheckSize]uint32{ + 0x20000043, 0xb0000040, 0xd0000053, 0xd0000022, 0x20000000, 0x60000032, + 0x60000043, 0x20000040, 0xe0000042, 0x60000002, 0x80000001, 0x00000020, + 0x00000003, 0x40000052, 0x40000040, 0xe0000052, 0xa0000000, 0x80000040, + 0x20000001, 0x20000060, 0x80000001, 0x40000042, 0xc0000043, 0x40000022, + 0x00000003, 0x40000042, 0xc0000043, 0xc0000022, 0x00000001, 0x40000002, + 0xc0000043, 0x40000062, 0x80000001, 0x40000042, 0x40000042, 0x40000002, + 0x00000002, 0x00000040, 0x80000002, 0x80000000, 0x80000002, 0x80000040, + 0x00000000, 0x80000040, 0x80000000, 0x00000040, 0x80000000, 0x00000040, + 0x80000002, 0x00000000, 0x80000000, 0x80000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000004, 0x00000080, 0x00000004, 0x00000009, + 0x00000101, 0x00000009, 0x00000012, 0x00000202, 0x0000001a, 0x00000124, + 0x0000040c, 0x00000026, 0x0000004a, 0x0000080a, 0x00000060, 0x00000590, + 0x00001020, 0x0000039a, + }, + }, + { + DvType: 1, DvK: 48, DvB: 0, TestT: 58, MaskI: 0, MaskB: 7, + Dm: [CheckSize]uint32{ + 0xb800000a, 0xc8000010, 0x2c000010, 0xf4000014, 0xb4000008, 0x08000000, + 0x9800000c, 0xd8000010, 0x08000010, 0xb8000010, 0x98000000, 0x60000000, + 0x00000008, 0xc0000000, 0x90000014, 0x10000010, 0xb8000014, 0x28000000, + 0x20000010, 0x48000000, 0x08000018, 0x60000000, 0x90000010, 0xf0000010, + 0x90000008, 0xc0000000, 0x90000010, 0xf0000010, 0xb0000008, 0x40000000, + 0x90000000, 0xf0000010, 0x90000018, 0x60000000, 0x90000010, 0x90000010, + 0x90000000, 0x80000000, 0x00000010, 0xa0000000, 0x20000000, 0xa0000000, + 0x20000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010, 0x20000000, + 0x00000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001, + 0x40000002, 0x40000040, 0x40000002, 0x80000004, 0x80000080, 0x80000006, + 0x00000049, 0x00000103, 0x80000009, 0x80000012, 0x80000202, 0x00000018, + 0x00000164, 0x00000408, + }, + }, + { + DvType: 1, DvK: 48, DvB: 2, TestT: 58, MaskI: 0, MaskB: 8, + Dm: [CheckSize]uint32{ + 0xe000002a, 0x20000043, 0xb0000040, 0xd0000053, 0xd0000022, 0x20000000, + 0x60000032, 0x60000043, 0x20000040, 0xe0000042, 0x60000002, 0x80000001, + 0x00000020, 0x00000003, 0x40000052, 0x40000040, 0xe0000052, 0xa0000000, + 0x80000040, 0x20000001, 0x20000060, 0x80000001, 0x40000042, 0xc0000043, + 0x40000022, 0x00000003, 0x40000042, 0xc0000043, 0xc0000022, 0x00000001, + 0x40000002, 0xc0000043, 
0x40000062, 0x80000001, 0x40000042, 0x40000042, + 0x40000002, 0x00000002, 0x00000040, 0x80000002, 0x80000000, 0x80000002, + 0x80000040, 0x00000000, 0x80000040, 0x80000000, 0x00000040, 0x80000000, + 0x00000040, 0x80000002, 0x00000000, 0x80000000, 0x80000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000004, 0x00000080, 0x00000004, + 0x00000009, 0x00000101, 0x00000009, 0x00000012, 0x00000202, 0x0000001a, + 0x00000124, 0x0000040c, 0x00000026, 0x0000004a, 0x0000080a, 0x00000060, + 0x00000590, 0x00001020}, + }, + { + DvType: 1, DvK: 49, DvB: 0, TestT: 58, MaskI: 0, MaskB: 9, + Dm: [CheckSize]uint32{ + 0x18000000, 0xb800000a, 0xc8000010, 0x2c000010, 0xf4000014, 0xb4000008, + 0x08000000, 0x9800000c, 0xd8000010, 0x08000010, 0xb8000010, 0x98000000, + 0x60000000, 0x00000008, 0xc0000000, 0x90000014, 0x10000010, 0xb8000014, + 0x28000000, 0x20000010, 0x48000000, 0x08000018, 0x60000000, 0x90000010, + 0xf0000010, 0x90000008, 0xc0000000, 0x90000010, 0xf0000010, 0xb0000008, + 0x40000000, 0x90000000, 0xf0000010, 0x90000018, 0x60000000, 0x90000010, + 0x90000010, 0x90000000, 0x80000000, 0x00000010, 0xa0000000, 0x20000000, + 0xa0000000, 0x20000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010, + 0x20000000, 0x00000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020, + 0x00000001, 0x40000002, 0x40000040, 0x40000002, 0x80000004, 0x80000080, + 0x80000006, 0x00000049, 0x00000103, 0x80000009, 0x80000012, 0x80000202, + 0x00000018, 0x00000164}, + }, + { + DvType: 1, DvK: 49, DvB: 2, TestT: 58, MaskI: 0, MaskB: 10, + Dm: [CheckSize]uint32{ + 0x60000000, 0xe000002a, 0x20000043, 0xb0000040, 0xd0000053, 0xd0000022, + 0x20000000, 0x60000032, 0x60000043, 0x20000040, 0xe0000042, 0x60000002, + 0x80000001, 0x00000020, 0x00000003, 0x40000052, 0x40000040, 0xe0000052, + 0xa0000000, 0x80000040, 0x20000001, 0x20000060, 0x80000001, 0x40000042, + 0xc0000043, 0x40000022, 0x00000003, 0x40000042, 0xc0000043, 0xc0000022, + 0x00000001, 0x40000002, 0xc0000043, 0x40000062, 0x80000001, 0x40000042, + 0x40000042, 0x40000002, 0x00000002, 0x00000040, 0x80000002, 0x80000000, + 0x80000002, 0x80000040, 0x00000000, 0x80000040, 0x80000000, 0x00000040, + 0x80000000, 0x00000040, 0x80000002, 0x00000000, 0x80000000, 0x80000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000004, 0x00000080, + 0x00000004, 0x00000009, 0x00000101, 0x00000009, 0x00000012, 0x00000202, + 0x0000001a, 0x00000124, 0x0000040c, 0x00000026, 0x0000004a, 0x0000080a, + 0x00000060, 0x00000590}, + }, + { + DvType: 1, DvK: 50, DvB: 0, TestT: 65, MaskI: 0, MaskB: 11, + Dm: [CheckSize]uint32{ + 0x0800000c, 0x18000000, 0xb800000a, 0xc8000010, 0x2c000010, 0xf4000014, + 0xb4000008, 0x08000000, 0x9800000c, 0xd8000010, 0x08000010, 0xb8000010, + 0x98000000, 0x60000000, 0x00000008, 0xc0000000, 0x90000014, 0x10000010, + 0xb8000014, 0x28000000, 0x20000010, 0x48000000, 0x08000018, 0x60000000, + 0x90000010, 0xf0000010, 0x90000008, 0xc0000000, 0x90000010, 0xf0000010, + 0xb0000008, 0x40000000, 0x90000000, 0xf0000010, 0x90000018, 0x60000000, + 0x90000010, 0x90000010, 0x90000000, 0x80000000, 0x00000010, 0xa0000000, + 0x20000000, 0xa0000000, 0x20000010, 0x00000000, 0x20000010, 0x20000000, + 0x00000010, 0x20000000, 0x00000010, 0xa0000000, 0x00000000, 0x20000000, + 0x20000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, + 0x00000020, 0x00000001, 0x40000002, 0x40000040, 0x40000002, 0x80000004, + 0x80000080, 0x80000006, 0x00000049, 0x00000103, 0x80000009, 0x80000012, + 0x80000202, 0x00000018, + }, + }, + { + DvType: 1, DvK: 50, DvB: 2, TestT: 65, MaskI: 0, MaskB: 12, + Dm: [CheckSize]uint32{ + 0x20000030, 0x60000000, 0xe000002a, 0x20000043, 0xb0000040, 0xd0000053, + 0xd0000022, 0x20000000, 0x60000032, 0x60000043, 0x20000040, 0xe0000042, + 0x60000002, 0x80000001, 0x00000020, 0x00000003, 0x40000052, 0x40000040, + 0xe0000052, 0xa0000000, 0x80000040, 0x20000001, 0x20000060, 0x80000001, + 0x40000042, 0xc0000043, 0x40000022, 0x00000003, 0x40000042, 0xc0000043, + 0xc0000022, 0x00000001, 0x40000002, 0xc0000043, 0x40000062, 0x80000001, + 0x40000042, 0x40000042, 0x40000002, 0x00000002, 0x00000040, 0x80000002, + 0x80000000, 0x80000002, 0x80000040, 0x00000000, 0x80000040, 0x80000000, + 0x00000040, 0x80000000, 0x00000040, 0x80000002, 0x00000000, 0x80000000, + 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000004, + 0x00000080, 0x00000004, 0x00000009, 0x00000101, 0x00000009, 0x00000012, + 0x00000202, 0x0000001a, 0x00000124, 0x0000040c, 0x00000026, 0x0000004a, + 0x0000080a, 0x00000060}, + }, + { + DvType: 1, DvK: 51, DvB: 0, TestT: 65, MaskI: 0, MaskB: 13, + Dm: [CheckSize]uint32{ + 0xe8000000, 0x0800000c, 0x18000000, 0xb800000a, 0xc8000010, 0x2c000010, + 0xf4000014, 0xb4000008, 0x08000000, 0x9800000c, 0xd8000010, 0x08000010, + 0xb8000010, 0x98000000, 0x60000000, 0x00000008, 0xc0000000, 0x90000014, + 0x10000010, 0xb8000014, 0x28000000, 0x20000010, 0x48000000, 0x08000018, + 0x60000000, 0x90000010, 0xf0000010, 0x90000008, 0xc0000000, 0x90000010, + 0xf0000010, 0xb0000008, 0x40000000, 0x90000000, 0xf0000010, 0x90000018, + 0x60000000, 0x90000010, 0x90000010, 0x90000000, 0x80000000, 0x00000010, + 0xa0000000, 0x20000000, 0xa0000000, 0x20000010, 0x00000000, 0x20000010, + 0x20000000, 0x00000010, 0x20000000, 0x00000010, 0xa0000000, 0x00000000, + 0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000040, 0x40000002, + 0x80000004, 0x80000080, 0x80000006, 0x00000049, 0x00000103, 0x80000009, + 0x80000012, 0x80000202}, + }, + { + DvType: 1, DvK: 51, DvB: 2, TestT: 65, MaskI: 0, MaskB: 14, + Dm: [CheckSize]uint32{ + 0xa0000003, 0x20000030, 0x60000000, 0xe000002a, 0x20000043, 0xb0000040, + 0xd0000053, 0xd0000022, 0x20000000, 0x60000032, 0x60000043, 0x20000040, + 0xe0000042, 0x60000002, 0x80000001, 0x00000020, 0x00000003, 0x40000052, + 0x40000040, 0xe0000052, 0xa0000000, 0x80000040, 0x20000001, 0x20000060, + 0x80000001, 0x40000042, 0xc0000043, 0x40000022, 0x00000003, 0x40000042, + 0xc0000043, 0xc0000022, 0x00000001, 0x40000002, 0xc0000043, 0x40000062, + 0x80000001, 0x40000042, 0x40000042, 0x40000002, 0x00000002, 0x00000040, + 0x80000002, 0x80000000, 0x80000002, 0x80000040, 0x00000000, 0x80000040, + 0x80000000, 0x00000040, 0x80000000, 0x00000040, 0x80000002, 0x00000000, + 0x80000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000004, 0x00000080, 0x00000004, 0x00000009, 0x00000101, 0x00000009, + 0x00000012, 0x00000202, 0x0000001a, 0x00000124, 0x0000040c, 0x00000026, + 0x0000004a, 
0x0000080a}, + }, + { + DvType: 1, DvK: 52, DvB: 0, TestT: 65, MaskI: 0, MaskB: 15, + Dm: [CheckSize]uint32{ + 0x04000010, 0xe8000000, 0x0800000c, 0x18000000, 0xb800000a, 0xc8000010, + 0x2c000010, 0xf4000014, 0xb4000008, 0x08000000, 0x9800000c, 0xd8000010, + 0x08000010, 0xb8000010, 0x98000000, 0x60000000, 0x00000008, 0xc0000000, + 0x90000014, 0x10000010, 0xb8000014, 0x28000000, 0x20000010, 0x48000000, + 0x08000018, 0x60000000, 0x90000010, 0xf0000010, 0x90000008, 0xc0000000, + 0x90000010, 0xf0000010, 0xb0000008, 0x40000000, 0x90000000, 0xf0000010, + 0x90000018, 0x60000000, 0x90000010, 0x90000010, 0x90000000, 0x80000000, + 0x00000010, 0xa0000000, 0x20000000, 0xa0000000, 0x20000010, 0x00000000, + 0x20000010, 0x20000000, 0x00000010, 0x20000000, 0x00000010, 0xa0000000, + 0x00000000, 0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000040, + 0x40000002, 0x80000004, 0x80000080, 0x80000006, 0x00000049, 0x00000103, + 0x80000009, 0x80000012}, + }, + { + DvType: 2, DvK: 45, DvB: 0, TestT: 58, MaskI: 0, MaskB: 16, + Dm: [CheckSize]uint32{ + 0xec000014, 0x0c000002, 0xc0000010, 0xb400001c, 0x2c000004, 0xbc000018, + 0xb0000010, 0x0000000c, 0xb8000010, 0x08000018, 0x78000010, 0x08000014, + 0x70000010, 0xb800001c, 0xe8000000, 0xb0000004, 0x58000010, 0xb000000c, + 0x48000000, 0xb0000000, 0xb8000010, 0x98000010, 0xa0000000, 0x00000000, + 0x00000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010, + 0x20000000, 0x00000010, 0x60000000, 0x00000018, 0xe0000000, 0x90000000, + 0x30000010, 0xb0000000, 0x20000000, 0x20000000, 0xa0000000, 0x00000010, + 0x80000000, 0x20000000, 0x20000000, 0x20000000, 0x80000000, 0x00000010, + 0x00000000, 0x20000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000041, 0x40000022, + 0x80000005, 0xc0000082, 0xc0000046, 0x4000004b, 0x80000107, 0x00000089, + 0x00000014, 0x8000024b, 0x0000011b, 0x8000016d, 0x8000041a, 0x000002e4, + 0x80000054, 0x00000967}, + }, + { + DvType: 2, DvK: 46, DvB: 0, TestT: 58, MaskI: 0, MaskB: 17, + Dm: [CheckSize]uint32{ + 0x2400001c, 0xec000014, 0x0c000002, 0xc0000010, 0xb400001c, 0x2c000004, + 0xbc000018, 0xb0000010, 0x0000000c, 0xb8000010, 0x08000018, 0x78000010, + 0x08000014, 0x70000010, 0xb800001c, 0xe8000000, 0xb0000004, 0x58000010, + 0xb000000c, 0x48000000, 0xb0000000, 0xb8000010, 0x98000010, 0xa0000000, + 0x00000000, 0x00000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000, + 0x20000010, 0x20000000, 0x00000010, 0x60000000, 0x00000018, 0xe0000000, + 0x90000000, 0x30000010, 0xb0000000, 0x20000000, 0x20000000, 0xa0000000, + 0x00000010, 0x80000000, 0x20000000, 0x20000000, 0x20000000, 0x80000000, + 0x00000010, 0x00000000, 0x20000010, 0xa0000000, 0x00000000, 0x20000000, + 0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000041, + 0x40000022, 0x80000005, 0xc0000082, 0xc0000046, 0x4000004b, 0x80000107, + 0x00000089, 0x00000014, 0x8000024b, 0x0000011b, 0x8000016d, 0x8000041a, + 0x000002e4, 0x80000054}, + }, + { + DvType: 2, DvK: 46, DvB: 2, TestT: 58, MaskI: 0, MaskB: 18, + Dm: [CheckSize]uint32{ + 0x90000070, 0xb0000053, 0x30000008, 0x00000043, 0xd0000072, 0xb0000010, + 0xf0000062, 0xc0000042, 0x00000030, 0xe0000042, 0x20000060, 0xe0000041, + 0x20000050, 0xc0000041, 0xe0000072, 
0xa0000003, 0xc0000012, 0x60000041, + 0xc0000032, 0x20000001, 0xc0000002, 0xe0000042, 0x60000042, 0x80000002, + 0x00000000, 0x00000000, 0x80000000, 0x00000002, 0x00000040, 0x00000000, + 0x80000040, 0x80000000, 0x00000040, 0x80000001, 0x00000060, 0x80000003, + 0x40000002, 0xc0000040, 0xc0000002, 0x80000000, 0x80000000, 0x80000002, + 0x00000040, 0x00000002, 0x80000000, 0x80000000, 0x80000000, 0x00000002, + 0x00000040, 0x00000000, 0x80000040, 0x80000002, 0x00000000, 0x80000000, + 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000004, 0x00000080, 0x00000004, 0x00000009, 0x00000105, + 0x00000089, 0x00000016, 0x0000020b, 0x0000011b, 0x0000012d, 0x0000041e, + 0x00000224, 0x00000050, 0x0000092e, 0x0000046c, 0x000005b6, 0x0000106a, + 0x00000b90, 0x00000152}, + }, + { + DvType: 2, DvK: 47, DvB: 0, TestT: 58, MaskI: 0, MaskB: 19, + Dm: [CheckSize]uint32{ + 0x20000010, 0x2400001c, 0xec000014, 0x0c000002, 0xc0000010, 0xb400001c, + 0x2c000004, 0xbc000018, 0xb0000010, 0x0000000c, 0xb8000010, 0x08000018, + 0x78000010, 0x08000014, 0x70000010, 0xb800001c, 0xe8000000, 0xb0000004, + 0x58000010, 0xb000000c, 0x48000000, 0xb0000000, 0xb8000010, 0x98000010, + 0xa0000000, 0x00000000, 0x00000000, 0x20000000, 0x80000000, 0x00000010, + 0x00000000, 0x20000010, 0x20000000, 0x00000010, 0x60000000, 0x00000018, + 0xe0000000, 0x90000000, 0x30000010, 0xb0000000, 0x20000000, 0x20000000, + 0xa0000000, 0x00000010, 0x80000000, 0x20000000, 0x20000000, 0x20000000, + 0x80000000, 0x00000010, 0x00000000, 0x20000010, 0xa0000000, 0x00000000, + 0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002, + 0x40000041, 0x40000022, 0x80000005, 0xc0000082, 0xc0000046, 0x4000004b, + 0x80000107, 0x00000089, 0x00000014, 0x8000024b, 0x0000011b, 0x8000016d, + 0x8000041a, 0x000002e4}, + }, + { + DvType: 2, DvK: 48, DvB: 0, TestT: 58, MaskI: 0, MaskB: 20, + Dm: [CheckSize]uint32{ + 0xbc00001a, 0x20000010, 0x2400001c, 0xec000014, 0x0c000002, 0xc0000010, + 0xb400001c, 0x2c000004, 0xbc000018, 0xb0000010, 0x0000000c, 0xb8000010, + 0x08000018, 0x78000010, 0x08000014, 0x70000010, 0xb800001c, 0xe8000000, + 0xb0000004, 0x58000010, 0xb000000c, 0x48000000, 0xb0000000, 0xb8000010, + 0x98000010, 0xa0000000, 0x00000000, 0x00000000, 0x20000000, 0x80000000, + 0x00000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010, 0x60000000, + 0x00000018, 0xe0000000, 0x90000000, 0x30000010, 0xb0000000, 0x20000000, + 0x20000000, 0xa0000000, 0x00000010, 0x80000000, 0x20000000, 0x20000000, + 0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010, 0xa0000000, + 0x00000000, 0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001, + 0x40000002, 0x40000041, 0x40000022, 0x80000005, 0xc0000082, 0xc0000046, + 0x4000004b, 0x80000107, 0x00000089, 0x00000014, 0x8000024b, 0x0000011b, + 0x8000016d, 0x8000041a}, + }, + { + DvType: 2, DvK: 49, DvB: 0, TestT: 58, MaskI: 0, MaskB: 21, + Dm: [CheckSize]uint32{ + 0x3c000004, 0xbc00001a, 0x20000010, 0x2400001c, 0xec000014, 0x0c000002, + 0xc0000010, 0xb400001c, 0x2c000004, 0xbc000018, 0xb0000010, 0x0000000c, + 0xb8000010, 0x08000018, 0x78000010, 0x08000014, 0x70000010, 0xb800001c, + 0xe8000000, 0xb0000004, 0x58000010, 0xb000000c, 0x48000000, 0xb0000000, + 0xb8000010, 0x98000010, 0xa0000000, 0x00000000, 0x00000000, 0x20000000, + 0x80000000, 0x00000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010, + 0x60000000, 0x00000018, 0xe0000000, 
0x90000000, 0x30000010, 0xb0000000, + 0x20000000, 0x20000000, 0xa0000000, 0x00000010, 0x80000000, 0x20000000, + 0x20000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010, + 0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020, + 0x00000001, 0x40000002, 0x40000041, 0x40000022, 0x80000005, 0xc0000082, + 0xc0000046, 0x4000004b, 0x80000107, 0x00000089, 0x00000014, 0x8000024b, + 0x0000011b, 0x8000016d}, + }, + { + DvType: 2, DvK: 49, DvB: 2, TestT: 58, MaskI: 0, MaskB: 22, + Dm: [CheckSize]uint32{ + 0xf0000010, 0xf000006a, 0x80000040, 0x90000070, 0xb0000053, 0x30000008, + 0x00000043, 0xd0000072, 0xb0000010, 0xf0000062, 0xc0000042, 0x00000030, + 0xe0000042, 0x20000060, 0xe0000041, 0x20000050, 0xc0000041, 0xe0000072, + 0xa0000003, 0xc0000012, 0x60000041, 0xc0000032, 0x20000001, 0xc0000002, + 0xe0000042, 0x60000042, 0x80000002, 0x00000000, 0x00000000, 0x80000000, + 0x00000002, 0x00000040, 0x00000000, 0x80000040, 0x80000000, 0x00000040, + 0x80000001, 0x00000060, 0x80000003, 0x40000002, 0xc0000040, 0xc0000002, + 0x80000000, 0x80000000, 0x80000002, 0x00000040, 0x00000002, 0x80000000, + 0x80000000, 0x80000000, 0x00000002, 0x00000040, 0x00000000, 0x80000040, + 0x80000002, 0x00000000, 0x80000000, 0x80000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000004, 0x00000080, + 0x00000004, 0x00000009, 0x00000105, 0x00000089, 0x00000016, 0x0000020b, + 0x0000011b, 0x0000012d, 0x0000041e, 0x00000224, 0x00000050, 0x0000092e, + 0x0000046c, 0x000005b6}, + }, + { + DvType: 2, DvK: 50, DvB: 0, TestT: 65, MaskI: 0, MaskB: 23, + Dm: [CheckSize]uint32{ + 0xb400001c, 0x3c000004, 0xbc00001a, 0x20000010, 0x2400001c, 0xec000014, + 0x0c000002, 0xc0000010, 0xb400001c, 0x2c000004, 0xbc000018, 0xb0000010, + 0x0000000c, 0xb8000010, 0x08000018, 0x78000010, 0x08000014, 0x70000010, + 0xb800001c, 0xe8000000, 0xb0000004, 0x58000010, 0xb000000c, 0x48000000, + 0xb0000000, 0xb8000010, 0x98000010, 0xa0000000, 0x00000000, 0x00000000, + 0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010, 0x20000000, + 0x00000010, 0x60000000, 0x00000018, 0xe0000000, 0x90000000, 0x30000010, + 0xb0000000, 0x20000000, 0x20000000, 0xa0000000, 0x00000010, 0x80000000, + 0x20000000, 0x20000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000, + 0x20000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, + 0x00000020, 0x00000001, 0x40000002, 0x40000041, 0x40000022, 0x80000005, + 0xc0000082, 0xc0000046, 0x4000004b, 0x80000107, 0x00000089, 0x00000014, + 0x8000024b, 0x0000011b}, + }, + { + DvType: 2, DvK: 50, DvB: 2, TestT: 65, MaskI: 0, MaskB: 24, + Dm: [CheckSize]uint32{ + 0xd0000072, 0xf0000010, 0xf000006a, 0x80000040, 0x90000070, 0xb0000053, + 0x30000008, 0x00000043, 0xd0000072, 0xb0000010, 0xf0000062, 0xc0000042, + 0x00000030, 0xe0000042, 0x20000060, 0xe0000041, 0x20000050, 0xc0000041, + 0xe0000072, 0xa0000003, 0xc0000012, 0x60000041, 0xc0000032, 0x20000001, + 0xc0000002, 0xe0000042, 0x60000042, 0x80000002, 0x00000000, 0x00000000, + 0x80000000, 0x00000002, 0x00000040, 0x00000000, 0x80000040, 0x80000000, + 0x00000040, 0x80000001, 0x00000060, 0x80000003, 0x40000002, 0xc0000040, + 0xc0000002, 0x80000000, 0x80000000, 0x80000002, 0x00000040, 0x00000002, + 0x80000000, 0x80000000, 0x80000000, 0x00000002, 0x00000040, 0x00000000, + 0x80000040, 0x80000002, 0x00000000, 0x80000000, 0x80000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000004, + 0x00000080, 0x00000004, 0x00000009, 0x00000105, 0x00000089, 0x00000016, + 0x0000020b, 0x0000011b, 0x0000012d, 0x0000041e, 0x00000224, 0x00000050, + 0x0000092e, 0x0000046c}, + }, + { + DvType: 2, DvK: 51, DvB: 0, TestT: 65, MaskI: 0, MaskB: 25, + Dm: [CheckSize]uint32{ + 0xc0000010, 0xb400001c, 0x3c000004, 0xbc00001a, 0x20000010, 0x2400001c, + 0xec000014, 0x0c000002, 0xc0000010, 0xb400001c, 0x2c000004, 0xbc000018, + 0xb0000010, 0x0000000c, 0xb8000010, 0x08000018, 0x78000010, 0x08000014, + 0x70000010, 0xb800001c, 0xe8000000, 0xb0000004, 0x58000010, 0xb000000c, + 0x48000000, 0xb0000000, 0xb8000010, 0x98000010, 0xa0000000, 0x00000000, + 0x00000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010, + 0x20000000, 0x00000010, 0x60000000, 0x00000018, 0xe0000000, 0x90000000, + 0x30000010, 0xb0000000, 0x20000000, 0x20000000, 0xa0000000, 0x00000010, + 0x80000000, 0x20000000, 0x20000000, 0x20000000, 0x80000000, 0x00000010, + 0x00000000, 0x20000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000041, 0x40000022, + 0x80000005, 0xc0000082, 0xc0000046, 0x4000004b, 0x80000107, 0x00000089, + 0x00000014, 0x8000024b}, + }, + { + DvType: 2, DvK: 51, DvB: 2, TestT: 65, MaskI: 0, MaskB: 26, + Dm: [CheckSize]uint32{ + 0x00000043, 0xd0000072, 0xf0000010, 0xf000006a, 0x80000040, 0x90000070, + 0xb0000053, 0x30000008, 0x00000043, 0xd0000072, 0xb0000010, 0xf0000062, + 0xc0000042, 0x00000030, 0xe0000042, 0x20000060, 0xe0000041, 0x20000050, + 0xc0000041, 0xe0000072, 0xa0000003, 0xc0000012, 0x60000041, 0xc0000032, + 0x20000001, 0xc0000002, 0xe0000042, 0x60000042, 0x80000002, 0x00000000, + 0x00000000, 0x80000000, 0x00000002, 0x00000040, 0x00000000, 0x80000040, + 0x80000000, 0x00000040, 0x80000001, 0x00000060, 0x80000003, 0x40000002, + 0xc0000040, 0xc0000002, 0x80000000, 0x80000000, 0x80000002, 0x00000040, + 0x00000002, 0x80000000, 0x80000000, 0x80000000, 0x00000002, 0x00000040, + 0x00000000, 0x80000040, 0x80000002, 0x00000000, 0x80000000, 0x80000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000004, 0x00000080, 0x00000004, 0x00000009, 0x00000105, 0x00000089, + 0x00000016, 0x0000020b, 0x0000011b, 0x0000012d, 0x0000041e, 0x00000224, + 0x00000050, 0x0000092e}, + }, + { + DvType: 2, DvK: 52, DvB: 0, TestT: 65, MaskI: 0, MaskB: 27, + Dm: [CheckSize]uint32{ + 0x0c000002, 0xc0000010, 0xb400001c, 0x3c000004, 0xbc00001a, 0x20000010, + 0x2400001c, 0xec000014, 0x0c000002, 0xc0000010, 0xb400001c, 0x2c000004, + 0xbc000018, 0xb0000010, 0x0000000c, 0xb8000010, 0x08000018, 0x78000010, + 0x08000014, 0x70000010, 0xb800001c, 0xe8000000, 0xb0000004, 0x58000010, + 0xb000000c, 0x48000000, 0xb0000000, 0xb8000010, 0x98000010, 0xa0000000, + 0x00000000, 0x00000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000, + 0x20000010, 0x20000000, 0x00000010, 0x60000000, 0x00000018, 0xe0000000, + 0x90000000, 0x30000010, 0xb0000000, 0x20000000, 0x20000000, 0xa0000000, + 0x00000010, 0x80000000, 0x20000000, 0x20000000, 0x20000000, 0x80000000, + 0x00000010, 0x00000000, 0x20000010, 0xa0000000, 0x00000000, 0x20000000, + 0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000041, + 0x40000022, 0x80000005, 0xc0000082, 0xc0000046, 0x4000004b, 0x80000107, + 0x00000089, 0x00000014}, + }, + { + DvType: 2, DvK: 53, DvB: 0, TestT: 65, MaskI: 0, MaskB: 28, + Dm: 
[CheckSize]uint32{ + 0xcc000014, 0x0c000002, 0xc0000010, 0xb400001c, 0x3c000004, 0xbc00001a, + 0x20000010, 0x2400001c, 0xec000014, 0x0c000002, 0xc0000010, 0xb400001c, + 0x2c000004, 0xbc000018, 0xb0000010, 0x0000000c, 0xb8000010, 0x08000018, + 0x78000010, 0x08000014, 0x70000010, 0xb800001c, 0xe8000000, 0xb0000004, + 0x58000010, 0xb000000c, 0x48000000, 0xb0000000, 0xb8000010, 0x98000010, + 0xa0000000, 0x00000000, 0x00000000, 0x20000000, 0x80000000, 0x00000010, + 0x00000000, 0x20000010, 0x20000000, 0x00000010, 0x60000000, 0x00000018, + 0xe0000000, 0x90000000, 0x30000010, 0xb0000000, 0x20000000, 0x20000000, + 0xa0000000, 0x00000010, 0x80000000, 0x20000000, 0x20000000, 0x20000000, + 0x80000000, 0x00000010, 0x00000000, 0x20000010, 0xa0000000, 0x00000000, + 0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002, + 0x40000041, 0x40000022, 0x80000005, 0xc0000082, 0xc0000046, 0x4000004b, + 0x80000107, 0x00000089}, + }, + { + DvType: 2, DvK: 54, DvB: 0, TestT: 65, MaskI: 0, MaskB: 29, + Dm: [CheckSize]uint32{ + 0x0400001c, 0xcc000014, 0x0c000002, 0xc0000010, 0xb400001c, 0x3c000004, + 0xbc00001a, 0x20000010, 0x2400001c, 0xec000014, 0x0c000002, 0xc0000010, + 0xb400001c, 0x2c000004, 0xbc000018, 0xb0000010, 0x0000000c, 0xb8000010, + 0x08000018, 0x78000010, 0x08000014, 0x70000010, 0xb800001c, 0xe8000000, + 0xb0000004, 0x58000010, 0xb000000c, 0x48000000, 0xb0000000, 0xb8000010, + 0x98000010, 0xa0000000, 0x00000000, 0x00000000, 0x20000000, 0x80000000, + 0x00000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010, 0x60000000, + 0x00000018, 0xe0000000, 0x90000000, 0x30000010, 0xb0000000, 0x20000000, + 0x20000000, 0xa0000000, 0x00000010, 0x80000000, 0x20000000, 0x20000000, + 0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010, 0xa0000000, + 0x00000000, 0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001, + 0x40000002, 0x40000041, 0x40000022, 0x80000005, 0xc0000082, 0xc0000046, + 0x4000004b, 0x80000107}, + }, + { + DvType: 2, DvK: 55, DvB: 0, TestT: 65, MaskI: 0, MaskB: 30, + Dm: [CheckSize]uint32{ + 0x00000010, 0x0400001c, 0xcc000014, 0x0c000002, 0xc0000010, 0xb400001c, + 0x3c000004, 0xbc00001a, 0x20000010, 0x2400001c, 0xec000014, 0x0c000002, + 0xc0000010, 0xb400001c, 0x2c000004, 0xbc000018, 0xb0000010, 0x0000000c, + 0xb8000010, 0x08000018, 0x78000010, 0x08000014, 0x70000010, 0xb800001c, + 0xe8000000, 0xb0000004, 0x58000010, 0xb000000c, 0x48000000, 0xb0000000, + 0xb8000010, 0x98000010, 0xa0000000, 0x00000000, 0x00000000, 0x20000000, + 0x80000000, 0x00000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010, + 0x60000000, 0x00000018, 0xe0000000, 0x90000000, 0x30000010, 0xb0000000, + 0x20000000, 0x20000000, 0xa0000000, 0x00000010, 0x80000000, 0x20000000, + 0x20000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010, + 0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020, + 0x00000001, 0x40000002, 0x40000041, 0x40000022, 0x80000005, 0xc0000082, + 0xc0000046, 0x4000004b}, + }, + { + DvType: 2, DvK: 56, DvB: 0, TestT: 65, MaskI: 0, MaskB: 31, + Dm: [CheckSize]uint32{ + 0x2600001a, 0x00000010, 0x0400001c, 0xcc000014, 0x0c000002, 0xc0000010, + 0xb400001c, 0x3c000004, 0xbc00001a, 0x20000010, 0x2400001c, 0xec000014, + 0x0c000002, 0xc0000010, 0xb400001c, 0x2c000004, 0xbc000018, 0xb0000010, + 0x0000000c, 0xb8000010, 0x08000018, 0x78000010, 
0x08000014, 0x70000010, + 0xb800001c, 0xe8000000, 0xb0000004, 0x58000010, 0xb000000c, 0x48000000, + 0xb0000000, 0xb8000010, 0x98000010, 0xa0000000, 0x00000000, 0x00000000, + 0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010, 0x20000000, + 0x00000010, 0x60000000, 0x00000018, 0xe0000000, 0x90000000, 0x30000010, + 0xb0000000, 0x20000000, 0x20000000, 0xa0000000, 0x00000010, 0x80000000, + 0x20000000, 0x20000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000, + 0x20000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, + 0x00000020, 0x00000001, 0x40000002, 0x40000041, 0x40000022, 0x80000005, + 0xc0000082, 0xc0000046}, + }, + { + DvType: 0, DvK: 0, DvB: 0, TestT: 0, MaskI: 0, MaskB: 0, + Dm: [CheckSize]uint32{ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0}, + }, +} diff --git a/vendor/github.com/pjbgf/sha1cd/ubc/doc.go b/vendor/github.com/pjbgf/sha1cd/ubc/doc.go new file mode 100644 index 00000000..0090e36b --- /dev/null +++ b/vendor/github.com/pjbgf/sha1cd/ubc/doc.go @@ -0,0 +1,3 @@ +// ubc package provides ways for SHA1 blocks to be checked for +// Unavoidable Bit Conditions that arise from crypto analysis attacks. +package ubc diff --git a/vendor/github.com/prometheus/client_golang/prometheus/BUILD.bazel b/vendor/github.com/prometheus/client_golang/prometheus/BUILD.bazel index 9ac103c7..31c43b17 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/BUILD.bazel +++ b/vendor/github.com/prometheus/client_golang/prometheus/BUILD.bazel @@ -42,7 +42,7 @@ go_library( "//vendor/github.com/prometheus/client_model/go", "//vendor/github.com/prometheus/common/expfmt", "//vendor/github.com/prometheus/common/model", - "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//proto", "@org_golang_google_protobuf//types/known/timestamppb", ] + select({ "@io_bazel_rules_go//go/platform:aix": [ diff --git a/vendor/github.com/prometheus/common/expfmt/BUILD.bazel b/vendor/github.com/prometheus/common/expfmt/BUILD.bazel index e2db937e..d1af83c5 100644 --- a/vendor/github.com/prometheus/common/expfmt/BUILD.bazel +++ b/vendor/github.com/prometheus/common/expfmt/BUILD.bazel @@ -18,6 +18,6 @@ go_library( "//vendor/github.com/prometheus/client_model/go", "//vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg", "//vendor/github.com/prometheus/common/model", - "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//proto", ], ) diff --git a/vendor/github.com/rocky-linux/srpmproc/pkg/blob/file/file.go b/vendor/github.com/rocky-linux/srpmproc/pkg/blob/file/file.go index 641cfa1c..fcf3b4f7 100644 --- a/vendor/github.com/rocky-linux/srpmproc/pkg/blob/file/file.go +++ b/vendor/github.com/rocky-linux/srpmproc/pkg/blob/file/file.go @@ -22,7 +22,7 @@ package file import ( "fmt" - "io/ioutil" + "io" "os" "path/filepath" ) @@ -65,7 +65,7 @@ func (f *File) Read(path string) ([]byte, error) { return nil, err } - body, err := ioutil.ReadAll(r) + body, err := io.ReadAll(r) if err != nil { return nil, err } diff --git a/vendor/github.com/rocky-linux/srpmproc/pkg/blob/gcs/gcs.go b/vendor/github.com/rocky-linux/srpmproc/pkg/blob/gcs/gcs.go index f8970347..be465945 100644 --- a/vendor/github.com/rocky-linux/srpmproc/pkg/blob/gcs/gcs.go +++ 
b/vendor/github.com/rocky-linux/srpmproc/pkg/blob/gcs/gcs.go @@ -23,7 +23,7 @@ package gcs import ( "context" "fmt" - "io/ioutil" + "io" "cloud.google.com/go/storage" ) @@ -71,7 +71,7 @@ func (g *GCS) Read(path string) ([]byte, error) { return nil, err } - body, err := ioutil.ReadAll(r) + body, err := io.ReadAll(r) if err != nil { return nil, err } diff --git a/vendor/github.com/rocky-linux/srpmproc/pkg/blob/s3/s3.go b/vendor/github.com/rocky-linux/srpmproc/pkg/blob/s3/s3.go index 1d475c16..c1907492 100644 --- a/vendor/github.com/rocky-linux/srpmproc/pkg/blob/s3/s3.go +++ b/vendor/github.com/rocky-linux/srpmproc/pkg/blob/s3/s3.go @@ -22,7 +22,7 @@ package s3 import ( "bytes" - "io/ioutil" + "io" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" @@ -99,7 +99,7 @@ func (s *S3) Read(path string) ([]byte, error) { return nil, nil } - body, err := ioutil.ReadAll(obj.Body) + body, err := io.ReadAll(obj.Body) if err != nil { return nil, err } diff --git a/vendor/github.com/rocky-linux/srpmproc/pkg/data/process.go b/vendor/github.com/rocky-linux/srpmproc/pkg/data/process.go index a3797a8d..408a7e63 100644 --- a/vendor/github.com/rocky-linux/srpmproc/pkg/data/process.go +++ b/vendor/github.com/rocky-linux/srpmproc/pkg/data/process.go @@ -59,5 +59,6 @@ type ProcessData struct { PackageVersion string PackageRelease string TaglessMode bool - AltLookAside bool + Cdn string + ModuleBranchNames bool } diff --git a/vendor/github.com/rocky-linux/srpmproc/pkg/directives/add.go b/vendor/github.com/rocky-linux/srpmproc/pkg/directives/add.go index 079bb85a..fac47bb3 100644 --- a/vendor/github.com/rocky-linux/srpmproc/pkg/directives/add.go +++ b/vendor/github.com/rocky-linux/srpmproc/pkg/directives/add.go @@ -23,7 +23,7 @@ package directives import ( "errors" "fmt" - "io/ioutil" + "io" "os" "path/filepath" @@ -55,7 +55,7 @@ func add(cfg *srpmprocpb.Cfg, pd *data.ProcessData, md *data.ModeData, patchTree return errors.New(fmt.Sprintf("COULD_NOT_OPEN_FROM:%s", addType.File)) } - replacingBytes, err = ioutil.ReadAll(fPatch) + replacingBytes, err = io.ReadAll(fPatch) if err != nil { return errors.New(fmt.Sprintf("COULD_NOT_READ_FROM:%s", addType.File)) } diff --git a/vendor/github.com/rocky-linux/srpmproc/pkg/directives/lookaside.go b/vendor/github.com/rocky-linux/srpmproc/pkg/directives/lookaside.go index d8d4f590..07ea6297 100644 --- a/vendor/github.com/rocky-linux/srpmproc/pkg/directives/lookaside.go +++ b/vendor/github.com/rocky-linux/srpmproc/pkg/directives/lookaside.go @@ -27,7 +27,7 @@ import ( "crypto/sha256" "errors" "fmt" - "io/ioutil" + "io" "os" "path/filepath" "time" @@ -66,7 +66,7 @@ func lookaside(cfg *srpmprocpb.Cfg, _ *data.ProcessData, md *data.ModeData, patc return errors.New(fmt.Sprintf("COULD_NOT_OPEN_FILE:%s", path)) } - bts, err := ioutil.ReadAll(f) + bts, err := io.ReadAll(f) if err != nil { return errors.New(fmt.Sprintf("COULD_NOT_READ_FILE:%s", path)) } diff --git a/vendor/github.com/rocky-linux/srpmproc/pkg/directives/patch.go b/vendor/github.com/rocky-linux/srpmproc/pkg/directives/patch.go index a2b562dd..b797bc8c 100644 --- a/vendor/github.com/rocky-linux/srpmproc/pkg/directives/patch.go +++ b/vendor/github.com/rocky-linux/srpmproc/pkg/directives/patch.go @@ -34,10 +34,12 @@ import ( func patch(cfg *srpmprocpb.Cfg, pd *data.ProcessData, _ *data.ModeData, patchTree *git.Worktree, pushTree *git.Worktree) error { for _, patch := range cfg.Patch { patchFile, err := patchTree.Filesystem.Open(patch.File) + pd.Log.Printf("[directives.patch] Parsing File: %s", 
patchFile.Name()) if err != nil { return errors.New(fmt.Sprintf("COULD_NOT_OPEN_PATCH_FILE:%s", patch.File)) } files, _, err := gitdiff.Parse(patchFile) + if err != nil { pd.Log.Printf("could not parse patch file: %v", err) return errors.New(fmt.Sprintf("COULD_NOT_PARSE_PATCH_FILE:%s", patch.File)) @@ -55,9 +57,9 @@ func patch(cfg *srpmprocpb.Cfg, pd *data.ProcessData, _ *data.ModeData, patchTre return errors.New(fmt.Sprintf("COULD_NOT_OPEN_PATCH_SUBJECT:%s", srcPath)) } - err = gitdiff.NewApplier(patchSubjectFile).ApplyFile(&output, patchedFile) + err = gitdiff.Apply(&output, patchSubjectFile, patchedFile) if err != nil { - pd.Log.Printf("could not apply patch: %v", err) + pd.Log.Printf("[directives.patch] could not apply patch: \"%v\" on \"%s\" from \"%s\"", err, srcPath, patchSubjectFile.Name()) return errors.New(fmt.Sprintf("COULD_NOT_APPLY_PATCH_WITH_SUBJECT:%s", srcPath)) } } @@ -74,7 +76,7 @@ func patch(cfg *srpmprocpb.Cfg, pd *data.ProcessData, _ *data.ModeData, patchTre if err != nil { return errors.New(fmt.Sprintf("COULD_NOT_CREATE_NEW_FILE:%s", srcPath)) } - err = gitdiff.NewApplier(newFile).ApplyFile(&output, patchedFile) + err = gitdiff.Apply(&output, newFile, patchedFile) if err != nil { return errors.New(fmt.Sprintf("COULD_NOT_APPLY_PATCH_TO_NEW_FILE:%s", srcPath)) } diff --git a/vendor/github.com/rocky-linux/srpmproc/pkg/directives/replace.go b/vendor/github.com/rocky-linux/srpmproc/pkg/directives/replace.go index 41d31d0a..36ad84cc 100644 --- a/vendor/github.com/rocky-linux/srpmproc/pkg/directives/replace.go +++ b/vendor/github.com/rocky-linux/srpmproc/pkg/directives/replace.go @@ -23,7 +23,7 @@ package directives import ( "errors" "fmt" - "io/ioutil" + "io" "os" "github.com/go-git/go-git/v5" @@ -56,7 +56,7 @@ func replace(cfg *srpmprocpb.Cfg, pd *data.ProcessData, _ *data.ModeData, patchT return errors.New(fmt.Sprintf("COULD_NOT_OPEN_REPLACING:%s", replacing.WithFile)) } - replacingBytes, err := ioutil.ReadAll(fPatch) + replacingBytes, err := io.ReadAll(fPatch) if err != nil { return errors.New(fmt.Sprintf("COULD_NOT_READ_REPLACING:%s", replacing.WithFile)) } diff --git a/vendor/github.com/rocky-linux/srpmproc/pkg/directives/spec_change.go b/vendor/github.com/rocky-linux/srpmproc/pkg/directives/spec_change.go index ea1152b2..82c52712 100644 --- a/vendor/github.com/rocky-linux/srpmproc/pkg/directives/spec_change.go +++ b/vendor/github.com/rocky-linux/srpmproc/pkg/directives/spec_change.go @@ -23,7 +23,7 @@ package directives import ( "errors" "fmt" - "io/ioutil" + "io" "math" "os" "path/filepath" @@ -107,7 +107,7 @@ func sourcePatchOperationAfterLoop(req *sourcePatchOperationAfterLoopRequest) (b *req.newLines = append(*req.newLines, fmt.Sprintf("%s:%s%s", field, spaces, file.Name)) if req.expectedField == "Patch" && file.AddToPrep { - val := fmt.Sprintf("%%patch%d", fieldNum) + val := fmt.Sprintf("%%patch -P%d", fieldNum) if file.NPath > 0 { val = fmt.Sprintf("%s -p%d", val, file.NPath) } @@ -215,7 +215,7 @@ func specChange(cfg *srpmprocpb.Cfg, pd *data.ProcessData, md *data.ModeData, _ return errors.New("COULD_NOT_READ_SPEC_FILE") } - specBts, err := ioutil.ReadAll(specFile) + specBts, err := io.ReadAll(specFile) if err != nil { return errors.New("COULD_NOT_READ_ALL_BYTES") } diff --git a/vendor/github.com/rocky-linux/srpmproc/pkg/misc/regex.go b/vendor/github.com/rocky-linux/srpmproc/pkg/misc/regex.go index 6cb6e08f..6782919f 100644 --- a/vendor/github.com/rocky-linux/srpmproc/pkg/misc/regex.go +++ b/vendor/github.com/rocky-linux/srpmproc/pkg/misc/regex.go @@ -38,8 +38,8 
@@ func GetTagImportRegex(pd *data.ProcessData) *regexp.Regexp { // if we are ok with importing that reference. We are looking for the traditional pattern, like "c9s", and also the // modular "stream---rhel- branch pattern as well func TaglessRefOk(tag string, pd *data.ProcessData) bool { - // First case is very easy: if we are "refs/heads/" , then this is def. a branch we should import - if strings.HasPrefix(tag, fmt.Sprintf("refs/heads/%s%d%s", pd.ImportBranchPrefix, pd.Version, pd.BranchSuffix)) { + // First case is very easy: if we are exactly "refs/heads/", then this is definitely a branch we should import + if tag == fmt.Sprintf("refs/heads/%s%d%s", pd.ImportBranchPrefix, pd.Version, pd.BranchSuffix) { return true } diff --git a/vendor/github.com/rocky-linux/srpmproc/pkg/modes/git.go b/vendor/github.com/rocky-linux/srpmproc/pkg/modes/git.go index f151acad..452e7530 100644 --- a/vendor/github.com/rocky-linux/srpmproc/pkg/modes/git.go +++ b/vendor/github.com/rocky-linux/srpmproc/pkg/modes/git.go @@ -21,12 +21,15 @@ package modes import ( + "bytes" "fmt" - "io/ioutil" + "io" + "log" "net/http" "path/filepath" "sort" "strings" + "text/template" "time" "github.com/go-git/go-git/v5/plumbing/transport" @@ -46,6 +49,15 @@ type remoteTarget struct { when time.Time } +// Lookaside defines the possible template values ({{.Value}}) in CDN URL strings: +type Lookaside struct { + Name string + Branch string + Hash string + Hashtype string + Filename string +} + type remoteTargetSlice []remoteTarget func (p remoteTargetSlice) Len() int { @@ -305,7 +317,7 @@ func (g *GitMode) WriteSource(pd *data.ProcessData, md *data.ModeData) error { return nil } - fileBytes, err := ioutil.ReadAll(metadataFile) + fileBytes, err := io.ReadAll(metadataFile) if err != nil { return fmt.Errorf("could not read metadata file: %v", err) } @@ -341,44 +353,66 @@ func (g *GitMode) WriteSource(pd *data.ProcessData, md *data.ModeData) error { } else { url := "" - // Alternate lookaside logic: if enabled, we pull from a new URL pattern - if !pd.AltLookAside { - url = fmt.Sprintf("%s/%s/%s/%s", pd.CdnUrl, md.Name, branchName, hash) - } else { - // We first need the hash algorithm based on length of hash: - hashType := "sha512" - switch len(hash) { - case 128: - hashType = "sha512" - case 64: - hashType = "sha256" - case 40: - hashType = "sha1" - case 32: - hashType = "md5" + + // We need to figure out the hashtype for templating purposes: + hashType := "sha512" + switch len(hash) { + case 128: + hashType = "sha512" + case 64: + hashType = "sha256" + case 40: + hashType = "sha1" + case 32: + hashType = "md5" + } + + // need the name of the file without "SOURCES/": + fileName := strings.Split(path, "/")[1] + + // Feed our template info to ProcessUrl and transform to the real values: ( {{.Name}}, {{.Branch}}, {{.Hash}}, {{.Hashtype}}, {{.Filename}} ) + url, hasTemplate := ProcessUrl(pd.CdnUrl, md.Name, branchName, hash, hashType, fileName) + + var req *http.Request + var resp *http.Response + + // Download the --cdn-url given, but *only* if it contains template strings ( {{.Name}} , {{.Hash}} , etc. ) + // Otherwise we need to fall back to the traditional cdn-url patterns + if hasTemplate { + pd.Log.Printf("downloading %s", url) + + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return fmt.Errorf("could not create new http request: %v", err) } + req.Header.Set("Accept-Encoding", "*") - // need the name of the file without "SOURCES/": - fileName := strings.Split(path, "/")[1] - - // Alt.
lookaside url is of the form: / / / / / - url = fmt.Sprintf("%s/%s/%s/%s/%s/%s", pd.CdnUrl, md.Name, fileName, hashType, hash, fileName) + resp, err = client.Do(req) + if err != nil { + return fmt.Errorf("could not download dist-git file: %v", err) + } } - pd.Log.Printf("downloading %s", url) - - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return fmt.Errorf("could not create new http request: %v", err) + // Default cdn-url: If we don't have a templated download string, try the default /// pattern: + if resp == nil || resp.StatusCode != http.StatusOK { + url = fmt.Sprintf("%s/%s/%s/%s", pd.CdnUrl, md.Name, branchName, hash) + pd.Log.Printf("Attempting default URL: %s", url) + req, err = http.NewRequest("GET", url, nil) + if err != nil { + return fmt.Errorf("could not create new http request: %v", err) + } + req.Header.Set("Accept-Encoding", "*") + resp, err = client.Do(req) + if err != nil { + return fmt.Errorf("could not download dist-git file: %v", err) + } } - req.Header.Set("Accept-Encoding", "*") - resp, err := client.Do(req) - if err != nil { - return fmt.Errorf("could not download dist-git file: %v", err) - } - if resp.StatusCode != http.StatusOK { + // If the default URL fails, we have one more pattern to try. The simple / pattern + // If this one fails, we are truly lost, and have to bail out w/ an error: + if resp == nil || resp.StatusCode != http.StatusOK { url = fmt.Sprintf("%s/%s", pd.CdnUrl, hash) + pd.Log.Printf("Attempting 2nd fallback URL: %s", url) req, err = http.NewRequest("GET", url, nil) if err != nil { return fmt.Errorf("could not create new http request: %v", err) @@ -393,7 +427,7 @@ func (g *GitMode) WriteSource(pd *data.ProcessData, md *data.ModeData) error { } } - body, err = ioutil.ReadAll(resp.Body) + body, err = io.ReadAll(resp.Body) if err != nil { return fmt.Errorf("could not read the whole dist-git file: %v", err) } @@ -458,3 +492,29 @@ func (g *GitMode) ImportName(pd *data.ProcessData, md *data.ModeData) string { return strings.Replace(strings.TrimPrefix(md.TagBranch, "refs/heads/"), "%", "_", -1) } + +// Given a cdnUrl string as input, return same string, but with substituted +// template values ( {{.Name}} , {{.Hash}}, {{.Filename}}, etc. 
) +func ProcessUrl(cdnUrl string, name string, branch string, hash string, hashtype string, filename string) (string, bool) { + tmpUrl := Lookaside{name, branch, hash, hashtype, filename} + + // Return cdnUrl as-is if we don't have any templates ("{{ .Variable }}") to process: + if !(strings.Contains(cdnUrl, "{{") && strings.Contains(cdnUrl, "}}")) { + return cdnUrl, false + } + + // If we run into trouble with our template parsing, we'll just return the cdnUrl, exactly as we found it + tmpl, err := template.New("").Parse(cdnUrl) + if err != nil { + return cdnUrl, false + } + + var result bytes.Buffer + err = tmpl.Execute(&result, tmpUrl) + if err != nil { + log.Fatalf("ERROR: Could not process CDN URL template(s) from URL string: %s\n", cdnUrl) + } + + return result.String(), true + +} diff --git a/vendor/github.com/rocky-linux/srpmproc/pkg/srpmproc/fetch.go b/vendor/github.com/rocky-linux/srpmproc/pkg/srpmproc/fetch.go index da7d0df1..924f2c6e 100644 --- a/vendor/github.com/rocky-linux/srpmproc/pkg/srpmproc/fetch.go +++ b/vendor/github.com/rocky-linux/srpmproc/pkg/srpmproc/fetch.go @@ -4,7 +4,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "log" "net/http" "path/filepath" @@ -42,7 +41,7 @@ func Fetch(logger io.Writer, cdnUrl string, dir string, fs billy.Filesystem, sto return fmt.Errorf("could not open metadata file: %v", err) } - fileBytes, err := ioutil.ReadAll(metadataFile) + fileBytes, err := io.ReadAll(metadataFile) if err != nil { return fmt.Errorf("could not read metadata file: %v", err) } @@ -87,7 +86,7 @@ func Fetch(logger io.Writer, cdnUrl string, dir string, fs billy.Filesystem, sto return fmt.Errorf("could not download dist-git file: %v", err) } - body, err = ioutil.ReadAll(resp.Body) + body, err = io.ReadAll(resp.Body) if err != nil { return fmt.Errorf("could not read the whole dist-git file: %v", err) } diff --git a/vendor/github.com/rocky-linux/srpmproc/pkg/srpmproc/patch.go b/vendor/github.com/rocky-linux/srpmproc/pkg/srpmproc/patch.go index 928d8aef..ab2b404d 100644 --- a/vendor/github.com/rocky-linux/srpmproc/pkg/srpmproc/patch.go +++ b/vendor/github.com/rocky-linux/srpmproc/pkg/srpmproc/patch.go @@ -21,11 +21,9 @@ package srpmproc import ( - "encoding/json" "fmt" - "io/ioutil" + "io" "log" - "os" "path/filepath" "strings" "time" @@ -48,10 +46,16 @@ import ( func cfgPatches(pd *data.ProcessData, md *data.ModeData, patchTree *git.Worktree, pushTree *git.Worktree) error { // check CFG patches - _, err := patchTree.Filesystem.Stat("ROCKY/CFG") + // use PATCHES directory if it exists otherwise ROCKY/CFG + cfgdir := "PATCHES" + _, err := patchTree.Filesystem.Stat(cfgdir) + if err != nil { + cfgdir = "ROCKY/CFG" + } + _, err = patchTree.Filesystem.Stat(cfgdir) if err == nil { // iterate through patches - infos, err := patchTree.Filesystem.ReadDir("ROCKY/CFG") + infos, err := patchTree.Filesystem.ReadDir(cfgdir) if err != nil { return fmt.Errorf("could not walk patches: %v", err) } @@ -63,12 +67,12 @@ func cfgPatches(pd *data.ProcessData, md *data.ModeData, patchTree *git.Worktree } pd.Log.Printf("applying directive %s", info.Name()) - filePath := filepath.Join("ROCKY/CFG", info.Name()) + filePath := filepath.Join(cfgdir, info.Name()) directive, err := patchTree.Filesystem.Open(filePath) if err != nil { return fmt.Errorf("could not open directive file %s: %v", info.Name(), err) } - directiveBytes, err := ioutil.ReadAll(directive) + directiveBytes, err := io.ReadAll(directive) if err != nil { return fmt.Errorf("could not read directive file: %v", err) } @@ -81,11 +85,7 @@ func 
cfgPatches(pd *data.ProcessData, md *data.ModeData, patchTree *git.Worktree errs := directives.Apply(&cfg, pd, md, patchTree, pushTree) if errs != nil { - err := json.NewEncoder(os.Stdout).Encode(errs) - if err != nil { - return err - } - + fmt.Printf("errors: %v\n", errs) return fmt.Errorf("directives could not be applied") } } @@ -96,7 +96,12 @@ func cfgPatches(pd *data.ProcessData, md *data.ModeData, patchTree *git.Worktree func applyPatches(pd *data.ProcessData, md *data.ModeData, patchTree *git.Worktree, pushTree *git.Worktree) error { // check if patches exist - _, err := patchTree.Filesystem.Stat("ROCKY") + cfgdir := "PATCHES" + _, err := patchTree.Filesystem.Stat(cfgdir) + if err != nil { + cfgdir = "ROCKY" + } + _, err = patchTree.Filesystem.Stat(cfgdir) if err == nil { err := cfgPatches(pd, md, patchTree, pushTree) if err != nil { @@ -315,7 +320,7 @@ func patchModuleYaml(pd *data.ProcessData, md *data.ModeData) error { } } - content, err := ioutil.ReadAll(f) + content, err := io.ReadAll(f) if err != nil { return fmt.Errorf("could not read modulemd file: %v", err) } @@ -397,6 +402,12 @@ func patchModuleYaml(pd *data.ProcessData, md *data.ModeData) error { } } + // If we got this far, we're good. This means the repos, branches, and commits exist. + // If this option is on, just use the branch name. + if pd.ModuleBranchNames { + tipHash = md.PushBranch + } + rpm.Ref = tipHash } diff --git a/vendor/github.com/rocky-linux/srpmproc/pkg/srpmproc/process.go b/vendor/github.com/rocky-linux/srpmproc/pkg/srpmproc/process.go index 4712f74f..e4b4e206 100644 --- a/vendor/github.com/rocky-linux/srpmproc/pkg/srpmproc/process.go +++ b/vendor/github.com/rocky-linux/srpmproc/pkg/srpmproc/process.go @@ -25,7 +25,6 @@ import ( "encoding/hex" "fmt" "io" - "io/ioutil" "log" "os" "os/exec" @@ -68,9 +67,10 @@ const ( type ProcessDataRequest struct { // Required - Version int - StorageAddr string - Package string + Version int + StorageAddr string + Package string + PackageGitName string // Optional ModuleMode bool @@ -101,8 +101,15 @@ type ProcessDataRequest struct { PackageVersion string PackageRelease string - TaglessMode bool - AltLookAside bool + TaglessMode bool + Cdn string + + ModuleBranchNames bool +} + +type LookasidePath struct { + Distro string + Url string } func gitlabify(str string) string { @@ -113,7 +120,57 @@ func gitlabify(str string) string { return strings.Replace(str, "+", "plus", -1) } +// List of distros and their lookaside patterns +// If we find one of these passed as --cdn (ex: "--cdn fedora"), then we override, and assign this URL to be our --cdn-url +func StaticLookasides() []LookasidePath { + centos := LookasidePath{ + Distro: "centos", + Url: "https://git.centos.org/sources/{{.Name}}/{{.Branch}}/{{.Hash}}", + } + centosStream := LookasidePath{ + Distro: "centos-stream", + Url: "https://sources.stream.centos.org/sources/rpms/{{.Name}}/{{.Filename}}/{{.Hashtype}}/{{.Hash}}/{{.Filename}}", + } + rocky8 := LookasidePath{ + Distro: "rocky8", + Url: "https://rocky-linux-sources-staging.a1.rockylinux.org/{{.Hash}}", + } + rocky := LookasidePath{ + Distro: "rocky", + Url: "https://sources.build.resf.org/{{.Hash}}", + } + fedora := LookasidePath{ + Distro: "fedora", + Url: "https://src.fedoraproject.org/repo/pkgs/{{.Name}}/{{.Filename}}/{{.Hashtype}}/{{.Hash}}/{{.Filename}}", + } + + return []LookasidePath{centos, centosStream, rocky8, rocky, fedora} + +} + +// Given a "--cdn" entry like "centos", we can search through our struct list of distros, and return the proper lookaside URL 
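// A minimal, self-contained sketch of how the {{.Name}}-style patterns above
// expand via text/template (the same expansion ProcessUrl performs); the
// package layout and sample values below are hypothetical.
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// Mirrors the Lookaside template-value struct defined in modes/git.go.
type Lookaside struct {
	Name     string
	Branch   string
	Hash     string
	Hashtype string
	Filename string
}

func main() {
	// The centos-stream pattern from StaticLookasides() above.
	pattern := "https://sources.stream.centos.org/sources/rpms/{{.Name}}/{{.Filename}}/{{.Hashtype}}/{{.Hash}}/{{.Filename}}"

	// Sample values; a real Hash would be the full checksum recorded in the .metadata file.
	vals := Lookaside{Name: "bash", Branch: "c9s", Hash: "deadbeef", Hashtype: "sha512", Filename: "bash-5.1.8.tar.gz"}

	tmpl, err := template.New("cdn").Parse(pattern)
	if err != nil {
		panic(err)
	}
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, vals); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
	// https://sources.stream.centos.org/sources/rpms/bash/bash-5.1.8.tar.gz/sha512/deadbeef/bash-5.1.8.tar.gz
}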
+// If we can't find it, we return false and the calling function will error out +func FindDistro(cdn string) (string, bool) { + var cdnUrl = "" + + // Loop through each distro in the static list defined, try to find a match with "--cdn": + for _, distro := range StaticLookasides() { + if distro.Distro == strings.ToLower(cdn) { + cdnUrl = distro.Url + return cdnUrl, true + } + } + return "", false +} + func NewProcessData(req *ProcessDataRequest) (*data.ProcessData, error) { + // Build the logger to use for the data import + var writer io.Writer = os.Stdout + if req.LogWriter != nil { + writer = req.LogWriter + } + logger := log.New(writer, "", log.LstdFlags) + // Set defaults if req.ModulePrefix == "" { req.ModulePrefix = ModulePrefixCentOS @@ -139,13 +196,22 @@ func NewProcessData(req *ProcessDataRequest) (*data.ProcessData, error) { if req.BranchPrefix == "" { req.BranchPrefix = "r" } - if req.CdnUrl == "" && !req.AltLookAside { + if req.CdnUrl == "" { req.CdnUrl = "https://git.centos.org/sources" } - // If altlookaside is enabled, and the CdnUrl hasn't been changed, then automatically set it to the default - // CentOS Stream (the new pattern very much won't work with the old git.centos.org/sources site) - if (req.CdnUrl == "https://git.centos.org/sources" || req.CdnUrl == "") && req.AltLookAside { - req.CdnUrl = "https://sources.stream.centos.org/sources/rpms" + + // If a Cdn distro is defined, we try to find a match from StaticLookasides() array of structs + // see if we have a match to --cdn (matching values are things like fedora, centos, rocky8, etc.) + // If we match, then we want to short-circuit the CdnUrl to the assigned distro's one + if req.Cdn != "" { + newCdn, foundDistro := FindDistro(req.Cdn) + + if !foundDistro { + return nil, fmt.Errorf("Error, distro name given as --cdn argument is not valid.") + } + + req.CdnUrl = newCdn + logger.Printf("Discovered --cdn distro: %s . 
Using override CDN URL Pattern: %s", req.Cdn, req.CdnUrl) } // Validate required @@ -153,6 +219,11 @@ func NewProcessData(req *ProcessDataRequest) (*data.ProcessData, error) { return nil, fmt.Errorf("package cannot be empty") } + // tells srpmproc what the source name actually is + if req.PackageGitName == "" { + req.PackageGitName = req.Package + } + var importer data.ImportMode var blobStorage blob.Storage @@ -172,9 +243,9 @@ func NewProcessData(req *ProcessDataRequest) (*data.ProcessData, error) { sourceRpmLocation := "" if req.ModuleMode { - sourceRpmLocation = fmt.Sprintf("%s/%s", req.ModulePrefix, req.Package) + sourceRpmLocation = fmt.Sprintf("%s/%s", req.ModulePrefix, req.PackageGitName) } else { - sourceRpmLocation = fmt.Sprintf("%s/%s", req.RpmPrefix, req.Package) + sourceRpmLocation = fmt.Sprintf("%s/%s", req.RpmPrefix, req.PackageGitName) } importer = &modes.GitMode{} @@ -214,12 +285,6 @@ func NewProcessData(req *ProcessDataRequest) (*data.ProcessData, error) { reqFsCreator = req.FsCreator } - var writer io.Writer = os.Stdout - if req.LogWriter != nil { - writer = req.LogWriter - } - logger := log.New(writer, "", log.LstdFlags) - if req.TmpFsMode != "" { logger.Printf("using tmpfs dir: %s", req.TmpFsMode) fsCreator = func(branch string) (billy.Filesystem, error) { @@ -276,7 +341,8 @@ func NewProcessData(req *ProcessDataRequest) (*data.ProcessData, error) { PackageVersion: req.PackageVersion, PackageRelease: req.PackageRelease, TaglessMode: req.TaglessMode, - AltLookAside: req.AltLookAside, + Cdn: req.Cdn, + ModuleBranchNames: req.ModuleBranchNames, }, nil } @@ -334,6 +400,7 @@ func ProcessRPM(pd *data.ProcessData) (*srpmprocpb.ProcessResponse, error) { list, err := remote.List(&git.ListOptions{ Auth: pd.Authenticator, }) + if err != nil { log.Println("ignoring no-dup-mode") } else { @@ -354,17 +421,17 @@ func ProcessRPM(pd *data.ProcessData) (*srpmprocpb.ProcessResponse, error) { if pd.SingleTag != "" { md.Branches = []string{fmt.Sprintf("refs/tags/%s", pd.SingleTag)} } else if len(pd.ManualCommits) > 0 { - md.Branches = []string{} - for _, commit := range pd.ManualCommits { - branchCommit := strings.Split(commit, ":") - if len(branchCommit) != 2 { - return nil, fmt.Errorf("invalid manual commit list") - } + log.Println("Manual commits were listed for import. Switching to perform a tagless import of these commit(s).") + pd.TaglessMode = true + return processRPMTagless(pd) + } - head := fmt.Sprintf("refs/tags/imports/%s/%s-%s", branchCommit[0], md.Name, branchCommit[1]) - md.Branches = append(md.Branches, head) - commitPin[head] = branchCommit[1] - } + // If we have no valid branches to consider, then we'll automatically switch to attempt a tagless import: + if len(md.Branches) == 0 { + log.Println("No valid tags (refs/tags/imports/*) found in repository! 
Switching to perform a tagless import.") + pd.TaglessMode = true + result, err := processRPMTagless(pd) + return result, err } for _, branch := range md.Branches { @@ -527,7 +594,7 @@ func ProcessRPM(pd *data.ProcessData) (*srpmprocpb.ProcessResponse, error) { if err != nil { return nil, fmt.Errorf("could not open ignored source file %s: %v", sourcePath, err) } - sourceFileBts, err := ioutil.ReadAll(sourceFile) + sourceFileBts, err := io.ReadAll(sourceFile) if err != nil { return nil, fmt.Errorf("could not read the whole of ignored source file: %v", err) } @@ -596,6 +663,17 @@ func ProcessRPM(pd *data.ProcessData) (*srpmprocpb.ProcessResponse, error) { // show status status, _ := w.Status() + if !pd.ModuleMode { + if status.IsClean() { + pd.Log.Printf("No changes detected. Our downstream is up to date.") + head, err := repo.Head() + if err != nil { + return nil, fmt.Errorf("error getting HEAD: %v", err) + } + latestHashForBranch[md.PushBranch] = head.Hash().String() + continue + } + } pd.Log.Printf("successfully processed:\n%s", status) statusLines := strings.Split(status.String(), "\n") @@ -681,7 +759,6 @@ func ProcessRPM(pd *data.ProcessData) (*srpmprocpb.ProcessResponse, error) { } // Process for when we want to import a tagless repo (like from CentOS Stream) -// func processRPMTagless(pd *data.ProcessData) (*srpmprocpb.ProcessResponse, error) { pd.Log.Println("Tagless mode detected, attempting import of latest commit") @@ -746,6 +823,20 @@ func processRPMTagless(pd *data.ProcessData) (*srpmprocpb.ProcessResponse, error sourceWorktree := *md.Worktree localPath := "" + // if a manual commit list is provided, we want to create our md.Branches[] array in a special format: + if len(pd.ManualCommits) > 0 { + md.Branches = []string{} + for _, commit := range pd.ManualCommits { + branchCommit := strings.Split(commit, ":") + if len(branchCommit) != 2 { + return nil, fmt.Errorf("invalid manual commit list") + } + + head := fmt.Sprintf("COMMIT:%s:%s", branchCommit[0], branchCommit[1]) + md.Branches = append(md.Branches, head) + } + } + for _, branch := range md.Branches { md.Repo = &sourceRepo md.Worktree = &sourceWorktree @@ -765,13 +856,35 @@ func processRPMTagless(pd *data.ProcessData) (*srpmprocpb.ProcessResponse, error return nil, fmt.Errorf("Could not create temporary directory: %s", localPath) } + // we'll make our branch we're processing more presentable if it's in the COMMIT:: format: + if strings.HasPrefix(branch, "COMMIT:") { + branch = fmt.Sprintf("refs/heads/%s", strings.Split(branch, ":")[1]) + } + // Clone repo into the temporary path, but only the tag we're interested in: // (TODO: will probably need to assign this a variable or use the md struct gitrepo object to perform a successful tag+push later) - _, _ = git.PlainClone(localPath, false, &git.CloneOptions{ + rTmp, err := git.PlainClone(localPath, false, &git.CloneOptions{ URL: pd.RpmLocation, SingleBranch: true, ReferenceName: plumbing.ReferenceName(branch), }) + if err != nil { + return nil, err + } + + // If we're dealing with a special manual commit to import ("COMMIT::"), then we need to check out that + // specific hash from the repo we are importing from: + if strings.HasPrefix(md.TagBranch, "COMMIT:") { + commitList := strings.Split(md.TagBranch, ":") + + wTmp, _ := rTmp.Worktree() + err = wTmp.Checkout(&git.CheckoutOptions{ + Hash: plumbing.NewHash(commitList[2]), + }) + if err != nil { + return nil, fmt.Errorf("Could not find manual commit %s in the repository. 
Must be a valid commit hash.", commitList[2]) + } + } // Now that we're cloned into localPath, we need to convert the import into the old format // We want sources to become .PKGNAME.metadata, we want SOURCES and SPECS folders, etc. @@ -788,9 +901,9 @@ func processRPMTagless(pd *data.ProcessData) (*srpmprocpb.ProcessResponse, error // get name-version-release of tagless repo, only if we're not a module repo: if !pd.ModuleMode { - nvrString := getVersionFromSpec(md.Name, localPath, pd.Version) - if nvrString == "" { - return nil, fmt.Errorf("Error using rpm or rpmbuild to build SRPM and determine version info! (tagless mode)") + nvrString, err := getVersionFromSpec(localPath, pd.Version) + if err != nil { + return nil, err } // Set version and release fields we extracted (name|version|release are separated by pipes) @@ -852,6 +965,12 @@ func processRPMTagless(pd *data.ProcessData) (*srpmprocpb.ProcessResponse, error Force: true, }) + // These os commands actually move the data from our cloned import repo to our cloned "push" repo (upstream -> downstream) + // First we clobber the target push repo's data, and then move the new data into it from the upstream repo we cloned earlier + os.RemoveAll(fmt.Sprintf("%s_gitpush/SPECS", localPath)) + os.RemoveAll(fmt.Sprintf("%s_gitpush/SOURCES", localPath)) + os.RemoveAll(fmt.Sprintf("%s_gitpush/.gitignore", localPath)) + os.RemoveAll(fmt.Sprintf("%s_gitpush/%s.metadata", localPath, md.Name)) os.Rename(fmt.Sprintf("%s/SPECS", localPath), fmt.Sprintf("%s_gitpush/SPECS", localPath)) os.Rename(fmt.Sprintf("%s/SOURCES", localPath), fmt.Sprintf("%s_gitpush/SOURCES", localPath)) os.Rename(fmt.Sprintf("%s/.gitignore", localPath), fmt.Sprintf("%s_gitpush/.gitignore", localPath)) @@ -888,10 +1007,21 @@ func processRPMTagless(pd *data.ProcessData) (*srpmprocpb.ProcessResponse, error err = w.AddWithOptions(&git.AddOptions{All: true}) if err != nil { - return nil, fmt.Errorf("Error adding SOURCES/ , SPECS/ or .metadata file to commit list.") + return nil, fmt.Errorf("error adding SOURCES/ , SPECS/ or .metadata file to commit list") } - status, err := w.Status() + status, _ := w.Status() + if !pd.ModuleMode { + if status.IsClean() { + pd.Log.Printf("No changes detected. Our downstream is up to date.") + head, err := pushRepo.Head() + if err != nil { + return nil, fmt.Errorf("error getting HEAD: %v", err) + } + latestHashForBranch[md.PushBranch] = head.Hash().String() + continue + } + } pd.Log.Printf("successfully processed:\n%s", status) // assign tag for our new remote we're about to push (derived from the SRPM version) @@ -985,7 +1115,6 @@ func processRPMTagless(pd *data.ProcessData) (*srpmprocpb.ProcessResponse, error Version: pd.PackageVersion, Release: pd.PackageRelease, } - } // return struct with all our branch:commit and branch:version+release mappings @@ -1010,7 +1139,7 @@ func convertLocalRepo(pkgName string, localRepo string) (bool, error) { } // Loop through each file/folder and operate accordingly: - files, err := ioutil.ReadDir(localRepo) + files, err := os.ReadDir(localRepo) if err != nil { return false, err } @@ -1108,113 +1237,50 @@ func convertMetaData(pkgName string, localRepo string) bool { // Given a local checked out folder and package name, including SPECS/ , SOURCES/ , and .package.metadata, this will: // - create a "dummy" SRPM (using dummy sources files we use to populate tarballs from lookaside) // - extract RPM version info from that SRPM, and return it +// // If we are in tagless mode, we need to get a package version somehow!
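// A standalone, hedged sketch of the rpmspec query that the rewritten
// getVersionFromSpec below relies on; the spec path and dist tag are
// assumptions for illustration, and rpmspec must be present in PATH.
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	spec := "/tmp/checkout/SPECS/bash.spec" // hypothetical checkout location

	// --srpm evaluates the spec as a source package; the query format keeps
	// name|version|release on a single line, which is what the function parses.
	out, err := exec.Command("rpmspec",
		"--srpm",
		"--define=dist .el9",
		"-q", "--queryformat", `%{NAME}|%{VERSION}|%{RELEASE}\n`,
		spec,
	).Output()
	if err != nil {
		fmt.Println("rpmspec failed:", err)
		return
	}

	fields := strings.Fields(string(out))
	if len(fields) == 0 {
		fmt.Println("rpmspec produced no output")
		return
	}
	parts := strings.Split(fields[0], "|")
	if len(parts) == 3 {
		fmt.Printf("name=%s version=%s release=%s\n", parts[0], parts[1], parts[2])
	}
}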
-func getVersionFromSpec(pkgName string, localRepo string, majorVersion int) string { +func getVersionFromSpec(localRepo string, majorVersion int) (string, error) { // Make sure we have "rpm" and "rpmbuild" and "cp" available in our PATH. Otherwise, this won't work: - _, err := exec.LookPath("rpm") + _, err := exec.LookPath("rpmspec") if err != nil { - return "" + return "", fmt.Errorf("Could not find rpmspec program in PATH") } - _, err = exec.LookPath("rpmbuild") + // Read the first file from SPECS/ to get our spec file + // (there should only be one file - we check that it ends in ".spec" just to be sure!) + lsTmp, err := os.ReadDir(fmt.Sprintf("%s/SPECS/", localRepo)) if err != nil { - return "" + return "", err + } + specFile := lsTmp[0].Name() + + if !strings.HasSuffix(specFile, ".spec") { + return "", fmt.Errorf("First file found in SPECS/ is not a .spec file! Check the SPECS/ directory in the repo?") } - _, err = exec.LookPath("cp") + // Call the rpmspec binary to extract the version-release info out of it, and tack on ".el" at the end: + cmdArgs := []string{ + "--srpm", + fmt.Sprintf(`--define=dist .el%d`, majorVersion), + fmt.Sprintf(`--define=_topdir %s`, localRepo), + "-q", + "--queryformat", + `%{NAME}|%{VERSION}|%{RELEASE}\n`, + fmt.Sprintf("%s/SPECS/%s", localRepo, specFile), + } + cmd := exec.Command("rpmspec", cmdArgs...) + nvrTmp, err := cmd.Output() if err != nil { - return "" + return "", fmt.Errorf("Error running rpmspec command to determine RPM name-version-release identifier. \nCommand attempted: %s \nCommand output: %s", cmd.String(), string(nvrTmp)) } - // create separate temp folder space to do our RPM work - we don't want to accidentally contaminate the main Git area: - rpmBuildPath := fmt.Sprintf("%s_rpm", localRepo) - os.Mkdir(rpmBuildPath, 0o755) - - // Copy SOURCES/ and SPECS/ into the temp rpmbuild directory recursively - // Yes, we could create or import an elaborate Go-native way to do this, but damnit this is easier: - cmdArgs := strings.Fields(fmt.Sprintf("cp -rp %s/SOURCES %s/SPECS %s/", localRepo, localRepo, rpmBuildPath)) - if err := exec.Command(cmdArgs[0], cmdArgs[1:]...).Run(); err != nil { - log.Println(err) - return "" - } - - // Loop through ..metadata and get the file names we need to make our SRPM: - lookAside, err := os.Open(fmt.Sprintf("%s/.%s.metadata", localRepo, pkgName)) - if err != nil { - log.Println(err) - return "" - } - - // Split file into lines and start processing: - scanner := bufio.NewScanner(lookAside) - scanner.Split(bufio.ScanLines) - - // loop through each line, and: - // - isolate the SOURCES/filename entry - // - write out a dummy file of the same name to rpmBuildPath/SOURCES - for scanner.Scan() { - - // lookaside source is always the 2nd part of the line (after the long SHA sum) - srcFile := strings.Fields(scanner.Text())[1] - - // write a dummy file of the same name into the rpmbuild SOURCES/ directory: - dummyFile, err := os.OpenFile(fmt.Sprintf("%s/%s", rpmBuildPath, srcFile), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644) - if err != nil { - return "" - } - writer := bufio.NewWriter(dummyFile) - _, _ = writer.WriteString("This is a dummy lookaside file generated by srpmproc. It is only needed to get a working SRPM and extract version information. 
Please disregard\n") - writer.Flush() - dummyFile.Close() - } - - lookAside.Close() - - // Now, call rpmbuild to produce the dummy src file: - // Example: rpmbuild --define "_topdir /tmp/srpmproctmp_httpd1988142783_rpm" -bs /tmp/srpmproctmp_httpd1988142783_rpm/SPECS/*.spec - cmd := exec.Command("rpmbuild", fmt.Sprintf(`--define=_topdir %s`, rpmBuildPath), fmt.Sprintf(`--define=dist .el%d`, majorVersion), "-bs", fmt.Sprintf("%s/SPECS/%s.spec", rpmBuildPath, pkgName)) - if err := cmd.Run(); err != nil { - log.Println(err) - return "" - } - - // Read the first file from the SRPMS/ folder in rpmBuildPath. It should be the SRPM that rpmbuild produced above - // (there should only be one file - we check that it ends in ".rpm" just to be sure!) - lsTmp, err := ioutil.ReadDir(fmt.Sprintf("%s/SRPMS/", rpmBuildPath)) - if err != nil { - log.Println(err) - return "" - } - - srpmFile := lsTmp[0].Name() - - if !strings.HasSuffix(srpmFile, ".rpm") { - log.Println("Error, file found in dummy SRPMS directory did not have an .rpm extension! Perhaps rpmbuild didn't produce a proper source RPM?") - return "" - } - - // Call the rpm binary to extract the version-release info out of it, and tack on ".el" at the end: - cmd = exec.Command("rpm", "-qp", "--qf", `%{NAME}|%{VERSION}|%{RELEASE}\n`, fmt.Sprintf("%s/SRPMS/%s", rpmBuildPath, srpmFile)) - nvrTmp, err := cmd.CombinedOutput() - if err != nil { - log.Println("Error running rpm command to extract temporary SRPM name-version-release identifiers.") - log.Println("rpmbuild output: ", string(nvrTmp)) - log.Println("rpmbuild command: ", cmd.String()) - return "" - } - - // Pull first line of the rpm command's output to get the name-version-release number (there should only be 1 line) + // Pull first line of the version output to get the name-version-release number (there should only be 1 line) nvr := string(nvrTmp) nvr = strings.Fields(nvr)[0] - // Clean up: delete the temporary directory - if err := os.RemoveAll(rpmBuildPath); err != nil { - log.Printf("Error cleaning up temporary RPM directory %s . 
Non-fatal, continuing anyway...\n", rpmBuildPath) - } - // return name-version-release string we derived: - log.Printf("Derived NVR %s from tagless repo via temporary SRPM build\n", nvr) - return nvr + log.Printf("Derived NVR %s from tagless repo via rpmspec command\n", nvr) + return nvr, nil } // We need to loop through the lookaside blob files ("SourcesToIgnore"), @@ -1248,7 +1314,7 @@ func processLookasideSources(pd *data.ProcessData, md *data.ModeData, localDir s if err != nil { return fmt.Errorf("could not open ignored source file %s: %v", sourcePath, err) } - sourceFileBts, err := ioutil.ReadAll(sourceFile) + sourceFileBts, err := io.ReadAll(sourceFile) if err != nil { return fmt.Errorf("could not read the whole of ignored source file: %v", err) } diff --git a/vendor/github.com/sagikazarmark/locafero/.editorconfig b/vendor/github.com/sagikazarmark/locafero/.editorconfig new file mode 100644 index 00000000..6f944f54 --- /dev/null +++ b/vendor/github.com/sagikazarmark/locafero/.editorconfig @@ -0,0 +1,21 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 4 +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +[{Makefile,*.mk}] +indent_style = tab + +[*.nix] +indent_size = 2 + +[*.go] +indent_style = tab + +[{*.yml,*.yaml}] +indent_size = 2 diff --git a/vendor/github.com/sagikazarmark/locafero/.envrc b/vendor/github.com/sagikazarmark/locafero/.envrc new file mode 100644 index 00000000..2e0f9f5f --- /dev/null +++ b/vendor/github.com/sagikazarmark/locafero/.envrc @@ -0,0 +1,4 @@ +if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then + source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4=" +fi +use flake . 
--impure diff --git a/vendor/github.com/sagikazarmark/locafero/.gitignore b/vendor/github.com/sagikazarmark/locafero/.gitignore new file mode 100644 index 00000000..8f07e601 --- /dev/null +++ b/vendor/github.com/sagikazarmark/locafero/.gitignore @@ -0,0 +1,8 @@ +/.devenv/ +/.direnv/ +/.task/ +/bin/ +/build/ +/tmp/ +/var/ +/vendor/ diff --git a/vendor/github.com/sagikazarmark/locafero/.golangci.yaml b/vendor/github.com/sagikazarmark/locafero/.golangci.yaml new file mode 100644 index 00000000..829de2a4 --- /dev/null +++ b/vendor/github.com/sagikazarmark/locafero/.golangci.yaml @@ -0,0 +1,27 @@ +run: + timeout: 10m + +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/sagikazarmark/locafero) + goimports: + local-prefixes: github.com/sagikazarmark/locafero + misspell: + locale: US + nolintlint: + allow-leading-space: false # require machine-readable nolint directives (with no leading space) + allow-unused: false # report any unused nolint directives + require-specific: false # don't require nolint directives to be specific about which linter is being skipped + revive: + confidence: 0 + +linters: + enable: + - gci + - goimports + - misspell + - nolintlint + - revive diff --git a/vendor/github.com/sagikazarmark/locafero/BUILD.bazel b/vendor/github.com/sagikazarmark/locafero/BUILD.bazel new file mode 100644 index 00000000..6e484e96 --- /dev/null +++ b/vendor/github.com/sagikazarmark/locafero/BUILD.bazel @@ -0,0 +1,19 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "locafero", + srcs = [ + "file_type.go", + "finder.go", + "glob.go", + "glob_windows.go", + "helpers.go", + ], + importmap = "peridot.resf.org/vendor/github.com/sagikazarmark/locafero", + importpath = "github.com/sagikazarmark/locafero", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/sourcegraph/conc/iter", + "//vendor/github.com/spf13/afero", + ], +) diff --git a/vendor/github.com/sagikazarmark/locafero/LICENSE b/vendor/github.com/sagikazarmark/locafero/LICENSE new file mode 100644 index 00000000..a70b0f29 --- /dev/null +++ b/vendor/github.com/sagikazarmark/locafero/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2023 Márk Sági-Kazár + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is furnished +to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
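A hedged usage sketch for the vendored locafero Finder whose sources are added below; the search roots and name pattern here are assumptions chosen only for illustration.

```go
package main

import (
	"fmt"

	"github.com/sagikazarmark/locafero"
	"github.com/spf13/afero"
)

func main() {
	finder := locafero.Finder{
		Paths: []string{"/etc", "/home/user"}, // hypothetical search roots
		Names: []string{"config.*"},           // glob syntax per filepath.Match
		Type:  locafero.FileTypeFile,          // return files only, skip directories
	}

	// Find walks each root on the given afero filesystem and returns every match.
	results, err := finder.Find(afero.NewOsFs())
	if err != nil {
		panic(err)
	}
	for _, path := range results {
		fmt.Println(path)
	}
}
```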
diff --git a/vendor/github.com/sagikazarmark/locafero/README.md b/vendor/github.com/sagikazarmark/locafero/README.md new file mode 100644 index 00000000..a48e8e97 --- /dev/null +++ b/vendor/github.com/sagikazarmark/locafero/README.md @@ -0,0 +1,37 @@ +# Finder library for [Afero](https://github.com/spf13/afero) + +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/sagikazarmark/locafero/ci.yaml?style=flat-square)](https://github.com/sagikazarmark/locafero/actions/workflows/ci.yaml) +[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/sagikazarmark/locafero) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.20-61CFDD.svg?style=flat-square) +[![built with nix](https://img.shields.io/badge/builtwith-nix-7d81f7?style=flat-square)](https://builtwithnix.org) + +**Finder library for [Afero](https://github.com/spf13/afero) ported from [go-finder](https://github.com/sagikazarmark/go-finder).** + +> [!WARNING] +> This is an experimental library under development. +> +> **Backwards compatibility is not guaranteed, expect breaking changes.** + +## Installation + +```shell +go get github.com/sagikazarmark/locafero +``` + +## Usage + +Check out the [package example](https://pkg.go.dev/github.com/sagikazarmark/locafero#example-package) on go.dev. + +## Development + +**For an optimal developer experience, it is recommended to install [Nix](https://nixos.org/download.html) and [direnv](https://direnv.net/docs/installation.html).** + +Run the test suite: + +```shell +just test +``` + +## License + +The project is licensed under the [MIT License](LICENSE). diff --git a/vendor/github.com/sagikazarmark/locafero/file_type.go b/vendor/github.com/sagikazarmark/locafero/file_type.go new file mode 100644 index 00000000..9a9b1402 --- /dev/null +++ b/vendor/github.com/sagikazarmark/locafero/file_type.go @@ -0,0 +1,28 @@ +package locafero + +import "io/fs" + +// FileType represents the kind of entries [Finder] can return. +type FileType int + +const ( + FileTypeAll FileType = iota + FileTypeFile + FileTypeDir +) + +func (ft FileType) matchFileInfo(info fs.FileInfo) bool { + switch ft { + case FileTypeAll: + return true + + case FileTypeFile: + return !info.IsDir() + + case FileTypeDir: + return info.IsDir() + + default: + return false + } +} diff --git a/vendor/github.com/sagikazarmark/locafero/finder.go b/vendor/github.com/sagikazarmark/locafero/finder.go new file mode 100644 index 00000000..ef8d5471 --- /dev/null +++ b/vendor/github.com/sagikazarmark/locafero/finder.go @@ -0,0 +1,165 @@ +// Package finder looks for files and directories in an {fs.Fs} filesystem. +package locafero + +import ( + "errors" + "io/fs" + "path/filepath" + "strings" + + "github.com/sourcegraph/conc/iter" + "github.com/spf13/afero" +) + +// Finder looks for files and directories in an [afero.Fs] filesystem. +type Finder struct { + // Paths represents a list of locations that the [Finder] will search in. + // + // They are essentially the root directories or starting points for the search. + // + // Examples: + // - home/user + // - etc + Paths []string + + // Names are specific entries that the [Finder] will look for within the given Paths. + // + // It provides the capability to search for entries with depth, + // meaning it can target deeper locations within the directory structure. 
+ // + // It also supports glob syntax (as defined by [filepath.Match]), offering greater flexibility in search patterns. + // + // Examples: + // - config.yaml + // - home/*/config.yaml + // - home/*/config.* + Names []string + + // Type restricts the kind of entries returned by the [Finder]. + // + // This parameter helps in differentiating and filtering out files from directories or vice versa. + Type FileType +} + +// Find looks for files and directories in an [afero.Fs] filesystem. +func (f Finder) Find(fsys afero.Fs) ([]string, error) { + // Arbitrary go routine limit (TODO: make this a parameter) + // pool := pool.NewWithResults[[]string]().WithMaxGoroutines(5).WithErrors().WithFirstError() + + type searchItem struct { + path string + name string + } + + var searchItems []searchItem + + for _, searchPath := range f.Paths { + searchPath := searchPath + + for _, searchName := range f.Names { + searchName := searchName + + searchItems = append(searchItems, searchItem{searchPath, searchName}) + + // pool.Go(func() ([]string, error) { + // // If the name contains any glob character, perform a glob match + // if strings.ContainsAny(searchName, globMatch) { + // return globWalkSearch(fsys, searchPath, searchName, f.Type) + // } + // + // return statSearch(fsys, searchPath, searchName, f.Type) + // }) + } + } + + // allResults, err := pool.Wait() + // if err != nil { + // return nil, err + // } + + allResults, err := iter.MapErr(searchItems, func(item *searchItem) ([]string, error) { + // If the name contains any glob character, perform a glob match + if strings.ContainsAny(item.name, globMatch) { + return globWalkSearch(fsys, item.path, item.name, f.Type) + } + + return statSearch(fsys, item.path, item.name, f.Type) + }) + if err != nil { + return nil, err + } + + var results []string + + for _, r := range allResults { + results = append(results, r...) 
+ } + + // Sort results in alphabetical order for now + // sort.Strings(results) + + return results, nil +} + +func globWalkSearch(fsys afero.Fs, searchPath string, searchName string, searchType FileType) ([]string, error) { + var results []string + + err := afero.Walk(fsys, searchPath, func(p string, fileInfo fs.FileInfo, err error) error { + if err != nil { + return err + } + + // Skip the root path + if p == searchPath { + return nil + } + + var result error + + // Stop reading subdirectories + // TODO: add depth detection here + if fileInfo.IsDir() && filepath.Dir(p) == searchPath { + result = fs.SkipDir + } + + // Skip unmatching type + if !searchType.matchFileInfo(fileInfo) { + return result + } + + match, err := filepath.Match(searchName, fileInfo.Name()) + if err != nil { + return err + } + + if match { + results = append(results, p) + } + + return result + }) + if err != nil { + return results, err + } + + return results, nil +} + +func statSearch(fsys afero.Fs, searchPath string, searchName string, searchType FileType) ([]string, error) { + filePath := filepath.Join(searchPath, searchName) + + fileInfo, err := fsys.Stat(filePath) + if errors.Is(err, fs.ErrNotExist) { + return nil, nil + } + if err != nil { + return nil, err + } + + // Skip unmatching type + if !searchType.matchFileInfo(fileInfo) { + return nil, nil + } + + return []string{filePath}, nil +} diff --git a/vendor/github.com/sagikazarmark/locafero/flake.lock b/vendor/github.com/sagikazarmark/locafero/flake.lock new file mode 100644 index 00000000..4bea8154 --- /dev/null +++ b/vendor/github.com/sagikazarmark/locafero/flake.lock @@ -0,0 +1,472 @@ +{ + "nodes": { + "cachix": { + "inputs": { + "devenv": "devenv_2", + "flake-compat": [ + "devenv", + "flake-compat" + ], + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "pre-commit-hooks": [ + "devenv", + "pre-commit-hooks" + ] + }, + "locked": { + "lastModified": 1712055811, + "narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=", + "owner": "cachix", + "repo": "cachix", + "rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "cachix", + "type": "github" + } + }, + "devenv": { + "inputs": { + "cachix": "cachix", + "flake-compat": "flake-compat_2", + "nix": "nix_2", + "nixpkgs": "nixpkgs_2", + "pre-commit-hooks": "pre-commit-hooks" + }, + "locked": { + "lastModified": 1717245169, + "narHash": "sha256-+mW3rTBjGU8p1THJN0lX/Dd/8FbnF+3dB+mJuSaxewE=", + "owner": "cachix", + "repo": "devenv", + "rev": "c3f9f053c077c6f88a3de5276d9178c62baa3fc3", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "devenv", + "type": "github" + } + }, + "devenv_2": { + "inputs": { + "flake-compat": [ + "devenv", + "cachix", + "flake-compat" + ], + "nix": "nix", + "nixpkgs": "nixpkgs", + "poetry2nix": "poetry2nix", + "pre-commit-hooks": [ + "devenv", + "cachix", + "pre-commit-hooks" + ] + }, + "locked": { + "lastModified": 1708704632, + "narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=", + "owner": "cachix", + "repo": "devenv", + "rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "python-rewrite", + "repo": "devenv", + "type": "github" + } + }, + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1673956053, + "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", + "type": "github" + }, + 
"original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-compat_2": { + "flake": false, + "locked": { + "lastModified": 1696426674, + "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-parts": { + "inputs": { + "nixpkgs-lib": "nixpkgs-lib" + }, + "locked": { + "lastModified": 1717285511, + "narHash": "sha256-iKzJcpdXih14qYVcZ9QC9XuZYnPc6T8YImb6dX166kw=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "2a55567fcf15b1b1c7ed712a2c6fadaec7412ea8", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1689068808, + "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flake-utils_2": { + "inputs": { + "systems": "systems_2" + }, + "locked": { + "lastModified": 1710146030, + "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "gitignore": { + "inputs": { + "nixpkgs": [ + "devenv", + "pre-commit-hooks", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1709087332, + "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", + "owner": "hercules-ci", + "repo": "gitignore.nix", + "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "gitignore.nix", + "type": "github" + } + }, + "nix": { + "inputs": { + "flake-compat": "flake-compat", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression" + }, + "locked": { + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", + "owner": "domenkozar", + "repo": "nix", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", + "type": "github" + }, + "original": { + "owner": "domenkozar", + "ref": "devenv-2.21", + "repo": "nix", + "type": "github" + } + }, + "nix-github-actions": { + "inputs": { + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "poetry2nix", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1688870561, + "narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=", + "owner": "nix-community", + "repo": "nix-github-actions", + "rev": "165b1650b753316aa7f1787f3005a8d2da0f5301", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "nix-github-actions", + "type": "github" + } + }, + "nix_2": { + "inputs": { + "flake-compat": [ + "devenv", + "flake-compat" + ], + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression_2" + }, + "locked": { + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", + "owner": "domenkozar", + "repo": "nix", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", + "type": "github" + }, + "original": { + "owner": "domenkozar", + "ref": 
"devenv-2.21", + "repo": "nix", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1692808169, + "narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "9201b5ff357e781bf014d0330d18555695df7ba8", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-lib": { + "locked": { + "lastModified": 1717284937, + "narHash": "sha256-lIbdfCsf8LMFloheeE6N31+BMIeixqyQWbSr2vk79EQ=", + "type": "tarball", + "url": "https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz" + }, + "original": { + "type": "tarball", + "url": "https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz" + } + }, + "nixpkgs-regression": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + } + }, + "nixpkgs-regression_2": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + } + }, + "nixpkgs-stable": { + "locked": { + "lastModified": 1710695816, + "narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "614b4613980a522ba49f0d194531beddbb7220d3", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-23.11", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_2": { + "locked": { + "lastModified": 1713361204, + "narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=", + "owner": "cachix", + "repo": "devenv-nixpkgs", + "rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "rolling", + "repo": "devenv-nixpkgs", + "type": "github" + } + }, + "nixpkgs_3": { + "locked": { + "lastModified": 1717112898, + "narHash": "sha256-7R2ZvOnvd9h8fDd65p0JnB7wXfUvreox3xFdYWd1BnY=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "6132b0f6e344ce2fe34fc051b72fb46e34f668e0", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "poetry2nix": { + "inputs": { + "flake-utils": "flake-utils", + "nix-github-actions": "nix-github-actions", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1692876271, + "narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=", + "owner": "nix-community", + "repo": "poetry2nix", + "rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "poetry2nix", + "type": "github" + } + }, + "pre-commit-hooks": { + "inputs": { + "flake-compat": [ + "devenv", + "flake-compat" + ], + "flake-utils": "flake-utils_2", + "gitignore": "gitignore", + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-stable": "nixpkgs-stable" + }, + "locked": { + "lastModified": 1713775815, + "narHash": 
"sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=", + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "type": "github" + } + }, + "root": { + "inputs": { + "devenv": "devenv", + "flake-parts": "flake-parts", + "nixpkgs": "nixpkgs_3" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + }, + "systems_2": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/vendor/github.com/sagikazarmark/locafero/flake.nix b/vendor/github.com/sagikazarmark/locafero/flake.nix new file mode 100644 index 00000000..cddf1d40 --- /dev/null +++ b/vendor/github.com/sagikazarmark/locafero/flake.nix @@ -0,0 +1,47 @@ +{ + description = "Finder library for Afero"; + + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; + flake-parts.url = "github:hercules-ci/flake-parts"; + devenv.url = "github:cachix/devenv"; + }; + + outputs = inputs@{ flake-parts, ... }: + flake-parts.lib.mkFlake { inherit inputs; } { + imports = [ + inputs.devenv.flakeModule + ]; + + systems = [ "x86_64-linux" "aarch64-darwin" ]; + + perSystem = { config, self', inputs', pkgs, system, ... }: rec { + devenv.shells = { + default = { + languages = { + go.enable = true; + }; + + packages = with pkgs; [ + just + + golangci-lint + ]; + + # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 + containers = pkgs.lib.mkForce { }; + }; + + ci = devenv.shells.default; + + ci_1_21 = { + imports = [ devenv.shells.ci ]; + + languages = { + go.package = pkgs.go_1_21; + }; + }; + }; + }; + }; +} diff --git a/vendor/github.com/sagikazarmark/locafero/glob.go b/vendor/github.com/sagikazarmark/locafero/glob.go new file mode 100644 index 00000000..00f833e9 --- /dev/null +++ b/vendor/github.com/sagikazarmark/locafero/glob.go @@ -0,0 +1,5 @@ +//go:build !windows + +package locafero + +const globMatch = "*?[]\\^" diff --git a/vendor/github.com/sagikazarmark/locafero/glob_windows.go b/vendor/github.com/sagikazarmark/locafero/glob_windows.go new file mode 100644 index 00000000..7aec2b24 --- /dev/null +++ b/vendor/github.com/sagikazarmark/locafero/glob_windows.go @@ -0,0 +1,8 @@ +//go:build windows + +package locafero + +// See [filepath.Match]: +// +// On Windows, escaping is disabled. Instead, '\\' is treated as path separator. +const globMatch = "*?[]^" diff --git a/vendor/github.com/sagikazarmark/locafero/helpers.go b/vendor/github.com/sagikazarmark/locafero/helpers.go new file mode 100644 index 00000000..05b43448 --- /dev/null +++ b/vendor/github.com/sagikazarmark/locafero/helpers.go @@ -0,0 +1,41 @@ +package locafero + +import "fmt" + +// NameWithExtensions creates a list of names from a base name and a list of extensions. +// +// TODO: find a better name for this function. 
+func NameWithExtensions(baseName string, extensions ...string) []string { + var names []string + + if baseName == "" { + return names + } + + for _, ext := range extensions { + if ext == "" { + continue + } + + names = append(names, fmt.Sprintf("%s.%s", baseName, ext)) + } + + return names +} + +// NameWithOptionalExtensions creates a list of names from a base name and a list of extensions, +// plus it adds the base name (without any extensions) to the end of the list. +// +// TODO: find a better name for this function. +func NameWithOptionalExtensions(baseName string, extensions ...string) []string { + var names []string + + if baseName == "" { + return names + } + + names = NameWithExtensions(baseName, extensions...) + names = append(names, baseName) + + return names +} diff --git a/vendor/github.com/sagikazarmark/locafero/justfile b/vendor/github.com/sagikazarmark/locafero/justfile new file mode 100644 index 00000000..00a88850 --- /dev/null +++ b/vendor/github.com/sagikazarmark/locafero/justfile @@ -0,0 +1,11 @@ +default: + just --list + +test: + go test -race -v ./... + +lint: + golangci-lint run + +fmt: + golangci-lint run --fix diff --git a/vendor/github.com/sagikazarmark/slog-shim/.editorconfig b/vendor/github.com/sagikazarmark/slog-shim/.editorconfig new file mode 100644 index 00000000..1fb0e1be --- /dev/null +++ b/vendor/github.com/sagikazarmark/slog-shim/.editorconfig @@ -0,0 +1,18 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 4 +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +[*.nix] +indent_size = 2 + +[{Makefile,*.mk}] +indent_style = tab + +[Taskfile.yaml] +indent_size = 2 diff --git a/vendor/github.com/sagikazarmark/slog-shim/.envrc b/vendor/github.com/sagikazarmark/slog-shim/.envrc new file mode 100644 index 00000000..3ce7171a --- /dev/null +++ b/vendor/github.com/sagikazarmark/slog-shim/.envrc @@ -0,0 +1,4 @@ +if ! has nix_direnv_version || ! nix_direnv_version 2.3.0; then + source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.3.0/direnvrc" "sha256-Dmd+j63L84wuzgyjITIfSxSD57Tx7v51DMxVZOsiUD8=" +fi +use flake . 
--impure diff --git a/vendor/github.com/sagikazarmark/slog-shim/.gitignore b/vendor/github.com/sagikazarmark/slog-shim/.gitignore new file mode 100644 index 00000000..dc6d8b58 --- /dev/null +++ b/vendor/github.com/sagikazarmark/slog-shim/.gitignore @@ -0,0 +1,4 @@ +/.devenv/ +/.direnv/ +/.task/ +/build/ diff --git a/vendor/github.com/sagikazarmark/slog-shim/BUILD.bazel b/vendor/github.com/sagikazarmark/slog-shim/BUILD.bazel new file mode 100644 index 00000000..7b640bfc --- /dev/null +++ b/vendor/github.com/sagikazarmark/slog-shim/BUILD.bazel @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "slog-shim", + srcs = [ + "attr.go", + "attr_120.go", + "handler.go", + "handler_120.go", + "json_handler.go", + "json_handler_120.go", + "level.go", + "level_120.go", + "logger.go", + "logger_120.go", + "record.go", + "record_120.go", + "text_handler.go", + "text_handler_120.go", + "value.go", + "value_120.go", + ], + importmap = "peridot.resf.org/vendor/github.com/sagikazarmark/slog-shim", + importpath = "github.com/sagikazarmark/slog-shim", + visibility = ["//visibility:public"], + deps = ["//vendor/golang.org/x/exp/slog"], +) diff --git a/vendor/github.com/sagikazarmark/slog-shim/LICENSE b/vendor/github.com/sagikazarmark/slog-shim/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/github.com/sagikazarmark/slog-shim/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
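The slog-shim package that starts below is a deliberately thin layer: each source file comes in a `go1.21` variant aliasing `log/slog` and a `_120` variant (built under `!go1.21`) aliasing `golang.org/x/exp/slog`, which is why the BUILD file above lists sixteen paired sources. A minimal reviewer's sketch of the consuming side, not part of the vendored tree, with an invented log message and attributes:

```go
package main

import (
	"os"

	slog "github.com/sagikazarmark/slog-shim"
)

func main() {
	// These calls resolve to log/slog on a go1.21+ toolchain and to
	// golang.org/x/exp/slog below that, via the build tags shown below.
	logger := slog.New(slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{
		Level: slog.LevelDebug,
	}))

	logger.Info("vendored dependency added",
		slog.String("module", "github.com/sagikazarmark/slog-shim"),
		slog.Int("go_files", 16),
	)
}
```

Because every exported name is a type alias or a thin wrapper (see attr.go, handler.go, logger.go and friends below), swapping the import path for `log/slog` later is a mechanical change — the upgrade path the README describes.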
diff --git a/vendor/github.com/sagikazarmark/slog-shim/README.md b/vendor/github.com/sagikazarmark/slog-shim/README.md new file mode 100644 index 00000000..1f5be85e --- /dev/null +++ b/vendor/github.com/sagikazarmark/slog-shim/README.md @@ -0,0 +1,81 @@ +# [slog](https://pkg.go.dev/log/slog) shim + +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/sagikazarmark/slog-shim/ci.yaml?style=flat-square)](https://github.com/sagikazarmark/slog-shim/actions/workflows/ci.yaml) +[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/sagikazarmark/slog-shim) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.20-61CFDD.svg?style=flat-square) +[![built with nix](https://img.shields.io/badge/builtwith-nix-7d81f7?style=flat-square)](https://builtwithnix.org) + +Go 1.21 introduced a [new structured logging package](https://golang.org/doc/go1.21#slog), `log/slog`, to the standard library. +Although it's been eagerly anticipated by many, widespread adoption isn't expected to occur immediately, +especially since updating to Go 1.21 is a decision that most libraries won't make overnight. + +Before this package was added to the standard library, there was an _experimental_ version available at [golang.org/x/exp/slog](https://pkg.go.dev/golang.org/x/exp/slog). +While it's generally advised against using experimental packages in production, +this one served as a sort of backport package for the last few years, +incorporating new features before they were added to the standard library (like `slices`, `maps` or `errors`). + +This package serves as a bridge, helping libraries integrate slog in a backward-compatible way without having to immediately update their Go version requirement to 1.21. On Go 1.21 (and above), it acts as a drop-in replacement for `log/slog`, while below 1.21 it falls back to `golang.org/x/exp/slog`. + +**How does it achieve backwards compatibility?** + +Although there's no consensus on whether dropping support for older Go versions is considered backward compatible, a majority seems to believe it is. +(I don't have scientific proof for this, but it's based on conversations with various individuals across different channels.) + +This package adheres to that interpretation of backward compatibility. On Go 1.21, the shim uses type aliases to offer the same API as `log/slog`. +Once a library upgrades its version requirement to Go 1.21, it should be able to discard this shim and use `log/slog` directly. + +For older Go versions, the library might become unstable after removing the shim. +However, since those older versions are no longer supported, the promise of backward compatibility remains intact. + +## Installation + +```shell +go get github.com/sagikazarmark/slog-shim +``` + +## Usage + +Import this package into your library and use it in your public API: + +```go +package mylib + +import slog "github.com/sagikazarmark/slog-shim" + +func New(logger *slog.Logger) MyLib { + // ...
+} +``` + +When using the library, clients can either use `log/slog` (when on Go 1.21) or `golang.org/x/exp/slog` (below Go 1.21): + +```go +package main + +import "log/slog" + +// OR + +import "golang.org/x/exp/slog" + +mylib.New(slog.Default()) +``` + +**Make sure consumers are aware that your API behaves differently on different Go versions.** + +Once you bump your Go version requirement to Go 1.21, you can drop the shim entirely from your code: + +```diff +package mylib + +- import slog "github.com/sagikazarmark/slog-shim" ++ import "log/slog" + +func New(logger *slog.Logger) MyLib { + // ... +} +``` + +## License + +The project is licensed under a [BSD-style license](LICENSE). diff --git a/vendor/github.com/sagikazarmark/slog-shim/attr.go b/vendor/github.com/sagikazarmark/slog-shim/attr.go new file mode 100644 index 00000000..89608bf3 --- /dev/null +++ b/vendor/github.com/sagikazarmark/slog-shim/attr.go @@ -0,0 +1,74 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 + +package slog + +import ( + "log/slog" + "time" +) + +// An Attr is a key-value pair. +type Attr = slog.Attr + +// String returns an Attr for a string value. +func String(key, value string) Attr { + return slog.String(key, value) +} + +// Int64 returns an Attr for an int64. +func Int64(key string, value int64) Attr { + return slog.Int64(key, value) +} + +// Int converts an int to an int64 and returns +// an Attr with that value. +func Int(key string, value int) Attr { + return slog.Int(key, value) +} + +// Uint64 returns an Attr for a uint64. +func Uint64(key string, v uint64) Attr { + return slog.Uint64(key, v) +} + +// Float64 returns an Attr for a floating-point number. +func Float64(key string, v float64) Attr { + return slog.Float64(key, v) +} + +// Bool returns an Attr for a bool. +func Bool(key string, v bool) Attr { + return slog.Bool(key, v) +} + +// Time returns an Attr for a time.Time. +// It discards the monotonic portion. +func Time(key string, v time.Time) Attr { + return slog.Time(key, v) +} + +// Duration returns an Attr for a time.Duration. +func Duration(key string, v time.Duration) Attr { + return slog.Duration(key, v) +} + +// Group returns an Attr for a Group Value. +// The first argument is the key; the remaining arguments +// are converted to Attrs as in [Logger.Log]. +// +// Use Group to collect several key-value pairs under a single +// key on a log line, or as the result of LogValue +// in order to log a single value as multiple Attrs. +func Group(key string, args ...any) Attr { + return slog.Group(key, args...) +} + +// Any returns an Attr for the supplied value. +// See [Value.AnyValue] for how values are treated. +func Any(key string, value any) Attr { + return slog.Any(key, value) +} diff --git a/vendor/github.com/sagikazarmark/slog-shim/attr_120.go b/vendor/github.com/sagikazarmark/slog-shim/attr_120.go new file mode 100644 index 00000000..b6648133 --- /dev/null +++ b/vendor/github.com/sagikazarmark/slog-shim/attr_120.go @@ -0,0 +1,75 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.21 + +package slog + +import ( + "time" + + "golang.org/x/exp/slog" +) + +// An Attr is a key-value pair. +type Attr = slog.Attr + +// String returns an Attr for a string value. 
+func String(key, value string) Attr { + return slog.String(key, value) +} + +// Int64 returns an Attr for an int64. +func Int64(key string, value int64) Attr { + return slog.Int64(key, value) +} + +// Int converts an int to an int64 and returns +// an Attr with that value. +func Int(key string, value int) Attr { + return slog.Int(key, value) +} + +// Uint64 returns an Attr for a uint64. +func Uint64(key string, v uint64) Attr { + return slog.Uint64(key, v) +} + +// Float64 returns an Attr for a floating-point number. +func Float64(key string, v float64) Attr { + return slog.Float64(key, v) +} + +// Bool returns an Attr for a bool. +func Bool(key string, v bool) Attr { + return slog.Bool(key, v) +} + +// Time returns an Attr for a time.Time. +// It discards the monotonic portion. +func Time(key string, v time.Time) Attr { + return slog.Time(key, v) +} + +// Duration returns an Attr for a time.Duration. +func Duration(key string, v time.Duration) Attr { + return slog.Duration(key, v) +} + +// Group returns an Attr for a Group Value. +// The first argument is the key; the remaining arguments +// are converted to Attrs as in [Logger.Log]. +// +// Use Group to collect several key-value pairs under a single +// key on a log line, or as the result of LogValue +// in order to log a single value as multiple Attrs. +func Group(key string, args ...any) Attr { + return slog.Group(key, args...) +} + +// Any returns an Attr for the supplied value. +// See [Value.AnyValue] for how values are treated. +func Any(key string, value any) Attr { + return slog.Any(key, value) +} diff --git a/vendor/github.com/sagikazarmark/slog-shim/flake.lock b/vendor/github.com/sagikazarmark/slog-shim/flake.lock new file mode 100644 index 00000000..7e8898e9 --- /dev/null +++ b/vendor/github.com/sagikazarmark/slog-shim/flake.lock @@ -0,0 +1,273 @@ +{ + "nodes": { + "devenv": { + "inputs": { + "flake-compat": "flake-compat", + "nix": "nix", + "nixpkgs": "nixpkgs", + "pre-commit-hooks": "pre-commit-hooks" + }, + "locked": { + "lastModified": 1694097209, + "narHash": "sha256-gQmBjjxeSyySjbh0yQVBKApo2KWIFqqbRUvG+Fa+QpM=", + "owner": "cachix", + "repo": "devenv", + "rev": "7a8e6a91510efe89d8dcb8e43233f93e86f6b189", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "devenv", + "type": "github" + } + }, + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1673956053, + "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-parts": { + "inputs": { + "nixpkgs-lib": "nixpkgs-lib" + }, + "locked": { + "lastModified": 1693611461, + "narHash": "sha256-aPODl8vAgGQ0ZYFIRisxYG5MOGSkIczvu2Cd8Gb9+1Y=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "7f53fdb7bdc5bb237da7fefef12d099e4fd611ca", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1685518550, + "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "gitignore": { + "inputs": { + "nixpkgs": [ + "devenv", + 
"pre-commit-hooks", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1660459072, + "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=", + "owner": "hercules-ci", + "repo": "gitignore.nix", + "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "gitignore.nix", + "type": "github" + } + }, + "lowdown-src": { + "flake": false, + "locked": { + "lastModified": 1633514407, + "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=", + "owner": "kristapsdz", + "repo": "lowdown", + "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8", + "type": "github" + }, + "original": { + "owner": "kristapsdz", + "repo": "lowdown", + "type": "github" + } + }, + "nix": { + "inputs": { + "lowdown-src": "lowdown-src", + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression" + }, + "locked": { + "lastModified": 1676545802, + "narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=", + "owner": "domenkozar", + "repo": "nix", + "rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f", + "type": "github" + }, + "original": { + "owner": "domenkozar", + "ref": "relaxed-flakes", + "repo": "nix", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1678875422, + "narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-lib": { + "locked": { + "dir": "lib", + "lastModified": 1693471703, + "narHash": "sha256-0l03ZBL8P1P6z8MaSDS/MvuU8E75rVxe5eE1N6gxeTo=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "3e52e76b70d5508f3cec70b882a29199f4d1ee85", + "type": "github" + }, + "original": { + "dir": "lib", + "owner": "NixOS", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-regression": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + } + }, + "nixpkgs-stable": { + "locked": { + "lastModified": 1685801374, + "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "c37ca420157f4abc31e26f436c1145f8951ff373", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-23.05", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_2": { + "locked": { + "lastModified": 1694345580, + "narHash": "sha256-BbG0NUxQTz1dN/Y87yPWZc/0Kp/coJ0vM3+7sNa5kUM=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "f002de6834fdde9c864f33c1ec51da7df19cd832", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "master", + "repo": "nixpkgs", + "type": "github" + } + }, + "pre-commit-hooks": { + "inputs": { + "flake-compat": [ + "devenv", + "flake-compat" + ], + "flake-utils": "flake-utils", + "gitignore": "gitignore", + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-stable": "nixpkgs-stable" + }, + "locked": { + "lastModified": 1688056373, + "narHash": "sha256-2+SDlNRTKsgo3LBRiMUcoEUb6sDViRNQhzJquZ4koOI=", + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "rev": "5843cf069272d92b60c3ed9e55b7a8989c01d4c7", + 
"type": "github" + }, + "original": { + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "type": "github" + } + }, + "root": { + "inputs": { + "devenv": "devenv", + "flake-parts": "flake-parts", + "nixpkgs": "nixpkgs_2" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/vendor/github.com/sagikazarmark/slog-shim/flake.nix b/vendor/github.com/sagikazarmark/slog-shim/flake.nix new file mode 100644 index 00000000..7239bbc2 --- /dev/null +++ b/vendor/github.com/sagikazarmark/slog-shim/flake.nix @@ -0,0 +1,57 @@ +{ + inputs = { + # nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; + nixpkgs.url = "github:NixOS/nixpkgs/master"; + flake-parts.url = "github:hercules-ci/flake-parts"; + devenv.url = "github:cachix/devenv"; + }; + + outputs = inputs@{ flake-parts, ... }: + flake-parts.lib.mkFlake { inherit inputs; } { + imports = [ + inputs.devenv.flakeModule + ]; + + systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ]; + + perSystem = { config, self', inputs', pkgs, system, ... }: rec { + devenv.shells = { + default = { + languages = { + go.enable = true; + go.package = pkgs.lib.mkDefault pkgs.go_1_21; + }; + + # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 + containers = pkgs.lib.mkForce { }; + }; + + ci = devenv.shells.default; + + ci_1_19 = { + imports = [ devenv.shells.ci ]; + + languages = { + go.package = pkgs.go_1_19; + }; + }; + + ci_1_20 = { + imports = [ devenv.shells.ci ]; + + languages = { + go.package = pkgs.go_1_20; + }; + }; + + ci_1_21 = { + imports = [ devenv.shells.ci ]; + + languages = { + go.package = pkgs.go_1_21; + }; + }; + }; + }; + }; +} diff --git a/vendor/github.com/sagikazarmark/slog-shim/handler.go b/vendor/github.com/sagikazarmark/slog-shim/handler.go new file mode 100644 index 00000000..f55556ae --- /dev/null +++ b/vendor/github.com/sagikazarmark/slog-shim/handler.go @@ -0,0 +1,45 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 + +package slog + +import ( + "log/slog" +) + +// A Handler handles log records produced by a Logger.. +// +// A typical handler may print log records to standard error, +// or write them to a file or database, or perhaps augment them +// with additional attributes and pass them on to another handler. +// +// Any of the Handler's methods may be called concurrently with itself +// or with other methods. It is the responsibility of the Handler to +// manage this concurrency. +// +// Users of the slog package should not invoke Handler methods directly. +// They should use the methods of [Logger] instead. +type Handler = slog.Handler + +// HandlerOptions are options for a TextHandler or JSONHandler. +// A zero HandlerOptions consists entirely of default values. +type HandlerOptions = slog.HandlerOptions + +// Keys for "built-in" attributes. +const ( + // TimeKey is the key used by the built-in handlers for the time + // when the log method is called. The associated Value is a [time.Time]. + TimeKey = slog.TimeKey + // LevelKey is the key used by the built-in handlers for the level + // of the log call. 
The associated value is a [Level]. + LevelKey = slog.LevelKey + // MessageKey is the key used by the built-in handlers for the + // message of the log call. The associated value is a string. + MessageKey = slog.MessageKey + // SourceKey is the key used by the built-in handlers for the source file + // and line of the log call. The associated value is a string. + SourceKey = slog.SourceKey +) diff --git a/vendor/github.com/sagikazarmark/slog-shim/handler_120.go b/vendor/github.com/sagikazarmark/slog-shim/handler_120.go new file mode 100644 index 00000000..67005757 --- /dev/null +++ b/vendor/github.com/sagikazarmark/slog-shim/handler_120.go @@ -0,0 +1,45 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.21 + +package slog + +import ( + "golang.org/x/exp/slog" +) + +// A Handler handles log records produced by a Logger.. +// +// A typical handler may print log records to standard error, +// or write them to a file or database, or perhaps augment them +// with additional attributes and pass them on to another handler. +// +// Any of the Handler's methods may be called concurrently with itself +// or with other methods. It is the responsibility of the Handler to +// manage this concurrency. +// +// Users of the slog package should not invoke Handler methods directly. +// They should use the methods of [Logger] instead. +type Handler = slog.Handler + +// HandlerOptions are options for a TextHandler or JSONHandler. +// A zero HandlerOptions consists entirely of default values. +type HandlerOptions = slog.HandlerOptions + +// Keys for "built-in" attributes. +const ( + // TimeKey is the key used by the built-in handlers for the time + // when the log method is called. The associated Value is a [time.Time]. + TimeKey = slog.TimeKey + // LevelKey is the key used by the built-in handlers for the level + // of the log call. The associated value is a [Level]. + LevelKey = slog.LevelKey + // MessageKey is the key used by the built-in handlers for the + // message of the log call. The associated value is a string. + MessageKey = slog.MessageKey + // SourceKey is the key used by the built-in handlers for the source file + // and line of the log call. The associated value is a string. + SourceKey = slog.SourceKey +) diff --git a/vendor/github.com/sagikazarmark/slog-shim/json_handler.go b/vendor/github.com/sagikazarmark/slog-shim/json_handler.go new file mode 100644 index 00000000..7c22bd81 --- /dev/null +++ b/vendor/github.com/sagikazarmark/slog-shim/json_handler.go @@ -0,0 +1,23 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 + +package slog + +import ( + "io" + "log/slog" +) + +// JSONHandler is a Handler that writes Records to an io.Writer as +// line-delimited JSON objects. +type JSONHandler = slog.JSONHandler + +// NewJSONHandler creates a JSONHandler that writes to w, +// using the given options. +// If opts is nil, the default options are used. 
+func NewJSONHandler(w io.Writer, opts *HandlerOptions) *JSONHandler { + return slog.NewJSONHandler(w, opts) +} diff --git a/vendor/github.com/sagikazarmark/slog-shim/json_handler_120.go b/vendor/github.com/sagikazarmark/slog-shim/json_handler_120.go new file mode 100644 index 00000000..7b14f10b --- /dev/null +++ b/vendor/github.com/sagikazarmark/slog-shim/json_handler_120.go @@ -0,0 +1,24 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.21 + +package slog + +import ( + "io" + + "golang.org/x/exp/slog" +) + +// JSONHandler is a Handler that writes Records to an io.Writer as +// line-delimited JSON objects. +type JSONHandler = slog.JSONHandler + +// NewJSONHandler creates a JSONHandler that writes to w, +// using the given options. +// If opts is nil, the default options are used. +func NewJSONHandler(w io.Writer, opts *HandlerOptions) *JSONHandler { + return slog.NewJSONHandler(w, opts) +} diff --git a/vendor/github.com/sagikazarmark/slog-shim/level.go b/vendor/github.com/sagikazarmark/slog-shim/level.go new file mode 100644 index 00000000..07288cf8 --- /dev/null +++ b/vendor/github.com/sagikazarmark/slog-shim/level.go @@ -0,0 +1,61 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 + +package slog + +import ( + "log/slog" +) + +// A Level is the importance or severity of a log event. +// The higher the level, the more important or severe the event. +type Level = slog.Level + +// Level numbers are inherently arbitrary, +// but we picked them to satisfy three constraints. +// Any system can map them to another numbering scheme if it wishes. +// +// First, we wanted the default level to be Info. Since Levels are ints, Info is +// the default value for int, zero. +// +// Second, we wanted to make it easy to use levels to specify logger verbosity. +// Since a larger level means a more severe event, a logger that accepts events +// with smaller (or more negative) level means a more verbose logger. Logger +// verbosity is thus the negation of event severity, and the default verbosity +// of 0 accepts all events at least as severe as INFO. +// +// Third, we wanted some room between levels to accommodate schemes with named +// levels between ours. For example, Google Cloud Logging defines a Notice level +// between Info and Warn. Since there are only a few of these intermediate +// levels, the gap between the numbers need not be large. Our gap of 4 matches +// OpenTelemetry's mapping. Subtracting 9 from an OpenTelemetry level in the +// DEBUG, INFO, WARN and ERROR ranges converts it to the corresponding slog +// Level range. OpenTelemetry also has the names TRACE and FATAL, which slog +// does not. But those OpenTelemetry levels can still be represented as slog +// Levels by using the appropriate integers. +// +// Names for common levels. +const ( + LevelDebug Level = slog.LevelDebug + LevelInfo Level = slog.LevelInfo + LevelWarn Level = slog.LevelWarn + LevelError Level = slog.LevelError +) + +// A LevelVar is a Level variable, to allow a Handler level to change +// dynamically. +// It implements Leveler as well as a Set method, +// and it is safe for use by multiple goroutines. +// The zero LevelVar corresponds to LevelInfo. +type LevelVar = slog.LevelVar + +// A Leveler provides a Level value.
+// +// As Level itself implements Leveler, clients typically supply +// a Level value wherever a Leveler is needed, such as in HandlerOptions. +// Clients who need to vary the level dynamically can provide a more complex +// Leveler implementation such as *LevelVar. +type Leveler = slog.Leveler diff --git a/vendor/github.com/sagikazarmark/slog-shim/level_120.go b/vendor/github.com/sagikazarmark/slog-shim/level_120.go new file mode 100644 index 00000000..d3feb942 --- /dev/null +++ b/vendor/github.com/sagikazarmark/slog-shim/level_120.go @@ -0,0 +1,61 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.21 + +package slog + +import ( + "golang.org/x/exp/slog" +) + +// A Level is the importance or severity of a log event. +// The higher the level, the more important or severe the event. +type Level = slog.Level + +// Level numbers are inherently arbitrary, +// but we picked them to satisfy three constraints. +// Any system can map them to another numbering scheme if it wishes. +// +// First, we wanted the default level to be Info. Since Levels are ints, Info is +// the default value for int, zero. +// +// Second, we wanted to make it easy to use levels to specify logger verbosity. +// Since a larger level means a more severe event, a logger that accepts events +// with smaller (or more negative) level means a more verbose logger. Logger +// verbosity is thus the negation of event severity, and the default verbosity +// of 0 accepts all events at least as severe as INFO. +// +// Third, we wanted some room between levels to accommodate schemes with named +// levels between ours. For example, Google Cloud Logging defines a Notice level +// between Info and Warn. Since there are only a few of these intermediate +// levels, the gap between the numbers need not be large. Our gap of 4 matches +// OpenTelemetry's mapping. Subtracting 9 from an OpenTelemetry level in the +// DEBUG, INFO, WARN and ERROR ranges converts it to the corresponding slog +// Level range. OpenTelemetry also has the names TRACE and FATAL, which slog +// does not. But those OpenTelemetry levels can still be represented as slog +// Levels by using the appropriate integers. +// +// Names for common levels. +const ( + LevelDebug Level = slog.LevelDebug + LevelInfo Level = slog.LevelInfo + LevelWarn Level = slog.LevelWarn + LevelError Level = slog.LevelError +) + +// A LevelVar is a Level variable, to allow a Handler level to change +// dynamically. +// It implements Leveler as well as a Set method, +// and it is safe for use by multiple goroutines. +// The zero LevelVar corresponds to LevelInfo. +type LevelVar = slog.LevelVar + +// A Leveler provides a Level value. +// +// As Level itself implements Leveler, clients typically supply +// a Level value wherever a Leveler is needed, such as in HandlerOptions. +// Clients who need to vary the level dynamically can provide a more complex +// Leveler implementation such as *LevelVar. +type Leveler = slog.Leveler diff --git a/vendor/github.com/sagikazarmark/slog-shim/logger.go b/vendor/github.com/sagikazarmark/slog-shim/logger.go new file mode 100644 index 00000000..e80036be --- /dev/null +++ b/vendor/github.com/sagikazarmark/slog-shim/logger.go @@ -0,0 +1,98 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +//go:build go1.21 + +package slog + +import ( + "context" + "log" + "log/slog" +) + +// Default returns the default Logger. +func Default() *Logger { return slog.Default() } + +// SetDefault makes l the default Logger. +// After this call, output from the log package's default Logger +// (as with [log.Print], etc.) will be logged at LevelInfo using l's Handler. +func SetDefault(l *Logger) { + slog.SetDefault(l) +} + +// A Logger records structured information about each call to its +// Log, Debug, Info, Warn, and Error methods. +// For each call, it creates a Record and passes it to a Handler. +// +// To create a new Logger, call [New] or a Logger method +// that begins "With". +type Logger = slog.Logger + +// New creates a new Logger with the given non-nil Handler. +func New(h Handler) *Logger { + return slog.New(h) +} + +// With calls Logger.With on the default logger. +func With(args ...any) *Logger { + return slog.With(args...) +} + +// NewLogLogger returns a new log.Logger such that each call to its Output method +// dispatches a Record to the specified handler. The logger acts as a bridge from +// the older log API to newer structured logging handlers. +func NewLogLogger(h Handler, level Level) *log.Logger { + return slog.NewLogLogger(h, level) +} + +// Debug calls Logger.Debug on the default logger. +func Debug(msg string, args ...any) { + slog.Debug(msg, args...) +} + +// DebugContext calls Logger.DebugContext on the default logger. +func DebugContext(ctx context.Context, msg string, args ...any) { + slog.DebugContext(ctx, msg, args...) +} + +// Info calls Logger.Info on the default logger. +func Info(msg string, args ...any) { + slog.Info(msg, args...) +} + +// InfoContext calls Logger.InfoContext on the default logger. +func InfoContext(ctx context.Context, msg string, args ...any) { + slog.InfoContext(ctx, msg, args...) +} + +// Warn calls Logger.Warn on the default logger. +func Warn(msg string, args ...any) { + slog.Warn(msg, args...) +} + +// WarnContext calls Logger.WarnContext on the default logger. +func WarnContext(ctx context.Context, msg string, args ...any) { + slog.WarnContext(ctx, msg, args...) +} + +// Error calls Logger.Error on the default logger. +func Error(msg string, args ...any) { + slog.Error(msg, args...) +} + +// ErrorContext calls Logger.ErrorContext on the default logger. +func ErrorContext(ctx context.Context, msg string, args ...any) { + slog.ErrorContext(ctx, msg, args...) +} + +// Log calls Logger.Log on the default logger. +func Log(ctx context.Context, level Level, msg string, args ...any) { + slog.Log(ctx, level, msg, args...) +} + +// LogAttrs calls Logger.LogAttrs on the default logger. +func LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) { + slog.LogAttrs(ctx, level, msg, attrs...) +} diff --git a/vendor/github.com/sagikazarmark/slog-shim/logger_120.go b/vendor/github.com/sagikazarmark/slog-shim/logger_120.go new file mode 100644 index 00000000..97ebdd5e --- /dev/null +++ b/vendor/github.com/sagikazarmark/slog-shim/logger_120.go @@ -0,0 +1,99 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.21 + +package slog + +import ( + "context" + "log" + + "golang.org/x/exp/slog" +) + +// Default returns the default Logger. +func Default() *Logger { return slog.Default() } + +// SetDefault makes l the default Logger. 
+// After this call, output from the log package's default Logger +// (as with [log.Print], etc.) will be logged at LevelInfo using l's Handler. +func SetDefault(l *Logger) { + slog.SetDefault(l) +} + +// A Logger records structured information about each call to its +// Log, Debug, Info, Warn, and Error methods. +// For each call, it creates a Record and passes it to a Handler. +// +// To create a new Logger, call [New] or a Logger method +// that begins "With". +type Logger = slog.Logger + +// New creates a new Logger with the given non-nil Handler. +func New(h Handler) *Logger { + return slog.New(h) +} + +// With calls Logger.With on the default logger. +func With(args ...any) *Logger { + return slog.With(args...) +} + +// NewLogLogger returns a new log.Logger such that each call to its Output method +// dispatches a Record to the specified handler. The logger acts as a bridge from +// the older log API to newer structured logging handlers. +func NewLogLogger(h Handler, level Level) *log.Logger { + return slog.NewLogLogger(h, level) +} + +// Debug calls Logger.Debug on the default logger. +func Debug(msg string, args ...any) { + slog.Debug(msg, args...) +} + +// DebugContext calls Logger.DebugContext on the default logger. +func DebugContext(ctx context.Context, msg string, args ...any) { + slog.DebugContext(ctx, msg, args...) +} + +// Info calls Logger.Info on the default logger. +func Info(msg string, args ...any) { + slog.Info(msg, args...) +} + +// InfoContext calls Logger.InfoContext on the default logger. +func InfoContext(ctx context.Context, msg string, args ...any) { + slog.InfoContext(ctx, msg, args...) +} + +// Warn calls Logger.Warn on the default logger. +func Warn(msg string, args ...any) { + slog.Warn(msg, args...) +} + +// WarnContext calls Logger.WarnContext on the default logger. +func WarnContext(ctx context.Context, msg string, args ...any) { + slog.WarnContext(ctx, msg, args...) +} + +// Error calls Logger.Error on the default logger. +func Error(msg string, args ...any) { + slog.Error(msg, args...) +} + +// ErrorContext calls Logger.ErrorContext on the default logger. +func ErrorContext(ctx context.Context, msg string, args ...any) { + slog.ErrorContext(ctx, msg, args...) +} + +// Log calls Logger.Log on the default logger. +func Log(ctx context.Context, level Level, msg string, args ...any) { + slog.Log(ctx, level, msg, args...) +} + +// LogAttrs calls Logger.LogAttrs on the default logger. +func LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) { + slog.LogAttrs(ctx, level, msg, attrs...) +} diff --git a/vendor/github.com/sagikazarmark/slog-shim/record.go b/vendor/github.com/sagikazarmark/slog-shim/record.go new file mode 100644 index 00000000..85ad1f78 --- /dev/null +++ b/vendor/github.com/sagikazarmark/slog-shim/record.go @@ -0,0 +1,31 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 + +package slog + +import ( + "log/slog" + "time" +) + +// A Record holds information about a log event. +// Copies of a Record share state. +// Do not modify a Record after handing out a copy to it. +// Call [NewRecord] to create a new Record. +// Use [Record.Clone] to create a copy with no shared state. +type Record = slog.Record + +// NewRecord creates a Record from the given arguments. +// Use [Record.AddAttrs] to add attributes to the Record. 
+// +// NewRecord is intended for logging APIs that want to support a [Handler] as +// a backend. +func NewRecord(t time.Time, level Level, msg string, pc uintptr) Record { + return slog.NewRecord(t, level, msg, pc) +} + +// Source describes the location of a line of source code. +type Source = slog.Source diff --git a/vendor/github.com/sagikazarmark/slog-shim/record_120.go b/vendor/github.com/sagikazarmark/slog-shim/record_120.go new file mode 100644 index 00000000..c2eaf4e7 --- /dev/null +++ b/vendor/github.com/sagikazarmark/slog-shim/record_120.go @@ -0,0 +1,32 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.21 + +package slog + +import ( + "time" + + "golang.org/x/exp/slog" +) + +// A Record holds information about a log event. +// Copies of a Record share state. +// Do not modify a Record after handing out a copy to it. +// Call [NewRecord] to create a new Record. +// Use [Record.Clone] to create a copy with no shared state. +type Record = slog.Record + +// NewRecord creates a Record from the given arguments. +// Use [Record.AddAttrs] to add attributes to the Record. +// +// NewRecord is intended for logging APIs that want to support a [Handler] as +// a backend. +func NewRecord(t time.Time, level Level, msg string, pc uintptr) Record { + return slog.NewRecord(t, level, msg, pc) +} + +// Source describes the location of a line of source code. +type Source = slog.Source diff --git a/vendor/github.com/sagikazarmark/slog-shim/text_handler.go b/vendor/github.com/sagikazarmark/slog-shim/text_handler.go new file mode 100644 index 00000000..45f6cfcb --- /dev/null +++ b/vendor/github.com/sagikazarmark/slog-shim/text_handler.go @@ -0,0 +1,23 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 + +package slog + +import ( + "io" + "log/slog" +) + +// TextHandler is a Handler that writes Records to an io.Writer as a +// sequence of key=value pairs separated by spaces and followed by a newline. +type TextHandler = slog.TextHandler + +// NewTextHandler creates a TextHandler that writes to w, +// using the given options. +// If opts is nil, the default options are used. +func NewTextHandler(w io.Writer, opts *HandlerOptions) *TextHandler { + return slog.NewTextHandler(w, opts) +} diff --git a/vendor/github.com/sagikazarmark/slog-shim/text_handler_120.go b/vendor/github.com/sagikazarmark/slog-shim/text_handler_120.go new file mode 100644 index 00000000..a69d63cc --- /dev/null +++ b/vendor/github.com/sagikazarmark/slog-shim/text_handler_120.go @@ -0,0 +1,24 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.21 + +package slog + +import ( + "io" + + "golang.org/x/exp/slog" +) + +// TextHandler is a Handler that writes Records to an io.Writer as a +// sequence of key=value pairs separated by spaces and followed by a newline. +type TextHandler = slog.TextHandler + +// NewTextHandler creates a TextHandler that writes to w, +// using the given options. +// If opts is nil, the default options are used. 
+func NewTextHandler(w io.Writer, opts *HandlerOptions) *TextHandler { + return slog.NewTextHandler(w, opts) +} diff --git a/vendor/github.com/sagikazarmark/slog-shim/value.go b/vendor/github.com/sagikazarmark/slog-shim/value.go new file mode 100644 index 00000000..61173eb9 --- /dev/null +++ b/vendor/github.com/sagikazarmark/slog-shim/value.go @@ -0,0 +1,109 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 + +package slog + +import ( + "log/slog" + "time" +) + +// A Value can represent any Go value, but unlike type any, +// it can represent most small values without an allocation. +// The zero Value corresponds to nil. +type Value = slog.Value + +// Kind is the kind of a Value. +type Kind = slog.Kind + +// The following list is sorted alphabetically, but it's also important that +// KindAny is 0 so that a zero Value represents nil. +const ( + KindAny = slog.KindAny + KindBool = slog.KindBool + KindDuration = slog.KindDuration + KindFloat64 = slog.KindFloat64 + KindInt64 = slog.KindInt64 + KindString = slog.KindString + KindTime = slog.KindTime + KindUint64 = slog.KindUint64 + KindGroup = slog.KindGroup + KindLogValuer = slog.KindLogValuer +) + +//////////////// Constructors + +// StringValue returns a new Value for a string. +func StringValue(value string) Value { + return slog.StringValue(value) +} + +// IntValue returns a Value for an int. +func IntValue(v int) Value { + return slog.IntValue(v) +} + +// Int64Value returns a Value for an int64. +func Int64Value(v int64) Value { + return slog.Int64Value(v) +} + +// Uint64Value returns a Value for a uint64. +func Uint64Value(v uint64) Value { + return slog.Uint64Value(v) +} + +// Float64Value returns a Value for a floating-point number. +func Float64Value(v float64) Value { + return slog.Float64Value(v) +} + +// BoolValue returns a Value for a bool. +func BoolValue(v bool) Value { + return slog.BoolValue(v) +} + +// TimeValue returns a Value for a time.Time. +// It discards the monotonic portion. +func TimeValue(v time.Time) Value { + return slog.TimeValue(v) +} + +// DurationValue returns a Value for a time.Duration. +func DurationValue(v time.Duration) Value { + return slog.DurationValue(v) +} + +// GroupValue returns a new Value for a list of Attrs. +// The caller must not subsequently mutate the argument slice. +func GroupValue(as ...Attr) Value { + return slog.GroupValue(as...) +} + +// AnyValue returns a Value for the supplied value. +// +// If the supplied value is of type Value, it is returned +// unmodified. +// +// Given a value of one of Go's predeclared string, bool, or +// (non-complex) numeric types, AnyValue returns a Value of kind +// String, Bool, Uint64, Int64, or Float64. The width of the +// original numeric type is not preserved. +// +// Given a time.Time or time.Duration value, AnyValue returns a Value of kind +// KindTime or KindDuration. The monotonic time is not preserved. +// +// For nil, or values of all other types, including named types whose +// underlying type is numeric, AnyValue returns a value of kind KindAny. +func AnyValue(v any) Value { + return slog.AnyValue(v) +} + +// A LogValuer is any Go value that can convert itself into a Value for logging. +// +// This mechanism may be used to defer expensive operations until they are +// needed, or to expand a single value into a sequence of components. 
+type LogValuer = slog.LogValuer diff --git a/vendor/github.com/sagikazarmark/slog-shim/value_120.go b/vendor/github.com/sagikazarmark/slog-shim/value_120.go new file mode 100644 index 00000000..0f9f871e --- /dev/null +++ b/vendor/github.com/sagikazarmark/slog-shim/value_120.go @@ -0,0 +1,110 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.21 + +package slog + +import ( + "time" + + "golang.org/x/exp/slog" +) + +// A Value can represent any Go value, but unlike type any, +// it can represent most small values without an allocation. +// The zero Value corresponds to nil. +type Value = slog.Value + +// Kind is the kind of a Value. +type Kind = slog.Kind + +// The following list is sorted alphabetically, but it's also important that +// KindAny is 0 so that a zero Value represents nil. +const ( + KindAny = slog.KindAny + KindBool = slog.KindBool + KindDuration = slog.KindDuration + KindFloat64 = slog.KindFloat64 + KindInt64 = slog.KindInt64 + KindString = slog.KindString + KindTime = slog.KindTime + KindUint64 = slog.KindUint64 + KindGroup = slog.KindGroup + KindLogValuer = slog.KindLogValuer +) + +//////////////// Constructors + +// StringValue returns a new Value for a string. +func StringValue(value string) Value { + return slog.StringValue(value) +} + +// IntValue returns a Value for an int. +func IntValue(v int) Value { + return slog.IntValue(v) +} + +// Int64Value returns a Value for an int64. +func Int64Value(v int64) Value { + return slog.Int64Value(v) +} + +// Uint64Value returns a Value for a uint64. +func Uint64Value(v uint64) Value { + return slog.Uint64Value(v) +} + +// Float64Value returns a Value for a floating-point number. +func Float64Value(v float64) Value { + return slog.Float64Value(v) +} + +// BoolValue returns a Value for a bool. +func BoolValue(v bool) Value { + return slog.BoolValue(v) +} + +// TimeValue returns a Value for a time.Time. +// It discards the monotonic portion. +func TimeValue(v time.Time) Value { + return slog.TimeValue(v) +} + +// DurationValue returns a Value for a time.Duration. +func DurationValue(v time.Duration) Value { + return slog.DurationValue(v) +} + +// GroupValue returns a new Value for a list of Attrs. +// The caller must not subsequently mutate the argument slice. +func GroupValue(as ...Attr) Value { + return slog.GroupValue(as...) +} + +// AnyValue returns a Value for the supplied value. +// +// If the supplied value is of type Value, it is returned +// unmodified. +// +// Given a value of one of Go's predeclared string, bool, or +// (non-complex) numeric types, AnyValue returns a Value of kind +// String, Bool, Uint64, Int64, or Float64. The width of the +// original numeric type is not preserved. +// +// Given a time.Time or time.Duration value, AnyValue returns a Value of kind +// KindTime or KindDuration. The monotonic time is not preserved. +// +// For nil, or values of all other types, including named types whose +// underlying type is numeric, AnyValue returns a value of kind KindAny. +func AnyValue(v any) Value { + return slog.AnyValue(v) +} + +// A LogValuer is any Go value that can convert itself into a Value for logging. +// +// This mechanism may be used to defer expensive operations until they are +// needed, or to expand a single value into a sequence of components. 
+type LogValuer = slog.LogValuer diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go index cb25b437..915d5090 100644 --- a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go @@ -120,7 +120,7 @@ func (dmp *DiffMatchPatch) diffMainRunes(text1, text2 []rune, checklines bool, d // Restore the prefix and suffix. if len(commonprefix) != 0 { - diffs = append([]Diff{Diff{DiffEqual, string(commonprefix)}}, diffs...) + diffs = append([]Diff{{DiffEqual, string(commonprefix)}}, diffs...) } if len(commonsuffix) != 0 { diffs = append(diffs, Diff{DiffEqual, string(commonsuffix)}) @@ -165,8 +165,8 @@ func (dmp *DiffMatchPatch) diffCompute(text1, text2 []rune, checklines bool, dea // Single character string. // After the previous speedup, the character can't be an equality. return []Diff{ - Diff{DiffDelete, string(text1)}, - Diff{DiffInsert, string(text2)}, + {DiffDelete, string(text1)}, + {DiffInsert, string(text2)}, } // Check to see if the problem can be split in two. } else if hm := dmp.diffHalfMatch(text1, text2); hm != nil { @@ -193,7 +193,7 @@ func (dmp *DiffMatchPatch) diffCompute(text1, text2 []rune, checklines bool, dea // diffLineMode does a quick line-level diff on both []runes, then rediff the parts for greater accuracy. This speedup can produce non-minimal diffs. func (dmp *DiffMatchPatch) diffLineMode(text1, text2 []rune, deadline time.Time) []Diff { // Scan the text on a line-by-line basis first. - text1, text2, linearray := dmp.diffLinesToRunes(text1, text2) + text1, text2, linearray := dmp.DiffLinesToRunes(string(text1), string(text2)) diffs := dmp.diffMainRunes(text1, text2, false, deadline) @@ -368,8 +368,8 @@ func (dmp *DiffMatchPatch) diffBisect(runes1, runes2 []rune, deadline time.Time) } // Diff took too long and hit the deadline or number of diffs equals number of characters, no commonality at all. return []Diff{ - Diff{DiffDelete, string(runes1)}, - Diff{DiffInsert, string(runes2)}, + {DiffDelete, string(runes1)}, + {DiffInsert, string(runes2)}, } } @@ -390,66 +390,25 @@ func (dmp *DiffMatchPatch) diffBisectSplit(runes1, runes2 []rune, x, y int, // DiffLinesToChars splits two texts into a list of strings, and educes the texts to a string of hashes where each Unicode character represents one line. // It's slightly faster to call DiffLinesToRunes first, followed by DiffMainRunes. func (dmp *DiffMatchPatch) DiffLinesToChars(text1, text2 string) (string, string, []string) { - chars1, chars2, lineArray := dmp.DiffLinesToRunes(text1, text2) - return string(chars1), string(chars2), lineArray -} - -// DiffLinesToRunes splits two texts into a list of runes. Each rune represents one line. -func (dmp *DiffMatchPatch) DiffLinesToRunes(text1, text2 string) ([]rune, []rune, []string) { - // '\x00' is a valid character, but various debuggers don't like it. So we'll insert a junk entry to avoid generating a null character. - lineArray := []string{""} // e.g. lineArray[4] == 'Hello\n' - lineHash := map[string]int{} // e.g. 
lineHash['Hello\n'] == 4 - - chars1 := dmp.diffLinesToRunesMunge(text1, &lineArray, lineHash) - chars2 := dmp.diffLinesToRunesMunge(text2, &lineArray, lineHash) - + chars1, chars2, lineArray := dmp.diffLinesToStrings(text1, text2) return chars1, chars2, lineArray } -func (dmp *DiffMatchPatch) diffLinesToRunes(text1, text2 []rune) ([]rune, []rune, []string) { - return dmp.DiffLinesToRunes(string(text1), string(text2)) -} - -// diffLinesToRunesMunge splits a text into an array of strings, and reduces the texts to a []rune where each Unicode character represents one line. -// We use strings instead of []runes as input mainly because you can't use []rune as a map key. -func (dmp *DiffMatchPatch) diffLinesToRunesMunge(text string, lineArray *[]string, lineHash map[string]int) []rune { - // Walk the text, pulling out a substring for each line. text.split('\n') would would temporarily double our memory footprint. Modifying text would create many large strings to garbage collect. - lineStart := 0 - lineEnd := -1 - runes := []rune{} - - for lineEnd < len(text)-1 { - lineEnd = indexOf(text, "\n", lineStart) - - if lineEnd == -1 { - lineEnd = len(text) - 1 - } - - line := text[lineStart : lineEnd+1] - lineStart = lineEnd + 1 - lineValue, ok := lineHash[line] - - if ok { - runes = append(runes, rune(lineValue)) - } else { - *lineArray = append(*lineArray, line) - lineHash[line] = len(*lineArray) - 1 - runes = append(runes, rune(len(*lineArray)-1)) - } - } - - return runes +// DiffLinesToRunes splits two texts into a list of runes. +func (dmp *DiffMatchPatch) DiffLinesToRunes(text1, text2 string) ([]rune, []rune, []string) { + chars1, chars2, lineArray := dmp.diffLinesToStrings(text1, text2) + return []rune(chars1), []rune(chars2), lineArray } // DiffCharsToLines rehydrates the text in a diff from a string of line hashes to real lines of text. func (dmp *DiffMatchPatch) DiffCharsToLines(diffs []Diff, lineArray []string) []Diff { hydrated := make([]Diff, 0, len(diffs)) for _, aDiff := range diffs { - chars := aDiff.Text - text := make([]string, len(chars)) + runes := []rune(aDiff.Text) + text := make([]string, len(runes)) - for i, r := range chars { - text[i] = lineArray[r] + for i, r := range runes { + text[i] = lineArray[runeToInt(r)] } aDiff.Text = strings.Join(text, "") @@ -670,16 +629,16 @@ func (dmp *DiffMatchPatch) DiffCleanupSemantic(diffs []Diff) []Diff { // An insertion or deletion. if diffs[pointer].Type == DiffInsert { - lengthInsertions2 += len(diffs[pointer].Text) + lengthInsertions2 += utf8.RuneCountInString(diffs[pointer].Text) } else { - lengthDeletions2 += len(diffs[pointer].Text) + lengthDeletions2 += utf8.RuneCountInString(diffs[pointer].Text) } // Eliminate an equality that is smaller or equal to the edits on both sides of it. difference1 := int(math.Max(float64(lengthInsertions1), float64(lengthDeletions1))) difference2 := int(math.Max(float64(lengthInsertions2), float64(lengthDeletions2))) - if len(lastequality) > 0 && - (len(lastequality) <= difference1) && - (len(lastequality) <= difference2) { + if utf8.RuneCountInString(lastequality) > 0 && + (utf8.RuneCountInString(lastequality) <= difference1) && + (utf8.RuneCountInString(lastequality) <= difference2) { // Duplicate record. 
insPoint := equalities[len(equalities)-1] diffs = splice(diffs, insPoint, 0, Diff{DiffDelete, lastequality}) @@ -728,8 +687,8 @@ func (dmp *DiffMatchPatch) DiffCleanupSemantic(diffs []Diff) []Diff { overlapLength1 := dmp.DiffCommonOverlap(deletion, insertion) overlapLength2 := dmp.DiffCommonOverlap(insertion, deletion) if overlapLength1 >= overlapLength2 { - if float64(overlapLength1) >= float64(len(deletion))/2 || - float64(overlapLength1) >= float64(len(insertion))/2 { + if float64(overlapLength1) >= float64(utf8.RuneCountInString(deletion))/2 || + float64(overlapLength1) >= float64(utf8.RuneCountInString(insertion))/2 { // Overlap found. Insert an equality and trim the surrounding edits. diffs = splice(diffs, pointer, 0, Diff{DiffEqual, insertion[:overlapLength1]}) @@ -739,8 +698,8 @@ func (dmp *DiffMatchPatch) DiffCleanupSemantic(diffs []Diff) []Diff { pointer++ } } else { - if float64(overlapLength2) >= float64(len(deletion))/2 || - float64(overlapLength2) >= float64(len(insertion))/2 { + if float64(overlapLength2) >= float64(utf8.RuneCountInString(deletion))/2 || + float64(overlapLength2) >= float64(utf8.RuneCountInString(insertion))/2 { // Reverse overlap found. Insert an equality and swap and trim the surrounding edits. overlap := Diff{DiffEqual, deletion[:overlapLength2]} diffs = splice(diffs, pointer, 0, overlap) @@ -1029,7 +988,7 @@ func (dmp *DiffMatchPatch) DiffCleanupMerge(diffs []Diff) []Diff { if x > 0 && diffs[x-1].Type == DiffEqual { diffs[x-1].Text += string(textInsert[:commonlength]) } else { - diffs = append([]Diff{Diff{DiffEqual, string(textInsert[:commonlength])}}, diffs...) + diffs = append([]Diff{{DiffEqual, string(textInsert[:commonlength])}}, diffs...) pointer++ } textInsert = textInsert[commonlength:] @@ -1343,3 +1302,46 @@ func (dmp *DiffMatchPatch) DiffFromDelta(text1 string, delta string) (diffs []Di return diffs, nil } + +// diffLinesToStrings splits two texts into a list of strings. Each string represents one line. +func (dmp *DiffMatchPatch) diffLinesToStrings(text1, text2 string) (string, string, []string) { + // '\x00' is a valid character, but various debuggers don't like it. So we'll insert a junk entry to avoid generating a null character. + lineArray := []string{""} // e.g. lineArray[4] == 'Hello\n' + + lineHash := make(map[string]int) + //Each string has the index of lineArray which it points to + strIndexArray1 := dmp.diffLinesToStringsMunge(text1, &lineArray, lineHash) + strIndexArray2 := dmp.diffLinesToStringsMunge(text2, &lineArray, lineHash) + + return intArrayToString(strIndexArray1), intArrayToString(strIndexArray2), lineArray +} + +// diffLinesToStringsMunge splits a text into an array of strings, and reduces the texts to a []string. +func (dmp *DiffMatchPatch) diffLinesToStringsMunge(text string, lineArray *[]string, lineHash map[string]int) []uint32 { + // Walk the text, pulling out a substring for each line. text.split('\n') would would temporarily double our memory footprint. Modifying text would create many large strings to garbage collect. 
+ lineStart := 0 + lineEnd := -1 + strs := []uint32{} + + for lineEnd < len(text)-1 { + lineEnd = indexOf(text, "\n", lineStart) + + if lineEnd == -1 { + lineEnd = len(text) - 1 + } + + line := text[lineStart : lineEnd+1] + lineStart = lineEnd + 1 + lineValue, ok := lineHash[line] + + if ok { + strs = append(strs, uint32(lineValue)) + } else { + *lineArray = append(*lineArray, line) + lineHash[line] = len(*lineArray) - 1 + strs = append(strs, uint32(len(*lineArray)-1)) + } + } + + return strs +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go index 223c43c4..0dbe3bdd 100644 --- a/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go @@ -324,7 +324,7 @@ func (dmp *DiffMatchPatch) PatchAddPadding(patches []Patch) string { paddingLength := dmp.PatchMargin nullPadding := "" for x := 1; x <= paddingLength; x++ { - nullPadding += string(x) + nullPadding += string(rune(x)) } // Bump all the patches forward. diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go index 265f29cc..eb727bb5 100644 --- a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go @@ -9,10 +9,16 @@ package diffmatchpatch import ( + "fmt" "strings" "unicode/utf8" ) +const UNICODE_INVALID_RANGE_START = 0xD800 +const UNICODE_INVALID_RANGE_END = 0xDFFF +const UNICODE_INVALID_RANGE_DELTA = UNICODE_INVALID_RANGE_END - UNICODE_INVALID_RANGE_START + 1 +const UNICODE_RANGE_MAX = 0x10FFFF + // unescaper unescapes selected chars for compatibility with JavaScript's encodeURI. // In speed critical applications this could be dropped since the receiving application will certainly decode these fine. Note that this function is case-sensitive. Thus "%3F" would not be unescaped. But this is ok because it is only called with the output of HttpUtility.UrlEncode which returns lowercase hex. Example: "%3f" -> "?", "%24" -> "$", etc. var unescaper = strings.NewReplacer( @@ -86,3 +92,99 @@ func runesIndex(r1, r2 []rune) int { } return -1 } + +func intArrayToString(ns []uint32) string { + if len(ns) == 0 { + return "" + } + + b := []rune{} + for _, n := range ns { + b = append(b, intToRune(n)) + } + return string(b) +} + +// These constants define the number of bits representable +// in 1,2,3,4 byte utf8 sequences, respectively. +const ONE_BYTE_BITS = 7 +const TWO_BYTE_BITS = 11 +const THREE_BYTE_BITS = 16 +const FOUR_BYTE_BITS = 21 + +// Helper for getting a sequence of bits from an integer. +func getBits(i uint32, cnt byte, from byte) byte { + return byte((i >> from) & ((1 << cnt) - 1)) +} + +// Converts an integer in the range 0~1112060 into a rune. +// Based on the ranges table in https://en.wikipedia.org/wiki/UTF-8 +func intToRune(i uint32) rune { + if i < (1 << ONE_BYTE_BITS) { + return rune(i) + } + + if i < (1 << TWO_BYTE_BITS) { + r, size := utf8.DecodeRune([]byte{0b11000000 | getBits(i, 5, 6), 0b10000000 | getBits(i, 6, 0)}) + if size != 2 || r == utf8.RuneError { + panic(fmt.Sprintf("Error encoding an int %d with size 2, got rune %v and size %d", size, r, i)) + } + return r + } + + // Last -3 here needed because for some reason 3rd to last codepoint 65533 in this range + // was returning utf8.RuneError during encoding. 
+ if i < ((1 << THREE_BYTE_BITS) - UNICODE_INVALID_RANGE_DELTA - 3) { + if i >= UNICODE_INVALID_RANGE_START { + i += UNICODE_INVALID_RANGE_DELTA + } + + r, size := utf8.DecodeRune([]byte{0b11100000 | getBits(i, 4, 12), 0b10000000 | getBits(i, 6, 6), 0b10000000 | getBits(i, 6, 0)}) + if size != 3 || r == utf8.RuneError { + panic(fmt.Sprintf("Error encoding an int %d with size 3, got rune %v and size %d", size, r, i)) + } + return r + } + + if i < (1<<FOUR_BYTE_BITS - UNICODE_INVALID_RANGE_DELTA - 3) { + i += UNICODE_INVALID_RANGE_DELTA + 3 + + r, size := utf8.DecodeRune([]byte{0b11110000 | getBits(i, 3, 18), 0b10000000 | getBits(i, 6, 12), 0b10000000 | getBits(i, 6, 6), 0b10000000 | getBits(i, 6, 0)}) + if size != 4 || r == utf8.RuneError { + panic(fmt.Sprintf("Error encoding an int %d with size 4, got rune %v and size %d", size, r, i)) + } + return r + } + + panic(fmt.Sprintf("Unable to encode an int %d within utf8 valid ranges", i)) +} + +// Converts a rune generated by intToRune back into an integer +func runeToInt(r rune) uint32 { + i := uint32(r) + + if i < (1 << ONE_BYTE_BITS) { + return i + } + + bytes := make([]byte, 4) + size := utf8.EncodeRune(bytes, r) + + if size == 2 { + return uint32(bytes[0]&0b11111)<<6 | uint32(bytes[1]&0b111111) + } + + if size == 3 { + result := uint32(bytes[0]&0b1111)<<12 | uint32(bytes[1]&0b111111)<<6 | uint32(bytes[2]&0b111111) + + if result >= UNICODE_INVALID_RANGE_END { + return result - UNICODE_INVALID_RANGE_DELTA + } + + return result + } + + if size == 4 { + result := uint32(bytes[0]&0b111)<<18 | uint32(bytes[1]&0b111111)<<12 | uint32(bytes[2]&0b111111)<<6 | uint32(bytes[3]&0b111111) + return result - UNICODE_INVALID_RANGE_DELTA - 3 + } + + panic(fmt.Sprintf("Unexpected state decoding rune=%v size=%d", r, size)) +} diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md index 5152b6aa..d1d4a85f 100644 --- a/vendor/github.com/sirupsen/logrus/README.md +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -1,4 +1,4 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) +# Logrus :walrus: [![Build Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![Go Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus) Logrus is a structured logger for Go (golang), completely API compatible with the standard library logger. @@ -9,7 +9,7 @@ the last thing you want from your Logging library (again...). This does not mean Logrus is dead. Logrus will continue to be maintained for security, (backwards compatible) bug fixes, and performance (where we are -limited by the interface). +limited by the interface). I believe Logrus' biggest contribution is to have played a part in today's widespread use of structured logging in Golang. There doesn't seem to be a @@ -43,7 +43,7 @@ plain text): With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash or Splunk: -```json +```text {"animal":"walrus","level":"info","msg":"A group of walrus emerges from the ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} @@ -99,7 +99,7 @@ time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcr ``` Note that this does add measurable overhead - the cost will depend on the version of Go, but is between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your -environment via benchmarks: +environment via benchmarks: ``` go test -bench=.*CallerTracing ``` @@ -317,6 +317,8 @@ log.SetLevel(log.InfoLevel) It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose environment if your application has that. +Note: If you want different log levels for global (`log.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging).
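Aside on the diffmatchpatch `stringutil.go` hunks above: the update replaces the old one-rune-per-line packing, which broke once a text had more distinct lines than cleanly encodable code points, with an explicit int-to-rune codec (`intToRune`/`runeToInt`) that skips the UTF-16 surrogate range. Those helpers are unexported; the sketch below only exercises the public line-mode workflow that rides on top of them, with placeholder inputs:

```go
package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()

	// Placeholder inputs; the encoding change matters most when the
	// inputs contain a very large number of distinct lines.
	text1 := "alpha\nbeta\ngamma\n"
	text2 := "alpha\nbeta\ndelta\n"

	// Map every unique line to a compact encoded character, diff the
	// compact strings, then rehydrate the hashes back into full lines.
	c1, c2, lines := dmp.DiffLinesToChars(text1, text2)
	diffs := dmp.DiffMain(c1, c2, false)
	diffs = dmp.DiffCharsToLines(diffs, lines)

	for _, d := range diffs {
		fmt.Printf("%v %q\n", d.Type, d.Text)
	}
}
```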
+ #### Entries Besides the fields added with `WithField` or `WithFields` some fields are @@ -341,7 +343,7 @@ import ( log "github.com/sirupsen/logrus" ) -init() { +func init() { // do something here to set environment depending on an environment variable // or command-line flag if Environment == "production" { diff --git a/vendor/github.com/sirupsen/logrus/buffer_pool.go b/vendor/github.com/sirupsen/logrus/buffer_pool.go index 4545dec0..c7787f77 100644 --- a/vendor/github.com/sirupsen/logrus/buffer_pool.go +++ b/vendor/github.com/sirupsen/logrus/buffer_pool.go @@ -26,15 +26,6 @@ func (p *defaultPool) Get() *bytes.Buffer { return p.pool.Get().(*bytes.Buffer) } -func getBuffer() *bytes.Buffer { - return bufferPool.Get() -} - -func putBuffer(buf *bytes.Buffer) { - buf.Reset() - bufferPool.Put(buf) -} - // SetBufferPool allows to replace the default logrus buffer pool // to better meets the specific needs of an application. func SetBufferPool(bp BufferPool) { diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go index 07a1e5fa..71cdbbc3 100644 --- a/vendor/github.com/sirupsen/logrus/entry.go +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -232,6 +232,7 @@ func (entry *Entry) log(level Level, msg string) { newEntry.Logger.mu.Lock() reportCaller := newEntry.Logger.ReportCaller + bufPool := newEntry.getBufferPool() newEntry.Logger.mu.Unlock() if reportCaller { @@ -239,11 +240,11 @@ func (entry *Entry) log(level Level, msg string) { } newEntry.fireHooks() - - buffer = getBuffer() + buffer = bufPool.Get() defer func() { newEntry.Buffer = nil - putBuffer(buffer) + buffer.Reset() + bufPool.Put(buffer) }() buffer.Reset() newEntry.Buffer = buffer @@ -260,6 +261,13 @@ func (entry *Entry) log(level Level, msg string) { } } +func (entry *Entry) getBufferPool() (pool BufferPool) { + if entry.Logger.BufferPool != nil { + return entry.Logger.BufferPool + } + return bufferPool +} + func (entry *Entry) fireHooks() { var tmpHooks LevelHooks entry.Logger.mu.Lock() @@ -276,18 +284,21 @@ func (entry *Entry) fireHooks() { } func (entry *Entry) write() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() serialized, err := entry.Logger.Formatter.Format(entry) if err != nil { fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) return } - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() if _, err := entry.Logger.Out.Write(serialized); err != nil { fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) } } +// Log will log a message at the level given as parameter. +// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. +// For this behaviour Entry.Panic or Entry.Fatal should be used instead. func (entry *Entry) Log(level Level, args ...interface{}) { if entry.Logger.IsLevelEnabled(level) { entry.log(level, fmt.Sprint(args...)) diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go index 33770445..5ff0aef6 100644 --- a/vendor/github.com/sirupsen/logrus/logger.go +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -44,6 +44,9 @@ type Logger struct { entryPool sync.Pool // Function to exit the application, defaults to `os.Exit()` ExitFunc exitFunc + // The buffer pool used to format the log. If it is nil, the default global + // buffer pool will be used. + BufferPool BufferPool } type exitFunc func(int) @@ -192,6 +195,9 @@ func (logger *Logger) Panicf(format string, args ...interface{}) { logger.Logf(PanicLevel, format, args...) 
} +// Log will log a message at the level given as parameter. +// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. +// For this behaviour Logger.Panic or Logger.Fatal should be used instead. func (logger *Logger) Log(level Level, args ...interface{}) { if logger.IsLevelEnabled(level) { entry := logger.newEntry() @@ -402,3 +408,10 @@ func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { logger.mu.Unlock() return oldHooks } + +// SetBufferPool sets the logger buffer pool. +func (logger *Logger) SetBufferPool(pool BufferPool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.BufferPool = pool +} diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go index 72e8e3a1..074fd4b8 100644 --- a/vendor/github.com/sirupsen/logrus/writer.go +++ b/vendor/github.com/sirupsen/logrus/writer.go @@ -4,6 +4,7 @@ import ( "bufio" "io" "runtime" + "strings" ) // Writer at INFO level. See WriterLevel for details. @@ -20,15 +21,18 @@ func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { return NewEntry(logger).WriterLevel(level) } +// Writer returns an io.Writer that writes to the logger at the info log level func (entry *Entry) Writer() *io.PipeWriter { return entry.WriterLevel(InfoLevel) } +// WriterLevel returns an io.Writer that writes to the logger at the given log level func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { reader, writer := io.Pipe() var printFunc func(args ...interface{}) + // Determine which log function to use based on the specified log level switch level { case TraceLevel: printFunc = entry.Trace @@ -48,23 +52,51 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { printFunc = entry.Print } + // Start a new goroutine to scan the input and write it to the logger using the specified print function. + // It splits the input into chunks of up to 64KB to avoid buffer overflows. 
go entry.writerScanner(reader, printFunc) + + // Set a finalizer function to close the writer when it is garbage collected runtime.SetFinalizer(writer, writerFinalizer) return writer } +// writerScanner scans the input from the reader and writes it to the logger func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { scanner := bufio.NewScanner(reader) - for scanner.Scan() { - printFunc(scanner.Text()) + + // Set the buffer size to the maximum token size to avoid buffer overflows + scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize) + + // Define a split function to split the input into chunks of up to 64KB + chunkSize := bufio.MaxScanTokenSize // 64KB + splitFunc := func(data []byte, atEOF bool) (int, []byte, error) { + if len(data) >= chunkSize { + return chunkSize, data[:chunkSize], nil + } + + return bufio.ScanLines(data, atEOF) } + + // Use the custom split function to split the input + scanner.Split(splitFunc) + + // Scan the input and write it to the logger using the specified print function + for scanner.Scan() { + printFunc(strings.TrimRight(scanner.Text(), "\r\n")) + } + + // If there was an error while scanning the input, log an error if err := scanner.Err(); err != nil { entry.Errorf("Error while reading from Writer: %s", err) } + + // Close the reader when we are done reader.Close() } +// WriterFinalizer is a finalizer function that closes then given writer when it is garbage collected func writerFinalizer(writer *io.PipeWriter) { writer.Close() } diff --git a/vendor/github.com/skeema/knownhosts/BUILD.bazel b/vendor/github.com/skeema/knownhosts/BUILD.bazel new file mode 100644 index 00000000..bbbbdf15 --- /dev/null +++ b/vendor/github.com/skeema/knownhosts/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "knownhosts", + srcs = ["knownhosts.go"], + importmap = "peridot.resf.org/vendor/github.com/skeema/knownhosts", + importpath = "github.com/skeema/knownhosts", + visibility = ["//visibility:public"], + deps = [ + "//vendor/golang.org/x/crypto/ssh", + "//vendor/golang.org/x/crypto/ssh/knownhosts", + ], +) diff --git a/vendor/github.com/skeema/knownhosts/LICENSE b/vendor/github.com/skeema/knownhosts/LICENSE new file mode 100644 index 00000000..8dada3ed --- /dev/null +++ b/vendor/github.com/skeema/knownhosts/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
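An aside on the logrus `writer.go` change a few files up: `WriterLevel` exposes the logger as an `io.Writer`, and the new custom split function caps each scanned token at `bufio.MaxScanTokenSize` (64KB), so oversized writes are chunked into multiple entries instead of failing the scanner. A minimal usage sketch, with an illustrative logger setup:

```go
package main

import (
	"fmt"
	"time"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()

	// WriterLevel returns an *io.PipeWriter; a background goroutine scans
	// whatever is written to it (in chunks of at most 64KB after the change
	// above) and emits each chunk as a log entry at the chosen level.
	w := logger.WriterLevel(logrus.WarnLevel)

	fmt.Fprintln(w, "text sent through an io.Writer becomes a WARN entry")

	// The pipe is serviced asynchronously: close it, then give the scanning
	// goroutine a moment to flush before the program exits.
	w.Close()
	time.Sleep(100 * time.Millisecond)
}
```

Closing the returned `*io.PipeWriter` is what lets the scanning goroutine terminate; the finalizer added via `runtime.SetFinalizer` is only a backstop for writers that are dropped without being closed.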
diff --git a/vendor/github.com/skeema/knownhosts/NOTICE b/vendor/github.com/skeema/knownhosts/NOTICE new file mode 100644 index 00000000..a92cb34d --- /dev/null +++ b/vendor/github.com/skeema/knownhosts/NOTICE @@ -0,0 +1,13 @@ +Copyright 2024 Skeema LLC and the Skeema Knownhosts authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/skeema/knownhosts/README.md b/vendor/github.com/skeema/knownhosts/README.md new file mode 100644 index 00000000..36b84761 --- /dev/null +++ b/vendor/github.com/skeema/knownhosts/README.md @@ -0,0 +1,117 @@ +# knownhosts: enhanced Golang SSH known_hosts management + +[![build status](https://img.shields.io/github/actions/workflow/status/skeema/knownhosts/tests.yml?branch=main)](https://github.com/skeema/knownhosts/actions) +[![godoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://pkg.go.dev/github.com/skeema/knownhosts) + + +> This repo is brought to you by [Skeema](https://github.com/skeema/skeema), a +> declarative pure-SQL schema management system for MySQL and MariaDB. Our +> premium products include extensive [SSH tunnel](https://www.skeema.io/docs/options/#ssh) +> functionality, which internally makes use of this package. + +Go provides excellent functionality for OpenSSH known_hosts files in its +external package [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts). +However, that package is somewhat low-level, making it difficult to implement full known_hosts management similar to command-line `ssh`'s behavior for `StrictHostKeyChecking=no` configuration. + +This repo ([github.com/skeema/knownhosts](https://github.com/skeema/knownhosts)) is a thin wrapper package around [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts), adding the following functionality: + +* Look up known_hosts public keys for any given host +* Auto-populate ssh.ClientConfig.HostKeyAlgorithms easily based on known_hosts, providing a solution for [golang/go#29286](https://github.com/golang/go/issues/29286) +* Write new known_hosts entries to an io.Writer +* Properly format/normalize new known_hosts entries containing ipv6 addresses, providing a solution for [golang/go#53463](https://github.com/golang/go/issues/53463) +* Determine if an ssh.HostKeyCallback's error corresponds to a host whose key has changed (indicating potential MitM attack) vs a host that just isn't known yet + +## How host key lookup works + +Although [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts) doesn't directly expose a way to query its known_host map, we use a subtle trick to do so: invoke the HostKeyCallback with a valid host but a bogus key. The resulting KeyError allows us to determine which public keys are actually present for that host. 
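From the caller's side, that bogus-key trick stays hidden behind ordinary lookup methods. A minimal sketch using the `New` and `HostKeys` functions added in this vendored package (the known_hosts path and host:port are placeholders):

```go
package main

import (
	"fmt"
	"log"

	"github.com/skeema/knownhosts"
)

func main() {
	// Placeholder path; point this at a real known_hosts file.
	kh, err := knownhosts.New("/home/myuser/.ssh/known_hosts")
	if err != nil {
		log.Fatal(err)
	}

	// Internally this invokes the host key callback with a placeholder key
	// and reads the known public keys out of the resulting KeyError, as
	// described above.
	for _, key := range kh.HostKeys("yourserver.com:22") {
		fmt.Println(key.Type())
	}
}
```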
+ +By using this technique, [github.com/skeema/knownhosts](https://github.com/skeema/knownhosts) doesn't need to duplicate or re-implement any of the actual known_hosts management from [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts). + +## Populating ssh.ClientConfig.HostKeyAlgorithms based on known_hosts + +Hosts often have multiple public keys, each of a different type (algorithm). This can be [problematic](https://github.com/golang/go/issues/29286) in [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts): if a host's first public key is *not* in known_hosts, but a key of a different type *is*, the HostKeyCallback returns an error. The solution is to populate `ssh.ClientConfig.HostKeyAlgorithms` based on the algorithms of the known_hosts entries for that host, but +[golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts) +does not provide an obvious way to do so. + +This package uses its host key lookup trick in order to make ssh.ClientConfig.HostKeyAlgorithms easy to populate: + +```golang +import ( + "golang.org/x/crypto/ssh" + "github.com/skeema/knownhosts" +) + +func sshConfigForHost(hostWithPort string) (*ssh.ClientConfig, error) { + kh, err := knownhosts.New("/home/myuser/.ssh/known_hosts") + if err != nil { + return nil, err + } + config := &ssh.ClientConfig{ + User: "myuser", + Auth: []ssh.AuthMethod{ /* ... */ }, + HostKeyCallback: kh.HostKeyCallback(), // or, equivalently, use ssh.HostKeyCallback(kh) + HostKeyAlgorithms: kh.HostKeyAlgorithms(hostWithPort), + } + return config, nil +} +``` + +## Writing new known_hosts entries + +If you wish to mimic the behavior of OpenSSH's `StrictHostKeyChecking=no` or `StrictHostKeyChecking=ask`, this package provides a few functions to simplify this task. For example: + +```golang +sshHost := "yourserver.com:22" +khPath := "/home/myuser/.ssh/known_hosts" +kh, err := knownhosts.New(khPath) +if err != nil { + log.Fatal("Failed to read known_hosts: ", err) +} + +// Create a custom permissive hostkey callback which still errors on hosts +// with changed keys, but allows unknown hosts and adds them to known_hosts +cb := ssh.HostKeyCallback(func(hostname string, remote net.Addr, key ssh.PublicKey) error { + err := kh(hostname, remote, key) + if knownhosts.IsHostKeyChanged(err) { + return fmt.Errorf("REMOTE HOST IDENTIFICATION HAS CHANGED for host %s! This may indicate a MitM attack.", hostname) + } else if knownhosts.IsHostUnknown(err) { + f, ferr := os.OpenFile(khPath, os.O_APPEND|os.O_WRONLY, 0600) + if ferr == nil { + defer f.Close() + ferr = knownhosts.WriteKnownHost(f, hostname, remote, key) + } + if ferr == nil { + log.Printf("Added host %s to known_hosts\n", hostname) + } else { + log.Printf("Failed to add host %s to known_hosts: %v\n", hostname, ferr) + } + return nil // permit previously-unknown hosts (warning: may be insecure) + } + return err +}) + +config := &ssh.ClientConfig{ + User: "myuser", + Auth: []ssh.AuthMethod{ /* ... */ }, + HostKeyCallback: cb, + HostKeyAlgorithms: kh.HostKeyAlgorithms(sshHost), +} +``` + +## License + +**Source code copyright 2024 Skeema LLC and the Skeema Knownhosts authors** + +```text +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +``` diff --git a/vendor/github.com/skeema/knownhosts/knownhosts.go b/vendor/github.com/skeema/knownhosts/knownhosts.go new file mode 100644 index 00000000..4dad7771 --- /dev/null +++ b/vendor/github.com/skeema/knownhosts/knownhosts.go @@ -0,0 +1,192 @@ +// Package knownhosts is a thin wrapper around golang.org/x/crypto/ssh/knownhosts, +// adding the ability to obtain the list of host key algorithms for a known host. +package knownhosts + +import ( + "encoding/base64" + "errors" + "fmt" + "io" + "net" + "sort" + "strings" + + "golang.org/x/crypto/ssh" + xknownhosts "golang.org/x/crypto/ssh/knownhosts" +) + +// HostKeyCallback wraps ssh.HostKeyCallback with an additional method to +// perform host key algorithm lookups from the known_hosts entries. +type HostKeyCallback ssh.HostKeyCallback + +// New creates a host key callback from the given OpenSSH host key files. The +// returned value may be used in ssh.ClientConfig.HostKeyCallback by casting it +// to ssh.HostKeyCallback, or using its HostKeyCallback method. Otherwise, it +// operates the same as the New function in golang.org/x/crypto/ssh/knownhosts. +func New(files ...string) (HostKeyCallback, error) { + cb, err := xknownhosts.New(files...) + return HostKeyCallback(cb), err +} + +// HostKeyCallback simply casts the receiver back to ssh.HostKeyCallback, for +// use in ssh.ClientConfig.HostKeyCallback. +func (hkcb HostKeyCallback) HostKeyCallback() ssh.HostKeyCallback { + return ssh.HostKeyCallback(hkcb) +} + +// HostKeys returns a slice of known host public keys for the supplied host:port +// found in the known_hosts file(s), or an empty slice if the host is not +// already known. For hosts that have multiple known_hosts entries (for +// different key types), the result will be sorted by known_hosts filename and +// line number. +func (hkcb HostKeyCallback) HostKeys(hostWithPort string) (keys []ssh.PublicKey) { + var keyErr *xknownhosts.KeyError + placeholderAddr := &net.TCPAddr{IP: []byte{0, 0, 0, 0}} + placeholderPubKey := &fakePublicKey{} + var kkeys []xknownhosts.KnownKey + if hkcbErr := hkcb(hostWithPort, placeholderAddr, placeholderPubKey); errors.As(hkcbErr, &keyErr) { + kkeys = append(kkeys, keyErr.Want...) + knownKeyLess := func(i, j int) bool { + if kkeys[i].Filename < kkeys[j].Filename { + return true + } + return (kkeys[i].Filename == kkeys[j].Filename && kkeys[i].Line < kkeys[j].Line) + } + sort.Slice(kkeys, knownKeyLess) + keys = make([]ssh.PublicKey, len(kkeys)) + for n := range kkeys { + keys[n] = kkeys[n].Key + } + } + return keys +} + +// HostKeyAlgorithms returns a slice of host key algorithms for the supplied +// host:port found in the known_hosts file(s), or an empty slice if the host +// is not already known. The result may be used in ssh.ClientConfig's +// HostKeyAlgorithms field, either as-is or after filtering (if you wish to +// ignore or prefer particular algorithms). For hosts that have multiple +// known_hosts entries (for different key types), the result will be sorted by +// known_hosts filename and line number. 
+func (hkcb HostKeyCallback) HostKeyAlgorithms(hostWithPort string) (algos []string) { + // We ensure that algos never contains duplicates. This is done for robustness + // even though currently golang.org/x/crypto/ssh/knownhosts never exposes + // multiple keys of the same type. This way our behavior here is unaffected + // even if https://github.com/golang/go/issues/28870 is implemented, for + // example by https://github.com/golang/crypto/pull/254. + hostKeys := hkcb.HostKeys(hostWithPort) + seen := make(map[string]struct{}, len(hostKeys)) + addAlgo := func(typ string) { + if _, already := seen[typ]; !already { + algos = append(algos, typ) + seen[typ] = struct{}{} + } + } + for _, key := range hostKeys { + typ := key.Type() + if typ == ssh.KeyAlgoRSA { + // KeyAlgoRSASHA256 and KeyAlgoRSASHA512 are only public key algorithms, + // not public key formats, so they can't appear as a PublicKey.Type. + // The corresponding PublicKey.Type is KeyAlgoRSA. See RFC 8332, Section 2. + addAlgo(ssh.KeyAlgoRSASHA512) + addAlgo(ssh.KeyAlgoRSASHA256) + } + addAlgo(typ) + } + return algos +} + +// HostKeyAlgorithms is a convenience function for performing host key algorithm +// lookups on an ssh.HostKeyCallback directly. It is intended for use in code +// paths that stay with the New method of golang.org/x/crypto/ssh/knownhosts +// rather than this package's New method. +func HostKeyAlgorithms(cb ssh.HostKeyCallback, hostWithPort string) []string { + return HostKeyCallback(cb).HostKeyAlgorithms(hostWithPort) +} + +// IsHostKeyChanged returns a boolean indicating whether the error indicates +// the host key has changed. It is intended to be called on the error returned +// from invoking a HostKeyCallback to check whether an SSH host is known. +func IsHostKeyChanged(err error) bool { + var keyErr *xknownhosts.KeyError + return errors.As(err, &keyErr) && len(keyErr.Want) > 0 +} + +// IsHostUnknown returns a boolean indicating whether the error represents an +// unknown host. It is intended to be called on the error returned from invoking +// a HostKeyCallback to check whether an SSH host is known. +func IsHostUnknown(err error) bool { + var keyErr *xknownhosts.KeyError + return errors.As(err, &keyErr) && len(keyErr.Want) == 0 +} + +// Normalize normalizes an address into the form used in known_hosts. This +// implementation includes a fix for https://github.com/golang/go/issues/53463 +// and will omit brackets around ipv6 addresses on standard port 22. +func Normalize(address string) string { + host, port, err := net.SplitHostPort(address) + if err != nil { + host = address + port = "22" + } + entry := host + if port != "22" { + entry = "[" + entry + "]:" + port + } else if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { + entry = entry[1 : len(entry)-1] + } + return entry +} + +// Line returns a line to append to the known_hosts files. This implementation +// uses the local patched implementation of Normalize in order to solve +// https://github.com/golang/go/issues/53463. +func Line(addresses []string, key ssh.PublicKey) string { + var trimmed []string + for _, a := range addresses { + trimmed = append(trimmed, Normalize(a)) + } + + return strings.Join([]string{ + strings.Join(trimmed, ","), + key.Type(), + base64.StdEncoding.EncodeToString(key.Marshal()), + }, " ") +} + +// WriteKnownHost writes a known_hosts line to writer for the supplied hostname, +// remote, and key. 
This is useful when writing a custom hostkey callback which +// wraps a callback obtained from knownhosts.New to provide additional +// known_hosts management functionality. The hostname, remote, and key typically +// correspond to the callback's args. +func WriteKnownHost(w io.Writer, hostname string, remote net.Addr, key ssh.PublicKey) error { + // Always include hostname; only also include remote if it isn't a zero value + // and doesn't normalize to the same string as hostname. + hostnameNormalized := Normalize(hostname) + if strings.ContainsAny(hostnameNormalized, "\t ") { + return fmt.Errorf("knownhosts: hostname '%s' contains spaces", hostnameNormalized) + } + addresses := []string{hostnameNormalized} + remoteStrNormalized := Normalize(remote.String()) + if remoteStrNormalized != "[0.0.0.0]:0" && remoteStrNormalized != hostnameNormalized && + !strings.ContainsAny(remoteStrNormalized, "\t ") { + addresses = append(addresses, remoteStrNormalized) + } + line := Line(addresses, key) + "\n" + _, err := w.Write([]byte(line)) + return err +} + +// fakePublicKey is used as part of the work-around for +// https://github.com/golang/go/issues/29286 +type fakePublicKey struct{} + +func (fakePublicKey) Type() string { + return "fake-public-key" +} +func (fakePublicKey) Marshal() []byte { + return []byte("fake public key") +} +func (fakePublicKey) Verify(_ []byte, _ *ssh.Signature) error { + return errors.New("Verify called on placeholder key") +} diff --git a/vendor/github.com/sourcegraph/conc/.golangci.yml b/vendor/github.com/sourcegraph/conc/.golangci.yml new file mode 100644 index 00000000..ae65a760 --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/.golangci.yml @@ -0,0 +1,11 @@ +linters: + disable-all: true + enable: + - errcheck + - godot + - gosimple + - govet + - ineffassign + - staticcheck + - typecheck + - unused diff --git a/vendor/github.com/sourcegraph/conc/BUILD.bazel b/vendor/github.com/sourcegraph/conc/BUILD.bazel new file mode 100644 index 00000000..9c53821d --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/BUILD.bazel @@ -0,0 +1,10 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "conc", + srcs = ["waitgroup.go"], + importmap = "peridot.resf.org/vendor/github.com/sourcegraph/conc", + importpath = "github.com/sourcegraph/conc", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/sourcegraph/conc/panics"], +) diff --git a/vendor/github.com/sourcegraph/conc/LICENSE b/vendor/github.com/sourcegraph/conc/LICENSE new file mode 100644 index 00000000..1081f4ef --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Sourcegraph + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/sourcegraph/conc/README.md b/vendor/github.com/sourcegraph/conc/README.md new file mode 100644 index 00000000..1c87c3c9 --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/README.md @@ -0,0 +1,464 @@ +![conch](https://user-images.githubusercontent.com/12631702/210295964-785cc63d-d697-420c-99ff-f492eb81dec9.svg) + +# `conc`: better structured concurrency for go + +[![Go Reference](https://pkg.go.dev/badge/github.com/sourcegraph/conc.svg)](https://pkg.go.dev/github.com/sourcegraph/conc) +[![Sourcegraph](https://img.shields.io/badge/view%20on-sourcegraph-A112FE?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAEZklEQVRoQ+2aXWgUZxSG3292sxtNN43BhBakFPyhxSujRSxiU1pr7SaGXqgUxOIEW0IFkeYighYUxAuLUlq0lrq2iCDpjWtmFVtoG6QVNOCFVShVLyxIk0DVjZLMxt3xTGTccd2ZOd/8JBHci0CY9zvnPPN+/7sCIXwKavOwAcy2QgngQiIztDSE0OwQlDPYR1ebiaH6J5kZChyfW12gRG4QVgGTBfMchMbFP9Sn5nlZL2D0JjLD6710lc+z0NfqSGTXQRQ4bX07Mq423yoBL3OSyHSvUxirMuaEvgbJWrdcvkHMoJwxYuq4INUhyuWvQa1jvdMGxAvCxJlyEC9XOBCWL04wwRzpbDoDQ7wfZJzIQLi5Eggk6DiRhZgWIAbE3NrM4A3LPT8Q7UgqAqLqTmLSHLGPkyzG/qXEczhd0q6RH+zaSBfaUoc4iQx19pIClIscrTkNZzG6gd7qMY6eC2Hqyo705ZfTf+eqJmhMzcSbYtQpOXc92ZsZjLVAL4YNUQbJ5Ttg4CQrQdGYj44Xr9m1XJCzmZusFDJOWNpHjmh5x624a2ZFtOKDVL+uNo2TuXE3bZQQZUf8gtgqP31uI94Z/rMqix+IGiRfWw3xN9dCgVx+L3WrHm4Dju6PXz/EkjuXJ6R+IGgyOE1TbZqTq9y1eo0EZo7oMo1ktPu3xjHvuiLT5AFNszUyDULtWpzE2/fEsey8O5TbWuGWwxrs5rS7nFNMWJrNh2No74s9Ec4vRNmRRzPXMP19fBMSVsGcOJ98G8N3Wl2gXcbTjbX7vUBxLaeASDQCm5Cu/0E2tvtb0Ea+BowtskFD0wvlc6Rf2M+Jx7dTu7ubFr2dnKDRaMQe2v/tcIrNB7FH0O50AcrBaApmRDVwFO31ql3pD8QW4dP0feNwl/Q+kFEtRyIGyaWXnpy1OO0qNJWHo1y6iCmAGkBb/Ru+HenDWIF2mo4r8G+tRRzoniSn2uqFLxANhe9LKHVyTbz6egk9+x5w5fK6ulSNNMhZ/Feno+GebLZV6isTTa6k5qNl5RnZ5u56Ib6SBvFzaWBBVFZzvnERWlt/Cg4l27XChLCqFyLekjhy6xJyoytgjPf7opIB8QPx7sYFiMXHPGt76m741MhCKMZfng0nBOIjmoJPsLqWHwgFpe6V6qtfcopxveR2Oy+J0ntIN/zCWkf8QNAJ7y6d8Bq4lxLc2/qJl5K7t432XwcqX5CrI34gzATWuYILQtdQPyePDK3iuOekCR3Efjhig1B1Uq5UoXEEoZX7d1q535J5S9VOeFyYyEBku5XTMXXKQTToX5Rg7OI44nbW5oKYeYK4EniMeF0YFNSmb+grhc84LyRCEP1/OurOcipCQbKxDeK2V5FcVyIDMQvsgz5gwFhcWWwKyRlvQ3gv29RwWoDYAbIofNyBxI9eDlQ+n3YgsgCWnr4MStGXQXmv9pF2La/k3OccV54JEBM4yp9EsXa/3LfO0dGPcYq0Y7DfZB8nJzZw2rppHgKgVHs8L5wvRwAAAABJRU5ErkJggg==)](https://sourcegraph.com/github.com/sourcegraph/conc) +[![Go Report Card](https://goreportcard.com/badge/github.com/sourcegraph/conc)](https://goreportcard.com/report/github.com/sourcegraph/conc) +[![codecov](https://codecov.io/gh/sourcegraph/conc/branch/main/graph/badge.svg?token=MQZTEA1QWT)](https://codecov.io/gh/sourcegraph/conc) +[![Discord](https://img.shields.io/badge/discord-chat-%235765F2)](https://discord.gg/bvXQXmtRjN) + +`conc` is your toolbelt for structured concurrency in go, making common tasks +easier and safer. 
+ +```sh +go get github.com/sourcegraph/conc +``` + +# At a glance + +- Use [`conc.WaitGroup`](https://pkg.go.dev/github.com/sourcegraph/conc#WaitGroup) if you just want a safer version of `sync.WaitGroup` +- Use [`pool.Pool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool) if you want a concurrency-limited task runner +- Use [`pool.ResultPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ResultPool) if you want a concurrent task runner that collects task results +- Use [`pool.(Result)?ErrorPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ErrorPool) if your tasks are fallible +- Use [`pool.(Result)?ContextPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ContextPool) if your tasks should be canceled on failure +- Use [`stream.Stream`](https://pkg.go.dev/github.com/sourcegraph/conc/stream#Stream) if you want to process an ordered stream of tasks in parallel with serial callbacks +- Use [`iter.Map`](https://pkg.go.dev/github.com/sourcegraph/conc/iter#Map) if you want to concurrently map a slice +- Use [`iter.ForEach`](https://pkg.go.dev/github.com/sourcegraph/conc/iter#ForEach) if you want to concurrently iterate over a slice +- Use [`panics.Catcher`](https://pkg.go.dev/github.com/sourcegraph/conc/panics#Catcher) if you want to catch panics in your own goroutines + +All pools are created with +[`pool.New()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#New) +or +[`pool.NewWithResults[T]()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#NewWithResults), +then configured with methods: + +- [`p.WithMaxGoroutines()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.MaxGoroutines) configures the maximum number of goroutines in the pool +- [`p.WithErrors()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.WithErrors) configures the pool to run tasks that return errors +- [`p.WithContext(ctx)`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.WithContext) configures the pool to run tasks that should be canceled on first error +- [`p.WithFirstError()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ErrorPool.WithFirstError) configures error pools to only keep the first returned error rather than an aggregated error +- [`p.WithCollectErrored()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ResultContextPool.WithCollectErrored) configures result pools to collect results even when the task errored + +# Goals + +The main goals of the package are: +1) Make it harder to leak goroutines +2) Handle panics gracefully +3) Make concurrent code easier to read + +## Goal #1: Make it harder to leak goroutines + +A common pain point when working with goroutines is cleaning them up. It's +really easy to fire off a `go` statement and fail to properly wait for it to +complete. + +`conc` takes the opinionated stance that all concurrency should be scoped. +That is, goroutines should have an owner and that owner should always +ensure that its owned goroutines exit properly. + +In `conc`, the owner of a goroutine is always a `conc.WaitGroup`. Goroutines +are spawned in a `WaitGroup` with `(*WaitGroup).Go()`, and +`(*WaitGroup).Wait()` should always be called before the `WaitGroup` goes out +of scope. + +In some cases, you might want a spawned goroutine to outlast the scope of the +caller. In that case, you could pass a `WaitGroup` into the spawning function. + +```go +func main() { + var wg conc.WaitGroup + defer wg.Wait() + + startTheThing(&wg) +} + +func startTheThing(wg *conc.WaitGroup) { + wg.Go(func() { ... 
})
+}
+```
+
+For some more discussion on why scoped concurrency is nice, check out [this
+blog
+post](https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/).
+
+## Goal #2: Handle panics gracefully
+
+A frequent problem with goroutines in long-running applications is handling
+panics. A goroutine spawned without a panic handler will crash the whole process
+on panic. This is usually undesirable.
+
+However, if you do add a panic handler to a goroutine, what do you do with the
+panic once you catch it? Some options:
+1) Ignore it
+2) Log it
+3) Turn it into an error and return that to the goroutine spawner
+4) Propagate the panic to the goroutine spawner
+
+Ignoring panics is a bad idea since panics usually mean there is actually
+something wrong and someone should fix it.
+
+Just logging panics isn't great either because then there is no indication to the spawner
+that something bad happened, and it might just continue on as normal even though your
+program is in a really bad state.
+
+Both (3) and (4) are reasonable options, but both require the goroutine to have
+an owner that can actually receive the message that something went wrong. This
+is generally not true with a goroutine spawned with `go`, but in the `conc`
+package, all goroutines have an owner that must collect the spawned goroutine.
+In the conc package, any call to `Wait()` will panic if any of the spawned goroutines
+panicked. Additionally, it decorates the panic value with a stacktrace from the child
+goroutine so that you don't lose information about what caused the panic.
+
+Doing this all correctly every time you spawn something with `go` is not
+trivial and it requires a lot of boilerplate that makes the important parts of
+the code more difficult to read, so `conc` does this for you.
+
+**stdlib:**
+
+```go
+type caughtPanicError struct {
+	val   any
+	stack []byte
+}
+
+func (e *caughtPanicError) Error() string {
+	return fmt.Sprintf(
+		"panic: %q\n%s",
+		e.val,
+		string(e.stack),
+	)
+}
+
+func main() {
+	done := make(chan error)
+	go func() {
+		defer func() {
+			if v := recover(); v != nil {
+				done <- &caughtPanicError{
+					val:   v,
+					stack: debug.Stack(),
+				}
+			} else {
+				done <- nil
+			}
+		}()
+		doSomethingThatMightPanic()
+	}()
+	err := <-done
+	if err != nil {
+		panic(err)
+	}
+}
+```
+
+**conc:**
+
+```go
+func main() {
+	var wg conc.WaitGroup
+	wg.Go(doSomethingThatMightPanic)
+	// panics with a nice stacktrace
+	wg.Wait()
+}
+```
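+
+If re-panicking is not the behavior you want, `conc` also exposes
+`WaitAndRecover` (see `waitgroup.go` later in this diff), which returns the
+first caught panic instead of propagating it. A minimal sketch, with a
+hypothetical `runSafely` helper, that turns a child panic into an ordinary
+error via the vendored `panics` package's `AsError`:
+
+```go
+func runSafely(f func()) error {
+	var wg conc.WaitGroup
+	wg.Go(f)
+	// WaitAndRecover blocks like Wait, then returns the first caught
+	// panic (or nil); AsError on a nil *Recovered is nil, so this
+	// yields error-or-nil.
+	return wg.WaitAndRecover().AsError()
+}
+```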
+
+## Goal #3: Make concurrent code easier to read
+
+Doing concurrency correctly is difficult. Doing it in a way that doesn't
+obfuscate what the code is actually doing is more difficult. The `conc` package
+attempts to make common operations easier by abstracting as much boilerplate
+complexity as possible.
+
+Want to run a set of concurrent tasks with a bounded set of goroutines? Use
+`pool.New()`. Want to process an ordered stream of results concurrently, but
+still maintain order? Try `stream.New()`. What about a concurrent map over
+a slice? Take a peek at `iter.Map()`.
+
+Browse some examples below for some comparisons with doing these by hand.
+
+# Examples
+
+Each of these examples forgoes propagating panics for simplicity. To see
+what kind of complexity that would add, check out the "Goal #2" header above.
+
+Spawn a set of goroutines and wait for them to finish:
+
+**stdlib:**
+
+```go
+func main() {
+	var wg sync.WaitGroup
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			// crashes on panic!
+			doSomething()
+		}()
+	}
+	wg.Wait()
+}
+```
+
+**conc:**
+
+```go
+func main() {
+	var wg conc.WaitGroup
+	for i := 0; i < 10; i++ {
+		wg.Go(doSomething)
+	}
+	wg.Wait()
+}
+```
+
+Process each element of a stream in a static pool of goroutines:
+
+**stdlib:**
+
+```go
+func process(stream chan int) {
+	var wg sync.WaitGroup
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for elem := range stream {
+				handle(elem)
+			}
+		}()
+	}
+	wg.Wait()
+}
+```
+
+**conc:**
+
+```go
+func process(stream chan int) {
+	p := pool.New().WithMaxGoroutines(10)
+	for elem := range stream {
+		elem := elem
+		p.Go(func() {
+			handle(elem)
+		})
+	}
+	p.Wait()
+}
+```
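+
+If `handle` can fail, the error-aware pools listed under "At a glance" need
+only a small change. A sketch of the same loop, assuming a hypothetical
+fallible `handle(elem int) error` and using the `WithErrors` option from the
+configuration list earlier in this README:
+
+```go
+func process(stream chan int) error {
+	p := pool.New().WithMaxGoroutines(10).WithErrors()
+	for elem := range stream {
+		elem := elem
+		p.Go(func() error {
+			return handle(elem)
+		})
+	}
+	// Wait returns the tasks' collected errors.
+	return p.Wait()
+}
+```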
+
+Process each element of a slice in a static pool of goroutines:
+
+**stdlib:**
+
+```go
+func process(values []int) {
+	feeder := make(chan int, 8)
+
+	var wg sync.WaitGroup
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for elem := range feeder {
+				handle(elem)
+			}
+		}()
+	}
+
+	for _, value := range values {
+		feeder <- value
+	}
+	close(feeder)
+	wg.Wait()
+}
+```
+
+**conc:**
+
+```go
+func process(values []int) {
+	iter.ForEach(values, handle)
+}
+```
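+
+The vendored `iter` package (see `iter/iter.go` later in this diff) also has
+an index-aware variant, `iter.ForEachIdx`, and its callbacks receive a
+pointer, so in-place mutation is safe. A small sketch with a hypothetical
+`scaleByIndex` helper:
+
+```go
+func scaleByIndex(values []int) {
+	iter.ForEachIdx(values, func(i int, v *int) {
+		*v *= i // mutating through the pointer updates values in place
+	})
+}
+```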
+
+Concurrently map a slice:
+
+**stdlib:**
+
+```go
+func concMap(
+	input []int,
+	f func(int) int,
+) []int {
+	res := make([]int, len(input))
+	var idx atomic.Int64
+
+	var wg sync.WaitGroup
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+
+			for {
+				i := int(idx.Add(1) - 1)
+				if i >= len(input) {
+					return
+				}
+
+				res[i] = f(input[i])
+			}
+		}()
+	}
+	wg.Wait()
+	return res
+}
+```
+
+**conc:**
+
+```go
+func concMap(
+	input []int,
+	f func(*int) int,
+) []int {
+	return iter.Map(input, f)
+}
+```
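+
+For fallible mappings, the vendored `iter/map.go` later in this diff adds
+`iter.MapErr`, which combines every element's error into one returned error.
+A sketch with a hypothetical `parseAll` helper built on `strconv.Atoi`:
+
+```go
+func parseAll(raw []string) ([]int, error) {
+	// Each element's error (if any) is joined into the returned error;
+	// the result slice keeps the input's order and length.
+	return iter.MapErr(raw, func(s *string) (int, error) {
+		return strconv.Atoi(*s)
+	})
+}
+```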
+
+Process an ordered stream concurrently:
+
+**stdlib:**
+
+```go
+func mapStream(
+	in chan int,
+	out chan int,
+	f func(int) int,
+) {
+	tasks := make(chan func())
+	taskResults := make(chan chan int)
+
+	// Worker goroutines
+	var workerWg sync.WaitGroup
+	for i := 0; i < 10; i++ {
+		workerWg.Add(1)
+		go func() {
+			defer workerWg.Done()
+			for task := range tasks {
+				task()
+			}
+		}()
+	}
+
+	// Ordered reader goroutines
+	var readerWg sync.WaitGroup
+	readerWg.Add(1)
+	go func() {
+		defer readerWg.Done()
+		for result := range taskResults {
+			item := <-result
+			out <- item
+		}
+	}()
+
+	// Feed the workers with tasks
+	for elem := range in {
+		elem := elem // capture per iteration (pre-Go 1.22 loop semantics)
+		resultCh := make(chan int, 1)
+		taskResults <- resultCh
+		tasks <- func() {
+			resultCh <- f(elem)
+		}
+	}
+
+	// We've exhausted input.
+	// Wait for everything to finish
+	close(tasks)
+	workerWg.Wait()
+	close(taskResults)
+	readerWg.Wait()
+}
+```
+
+**conc:**
+
+```go
+func mapStream(
+	in chan int,
+	out chan int,
+	f func(int) int,
+) {
+	s := stream.New().WithMaxGoroutines(10)
+	for elem := range in {
+		elem := elem
+		s.Go(func() stream.Callback {
+			res := f(elem)
+			return func() { out <- res }
+		})
+	}
+	s.Wait()
+}
+```
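+
+When tasks should stop as soon as one of them fails, the `p.WithContext(ctx)`
+option from the configuration list above creates a pool whose remaining tasks
+are canceled on the first error. A sketch under that assumption, with
+hypothetical `fetch` and `fetchAll` helpers (the context-taking `Go`
+signature is an assumption, not shown in this diff):
+
+```go
+func fetchAll(ctx context.Context, urls []string) error {
+	// Remaining tasks observe ctx cancellation after the first error.
+	p := pool.New().WithContext(ctx)
+	for _, url := range urls {
+		url := url
+		p.Go(func(ctx context.Context) error {
+			// fetch is a placeholder for the real per-URL work.
+			return fetch(ctx, url)
+		})
+	}
+	return p.Wait()
+}
+```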
+ +# Status + +This package is currently pre-1.0. There are likely to be minor breaking +changes before a 1.0 release as we stabilize the APIs and tweak defaults. +Please open an issue if you have questions, concerns, or requests that you'd +like addressed before the 1.0 release. Currently, a 1.0 is targeted for +March 2023. diff --git a/vendor/github.com/sourcegraph/conc/internal/multierror/BUILD.bazel b/vendor/github.com/sourcegraph/conc/internal/multierror/BUILD.bazel new file mode 100644 index 00000000..fdd837f0 --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/internal/multierror/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "multierror", + srcs = [ + "multierror_go119.go", + "multierror_go120.go", + ], + importmap = "peridot.resf.org/vendor/github.com/sourcegraph/conc/internal/multierror", + importpath = "github.com/sourcegraph/conc/internal/multierror", + visibility = ["//vendor/github.com/sourcegraph/conc:__subpackages__"], + deps = ["//vendor/go.uber.org/multierr"], +) diff --git a/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go119.go b/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go119.go new file mode 100644 index 00000000..7087e32a --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go119.go @@ -0,0 +1,10 @@ +//go:build !go1.20 +// +build !go1.20 + +package multierror + +import "go.uber.org/multierr" + +var ( + Join = multierr.Combine +) diff --git a/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go120.go b/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go120.go new file mode 100644 index 00000000..39cff829 --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go120.go @@ -0,0 +1,10 @@ +//go:build go1.20 +// +build go1.20 + +package multierror + +import "errors" + +var ( + Join = errors.Join +) diff --git a/vendor/github.com/sourcegraph/conc/iter/BUILD.bazel b/vendor/github.com/sourcegraph/conc/iter/BUILD.bazel new file mode 100644 index 00000000..a0d3c66f --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/iter/BUILD.bazel @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "iter", + srcs = [ + "iter.go", + "map.go", + ], + importmap = "peridot.resf.org/vendor/github.com/sourcegraph/conc/iter", + importpath = "github.com/sourcegraph/conc/iter", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/sourcegraph/conc", + "//vendor/github.com/sourcegraph/conc/internal/multierror", + ], +) diff --git a/vendor/github.com/sourcegraph/conc/iter/iter.go b/vendor/github.com/sourcegraph/conc/iter/iter.go new file mode 100644 index 00000000..124b4f94 --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/iter/iter.go @@ -0,0 +1,85 @@ +package iter + +import ( + "runtime" + "sync/atomic" + + "github.com/sourcegraph/conc" +) + +// defaultMaxGoroutines returns the default maximum number of +// goroutines to use within this package. +func defaultMaxGoroutines() int { return runtime.GOMAXPROCS(0) } + +// Iterator can be used to configure the behaviour of ForEach +// and ForEachIdx. The zero value is safe to use with reasonable +// defaults. +// +// Iterator is also safe for reuse and concurrent use. +type Iterator[T any] struct { + // MaxGoroutines controls the maximum number of goroutines + // to use on this Iterator's methods. + // + // If unset, MaxGoroutines defaults to runtime.GOMAXPROCS(0). 
+ MaxGoroutines int +} + +// ForEach executes f in parallel over each element in input. +// +// It is safe to mutate the input parameter, which makes it +// possible to map in place. +// +// ForEach always uses at most runtime.GOMAXPROCS goroutines. +// It takes roughly 2µs to start up the goroutines and adds +// an overhead of roughly 50ns per element of input. For +// a configurable goroutine limit, use a custom Iterator. +func ForEach[T any](input []T, f func(*T)) { Iterator[T]{}.ForEach(input, f) } + +// ForEach executes f in parallel over each element in input, +// using up to the Iterator's configured maximum number of +// goroutines. +// +// It is safe to mutate the input parameter, which makes it +// possible to map in place. +// +// It takes roughly 2µs to start up the goroutines and adds +// an overhead of roughly 50ns per element of input. +func (iter Iterator[T]) ForEach(input []T, f func(*T)) { + iter.ForEachIdx(input, func(_ int, t *T) { + f(t) + }) +} + +// ForEachIdx is the same as ForEach except it also provides the +// index of the element to the callback. +func ForEachIdx[T any](input []T, f func(int, *T)) { Iterator[T]{}.ForEachIdx(input, f) } + +// ForEachIdx is the same as ForEach except it also provides the +// index of the element to the callback. +func (iter Iterator[T]) ForEachIdx(input []T, f func(int, *T)) { + if iter.MaxGoroutines == 0 { + // iter is a value receiver and is hence safe to mutate + iter.MaxGoroutines = defaultMaxGoroutines() + } + + numInput := len(input) + if iter.MaxGoroutines > numInput { + // No more concurrent tasks than the number of input items. + iter.MaxGoroutines = numInput + } + + var idx atomic.Int64 + // Create the task outside the loop to avoid extra closure allocations. + task := func() { + i := int(idx.Add(1) - 1) + for ; i < numInput; i = int(idx.Add(1) - 1) { + f(i, &input[i]) + } + } + + var wg conc.WaitGroup + for i := 0; i < iter.MaxGoroutines; i++ { + wg.Go(task) + } + wg.Wait() +} diff --git a/vendor/github.com/sourcegraph/conc/iter/map.go b/vendor/github.com/sourcegraph/conc/iter/map.go new file mode 100644 index 00000000..efbe6bfa --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/iter/map.go @@ -0,0 +1,65 @@ +package iter + +import ( + "sync" + + "github.com/sourcegraph/conc/internal/multierror" +) + +// Mapper is an Iterator with a result type R. It can be used to configure +// the behaviour of Map and MapErr. The zero value is safe to use with +// reasonable defaults. +// +// Mapper is also safe for reuse and concurrent use. +type Mapper[T, R any] Iterator[T] + +// Map applies f to each element of input, returning the mapped result. +// +// Map always uses at most runtime.GOMAXPROCS goroutines. For a configurable +// goroutine limit, use a custom Mapper. +func Map[T, R any](input []T, f func(*T) R) []R { + return Mapper[T, R]{}.Map(input, f) +} + +// Map applies f to each element of input, returning the mapped result. +// +// Map uses up to the configured Mapper's maximum number of goroutines. +func (m Mapper[T, R]) Map(input []T, f func(*T) R) []R { + res := make([]R, len(input)) + Iterator[T](m).ForEachIdx(input, func(i int, t *T) { + res[i] = f(t) + }) + return res +} + +// MapErr applies f to each element of the input, returning the mapped result +// and a combined error of all returned errors. +// +// Map always uses at most runtime.GOMAXPROCS goroutines. For a configurable +// goroutine limit, use a custom Mapper. 
+func MapErr[T, R any](input []T, f func(*T) (R, error)) ([]R, error) { + return Mapper[T, R]{}.MapErr(input, f) +} + +// MapErr applies f to each element of the input, returning the mapped result +// and a combined error of all returned errors. +// +// Map uses up to the configured Mapper's maximum number of goroutines. +func (m Mapper[T, R]) MapErr(input []T, f func(*T) (R, error)) ([]R, error) { + var ( + res = make([]R, len(input)) + errMux sync.Mutex + errs error + ) + Iterator[T](m).ForEachIdx(input, func(i int, t *T) { + var err error + res[i], err = f(t) + if err != nil { + errMux.Lock() + // TODO: use stdlib errors once multierrors land in go 1.20 + errs = multierror.Join(errs, err) + errMux.Unlock() + } + }) + return res, errs +} diff --git a/vendor/github.com/sourcegraph/conc/panics/BUILD.bazel b/vendor/github.com/sourcegraph/conc/panics/BUILD.bazel new file mode 100644 index 00000000..43bfbe50 --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/panics/BUILD.bazel @@ -0,0 +1,12 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "panics", + srcs = [ + "panics.go", + "try.go", + ], + importmap = "peridot.resf.org/vendor/github.com/sourcegraph/conc/panics", + importpath = "github.com/sourcegraph/conc/panics", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/sourcegraph/conc/panics/panics.go b/vendor/github.com/sourcegraph/conc/panics/panics.go new file mode 100644 index 00000000..abbed7fa --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/panics/panics.go @@ -0,0 +1,102 @@ +package panics + +import ( + "fmt" + "runtime" + "runtime/debug" + "sync/atomic" +) + +// Catcher is used to catch panics. You can execute a function with Try, +// which will catch any spawned panic. Try can be called any number of times, +// from any number of goroutines. Once all calls to Try have completed, you can +// get the value of the first panic (if any) with Recovered(), or you can just +// propagate the panic (re-panic) with Repanic(). +type Catcher struct { + recovered atomic.Pointer[Recovered] +} + +// Try executes f, catching any panic it might spawn. It is safe +// to call from multiple goroutines simultaneously. +func (p *Catcher) Try(f func()) { + defer p.tryRecover() + f() +} + +func (p *Catcher) tryRecover() { + if val := recover(); val != nil { + rp := NewRecovered(1, val) + p.recovered.CompareAndSwap(nil, &rp) + } +} + +// Repanic panics if any calls to Try caught a panic. It will panic with the +// value of the first panic caught, wrapped in a panics.Recovered with caller +// information. +func (p *Catcher) Repanic() { + if val := p.Recovered(); val != nil { + panic(val) + } +} + +// Recovered returns the value of the first panic caught by Try, or nil if +// no calls to Try panicked. +func (p *Catcher) Recovered() *Recovered { + return p.recovered.Load() +} + +// NewRecovered creates a panics.Recovered from a panic value and a collected +// stacktrace. The skip parameter allows the caller to skip stack frames when +// collecting the stacktrace. Calling with a skip of 0 means include the call to +// NewRecovered in the stacktrace. +func NewRecovered(skip int, value any) Recovered { + // 64 frames should be plenty + var callers [64]uintptr + n := runtime.Callers(skip+1, callers[:]) + return Recovered{ + Value: value, + Callers: callers[:n], + Stack: debug.Stack(), + } +} + +// Recovered is a panic that was caught with recover(). +type Recovered struct { + // The original value of the panic. 
+ Value any + // The caller list as returned by runtime.Callers when the panic was + // recovered. Can be used to produce a more detailed stack information with + // runtime.CallersFrames. + Callers []uintptr + // The formatted stacktrace from the goroutine where the panic was recovered. + // Easier to use than Callers. + Stack []byte +} + +// String renders a human-readable formatting of the panic. +func (p *Recovered) String() string { + return fmt.Sprintf("panic: %v\nstacktrace:\n%s\n", p.Value, p.Stack) +} + +// AsError casts the panic into an error implementation. The implementation +// is unwrappable with the cause of the panic, if the panic was provided one. +func (p *Recovered) AsError() error { + if p == nil { + return nil + } + return &ErrRecovered{*p} +} + +// ErrRecovered wraps a panics.Recovered in an error implementation. +type ErrRecovered struct{ Recovered } + +var _ error = (*ErrRecovered)(nil) + +func (p *ErrRecovered) Error() string { return p.String() } + +func (p *ErrRecovered) Unwrap() error { + if err, ok := p.Value.(error); ok { + return err + } + return nil +} diff --git a/vendor/github.com/sourcegraph/conc/panics/try.go b/vendor/github.com/sourcegraph/conc/panics/try.go new file mode 100644 index 00000000..4ded92a1 --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/panics/try.go @@ -0,0 +1,11 @@ +package panics + +// Try executes f, catching and returning any panic it might spawn. +// +// The recovered panic can be propagated with panic(), or handled as a normal error with +// (*panics.Recovered).AsError(). +func Try(f func()) *Recovered { + var c Catcher + c.Try(f) + return c.Recovered() +} diff --git a/vendor/github.com/sourcegraph/conc/waitgroup.go b/vendor/github.com/sourcegraph/conc/waitgroup.go new file mode 100644 index 00000000..47b1bc1a --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/waitgroup.go @@ -0,0 +1,52 @@ +package conc + +import ( + "sync" + + "github.com/sourcegraph/conc/panics" +) + +// NewWaitGroup creates a new WaitGroup. +func NewWaitGroup() *WaitGroup { + return &WaitGroup{} +} + +// WaitGroup is the primary building block for scoped concurrency. +// Goroutines can be spawned in the WaitGroup with the Go method, +// and calling Wait() will ensure that each of those goroutines exits +// before continuing. Any panics in a child goroutine will be caught +// and propagated to the caller of Wait(). +// +// The zero value of WaitGroup is usable, just like sync.WaitGroup. +// Also like sync.WaitGroup, it must not be copied after first use. +type WaitGroup struct { + wg sync.WaitGroup + pc panics.Catcher +} + +// Go spawns a new goroutine in the WaitGroup. +func (h *WaitGroup) Go(f func()) { + h.wg.Add(1) + go func() { + defer h.wg.Done() + h.pc.Try(f) + }() +} + +// Wait will block until all goroutines spawned with Go exit and will +// propagate any panics spawned in a child goroutine. +func (h *WaitGroup) Wait() { + h.wg.Wait() + + // Propagate a panic if we caught one from a child goroutine. + h.pc.Repanic() +} + +// WaitAndRecover will block until all goroutines spawned with Go exit and +// will return a *panics.Recovered if one of the child goroutines panics. +func (h *WaitGroup) WaitAndRecover() *panics.Recovered { + h.wg.Wait() + + // Return a recovered panic if we caught one from a child goroutine. 
+ return h.pc.Recovered() +} diff --git a/vendor/github.com/spf13/cast/.travis.yml b/vendor/github.com/spf13/cast/.travis.yml deleted file mode 100644 index 6420d1c2..00000000 --- a/vendor/github.com/spf13/cast/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go -env: - - GO111MODULE=on -sudo: required -go: - - "1.11.x" - - tip -os: - - linux -matrix: - allow_failures: - - go: tip - fast_finish: true -script: - - make check diff --git a/vendor/github.com/spf13/cast/BUILD.bazel b/vendor/github.com/spf13/cast/BUILD.bazel index 8adfc709..e84aeb9c 100644 --- a/vendor/github.com/spf13/cast/BUILD.bazel +++ b/vendor/github.com/spf13/cast/BUILD.bazel @@ -5,6 +5,7 @@ go_library( srcs = [ "cast.go", "caste.go", + "timeformattype_string.go", ], importmap = "peridot.resf.org/vendor/github.com/spf13/cast", importpath = "github.com/spf13/cast", diff --git a/vendor/github.com/spf13/cast/Makefile b/vendor/github.com/spf13/cast/Makefile index 7ccf8930..f01a5dbb 100644 --- a/vendor/github.com/spf13/cast/Makefile +++ b/vendor/github.com/spf13/cast/Makefile @@ -1,4 +1,4 @@ -# A Self-Documenting Makefile: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html +GOVERSION := $(shell go version | cut -d ' ' -f 3 | cut -d '.' -f 2) .PHONY: check fmt lint test test-race vet test-cover-html help .DEFAULT_GOAL := help @@ -12,11 +12,13 @@ test-race: ## Run tests with race detector go test -race ./... fmt: ## Run gofmt linter +ifeq "$(GOVERSION)" "12" @for d in `go list` ; do \ if [ "`gofmt -l -s $$GOPATH/src/$$d | tee /dev/stderr`" ]; then \ echo "^ improperly formatted go files" && echo && exit 1; \ fi \ done +endif lint: ## Run golint linter @for d in `go list` ; do \ diff --git a/vendor/github.com/spf13/cast/README.md b/vendor/github.com/spf13/cast/README.md index e6939397..0e9e1459 100644 --- a/vendor/github.com/spf13/cast/README.md +++ b/vendor/github.com/spf13/cast/README.md @@ -1,8 +1,9 @@ -cast -==== -[![GoDoc](https://godoc.org/github.com/spf13/cast?status.svg)](https://godoc.org/github.com/spf13/cast) -[![Build Status](https://api.travis-ci.org/spf13/cast.svg?branch=master)](https://travis-ci.org/spf13/cast) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast)](https://goreportcard.com/report/github.com/spf13/cast) +# cast + +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/cast/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/cast/actions/workflows/ci.yaml) +[![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/cast)](https://pkg.go.dev/mod/github.com/spf13/cast) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.16-61CFDD.svg?style=flat-square) +[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast?style=flat-square)](https://goreportcard.com/report/github.com/spf13/cast) Easy and safe casting from one type to another in Go @@ -17,7 +18,7 @@ interface into a bool, etc. Cast does this intelligently when an obvious conversion is possible. It doesn’t make any attempts to guess what you meant, for example you can only convert a string to an int when it is a string representation of an int such as “8”. Cast was developed for use in -[Hugo](http://hugo.spf13.com), a website engine which uses YAML, TOML or JSON +[Hugo](https://gohugo.io), a website engine which uses YAML, TOML or JSON for meta data. ## Why use Cast? @@ -72,4 +73,3 @@ the code for a complete set. 
var eight interface{} = 8 cast.ToInt(eight) // 8 cast.ToInt(nil) // 0 - diff --git a/vendor/github.com/spf13/cast/cast.go b/vendor/github.com/spf13/cast/cast.go index 9fba638d..0cfe9418 100644 --- a/vendor/github.com/spf13/cast/cast.go +++ b/vendor/github.com/spf13/cast/cast.go @@ -20,6 +20,11 @@ func ToTime(i interface{}) time.Time { return v } +func ToTimeInDefaultLocation(i interface{}, location *time.Location) time.Time { + v, _ := ToTimeInDefaultLocationE(i, location) + return v +} + // ToDuration casts an interface to a time.Duration type. func ToDuration(i interface{}) time.Duration { v, _ := ToDurationE(i) diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go index a4859fb0..d49bbf83 100644 --- a/vendor/github.com/spf13/cast/caste.go +++ b/vendor/github.com/spf13/cast/caste.go @@ -20,13 +20,26 @@ var errNegativeNotAllowed = errors.New("unable to cast negative value") // ToTimeE casts an interface to a time.Time type. func ToTimeE(i interface{}) (tim time.Time, err error) { + return ToTimeInDefaultLocationE(i, time.UTC) +} + +// ToTimeInDefaultLocationE casts an empty interface to time.Time, +// interpreting inputs without a timezone to be in the given location, +// or the local timezone if nil. +func ToTimeInDefaultLocationE(i interface{}, location *time.Location) (tim time.Time, err error) { i = indirect(i) switch v := i.(type) { case time.Time: return v, nil case string: - return StringToDate(v) + return StringToDateInDefaultLocation(v, location) + case json.Number: + s, err1 := ToInt64E(v) + if err1 != nil { + return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i) + } + return time.Unix(s, 0), nil case int: return time.Unix(int64(v), 0), nil case int64: @@ -64,6 +77,11 @@ func ToDurationE(i interface{}) (d time.Duration, err error) { d, err = time.ParseDuration(s + "ns") } return + case json.Number: + var v float64 + v, err = s.Float64() + d = time.Duration(v) + return default: err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i) return @@ -80,12 +98,39 @@ func ToBoolE(i interface{}) (bool, error) { case nil: return false, nil case int: - if i.(int) != 0 { - return true, nil - } - return false, nil + return b != 0, nil + case int64: + return b != 0, nil + case int32: + return b != 0, nil + case int16: + return b != 0, nil + case int8: + return b != 0, nil + case uint: + return b != 0, nil + case uint64: + return b != 0, nil + case uint32: + return b != 0, nil + case uint16: + return b != 0, nil + case uint8: + return b != 0, nil + case float64: + return b != 0, nil + case float32: + return b != 0, nil + case time.Duration: + return b != 0, nil case string: return strconv.ParseBool(i.(string)) + case json.Number: + v, err := ToInt64E(b) + if err == nil { + return v != 0, nil + } + return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i) default: return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i) } @@ -95,13 +140,16 @@ func ToBoolE(i interface{}) (bool, error) { func ToFloat64E(i interface{}) (float64, error) { i = indirect(i) + intv, ok := toInt(i) + if ok { + return float64(intv), nil + } + switch s := i.(type) { case float64: return s, nil case float32: return float64(s), nil - case int: - return float64(s), nil case int64: return float64(s), nil case int32: @@ -126,11 +174,19 @@ func ToFloat64E(i interface{}) (float64, error) { return v, nil } return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) + case json.Number: + v, err := s.Float64() + if err == 
nil { + return v, nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) case bool: if s { return 1, nil } return 0, nil + case nil: + return 0, nil default: return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) } @@ -140,13 +196,16 @@ func ToFloat64E(i interface{}) (float64, error) { func ToFloat32E(i interface{}) (float32, error) { i = indirect(i) + intv, ok := toInt(i) + if ok { + return float32(intv), nil + } + switch s := i.(type) { case float64: return float32(s), nil case float32: return s, nil - case int: - return float32(s), nil case int64: return float32(s), nil case int32: @@ -171,11 +230,19 @@ func ToFloat32E(i interface{}) (float32, error) { return float32(v), nil } return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) + case json.Number: + v, err := s.Float64() + if err == nil { + return float32(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) case bool: if s { return 1, nil } return 0, nil + case nil: + return 0, nil default: return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) } @@ -185,9 +252,12 @@ func ToFloat32E(i interface{}) (float32, error) { func ToInt64E(i interface{}) (int64, error) { i = indirect(i) + intv, ok := toInt(i) + if ok { + return int64(intv), nil + } + switch s := i.(type) { - case int: - return int64(s), nil case int64: return s, nil case int32: @@ -211,11 +281,13 @@ func ToInt64E(i interface{}) (int64, error) { case float32: return int64(s), nil case string: - v, err := strconv.ParseInt(s, 0, 0) + v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) if err == nil { return v, nil } return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) + case json.Number: + return ToInt64E(string(s)) case bool: if s { return 1, nil @@ -232,9 +304,12 @@ func ToInt64E(i interface{}) (int64, error) { func ToInt32E(i interface{}) (int32, error) { i = indirect(i) + intv, ok := toInt(i) + if ok { + return int32(intv), nil + } + switch s := i.(type) { - case int: - return int32(s), nil case int64: return int32(s), nil case int32: @@ -258,11 +333,13 @@ func ToInt32E(i interface{}) (int32, error) { case float32: return int32(s), nil case string: - v, err := strconv.ParseInt(s, 0, 0) + v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) if err == nil { return int32(v), nil } return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) + case json.Number: + return ToInt32E(string(s)) case bool: if s { return 1, nil @@ -279,9 +356,12 @@ func ToInt32E(i interface{}) (int32, error) { func ToInt16E(i interface{}) (int16, error) { i = indirect(i) + intv, ok := toInt(i) + if ok { + return int16(intv), nil + } + switch s := i.(type) { - case int: - return int16(s), nil case int64: return int16(s), nil case int32: @@ -305,11 +385,13 @@ func ToInt16E(i interface{}) (int16, error) { case float32: return int16(s), nil case string: - v, err := strconv.ParseInt(s, 0, 0) + v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) if err == nil { return int16(v), nil } return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) + case json.Number: + return ToInt16E(string(s)) case bool: if s { return 1, nil @@ -326,9 +408,12 @@ func ToInt16E(i interface{}) (int16, error) { func ToInt8E(i interface{}) (int8, error) { i = indirect(i) + intv, ok := toInt(i) + if ok { + return int8(intv), nil + } + switch s := i.(type) { - case int: - return int8(s), nil case int64: return int8(s), nil case int32: @@ -352,11 +437,13 @@ func ToInt8E(i interface{}) (int8, error) 
{ case float32: return int8(s), nil case string: - v, err := strconv.ParseInt(s, 0, 0) + v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) if err == nil { return int8(v), nil } return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) + case json.Number: + return ToInt8E(string(s)) case bool: if s { return 1, nil @@ -373,9 +460,12 @@ func ToInt8E(i interface{}) (int8, error) { func ToIntE(i interface{}) (int, error) { i = indirect(i) + intv, ok := toInt(i) + if ok { + return intv, nil + } + switch s := i.(type) { - case int: - return s, nil case int64: return int(s), nil case int32: @@ -399,11 +489,13 @@ func ToIntE(i interface{}) (int, error) { case float32: return int(s), nil case string: - v, err := strconv.ParseInt(s, 0, 0) + v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) if err == nil { return int(v), nil } - return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i) + return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) + case json.Number: + return ToIntE(string(s)) case bool: if s { return 1, nil @@ -420,18 +512,26 @@ func ToIntE(i interface{}) (int, error) { func ToUintE(i interface{}) (uint, error) { i = indirect(i) - switch s := i.(type) { - case string: - v, err := strconv.ParseUint(s, 0, 0) - if err == nil { - return uint(v), nil - } - return 0, fmt.Errorf("unable to cast %#v to uint: %s", i, err) - case int: - if s < 0 { + intv, ok := toInt(i) + if ok { + if intv < 0 { return 0, errNegativeNotAllowed } - return uint(s), nil + return uint(intv), nil + } + + switch s := i.(type) { + case string: + v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) + if err == nil { + if v < 0 { + return 0, errNegativeNotAllowed + } + return uint(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i) + case json.Number: + return ToUintE(string(s)) case int64: if s < 0 { return 0, errNegativeNotAllowed @@ -488,18 +588,26 @@ func ToUintE(i interface{}) (uint, error) { func ToUint64E(i interface{}) (uint64, error) { i = indirect(i) - switch s := i.(type) { - case string: - v, err := strconv.ParseUint(s, 0, 64) - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v to uint64: %s", i, err) - case int: - if s < 0 { + intv, ok := toInt(i) + if ok { + if intv < 0 { return 0, errNegativeNotAllowed } - return uint64(s), nil + return uint64(intv), nil + } + + switch s := i.(type) { + case string: + v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) + if err == nil { + if v < 0 { + return 0, errNegativeNotAllowed + } + return uint64(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) + case json.Number: + return ToUint64E(string(s)) case int64: if s < 0 { return 0, errNegativeNotAllowed @@ -556,18 +664,26 @@ func ToUint64E(i interface{}) (uint64, error) { func ToUint32E(i interface{}) (uint32, error) { i = indirect(i) - switch s := i.(type) { - case string: - v, err := strconv.ParseUint(s, 0, 32) - if err == nil { - return uint32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v to uint32: %s", i, err) - case int: - if s < 0 { + intv, ok := toInt(i) + if ok { + if intv < 0 { return 0, errNegativeNotAllowed } - return uint32(s), nil + return uint32(intv), nil + } + + switch s := i.(type) { + case string: + v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) + if err == nil { + if v < 0 { + return 0, errNegativeNotAllowed + } + return uint32(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i) + case json.Number: + return ToUint32E(string(s)) 
case int64: if s < 0 { return 0, errNegativeNotAllowed @@ -624,18 +740,26 @@ func ToUint32E(i interface{}) (uint32, error) { func ToUint16E(i interface{}) (uint16, error) { i = indirect(i) - switch s := i.(type) { - case string: - v, err := strconv.ParseUint(s, 0, 16) - if err == nil { - return uint16(v), nil - } - return 0, fmt.Errorf("unable to cast %#v to uint16: %s", i, err) - case int: - if s < 0 { + intv, ok := toInt(i) + if ok { + if intv < 0 { return 0, errNegativeNotAllowed } - return uint16(s), nil + return uint16(intv), nil + } + + switch s := i.(type) { + case string: + v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) + if err == nil { + if v < 0 { + return 0, errNegativeNotAllowed + } + return uint16(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i) + case json.Number: + return ToUint16E(string(s)) case int64: if s < 0 { return 0, errNegativeNotAllowed @@ -692,18 +816,26 @@ func ToUint16E(i interface{}) (uint16, error) { func ToUint8E(i interface{}) (uint8, error) { i = indirect(i) - switch s := i.(type) { - case string: - v, err := strconv.ParseUint(s, 0, 8) - if err == nil { - return uint8(v), nil - } - return 0, fmt.Errorf("unable to cast %#v to uint8: %s", i, err) - case int: - if s < 0 { + intv, ok := toInt(i) + if ok { + if intv < 0 { return 0, errNegativeNotAllowed } - return uint8(s), nil + return uint8(intv), nil + } + + switch s := i.(type) { + case string: + v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) + if err == nil { + if v < 0 { + return 0, errNegativeNotAllowed + } + return uint8(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i) + case json.Number: + return ToUint8E(string(s)) case int64: if s < 0 { return 0, errNegativeNotAllowed @@ -819,15 +951,17 @@ func ToStringE(i interface{}) (string, error) { case int8: return strconv.FormatInt(int64(s), 10), nil case uint: - return strconv.FormatInt(int64(s), 10), nil + return strconv.FormatUint(uint64(s), 10), nil case uint64: - return strconv.FormatInt(int64(s), 10), nil + return strconv.FormatUint(uint64(s), 10), nil case uint32: - return strconv.FormatInt(int64(s), 10), nil + return strconv.FormatUint(uint64(s), 10), nil case uint16: - return strconv.FormatInt(int64(s), 10), nil + return strconv.FormatUint(uint64(s), 10), nil case uint8: - return strconv.FormatInt(int64(s), 10), nil + return strconv.FormatUint(uint64(s), 10), nil + case json.Number: + return s.String(), nil case []byte: return string(s), nil case template.HTML: @@ -1129,8 +1263,43 @@ func ToStringSliceE(i interface{}) ([]string, error) { return a, nil case []string: return v, nil + case []int8: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []int: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []int32: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []int64: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []float32: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []float64: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil case string: return strings.Fields(v), nil + case []error: + for _, err := range i.([]error) { + a = append(a, err.Error()) + } + return a, nil case interface{}: str, err := ToStringE(v) if err != nil { @@ -1204,37 +1373,84 @@ func ToDurationSliceE(i interface{}) ([]time.Duration, error) { // predefined list of formats. 
If no suitable format is found, an error is // returned. func StringToDate(s string) (time.Time, error) { - return parseDateWith(s, []string{ - time.RFC3339, - "2006-01-02T15:04:05", // iso8601 without timezone - time.RFC1123Z, - time.RFC1123, - time.RFC822Z, - time.RFC822, - time.RFC850, - time.ANSIC, - time.UnixDate, - time.RubyDate, - "2006-01-02 15:04:05.999999999 -0700 MST", // Time.String() - "2006-01-02", - "02 Jan 2006", - "2006-01-02T15:04:05-0700", // RFC3339 without timezone hh:mm colon - "2006-01-02 15:04:05 -07:00", - "2006-01-02 15:04:05 -0700", - "2006-01-02 15:04:05Z07:00", // RFC3339 without T - "2006-01-02 15:04:05Z0700", // RFC3339 without T or timezone hh:mm colon - "2006-01-02 15:04:05", - time.Kitchen, - time.Stamp, - time.StampMilli, - time.StampMicro, - time.StampNano, - }) + return parseDateWith(s, time.UTC, timeFormats) } -func parseDateWith(s string, dates []string) (d time.Time, e error) { - for _, dateType := range dates { - if d, e = time.Parse(dateType, s); e == nil { +// StringToDateInDefaultLocation casts an empty interface to a time.Time, +// interpreting inputs without a timezone to be in the given location, +// or the local timezone if nil. +func StringToDateInDefaultLocation(s string, location *time.Location) (time.Time, error) { + return parseDateWith(s, location, timeFormats) +} + +type timeFormatType int + +const ( + timeFormatNoTimezone timeFormatType = iota + timeFormatNamedTimezone + timeFormatNumericTimezone + timeFormatNumericAndNamedTimezone + timeFormatTimeOnly +) + +type timeFormat struct { + format string + typ timeFormatType +} + +func (f timeFormat) hasTimezone() bool { + // We don't include the formats with only named timezones, see + // https://github.com/golang/go/issues/19694#issuecomment-289103522 + return f.typ >= timeFormatNumericTimezone && f.typ <= timeFormatNumericAndNamedTimezone +} + +var ( + timeFormats = []timeFormat{ + // Keep common formats at the top. 
+ {"2006-01-02", timeFormatNoTimezone}, + {time.RFC3339, timeFormatNumericTimezone}, + {"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone + {time.RFC1123Z, timeFormatNumericTimezone}, + {time.RFC1123, timeFormatNamedTimezone}, + {time.RFC822Z, timeFormatNumericTimezone}, + {time.RFC822, timeFormatNamedTimezone}, + {time.RFC850, timeFormatNamedTimezone}, + {"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String() + {"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon + {"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon + {"2006-01-02 15:04:05", timeFormatNoTimezone}, + {time.ANSIC, timeFormatNoTimezone}, + {time.UnixDate, timeFormatNamedTimezone}, + {time.RubyDate, timeFormatNumericTimezone}, + {"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone}, + {"02 Jan 2006", timeFormatNoTimezone}, + {"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone}, + {"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone}, + {time.Kitchen, timeFormatTimeOnly}, + {time.Stamp, timeFormatTimeOnly}, + {time.StampMilli, timeFormatTimeOnly}, + {time.StampMicro, timeFormatTimeOnly}, + {time.StampNano, timeFormatTimeOnly}, + } +) + +func parseDateWith(s string, location *time.Location, formats []timeFormat) (d time.Time, e error) { + + for _, format := range formats { + if d, e = time.Parse(format.format, s); e == nil { + + // Some time formats have a zone name, but no offset, so it gets + // put in that zone name (not the default one passed in to us), but + // without that zone's offset. So set the location manually. + if format.typ <= timeFormatNamedTimezone { + if location == nil { + location = time.Local + } + year, month, day := d.Date() + hour, min, sec := d.Clock() + d = time.Date(year, month, day, hour, min, sec, d.Nanosecond(), location) + } + return } } @@ -1247,3 +1463,36 @@ func jsonStringToObject(s string, v interface{}) error { data := []byte(s) return json.Unmarshal(data, v) } + +// toInt returns the int value of v if v or v's underlying type +// is an int. +// Note that this will return false for int64 etc. types. +func toInt(v interface{}) (int, bool) { + switch v := v.(type) { + case int: + return v, true + case time.Weekday: + return int(v), true + case time.Month: + return int(v), true + default: + return 0, false + } +} + +func trimZeroDecimal(s string) string { + var foundZero bool + for i := len(s); i > 0; i-- { + switch s[i-1] { + case '.': + if foundZero { + return s[:i-1] + } + case '0': + foundZero = true + default: + return s + } + } + return s +} diff --git a/vendor/github.com/spf13/cast/timeformattype_string.go b/vendor/github.com/spf13/cast/timeformattype_string.go new file mode 100644 index 00000000..1524fc82 --- /dev/null +++ b/vendor/github.com/spf13/cast/timeformattype_string.go @@ -0,0 +1,27 @@ +// Code generated by "stringer -type timeFormatType"; DO NOT EDIT. + +package cast + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[timeFormatNoTimezone-0] + _ = x[timeFormatNamedTimezone-1] + _ = x[timeFormatNumericTimezone-2] + _ = x[timeFormatNumericAndNamedTimezone-3] + _ = x[timeFormatTimeOnly-4] +} + +const _timeFormatType_name = "timeFormatNoTimezonetimeFormatNamedTimezonetimeFormatNumericTimezonetimeFormatNumericAndNamedTimezonetimeFormatTimeOnly" + +var _timeFormatType_index = [...]uint8{0, 20, 43, 68, 101, 119} + +func (i timeFormatType) String() string { + if i < 0 || i >= timeFormatType(len(_timeFormatType_index)-1) { + return "timeFormatType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _timeFormatType_name[_timeFormatType_index[i]:_timeFormatType_index[i+1]] +} diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index 0d6e6179..2c8f4808 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -1,3 +1,17 @@ +# Copyright 2013-2023 The Cobra Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + run: deadline: 5m @@ -5,44 +19,39 @@ linters: disable-all: true enable: #- bodyclose - - deadcode + # - deadcode ! deprecated since v1.49.0; replaced by 'unused' #- depguard #- dogsled #- dupl - errcheck #- exhaustive #- funlen - - gas #- gochecknoinits - goconst - #- gocritic + - gocritic #- gocyclo - #- gofmt + - gofmt - goimports - - golint #- gomnd #- goprintffuncname - #- gosec - #- gosimple + - gosec + - gosimple - govet - ineffassign - - interfacer #- lll - - maligned - - megacheck - #- misspell + - misspell #- nakedret #- noctx - #- nolintlint + - nolintlint #- rowserrcheck #- scopelint - #- staticcheck - - structcheck - #- stylecheck + - staticcheck + #- structcheck ! deprecated since v1.49.0; replaced by 'unused' + - stylecheck #- typecheck - unconvert #- unparam - #- unused - - varcheck + - unused + # - varcheck ! 
deprecated since v1.49.0; replaced by 'unused' #- whitespace fast: false diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml deleted file mode 100644 index e0a3b500..00000000 --- a/vendor/github.com/spf13/cobra/.travis.yml +++ /dev/null @@ -1,28 +0,0 @@ -language: go - -stages: - - test - - build - -go: - - 1.12.x - - 1.13.x - - tip - -env: GO111MODULE=on - -before_install: - - go get -u github.com/kyoh86/richgo - - go get -u github.com/mitchellh/gox - - curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin latest - -matrix: - allow_failures: - - go: tip - include: - - stage: build - go: 1.13.x - script: make cobra_generator - -script: - - make test diff --git a/vendor/github.com/spf13/cobra/BUILD.bazel b/vendor/github.com/spf13/cobra/BUILD.bazel index c700968f..cba38258 100644 --- a/vendor/github.com/spf13/cobra/BUILD.bazel +++ b/vendor/github.com/spf13/cobra/BUILD.bazel @@ -3,14 +3,17 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "cobra", srcs = [ + "active_help.go", "args.go", "bash_completions.go", + "bash_completionsV2.go", "cobra.go", "command.go", "command_notwin.go", "command_win.go", - "custom_completions.go", + "completions.go", "fish_completions.go", + "flag_groups.go", "powershell_completions.go", "shell_completions.go", "zsh_completions.go", diff --git a/vendor/github.com/spf13/cobra/CHANGELOG.md b/vendor/github.com/spf13/cobra/CHANGELOG.md deleted file mode 100644 index 8a23b4f8..00000000 --- a/vendor/github.com/spf13/cobra/CHANGELOG.md +++ /dev/null @@ -1,51 +0,0 @@ -# Cobra Changelog - -## v1.1.3 - -* **Fix:** release-branch.cobra1.1 only: Revert "Deprecate Go < 1.14" to maintain backward compatibility - -## v1.1.2 - -### Notable Changes - -* Bump license year to 2021 in golden files (#1309) @Bowbaq -* Enhance PowerShell completion with custom comp (#1208) @Luap99 -* Update gopkg.in/yaml.v2 to v2.4.0: The previous breaking change in yaml.v2 v2.3.0 has been reverted, see go-yaml/yaml#670 -* Documentation readability improvements (#1228 etc.) @zaataylor etc. -* Use golangci-lint: Repair warnings and errors resulting from linting (#1044) @umarcor - -## v1.1.1 - -* **Fix:** yaml.v2 2.3.0 contained a unintended breaking change. This release reverts to yaml.v2 v2.2.8 which has recent critical CVE fixes, but does not have the breaking changes. See https://github.com/spf13/cobra/pull/1259 for context. -* **Fix:** correct internal formatting for go-md2man v2 (which caused man page generation to be broken). See https://github.com/spf13/cobra/issues/1049 for context. - -## v1.1.0 - -### Notable Changes - -* Extend Go completions and revamp zsh comp (#1070) -* Fix man page doc generation - no auto generated tag when `cmd.DisableAutoGenTag = true` (#1104) @jpmcb -* Add completion for help command (#1136) -* Complete subcommands when TraverseChildren is set (#1171) -* Fix stderr printing functions (#894) -* fix: fish output redirection (#1247) - -## v1.0.0 - -Announcing v1.0.0 of Cobra. 
🎉 - -### Notable Changes -* Fish completion (including support for Go custom completion) @marckhouzam -* API (urgent): Rename BashCompDirectives to ShellCompDirectives @marckhouzam -* Remove/replace SetOutput on Command - deprecated @jpmcb -* add support for autolabel stale PR @xchapter7x -* Add Labeler Actions @xchapter7x -* Custom completions coded in Go (instead of Bash) @marckhouzam -* Partial Revert of #922 @jharshman -* Add Makefile to project @jharshman -* Correct documentation for InOrStdin @desponda -* Apply formatting to templates @jharshman -* Revert change so help is printed on stdout again @marckhouzam -* Update md2man to v2.0.0 @pdf -* update viper to v1.4.0 @umarcor -* Update cmd/root.go example in README.md @jharshman diff --git a/vendor/github.com/spf13/cobra/MAINTAINERS b/vendor/github.com/spf13/cobra/MAINTAINERS new file mode 100644 index 00000000..4c5ac3dd --- /dev/null +++ b/vendor/github.com/spf13/cobra/MAINTAINERS @@ -0,0 +1,13 @@ +maintainers: +- spf13 +- johnSchnake +- jpmcb +- marckhouzam +inactive: +- anthonyfok +- bep +- bogem +- broady +- eparis +- jharshman +- wfernandes diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile index 472c73bf..0da8d7aa 100644 --- a/vendor/github.com/spf13/cobra/Makefile +++ b/vendor/github.com/spf13/cobra/Makefile @@ -5,15 +5,11 @@ ifeq (, $(shell which golangci-lint)) $(warning "could not find golangci-lint in $(PATH), run: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh") endif -ifeq (, $(shell which richgo)) -$(warning "could not find richgo in $(PATH), run: go get github.com/kyoh86/richgo") -endif - -.PHONY: fmt lint test cobra_generator install_deps clean +.PHONY: fmt lint test install_deps clean default: all -all: fmt test cobra_generator +all: fmt test fmt: $(info ******************** checking formatting ********************) @@ -23,14 +19,13 @@ lint: $(info ******************** running lint tools ********************) golangci-lint run -v -test: install_deps lint +test: install_deps $(info ******************** running tests ********************) - richgo test -v ./... + go test -v ./... -cobra_generator: install_deps - $(info ******************** building generator ********************) - mkdir -p $(BIN) - make -C cobra all +richtest: install_deps + $(info ******************** running tests with kyoh86/richgo ********************) + richgo test -v ./... install_deps: $(info ******************** downloading dependencies ********************) diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index a1b13ddd..6444f4b7 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -1,61 +1,35 @@ -![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png) +![cobra logo](assets/CobraMain.png) -Cobra is both a library for creating powerful modern CLI applications as well as a program to generate applications and command files. +Cobra is a library for creating powerful modern CLI applications. -Cobra is used in many Go projects such as [Kubernetes](http://kubernetes.io/), -[Hugo](https://gohugo.io), and [Github CLI](https://github.com/cli/cli) to -name a few. [This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra. +Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/), +[Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to +name a few. 
[This list](site/content/projects_using_cobra.md) contains a more extensive list of projects using Cobra. -[![](https://img.shields.io/github/workflow/status/spf13/cobra/Test?longCache=tru&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest) -[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra) -[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra) +[![](https://img.shields.io/github/actions/workflow/status/spf13/cobra/test.yml?branch=main&longCache=true&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest) +[![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra) [![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199) -# Table of Contents - -- [Overview](#overview) -- [Concepts](#concepts) - * [Commands](#commands) - * [Flags](#flags) -- [Installing](#installing) -- [Getting Started](#getting-started) - * [Using the Cobra Generator](#using-the-cobra-generator) - * [Using the Cobra Library](#using-the-cobra-library) - * [Working with Flags](#working-with-flags) - * [Positional and Custom Arguments](#positional-and-custom-arguments) - * [Example](#example) - * [Help Command](#help-command) - * [Usage Message](#usage-message) - * [PreRun and PostRun Hooks](#prerun-and-postrun-hooks) - * [Suggestions when "unknown command" happens](#suggestions-when-unknown-command-happens) - * [Generating documentation for your command](#generating-documentation-for-your-command) - * [Generating shell completions](#generating-shell-completions) -- [Contributing](CONTRIBUTING.md) -- [License](#license) - # Overview Cobra is a library providing a simple interface to create powerful modern CLI interfaces similar to git & go tools. -Cobra is also an application that will generate your application scaffolding to rapidly -develop a Cobra-based application. - Cobra provides: * Easy subcommand-based CLIs: `app server`, `app fetch`, etc. * Fully POSIX-compliant flags (including short & long versions) * Nested subcommands * Global, local and cascading flags -* Easy generation of applications & commands with `cobra init appname` & `cobra add cmdname` * Intelligent suggestions (`app srver`... did you mean `app server`?) * Automatic help generation for commands and flags +* Grouping help for subcommands * Automatic help flag recognition of `-h`, `--help`, etc. * Automatically generated shell autocomplete for your application (bash, zsh, fish, powershell) * Automatically generated man pages for your application * Command aliases so you can change things without breaking them * The flexibility to define your own help, usage, etc. -* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps +* Optional seamless integration with [viper](https://github.com/spf13/viper) for 12-factor apps # Concepts @@ -67,9 +41,9 @@ The best applications read like sentences when used, and as a result, users intuitively know how to interact with them. The pattern to follow is -`APPNAME VERB NOUN --ADJECTIVE.` +`APPNAME VERB NOUN --ADJECTIVE` or -`APPNAME COMMAND ARG --FLAG` +`APPNAME COMMAND ARG --FLAG`. 
A few good real world examples may better illustrate this point. @@ -89,7 +63,7 @@ have children commands and optionally run an action. In the example above, 'server' is the command. -[More about cobra.Command](https://godoc.org/github.com/spf13/cobra#Command) +[More about cobra.Command](https://pkg.go.dev/github.com/spf13/cobra#Command) ## Flags @@ -106,10 +80,11 @@ which maintains the same interface while adding POSIX compliance. # Installing Using Cobra is easy. First, use `go get` to install the latest version -of the library. This command will install the `cobra` generator executable -along with the library and its dependencies: +of the library. - go get -u github.com/spf13/cobra +``` +go get -u github.com/spf13/cobra@latest +``` Next, include Cobra in your application: @@ -117,644 +92,21 @@ Next, include Cobra in your application: import "github.com/spf13/cobra" ``` -# Getting Started +# Usage +`cobra-cli` is a command line program to generate cobra applications and command files. +It will bootstrap your application scaffolding to rapidly +develop a Cobra-based application. It is the easiest way to incorporate Cobra into your application. -While you are welcome to provide your own organization, typically a Cobra-based -application will follow the following organizational structure: +It can be installed by running: ``` - ▾ appName/ - ▾ cmd/ - add.go - your.go - commands.go - here.go - main.go +go install github.com/spf13/cobra-cli@latest ``` -In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra. +For complete details on using the Cobra-CLI generator, please read [The Cobra Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md) -```go -package main - -import ( - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -## Using the Cobra Generator - -Cobra provides its own program that will create your application and add any -commands you want. It's the easiest way to incorporate Cobra into your application. - -[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it. - -## Using the Cobra Library - -To manually implement Cobra you need to create a bare main.go file and a rootCmd file. -You will optionally provide additional commands as you see fit. - -### Create rootCmd - -Cobra doesn't require any special constructors. Simply create your commands. - -Ideally you place this in app/cmd/root.go: - -```go -var rootCmd = &cobra.Command{ - Use: "hugo", - Short: "Hugo is a very fast static site generator", - Long: `A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. - Complete documentation is available at http://hugo.spf13.com`, - Run: func(cmd *cobra.Command, args []string) { - // Do Stuff Here - }, -} - -func Execute() { - if err := rootCmd.Execute(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} -``` - -You will additionally define flags and handle configuration in your init() function. - -For example cmd/root.go: - -```go -package cmd - -import ( - "fmt" - "os" - - homedir "github.com/mitchellh/go-homedir" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var ( - // Used for flags. - cfgFile string - userLicense string - - rootCmd = &cobra.Command{ - Use: "cobra", - Short: "A generator for Cobra based Applications", - Long: `Cobra is a CLI library for Go that empowers applications. 
-This application is a tool to generate the needed files -to quickly create a Cobra application.`, - } -) - -// Execute executes the root command. -func Execute() error { - return rootCmd.Execute() -} - -func init() { - cobra.OnInitialize(initConfig) - - rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") - rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution") - rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project") - rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) - viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper")) - viper.SetDefault("author", "NAME HERE ") - viper.SetDefault("license", "apache") - - rootCmd.AddCommand(addCmd) - rootCmd.AddCommand(initCmd) -} - -func initConfig() { - if cfgFile != "" { - // Use config file from the flag. - viper.SetConfigFile(cfgFile) - } else { - // Find home directory. - home, err := homedir.Dir() - cobra.CheckErr(err) - - // Search config in home directory with name ".cobra" (without extension). - viper.AddConfigPath(home) - viper.SetConfigName(".cobra") - } - - viper.AutomaticEnv() - - if err := viper.ReadInConfig(); err == nil { - fmt.Println("Using config file:", viper.ConfigFileUsed()) - } -} -``` - -### Create your main.go - -With the root command you need to have your main function execute it. -Execute should be run on the root for clarity, though it can be called on any command. - -In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra. - -```go -package main - -import ( - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -### Create additional commands - -Additional commands can be defined and typically are each given their own file -inside of the cmd/ directory. - -If you wanted to create a version command you would create cmd/version.go and -populate it with the following: - -```go -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(versionCmd) -} - -var versionCmd = &cobra.Command{ - Use: "version", - Short: "Print the version number of Hugo", - Long: `All software has versions. This is Hugo's`, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") - }, -} -``` - -### Returning and handling errors - -If you wish to return an error to the caller of a command, `RunE` can be used. - -```go -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(tryCmd) -} - -var tryCmd = &cobra.Command{ - Use: "try", - Short: "Try and possibly fail at something", - RunE: func(cmd *cobra.Command, args []string) error { - if err := someFunc(); err != nil { - return err - } - return nil - }, -} -``` - -The error can then be caught at the execute function call. - -## Working with Flags - -Flags provide modifiers to control how the action command operates. - -### Assign flags to a command - -Since the flags are defined and used in different locations, we need to -define a variable outside with the correct scope to assign the flag to -work with. - -```go -var Verbose bool -var Source string -``` - -There are two different approaches to assign a flag. 
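For reference, a compact, runnable sketch contrasting the two approaches described in the next two sections (the `serve` subcommand and flag names are hypothetical, not part of this patch):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

var (
	Verbose bool
	Source  string
)

func main() {
	rootCmd := &cobra.Command{Use: "app"}
	serveCmd := &cobra.Command{
		Use: "serve",
		Run: func(cmd *cobra.Command, args []string) {
			// Verbose is inherited from the root; Source is local to 'serve'.
			fmt.Println("verbose:", Verbose, "source:", Source)
		},
	}

	// Persistent: available to the root command and every command beneath it.
	rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output")
	// Local: applies only to the 'serve' command.
	serveCmd.Flags().StringVarP(&Source, "source", "s", "", "source directory to read from")

	rootCmd.AddCommand(serveCmd)
	_ = rootCmd.Execute()
}
```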
- -### Persistent Flags - -A flag can be 'persistent', meaning that this flag will be available to the -command it's assigned to as well as every command under that command. For -global flags, assign a flag as a persistent flag on the root. - -```go -rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") -``` - -### Local Flags - -A flag can also be assigned locally, which will only apply to that specific command. - -```go -localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") -``` - -### Local Flag on Parent Commands - -By default, Cobra only parses local flags on the target command, and any local flags on -parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will -parse local flags on each command before executing the target command. - -```go -command := cobra.Command{ - Use: "print [OPTIONS] [COMMANDS]", - TraverseChildren: true, -} -``` - -### Bind Flags with Config - -You can also bind your flags with [viper](https://github.com/spf13/viper): -```go -var author string - -func init() { - rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) -} -``` - -In this example, the persistent flag `author` is bound with `viper`. -**Note**: the variable `author` will not be set to the value from config, -when the `--author` flag is not provided by user. - -More in [viper documentation](https://github.com/spf13/viper#working-with-flags). - -### Required flags - -Flags are optional by default. If instead you wish your command to report an error -when a flag has not been set, mark it as required: -```go -rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)") -rootCmd.MarkFlagRequired("region") -``` - -Or, for persistent flags: -```go -rootCmd.PersistentFlags().StringVarP(&Region, "region", "r", "", "AWS region (required)") -rootCmd.MarkPersistentFlagRequired("region") -``` - -## Positional and Custom Arguments - -Validation of positional arguments can be specified using the `Args` field -of `Command`. - -The following validators are built in: - -- `NoArgs` - the command will report an error if there are any positional args. -- `ArbitraryArgs` - the command will accept any args. -- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`. -- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args. -- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args. -- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args. -- `ExactValidArgs(int)` - the command will report an error if there are not exactly N positional args OR if there are any positional args that are not in the `ValidArgs` field of `Command` -- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args. 
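As a minimal sketch tying the built-in validators together (`MatchAll` is the combinator this patch adds in `args.go` further down; it replaces the deprecated `ExactValidArgs`, and the `color` command here is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use:       "color [red|green|blue]",
		Short:     "Set the output color",
		ValidArgs: []string{"red", "green", "blue"},
		// Require exactly one argument AND require it to be in ValidArgs.
		// MatchAll(ExactArgs(1), OnlyValidArgs) is the documented replacement
		// for the deprecated ExactValidArgs(1).
		Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("color set to", args[0])
		},
	}
	_ = cmd.Execute()
}
```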
- -An example of setting the custom validator: - -```go -var cmd = &cobra.Command{ - Short: "hello", - Args: func(cmd *cobra.Command, args []string) error { - if len(args) < 1 { - return errors.New("requires a color argument") - } - if myapp.IsValidColor(args[0]) { - return nil - } - return fmt.Errorf("invalid color specified: %s", args[0]) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hello, World!") - }, -} -``` - -## Example - -In the example below, we have defined three commands. Two are at the top level -and one (cmdTimes) is a child of one of the top commands. In this case the root -is not executable, meaning that a subcommand is required. This is accomplished -by not providing a 'Run' for the 'rootCmd'. - -We have only defined one flag for a single command. - -More documentation about flags is available at https://github.com/spf13/pflag - -```go -package main - -import ( - "fmt" - "strings" - - "github.com/spf13/cobra" -) - -func main() { - var echoTimes int - - var cmdPrint = &cobra.Command{ - Use: "print [string to print]", - Short: "Print anything to the screen", - Long: `print is for printing anything back to the screen. -For many years people have printed back to the screen.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdEcho = &cobra.Command{ - Use: "echo [string to echo]", - Short: "Echo anything to the screen", - Long: `echo is for echoing anything back. -Echo works a lot like print, except it has a child command.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Echo: " + strings.Join(args, " ")) - }, - } - - var cmdTimes = &cobra.Command{ - Use: "times [string to echo]", - Short: "Echo anything to the screen more times", - Long: `echo things multiple times back to the user by providing -a count and a string.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - for i := 0; i < echoTimes; i++ { - fmt.Println("Echo: " + strings.Join(args, " ")) - } - }, - } - - cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") - - var rootCmd = &cobra.Command{Use: "app"} - rootCmd.AddCommand(cmdPrint, cmdEcho) - cmdEcho.AddCommand(cmdTimes) - rootCmd.Execute() -} -``` - -For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/). - -## Help Command - -Cobra automatically adds a help command to your application when you have subcommands. -This will be called when a user runs 'app help'. Additionally, help will also -support all other commands as input. Say, for instance, you have a command called -'create' without any additional configuration; Cobra will work when 'app help -create' is called. Every command will automatically have the '--help' flag added. - -### Example - -The following output is automatically generated by Cobra. Nothing beyond the -command and flag definitions are needed. - - $ cobra help - - Cobra is a CLI library for Go that empowers applications. - This application is a tool to generate the needed files - to quickly create a Cobra application. 
- - Usage: - cobra [command] - - Available Commands: - add Add a command to a Cobra Application - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra - -l, --license string name of license for the project - --viper use Viper for configuration (default true) - - Use "cobra [command] --help" for more information about a command. - - -Help is just a command like any other. There is no special logic or behavior -around it. In fact, you can provide your own if you want. - -### Defining your own help - -You can provide your own Help command or your own template for the default command to use -with following functions: - -```go -cmd.SetHelpCommand(cmd *Command) -cmd.SetHelpFunc(f func(*Command, []string)) -cmd.SetHelpTemplate(s string) -``` - -The latter two will also apply to any children commands. - -## Usage Message - -When the user provides an invalid flag or invalid command, Cobra responds by -showing the user the 'usage'. - -### Example -You may recognize this from the help above. That's because the default help -embeds the usage as part of its output. - - $ cobra --invalid - Error: unknown flag: --invalid - Usage: - cobra [command] - - Available Commands: - add Add a command to a Cobra Application - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra - -l, --license string name of license for the project - --viper use Viper for configuration (default true) - - Use "cobra [command] --help" for more information about a command. - -### Defining your own usage -You can provide your own usage function or template for Cobra to use. -Like help, the function and template are overridable through public methods: - -```go -cmd.SetUsageFunc(f func(*Command) error) -cmd.SetUsageTemplate(s string) -``` - -## Version Flag - -Cobra adds a top-level '--version' flag if the Version field is set on the root command. -Running an application with the '--version' flag will print the version to stdout using -the version template. The template can be customized using the -`cmd.SetVersionTemplate(s string)` function. - -## PreRun and PostRun Hooks - -It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order: - -- `PersistentPreRun` -- `PreRun` -- `Run` -- `PostRun` -- `PersistentPostRun` - -An example of two commands which use all of these features is below. 
When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: - -```go -package main - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func main() { - - var rootCmd = &cobra.Command{ - Use: "root [sub]", - Short: "My root command", - PersistentPreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) - }, - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) - }, - } - - var subCmd = &cobra.Command{ - Use: "sub [no options!]", - Short: "My subcommand", - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) - }, - } - - rootCmd.AddCommand(subCmd) - - rootCmd.SetArgs([]string{""}) - rootCmd.Execute() - fmt.Println() - rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) - rootCmd.Execute() -} -``` - -Output: -``` -Inside rootCmd PersistentPreRun with args: [] -Inside rootCmd PreRun with args: [] -Inside rootCmd Run with args: [] -Inside rootCmd PostRun with args: [] -Inside rootCmd PersistentPostRun with args: [] - -Inside rootCmd PersistentPreRun with args: [arg1 arg2] -Inside subCmd PreRun with args: [arg1 arg2] -Inside subCmd Run with args: [arg1 arg2] -Inside subCmd PostRun with args: [arg1 arg2] -Inside subCmd PersistentPostRun with args: [arg1 arg2] -``` - -## Suggestions when "unknown command" happens - -Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example: - -``` -$ hugo srever -Error: unknown command "srever" for "hugo" - -Did you mean this? - server - -Run 'hugo --help' for usage. -``` - -Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. - -If you need to disable suggestions or tweak the string distance in your command, use: - -```go -command.DisableSuggestions = true -``` - -or - -```go -command.SuggestionsMinimumDistance = 1 -``` - -You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but makes sense in your set of commands and for some which you don't want aliases. Example: - -``` -$ kubectl remove -Error: unknown command "remove" for "kubectl" - -Did you mean this? - delete - -Run 'kubectl help' for usage. -``` - -## Generating documentation for your command - -Cobra can generate documentation based on subcommands, flags, etc. 
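For reference, a minimal sketch of the Markdown generation this section refers to, using the `doc` subpackage (the `./docs` output path is illustrative):

```go
package main

import (
	"log"
	"os"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func main() {
	rootCmd := &cobra.Command{Use: "app", Short: "An example application"}

	// GenMarkdownTree writes one Markdown page per command into the
	// target directory, which must already exist.
	if err := os.MkdirAll("./docs", 0o755); err != nil {
		log.Fatal(err)
	}
	if err := doc.GenMarkdownTree(rootCmd, "./docs"); err != nil {
		log.Fatal(err)
	}
}
```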
Read more about it in the [docs generation documentation](doc/README.md). - -## Generating shell completions - -Cobra can generate a shell-completion file for the following shells: bash, zsh, fish, PowerShell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md). +For complete details on using the Cobra library, please read the [The Cobra User Guide](site/content/user_guide.md). # License -Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt) +Cobra is released under the Apache 2.0 license. See [LICENSE.txt](LICENSE.txt) diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go new file mode 100644 index 00000000..25c30e3c --- /dev/null +++ b/vendor/github.com/spf13/cobra/active_help.go @@ -0,0 +1,60 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cobra + +import ( + "fmt" + "os" +) + +const ( + activeHelpMarker = "_activeHelp_ " + // The below values should not be changed: programs will be using them explicitly + // in their user documentation, and users will be using them explicitly. + activeHelpEnvVarSuffix = "ACTIVE_HELP" + activeHelpGlobalEnvVar = configEnvVarGlobalPrefix + "_" + activeHelpEnvVarSuffix + activeHelpGlobalDisable = "0" +) + +// AppendActiveHelp adds the specified string to the specified array to be used as ActiveHelp. +// Such strings will be processed by the completion script and will be shown as ActiveHelp +// to the user. +// The array parameter should be the array that will contain the completions. +// This function can be called multiple times before and/or after completions are added to +// the array. Each time this function is called with the same array, the new +// ActiveHelp line will be shown below the previous ones when completion is triggered. +func AppendActiveHelp(compArray []string, activeHelpStr string) []string { + return append(compArray, fmt.Sprintf("%s%s", activeHelpMarker, activeHelpStr)) +} + +// GetActiveHelpConfig returns the value of the ActiveHelp environment variable +// <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the root command in upper +// case, with all non-ASCII-alphanumeric characters replaced by `_`. +// It will always return "0" if the global environment variable COBRA_ACTIVE_HELP +// is set to "0". +func GetActiveHelpConfig(cmd *Command) string { + activeHelpCfg := os.Getenv(activeHelpGlobalEnvVar) + if activeHelpCfg != activeHelpGlobalDisable { + activeHelpCfg = os.Getenv(activeHelpEnvVar(cmd.Root().Name())) + } + return activeHelpCfg +} + +// activeHelpEnvVar returns the name of the program-specific ActiveHelp environment +// variable. It has the format <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the +// root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`.
+func activeHelpEnvVar(name string) string { + return configEnvVar(name, activeHelpEnvVarSuffix) +} diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go index 70e9b262..ed1e70ce 100644 --- a/vendor/github.com/spf13/cobra/args.go +++ b/vendor/github.com/spf13/cobra/args.go @@ -1,3 +1,17 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package cobra import ( @@ -7,7 +21,7 @@ import ( type PositionalArgs func(cmd *Command, args []string) error -// Legacy arg validation has the following behaviour: +// legacyArgs validation has the following behaviour: // - root commands with no subcommands can take arbitrary arguments // - root commands with subcommands will do subcommand validity checking // - subcommands will always accept arbitrary arguments @@ -32,16 +46,16 @@ func NoArgs(cmd *Command, args []string) error { return nil } -// OnlyValidArgs returns an error if any args are not in the list of ValidArgs. +// OnlyValidArgs returns an error if there are any positional args that are not in +// the `ValidArgs` field of `Command` func OnlyValidArgs(cmd *Command, args []string) error { if len(cmd.ValidArgs) > 0 { // Remove any description that may be included in ValidArgs. // A description is following a tab character. - var validArgs []string + validArgs := make([]string, 0, len(cmd.ValidArgs)) for _, v := range cmd.ValidArgs { - validArgs = append(validArgs, strings.Split(v, "\t")[0]) + validArgs = append(validArgs, strings.SplitN(v, "\t", 2)[0]) } - for _, v := range args { if !stringInSlice(v, validArgs) { return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0])) @@ -86,18 +100,6 @@ func ExactArgs(n int) PositionalArgs { } } -// ExactValidArgs returns an error if -// there are not exactly N positional args OR -// there are any positional args that are not in the `ValidArgs` field of `Command` -func ExactValidArgs(n int) PositionalArgs { - return func(cmd *Command, args []string) error { - if err := ExactArgs(n)(cmd, args); err != nil { - return err - } - return OnlyValidArgs(cmd, args) - } -} - // RangeArgs returns an error if the number of args is not within the expected range. func RangeArgs(min int, max int) PositionalArgs { return func(cmd *Command, args []string) error { @@ -107,3 +109,23 @@ func RangeArgs(min int, max int) PositionalArgs { return nil } } + +// MatchAll allows combining several PositionalArgs to work in concert. 
+func MatchAll(pargs ...PositionalArgs) PositionalArgs { + return func(cmd *Command, args []string) error { + for _, parg := range pargs { + if err := parg(cmd, args); err != nil { + return err + } + } + return nil + } +} + +// ExactValidArgs returns an error if there are not exactly N positional args OR +// there are any positional args that are not in the `ValidArgs` field of `Command` +// +// Deprecated: use MatchAll(ExactArgs(n), OnlyValidArgs) instead +func ExactValidArgs(n int) PositionalArgs { + return MatchAll(ExactArgs(n), OnlyValidArgs) +} diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index 71061479..f4d198cb 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -1,3 +1,17 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package cobra import ( @@ -24,7 +38,7 @@ func writePreamble(buf io.StringWriter, name string) { WriteStringAndCheck(buf, fmt.Sprintf(` __%[1]s_debug() { - if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then + if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then echo "$*" >> "${BASH_COMP_DEBUG_FILE}" fi } @@ -71,9 +85,10 @@ __%[1]s_handle_go_custom_completion() local out requestComp lastParam lastChar comp directive args # Prepare the command to request completions for the program. - # Calling ${words[0]} instead of directly %[1]s allows to handle aliases + # Calling ${words[0]} instead of directly %[1]s allows handling aliases args=("${words[@]:1}") - requestComp="${words[0]} %[2]s ${args[*]}" + # Disable ActiveHelp which is not supported for bash completion v1 + requestComp="%[8]s=0 ${words[0]} %[2]s ${args[*]}" lastParam=${words[$((${#words[@]}-1))]} lastChar=${lastParam:$((${#lastParam}-1)):1} @@ -99,7 +114,7 @@ __%[1]s_handle_go_custom_completion() directive=0 fi __%[1]s_debug "${FUNCNAME[0]}: the completion directive is: ${directive}" - __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out[*]}" + __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out}" if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then # Error code. No completion. @@ -125,7 +140,7 @@ __%[1]s_handle_go_custom_completion() local fullFilter filter filteringCmd # Do not use quotes around the $out variable or else newline # characters will be kept. 
- for filter in ${out[*]}; do + for filter in ${out}; do fullFilter+="$filter|" done @@ -134,9 +149,9 @@ __%[1]s_handle_go_custom_completion() $filteringCmd elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then # File completion for directories only - local subDir + local subdir # Use printf to strip any trailing newline - subdir=$(printf "%%s" "${out[0]}") + subdir=$(printf "%%s" "${out}") if [ -n "$subdir" ]; then __%[1]s_debug "Listing directories in $subdir" __%[1]s_handle_subdirs_in_dir_flag "$subdir" @@ -147,7 +162,7 @@ __%[1]s_handle_go_custom_completion() else while IFS='' read -r comp; do COMPREPLY+=("$comp") - done < <(compgen -W "${out[*]}" -- "$cur") + done < <(compgen -W "${out}" -- "$cur") fi } @@ -187,13 +202,19 @@ __%[1]s_handle_reply() PREFIX="" cur="${cur#*=}" ${flags_completion[${index}]} - if [ -n "${ZSH_VERSION}" ]; then + if [ -n "${ZSH_VERSION:-}" ]; then # zsh completion needs --flag= prefix eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )" fi fi fi - return 0; + + if [[ -z "${flag_parsing_disabled}" ]]; then + # If flag parsing is enabled, we have completed the flags and can return. + # If flag parsing is disabled, we may not know all (or any) of the flags, so we fallthrough + # to possibly call handle_go_custom_completion. + return 0; + fi ;; esac @@ -232,13 +253,13 @@ __%[1]s_handle_reply() fi if [[ ${#COMPREPLY[@]} -eq 0 ]]; then - if declare -F __%[1]s_custom_func >/dev/null; then - # try command name qualified custom func - __%[1]s_custom_func - else - # otherwise fall back to unqualified for compatibility - declare -F __custom_func >/dev/null && __custom_func - fi + if declare -F __%[1]s_custom_func >/dev/null; then + # try command name qualified custom func + __%[1]s_custom_func + else + # otherwise fall back to unqualified for compatibility + declare -F __custom_func >/dev/null && __custom_func + fi fi # available in bash-completion >= 2, not always present on macOS @@ -272,7 +293,7 @@ __%[1]s_handle_flag() # if a command required a flag, and we found it, unset must_have_one_flag() local flagname=${words[c]} - local flagvalue + local flagvalue="" # if the word contained an = if [[ ${words[c]} == *"="* ]]; then flagvalue=${flagname#*=} # take in as flagvalue after the = @@ -291,7 +312,7 @@ __%[1]s_handle_flag() # keep flag value with flagname as flaghash # flaghash variable is an associative array which is only supported in bash > 3. - if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then + if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then if [ -n "${flagvalue}" ] ; then flaghash[${flagname}]=${flagvalue} elif [ -n "${words[ $((c+1)) ]}" ] ; then @@ -303,7 +324,7 @@ __%[1]s_handle_flag() # skip the argument to a two word flag if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then - __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument" + __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument" c=$((c+1)) # if we are looking for a flags value, don't show commands if [[ $c -eq $cword ]]; then @@ -363,7 +384,7 @@ __%[1]s_handle_word() __%[1]s_handle_command elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then # aliashash variable is an associative array which is only supported in bash > 3. 
- if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then + if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then words[c]=${aliashash[${words[c]}]} __%[1]s_handle_command else @@ -377,14 +398,14 @@ __%[1]s_handle_word() `, name, ShellCompNoDescRequestCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs)) + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name))) } func writePostscript(buf io.StringWriter, name string) { - name = strings.Replace(name, ":", "__", -1) + name = strings.ReplaceAll(name, ":", "__") WriteStringAndCheck(buf, fmt.Sprintf("__start_%s()\n", name)) WriteStringAndCheck(buf, fmt.Sprintf(`{ - local cur prev words cword + local cur prev words cword split declare -A flaghash 2>/dev/null || : declare -A aliashash 2>/dev/null || : if declare -F _init_completion >/dev/null 2>&1; then @@ -394,17 +415,20 @@ func writePostscript(buf io.StringWriter, name string) { fi local c=0 + local flag_parsing_disabled= local flags=() local two_word_flags=() local local_nonpersistent_flags=() local flags_with_completion=() local flags_completion=() local commands=("%[1]s") + local command_aliases=() local must_have_one_flag=() local must_have_one_noun=() - local has_completion_function - local last_command + local has_completion_function="" + local last_command="" local nouns=() + local noun_aliases=() __%[1]s_handle_word } @@ -508,8 +532,10 @@ func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) { } } -// Setup annotations for go completions for registered flags +// prepareCustomAnnotationsForFlags setup annotations for go completions for registered flags func prepareCustomAnnotationsForFlags(cmd *Command) { + flagCompletionMutex.RLock() + defer flagCompletionMutex.RUnlock() for flag := range flagCompletionFunctions { // Make sure the completion script calls the __*_go_custom_completion function for // every registered flag. We need to do this here (and not when the flag was registered @@ -531,6 +557,11 @@ func writeFlags(buf io.StringWriter, cmd *Command) { flags_completion=() `) + + if cmd.DisableFlagParsing { + WriteStringAndCheck(buf, " flag_parsing_disabled=1\n") + } + localNonPersistentFlags := cmd.LocalNonPersistentFlags() cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { if nonCompletableFlag(flag) { @@ -566,19 +597,16 @@ func writeRequiredFlag(buf io.StringWriter, cmd *Command) { if nonCompletableFlag(flag) { return } - for key := range flag.Annotations { - switch key { - case BashCompOneRequiredFlag: - format := " must_have_one_flag+=(\"--%s" - if flag.Value.Type() != "bool" { - format += "=" - } - format += cbn - WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) + if _, ok := flag.Annotations[BashCompOneRequiredFlag]; ok { + format := " must_have_one_flag+=(\"--%s" + if flag.Value.Type() != "bool" { + format += "=" + } + format += cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) - if len(flag.Shorthand) > 0 { - WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) - } + if len(flag.Shorthand) > 0 { + WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) } } }) @@ -590,7 +618,7 @@ func writeRequiredNouns(buf io.StringWriter, cmd *Command) { for _, value := range cmd.ValidArgs { // Remove any description that may be included following a tab character. // Descriptions are not supported by bash completion. 
- value = strings.Split(value, "\t")[0] + value = strings.SplitN(value, "\t", 2)[0] WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) } if cmd.ValidArgsFunction != nil { @@ -605,7 +633,7 @@ func writeCmdAliases(buf io.StringWriter, cmd *Command) { sort.Strings(cmd.Aliases) - WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n")) + WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then`, "\n")) for _, value := range cmd.Aliases { WriteStringAndCheck(buf, fmt.Sprintf(" command_aliases+=(%q)\n", value)) WriteStringAndCheck(buf, fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name())) @@ -629,8 +657,8 @@ func gen(buf io.StringWriter, cmd *Command) { gen(buf, c) } commandName := cmd.CommandPath() - commandName = strings.Replace(commandName, " ", "_", -1) - commandName = strings.Replace(commandName, ":", "__", -1) + commandName = strings.ReplaceAll(commandName, " ", "_") + commandName = strings.ReplaceAll(commandName, ":", "__") if cmd.Root() == cmd { WriteStringAndCheck(buf, fmt.Sprintf("_%s_root_command()\n{\n", commandName)) diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md deleted file mode 100644 index 130f99b9..00000000 --- a/vendor/github.com/spf13/cobra/bash_completions.md +++ /dev/null @@ -1,91 +0,0 @@ -# Generating Bash Completions For Your cobra.Command - -Please refer to [Shell Completions](shell_completions.md) for details. - -## Bash legacy dynamic completions - -For backward compatibility, Cobra still supports its legacy dynamic completion solution (described below). Unlike the `ValidArgsFunction` solution, the legacy solution will only work for Bash shell-completion and not for other shells. This legacy solution can be used along-side `ValidArgsFunction` and `RegisterFlagCompletionFunc()`, as long as both solutions are not used for the same command. This provides a path to gradually migrate from the legacy solution to the new solution. - -The legacy solution allows you to inject bash functions into the bash completion script. Those bash functions are responsible for providing the completion choices for your own completions. - -Some code that works in kubernetes: - -```bash -const ( - bash_completion_func = `__kubectl_parse_get() -{ - local kubectl_output out - if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then - out=($(echo "${kubectl_output}" | awk '{print $1}')) - COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) ) - fi -} - -__kubectl_get_resource() -{ - if [[ ${#nouns[@]} -eq 0 ]]; then - return 1 - fi - __kubectl_parse_get ${nouns[${#nouns[@]} -1]} - if [[ $? -eq 0 ]]; then - return 0 - fi -} - -__kubectl_custom_func() { - case ${last_command} in - kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop) - __kubectl_get_resource - return - ;; - *) - ;; - esac -} -`) -``` - -And then I set that in my command definition: - -```go -cmds := &cobra.Command{ - Use: "kubectl", - Short: "kubectl controls the Kubernetes cluster manager", - Long: `kubectl controls the Kubernetes cluster manager. - -Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`, - Run: runHelp, - BashCompletionFunction: bash_completion_func, -} -``` - -The `BashCompletionFunction` option is really only valid/useful on the root command. 
Doing the above will cause `__kubectl_custom_func()` (`___custom_func()`) to be called when the built in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__kubectl_customc_func()` will run because the cobra.Command only understood "kubectl" and "get." `__kubectl_custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods! - -Similarly, for flags: - -```go - annotation := make(map[string][]string) - annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"} - - flag := &pflag.Flag{ - Name: "namespace", - Usage: usage, - Annotations: annotation, - } - cmd.Flags().AddFlag(flag) -``` - -In addition add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction` -value, e.g.: - -```bash -__kubectl_get_namespaces() -{ - local template - template="{{ range .items }}{{ .metadata.name }} {{ end }}" - local kubectl_out - if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then - COMPREPLY=( $( compgen -W "${kubectl_out}[*]" -- "$cur" ) ) - fi -} -``` diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go new file mode 100644 index 00000000..1cce5c32 --- /dev/null +++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go @@ -0,0 +1,396 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" +) + +func (c *Command) genBashCompletion(w io.Writer, includeDesc bool) error { + buf := new(bytes.Buffer) + genBashComp(buf, c.Name(), includeDesc) + _, err := buf.WriteTo(w) + return err +} + +func genBashComp(buf io.StringWriter, name string, includeDesc bool) { + compCmd := ShellCompRequestCmd + if !includeDesc { + compCmd = ShellCompNoDescRequestCmd + } + + WriteStringAndCheck(buf, fmt.Sprintf(`# bash completion V2 for %-36[1]s -*- shell-script -*- + +__%[1]s_debug() +{ + if [[ -n ${BASH_COMP_DEBUG_FILE-} ]]; then + echo "$*" >> "${BASH_COMP_DEBUG_FILE}" + fi +} + +# Macs have bash3 for which the bash-completion package doesn't include +# _init_completion. This is a minimal version of that function. +__%[1]s_init_completion() +{ + COMPREPLY=() + _get_comp_words_by_ref "$@" cur prev words cword +} + +# This function calls the %[1]s program to obtain the completion +# results and the directive. It fills the 'out' and 'directive' vars. +__%[1]s_get_completion_results() { + local requestComp lastParam lastChar args + + # Prepare the command to request completions for the program. 
+ # Calling ${words[0]} instead of directly %[1]s allows handling aliases + args=("${words[@]:1}") + requestComp="${words[0]} %[2]s ${args[*]}" + + lastParam=${words[$((${#words[@]}-1))]} + lastChar=${lastParam:$((${#lastParam}-1)):1} + __%[1]s_debug "lastParam ${lastParam}, lastChar ${lastChar}" + + if [[ -z ${cur} && ${lastChar} != = ]]; then + # If the last parameter is complete (there is a space following it) + # We add an extra empty parameter so we can indicate this to the go method. + __%[1]s_debug "Adding extra empty parameter" + requestComp="${requestComp} ''" + fi + + # When completing a flag with an = (e.g., %[1]s -n=) + # bash focuses on the part after the =, so we need to remove + # the flag part from $cur + if [[ ${cur} == -*=* ]]; then + cur="${cur#*=}" + fi + + __%[1]s_debug "Calling ${requestComp}" + # Use eval to handle any environment variables and such + out=$(eval "${requestComp}" 2>/dev/null) + + # Extract the directive integer at the very end of the output following a colon (:) + directive=${out##*:} + # Remove the directive + out=${out%%:*} + if [[ ${directive} == "${out}" ]]; then + # There is not directive specified + directive=0 + fi + __%[1]s_debug "The completion directive is: ${directive}" + __%[1]s_debug "The completions are: ${out}" +} + +__%[1]s_process_completion_results() { + local shellCompDirectiveError=%[3]d + local shellCompDirectiveNoSpace=%[4]d + local shellCompDirectiveNoFileComp=%[5]d + local shellCompDirectiveFilterFileExt=%[6]d + local shellCompDirectiveFilterDirs=%[7]d + local shellCompDirectiveKeepOrder=%[8]d + + if (((directive & shellCompDirectiveError) != 0)); then + # Error code. No completion. + __%[1]s_debug "Received error from custom completion go code" + return + else + if (((directive & shellCompDirectiveNoSpace) != 0)); then + if [[ $(type -t compopt) == builtin ]]; then + __%[1]s_debug "Activating no space" + compopt -o nospace + else + __%[1]s_debug "No space directive not supported in this version of bash" + fi + fi + if (((directive & shellCompDirectiveKeepOrder) != 0)); then + if [[ $(type -t compopt) == builtin ]]; then + # no sort isn't supported for bash less than < 4.4 + if [[ ${BASH_VERSINFO[0]} -lt 4 || ( ${BASH_VERSINFO[0]} -eq 4 && ${BASH_VERSINFO[1]} -lt 4 ) ]]; then + __%[1]s_debug "No sort directive not supported in this version of bash" + else + __%[1]s_debug "Activating keep order" + compopt -o nosort + fi + else + __%[1]s_debug "No sort directive not supported in this version of bash" + fi + fi + if (((directive & shellCompDirectiveNoFileComp) != 0)); then + if [[ $(type -t compopt) == builtin ]]; then + __%[1]s_debug "Activating no file completion" + compopt +o default + else + __%[1]s_debug "No file completion directive not supported in this version of bash" + fi + fi + fi + + # Separate activeHelp from normal completions + local completions=() + local activeHelp=() + __%[1]s_extract_activeHelp + + if (((directive & shellCompDirectiveFilterFileExt) != 0)); then + # File extension filtering + local fullFilter filter filteringCmd + + # Do not use quotes around the $completions variable or else newline + # characters will be kept. 
+ for filter in ${completions[*]}; do + fullFilter+="$filter|" + done + + filteringCmd="_filedir $fullFilter" + __%[1]s_debug "File filtering command: $filteringCmd" + $filteringCmd + elif (((directive & shellCompDirectiveFilterDirs) != 0)); then + # File completion for directories only + + local subdir + subdir=${completions[0]} + if [[ -n $subdir ]]; then + __%[1]s_debug "Listing directories in $subdir" + pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return + else + __%[1]s_debug "Listing directories in ." + _filedir -d + fi + else + __%[1]s_handle_completion_types + fi + + __%[1]s_handle_special_char "$cur" : + __%[1]s_handle_special_char "$cur" = + + # Print the activeHelp statements before we finish + if ((${#activeHelp[*]} != 0)); then + printf "\n"; + printf "%%s\n" "${activeHelp[@]}" + printf "\n" + + # The prompt format is only available from bash 4.4. + # We test if it is available before using it. + if (x=${PS1@P}) 2> /dev/null; then + printf "%%s" "${PS1@P}${COMP_LINE[@]}" + else + # Can't print the prompt. Just print the + # text the user had typed, it is workable enough. + printf "%%s" "${COMP_LINE[@]}" + fi + fi +} + +# Separate activeHelp lines from real completions. +# Fills the $activeHelp and $completions arrays. +__%[1]s_extract_activeHelp() { + local activeHelpMarker="%[9]s" + local endIndex=${#activeHelpMarker} + + while IFS='' read -r comp; do + if [[ ${comp:0:endIndex} == $activeHelpMarker ]]; then + comp=${comp:endIndex} + __%[1]s_debug "ActiveHelp found: $comp" + if [[ -n $comp ]]; then + activeHelp+=("$comp") + fi + else + # Not an activeHelp line but a normal completion + completions+=("$comp") + fi + done <<<"${out}" +} + +__%[1]s_handle_completion_types() { + __%[1]s_debug "__%[1]s_handle_completion_types: COMP_TYPE is $COMP_TYPE" + + case $COMP_TYPE in + 37|42) + # Type: menu-complete/menu-complete-backward and insert-completions + # If the user requested inserting one completion at a time, or all + # completions at once on the command-line we must remove the descriptions. 
+ # https://github.com/spf13/cobra/issues/1508 + local tab=$'\t' comp + while IFS='' read -r comp; do + [[ -z $comp ]] && continue + # Strip any description + comp=${comp%%%%$tab*} + # Only consider the completions that match + if [[ $comp == "$cur"* ]]; then + COMPREPLY+=("$comp") + fi + done < <(printf "%%s\n" "${completions[@]}") + ;; + + *) + # Type: complete (normal completion) + __%[1]s_handle_standard_completion_case + ;; + esac +} + +__%[1]s_handle_standard_completion_case() { + local tab=$'\t' comp + + # Short circuit to optimize if we don't have descriptions + if [[ "${completions[*]}" != *$tab* ]]; then + IFS=$'\n' read -ra COMPREPLY -d '' < <(compgen -W "${completions[*]}" -- "$cur") + return 0 + fi + + local longest=0 + local compline + # Look for the longest completion so that we can format things nicely + while IFS='' read -r compline; do + [[ -z $compline ]] && continue + # Strip any description before checking the length + comp=${compline%%%%$tab*} + # Only consider the completions that match + [[ $comp == "$cur"* ]] || continue + COMPREPLY+=("$compline") + if ((${#comp}>longest)); then + longest=${#comp} + fi + done < <(printf "%%s\n" "${completions[@]}") + + # If there is a single completion left, remove the description text + if ((${#COMPREPLY[*]} == 1)); then + __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}" + comp="${COMPREPLY[0]%%%%$tab*}" + __%[1]s_debug "Removed description from single completion, which is now: ${comp}" + COMPREPLY[0]=$comp + else # Format the descriptions + __%[1]s_format_comp_descriptions $longest + fi +} + +__%[1]s_handle_special_char() +{ + local comp="$1" + local char=$2 + if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then + local word=${comp%%"${comp##*${char}}"} + local idx=${#COMPREPLY[*]} + while ((--idx >= 0)); do + COMPREPLY[idx]=${COMPREPLY[idx]#"$word"} + done + fi +} + +__%[1]s_format_comp_descriptions() +{ + local tab=$'\t' + local comp desc maxdesclength + local longest=$1 + + local i ci + for ci in ${!COMPREPLY[*]}; do + comp=${COMPREPLY[ci]} + # Properly format the description string which follows a tab character if there is one + if [[ "$comp" == *$tab* ]]; then + __%[1]s_debug "Original comp: $comp" + desc=${comp#*$tab} + comp=${comp%%%%$tab*} + + # $COLUMNS stores the current shell width. + # Remove an extra 4 because we add 2 spaces and 2 parentheses. + maxdesclength=$(( COLUMNS - longest - 4 )) + + # Make sure we can fit a description of at least 8 characters + # if we are to align the descriptions. 
+ if ((maxdesclength > 8)); then + # Add the proper number of spaces to align the descriptions + for ((i = ${#comp} ; i < longest ; i++)); do + comp+=" " + done + else + # Don't pad the descriptions so we can fit more text after the completion + maxdesclength=$(( COLUMNS - ${#comp} - 4 )) + fi + + # If there is enough space for any description text, + # truncate the descriptions that are too long for the shell width + if ((maxdesclength > 0)); then + if ((${#desc} > maxdesclength)); then + desc=${desc:0:$(( maxdesclength - 1 ))} + desc+="…" + fi + comp+=" ($desc)" + fi + COMPREPLY[ci]=$comp + __%[1]s_debug "Final comp: $comp" + fi + done +} + +__start_%[1]s() +{ + local cur prev words cword split + + COMPREPLY=() + + # Call _init_completion from the bash-completion package + # to prepare the arguments properly + if declare -F _init_completion >/dev/null 2>&1; then + _init_completion -n =: || return + else + __%[1]s_init_completion -n =: || return + fi + + __%[1]s_debug + __%[1]s_debug "========= starting completion logic ==========" + __%[1]s_debug "cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}, cword is $cword" + + # The user could have moved the cursor backwards on the command-line. + # We need to trigger completion from the $cword location, so we need + # to truncate the command-line ($words) up to the $cword location. + words=("${words[@]:0:$cword+1}") + __%[1]s_debug "Truncated words[*]: ${words[*]}," + + local out directive + __%[1]s_get_completion_results + __%[1]s_process_completion_results +} + +if [[ $(type -t compopt) = "builtin" ]]; then + complete -o default -F __start_%[1]s %[1]s +else + complete -o default -o nospace -F __start_%[1]s %[1]s +fi + +# ex: ts=4 sw=4 et filetype=sh +`, name, compCmd, + ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, + activeHelpMarker)) +} + +// GenBashCompletionFileV2 generates Bash completion version 2. +func (c *Command) GenBashCompletionFileV2(filename string, includeDesc bool) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return c.GenBashCompletionV2(outFile, includeDesc) +} + +// GenBashCompletionV2 generates Bash completion file version 2 +// and writes it to the passed writer. +func (c *Command) GenBashCompletionV2(w io.Writer, includeDesc bool) error { + return c.genBashCompletion(w, includeDesc) +} diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go index d6cbfd71..e0b0947b 100644 --- a/vendor/github.com/spf13/cobra/cobra.go +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -1,9 +1,10 @@ -// Copyright © 2013 Steve Francia . +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 +// +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -39,15 +40,30 @@ var templateFuncs = template.FuncMap{ } var initializers []func() +var finalizers []func() -// EnablePrefixMatching allows to set automatic prefix matching. 
Automatic prefix matching can be a dangerous thing +const ( + defaultPrefixMatching = false + defaultCommandSorting = true + defaultCaseInsensitive = false + defaultTraverseRunHooks = false +) + +// EnablePrefixMatching allows setting automatic prefix matching. Automatic prefix matching can be a dangerous thing // to automatically enable in CLI tools. // Set this to true to enable it. -var EnablePrefixMatching = false +var EnablePrefixMatching = defaultPrefixMatching // EnableCommandSorting controls sorting of the slice of commands, which is turned on by default. // To disable sorting, set it to false. -var EnableCommandSorting = true +var EnableCommandSorting = defaultCommandSorting + +// EnableCaseInsensitive allows case-insensitive commands names. (case sensitive by default) +var EnableCaseInsensitive = defaultCaseInsensitive + +// EnableTraverseRunHooks executes persistent pre-run and post-run hooks from all parents. +// By default this is disabled, which means only the first run hook to be found is executed. +var EnableTraverseRunHooks = defaultTraverseRunHooks // MousetrapHelpText enables an information splash screen on Windows // if the CLI is started from explorer.exe. @@ -84,6 +100,12 @@ func OnInitialize(y ...func()) { initializers = append(initializers, y...) } +// OnFinalize sets the passed functions to be run when each command's +// Execute method is terminated. +func OnFinalize(y ...func()) { + finalizers = append(finalizers, y...) +} + // FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. // Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans, @@ -150,8 +172,8 @@ func appendIfNotPresent(s, stringToAppend string) string { // rpad adds padding to the right of a string. func rpad(s string, padding int) string { - template := fmt.Sprintf("%%-%ds", padding) - return fmt.Sprintf(template, s) + formattedString := fmt.Sprintf("%%-%ds", padding) + return fmt.Sprintf(formattedString, s) } // tmpl executes the given template text on data, writing the result to w. @@ -171,8 +193,6 @@ func ld(s, t string, ignoreCase bool) int { d := make([][]int, len(s)+1) for i := range d { d[i] = make([]int, len(t)+1) - } - for i := range d { d[i][0] = i } for j := range d[0] { diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index d6732ad1..54748fc6 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -1,9 +1,10 @@ -// Copyright © 2013 Steve Francia . +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 +// +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,6 +19,7 @@ package cobra import ( "bytes" "context" + "errors" "fmt" "io" "os" @@ -28,16 +30,27 @@ import ( flag "github.com/spf13/pflag" ) +const ( + FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra" + CommandDisplayNameAnnotation = "cobra_annotation_command_display_name" +) + // FParseErrWhitelist configures Flag parse errors to be ignored type FParseErrWhitelist flag.ParseErrorsWhitelist +// Group Structure to manage groups for commands +type Group struct { + ID string + Title string +} + // Command is just that, a command for your application. // E.g. 'go run ...' - 'run' is the command. Cobra requires // you to define the usage and description as part of your command // definition to ensure usability. type Command struct { // Use is the one-line usage message. - // Recommended syntax is as follow: + // Recommended syntax is as follows: // [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required. // ... indicates that you can specify multiple values for the previous argument. // | indicates mutually exclusive information. You can use the argument to the left of the separator or the @@ -57,15 +70,18 @@ type Command struct { // Short is the short description shown in the 'help' output. Short string + // The group id under which this subcommand is grouped in the 'help' output of its parent. + GroupID string + // Long is the long message shown in the 'help ' output. Long string // Example is examples of how to use the command. Example string - // ValidArgs is list of all valid non-flag arguments that are accepted in bash completions + // ValidArgs is list of all valid non-flag arguments that are accepted in shell completions ValidArgs []string - // ValidArgsFunction is an optional function that provides valid non-flag arguments for bash completion. + // ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion. // It is a dynamic version of using ValidArgs. // Only one of ValidArgs and ValidArgsFunction can be used for a command. ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) @@ -74,18 +90,19 @@ type Command struct { Args PositionalArgs // ArgAliases is List of aliases for ValidArgs. - // These are not suggested to the user in the bash completion, + // These are not suggested to the user in the shell completion, // but accepted if entered manually. ArgAliases []string - // BashCompletionFunction is custom functions used by the bash autocompletion generator. + // BashCompletionFunction is custom bash functions used by the legacy bash autocompletion generator. + // For portability with other shells, it is recommended to instead use ValidArgsFunction BashCompletionFunction string // Deprecated defines, if this command is deprecated and should print this string when used. Deprecated string // Annotations are key/value pairs that can be used by applications to identify or - // group commands. + // group commands or set special options. Annotations map[string]string // Version defines the version for this command. 
If this value is non-empty and the command does not @@ -101,6 +118,8 @@ type Command struct { // * PostRun() // * PersistentPostRun() // All functions get the same args, the arguments after the command name. + // The *PreRun and *PostRun functions will only be executed if the Run function of the current + // command has been declared. // // PersistentPreRun: children of this command will inherit and execute. PersistentPreRun func(cmd *Command, args []string) @@ -123,6 +142,9 @@ type Command struct { // PersistentPostRunE: PersistentPostRun but returns an error. PersistentPostRunE func(cmd *Command, args []string) error + // groups for subcommands + commandgroups []*Group + // args is actual args parsed from flags. args []string // flagErrorBuf contains all error messages from pflag. @@ -132,8 +154,10 @@ type Command struct { // pflags contains persistent flags. pflags *flag.FlagSet // lflags contains local flags. + // This field does not represent internal state, it's used as a cache to optimise LocalFlags function call lflags *flag.FlagSet // iflags contains inherited flags. + // This field does not represent internal state, it's used as a cache to optimise InheritedFlags function call iflags *flag.FlagSet // parentsPflags is all persistent flags of cmd's parents. parentsPflags *flag.FlagSet @@ -155,9 +179,18 @@ type Command struct { // helpCommand is command with usage 'help'. If it's not defined by user, // cobra uses default help command. helpCommand *Command + // helpCommandGroupID is the group id for the helpCommand + helpCommandGroupID string + + // completionCommandGroupID is the group id for the completion command + completionCommandGroupID string + // versionTemplate is the version template defined by user. versionTemplate string + // errPrefix is the error message prefix defined by user. + errPrefix string + // inReader is a reader defined by the user that replaces stdin inReader io.Reader // outWriter is a writer defined by the user that replaces stdout @@ -165,9 +198,12 @@ type Command struct { // errWriter is a writer defined by the user that replaces stderr errWriter io.Writer - //FParseErrWhitelist flag parse errors to be ignored + // FParseErrWhitelist flag parse errors to be ignored FParseErrWhitelist FParseErrWhitelist + // CompletionOptions is a set of options to control the handling of shell completion + CompletionOptions CompletionOptions + // commandsAreSorted defines, if command slice are sorted or not. commandsAreSorted bool // commandCalledAs is the name or alias value used to call this command. @@ -220,12 +256,23 @@ type Command struct { SuggestionsMinimumDistance int } -// Context returns underlying command context. If command wasn't -// executed with ExecuteContext Context returns Background context. +// Context returns underlying command context. If command was executed +// with ExecuteContext or the context was set with SetContext, the +// previously set context will be returned. Otherwise, nil is returned. +// +// Notice that a call to Execute and ExecuteC will replace a nil context of +// a command with a context.Background, so a background context will be +// returned by Context after one of these functions has been called. func (c *Command) Context() context.Context { return c.ctx } +// SetContext sets context for the command. This context will be overwritten by +// Command.ExecuteContext or Command.ExecuteContextC. +func (c *Command) SetContext(ctx context.Context) { + c.ctx = ctx +} + // SetArgs sets arguments for the command. 
It is set to os.Args[1:] by default, if desired, can be overridden // particularly useful when testing. func (c *Command) SetArgs(a []string) { @@ -284,6 +331,21 @@ func (c *Command) SetHelpCommand(cmd *Command) { c.helpCommand = cmd } +// SetHelpCommandGroupID sets the group id of the help command. +func (c *Command) SetHelpCommandGroupID(groupID string) { + if c.helpCommand != nil { + c.helpCommand.GroupID = groupID + } + // helpCommandGroupID is used if no helpCommand is defined by the user + c.helpCommandGroupID = groupID +} + +// SetCompletionCommandGroupID sets the group id of the completion command. +func (c *Command) SetCompletionCommandGroupID(groupID string) { + // completionCommandGroupID is used if no completion command is defined by the user + c.Root().completionCommandGroupID = groupID +} + // SetHelpTemplate sets help template to be used. Application can use it to set custom template. func (c *Command) SetHelpTemplate(s string) { c.helpTemplate = s @@ -294,6 +356,11 @@ func (c *Command) SetVersionTemplate(s string) { c.versionTemplate = s } +// SetErrPrefix sets error message prefix to be used. Application can use it to set custom prefix. +func (c *Command) SetErrPrefix(s string) { + c.errPrefix = s +} + // SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands. // The user should not have a cyclic dependency on commands. func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) { @@ -492,10 +559,16 @@ Aliases: {{.NameAndAliases}}{{end}}{{if .HasExample}} Examples: -{{.Example}}{{end}}{{if .HasAvailableSubCommands}} +{{.Example}}{{end}}{{if .HasAvailableSubCommands}}{{$cmds := .Commands}}{{if eq (len .Groups) 0}} -Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} +Available Commands:{{range $cmds}}{{if (or .IsAvailableCommand (eq .Name "help"))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{else}}{{range $group := .Groups}} + +{{.Title}}{{range $cmds}}{{if (and (eq .GroupID $group.ID) (or .IsAvailableCommand (eq .Name "help")))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if not .AllChildCommandsHaveGroup}} + +Additional Commands:{{range $cmds}}{{if (and (eq .GroupID "") (or .IsAvailableCommand (eq .Name "help")))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} Flags: {{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}} @@ -537,6 +610,18 @@ func (c *Command) VersionTemplate() string { ` } +// ErrPrefix return error message prefix for the command +func (c *Command) ErrPrefix() string { + if c.errPrefix != "" { + return c.errPrefix + } + + if c.HasParent() { + return c.parent.ErrPrefix() + } + return "Error:" +} + func hasNoOptDefVal(name string, fs *flag.FlagSet) bool { flag := fs.Lookup(name) if flag == nil { @@ -597,20 +682,44 @@ Loop: // argsMinusFirstX removes only the first x from args. Otherwise, commands that look like // openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]). -func argsMinusFirstX(args []string, x string) []string { - for i, y := range args { - if x == y { - ret := []string{} - ret = append(ret, args[:i]...) - ret = append(ret, args[i+1:]...) - return ret +// Special care needs to be taken not to remove a flag value. 
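The rewrite of argsMinusFirstX that follows is what makes that warning hold: it walks the arguments flag-aware instead of dropping the first textual match. A standalone sketch of the failure mode being fixed (illustrative only, not Cobra's code; the `--user` flag and the `admin` values are invented):

```go
package main

import "fmt"

// naiveRemoveFirst mimics the old behavior: drop the first element equal
// to x, with no awareness of flags.
func naiveRemoveFirst(args []string, x string) []string {
	for i, y := range args {
		if y == x {
			return append(append([]string{}, args[:i]...), args[i+1:]...)
		}
	}
	return args
}

func main() {
	// Command line: app --user admin admin
	// The first "admin" is the value of --user; only the second one is the
	// subcommand name that should be stripped.
	args := []string{"--user", "admin", "admin"}
	fmt.Println(naiveRemoveFirst(args, "admin")) // [--user admin] -- the flag value was eaten
}
```

The new implementation therefore stops at `--`, skips the value slot of any flag that has no no-option default, and only then compares against the subcommand name.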
+func (c *Command) argsMinusFirstX(args []string, x string) []string { + if len(args) == 0 { + return args + } + c.mergePersistentFlags() + flags := c.Flags() + +Loop: + for pos := 0; pos < len(args); pos++ { + s := args[pos] + switch { + case s == "--": + // -- means we have reached the end of the parseable args. Break out of the loop now. + break Loop + case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags): + fallthrough + case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags): + // This is a flag without a default value, and an equal sign is not used. Increment pos in order to skip + // over the next arg, because that is the value of this flag. + pos++ + continue + case !strings.HasPrefix(s, "-"): + // This is not a flag or a flag value. Check to see if it matches what we're looking for, and if so, + // return the args, excluding the one at this position. + if s == x { + ret := make([]string, 0, len(args)-1) + ret = append(ret, args[:pos]...) + ret = append(ret, args[pos+1:]...) + return ret + } } } return args } func isFlagArg(arg string) bool { - return ((len(arg) >= 3 && arg[1] == '-') || + return ((len(arg) >= 3 && arg[0:2] == "--") || (len(arg) >= 2 && arg[0] == '-' && arg[1] != '-')) } @@ -628,7 +737,7 @@ func (c *Command) Find(args []string) (*Command, []string, error) { cmd := c.findNext(nextSubCmd) if cmd != nil { - return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd)) + return innerfind(cmd, c.argsMinusFirstX(innerArgs, nextSubCmd)) } return c, innerArgs } @@ -647,20 +756,20 @@ func (c *Command) findSuggestions(arg string) string { if c.SuggestionsMinimumDistance <= 0 { c.SuggestionsMinimumDistance = 2 } - suggestionsString := "" + var sb strings.Builder if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 { - suggestionsString += "\n\nDid you mean this?\n" + sb.WriteString("\n\nDid you mean this?\n") for _, s := range suggestions { - suggestionsString += fmt.Sprintf("\t%v\n", s) + _, _ = fmt.Fprintf(&sb, "\t%v\n", s) } } - return suggestionsString + return sb.String() } func (c *Command) findNext(next string) *Command { matches := make([]*Command, 0) for _, cmd := range c.commands { - if cmd.Name() == next || cmd.HasAlias(next) { + if commandNameMatches(cmd.Name(), next) || cmd.HasAlias(next) { cmd.commandCalledAs.name = next return cmd } @@ -670,7 +779,9 @@ func (c *Command) findNext(next string) *Command { } if len(matches) == 1 { - return matches[0] + // Temporarily disable gosec G602, which produces a false positive. + // See https://github.com/securego/gosec/issues/1005. + return matches[0] // #nosec G602 } return nil @@ -764,7 +875,7 @@ func (c *Command) ArgsLenAtDash() int { func (c *Command) execute(a []string) (err error) { if c == nil { - return fmt.Errorf("Called Execute() on a nil Command") + return fmt.Errorf("called Execute() on a nil Command") } if len(c.Deprecated) > 0 { @@ -817,6 +928,8 @@ func (c *Command) execute(a []string) (err error) { c.preRun() + defer c.postRun() + argWoFlags := c.Flags().Args() if c.DisableFlagParsing { argWoFlags = a @@ -826,15 +939,31 @@ func (c *Command) execute(a []string) (err error) { return err } + parents := make([]*Command, 0, 5) for p := c; p != nil; p = p.Parent() { + if EnableTraverseRunHooks { + // When EnableTraverseRunHooks is set: + // - Execute all persistent pre-runs from the root parent till this command. + // - Execute all persistent post-runs from this command till the root parent. 
+ parents = append([]*Command{p}, parents...) + } else { + // Otherwise, execute only the first found persistent hook. + parents = append(parents, p) + } + } + for _, p := range parents { if p.PersistentPreRunE != nil { if err := p.PersistentPreRunE(c, argWoFlags); err != nil { return err } - break + if !EnableTraverseRunHooks { + break + } } else if p.PersistentPreRun != nil { p.PersistentPreRun(c, argWoFlags) - break + if !EnableTraverseRunHooks { + break + } } } if c.PreRunE != nil { @@ -845,9 +974,13 @@ func (c *Command) execute(a []string) (err error) { c.PreRun(c, argWoFlags) } - if err := c.validateRequiredFlags(); err != nil { + if err := c.ValidateRequiredFlags(); err != nil { return err } + if err := c.ValidateFlagGroups(); err != nil { + return err + } + if c.RunE != nil { if err := c.RunE(c, argWoFlags); err != nil { return err @@ -867,10 +1000,14 @@ func (c *Command) execute(a []string) (err error) { if err := p.PersistentPostRunE(c, argWoFlags); err != nil { return err } - break + if !EnableTraverseRunHooks { + break + } } else if p.PersistentPostRun != nil { p.PersistentPostRun(c, argWoFlags) - break + if !EnableTraverseRunHooks { + break + } } } @@ -883,8 +1020,15 @@ func (c *Command) preRun() { } } +func (c *Command) postRun() { + for _, x := range finalizers { + x() + } +} + // ExecuteContext is the same as Execute(), but sets the ctx on the command. -// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle functions. +// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs +// functions. func (c *Command) ExecuteContext(ctx context.Context) error { c.ctx = ctx return c.Execute() @@ -898,6 +1042,14 @@ func (c *Command) Execute() error { return err } +// ExecuteContextC is the same as ExecuteC(), but sets the ctx on the command. +// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs +// functions. +func (c *Command) ExecuteContextC(ctx context.Context) (*Command, error) { + c.ctx = ctx + return c.ExecuteC() +} + // ExecuteC executes the command. 
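Taken together, the hunks above change the run path in two user-visible ways: OnFinalize callbacks now fire through the deferred postRun after every execution, and the opt-in EnableTraverseRunHooks runs persistent pre-run hooks parent-first (and post-run hooks child-first) along the whole chain instead of stopping at the first hook found. A minimal sketch, assuming a toy `app`/`child` command pair:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	// Opt in: run every persistent pre/post-run hook on the chain,
	// not only the first one found (the default remains false).
	cobra.EnableTraverseRunHooks = true

	// OnFinalize mirrors OnInitialize: registered functions run after
	// a command's Run has finished.
	cobra.OnFinalize(func() { fmt.Println("cleanup") })

	root := &cobra.Command{
		Use:              "app",
		PersistentPreRun: func(cmd *cobra.Command, args []string) { fmt.Println("root pre") },
	}
	child := &cobra.Command{
		Use:              "child",
		PersistentPreRun: func(cmd *cobra.Command, args []string) { fmt.Println("child pre") },
		Run:              func(cmd *cobra.Command, args []string) { fmt.Println("run") },
	}
	root.AddCommand(child)
	root.SetArgs([]string{"child"})
	_ = root.Execute() // root pre, child pre, run, cleanup
}
```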
func (c *Command) ExecuteC() (cmd *Command, err error) { if c.ctx == nil { @@ -914,9 +1066,14 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { preExecHookFn(c) } - // initialize help as the last point possible to allow for user - // overriding + // initialize help at the last point to allow for user overriding c.InitDefaultHelpCmd() + // initialize completion at the last point to allow for user overriding + c.InitDefaultCompletionCmd() + + // Now that all commands have been created, let's make sure all groups + // are properly created also + c.checkCommandGroups() args := c.args @@ -925,7 +1082,7 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { args = os.Args[1:] } - // initialize the hidden command to be used for bash completion + // initialize the hidden command to be used for shell completion c.initCompleteCmd(args) var flags []string @@ -940,7 +1097,7 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { c = cmd } if !c.SilenceErrors { - c.PrintErrln("Error:", err.Error()) + c.PrintErrln(c.ErrPrefix(), err.Error()) c.PrintErrf("Run '%v --help' for usage.\n", c.CommandPath()) } return c, err @@ -961,7 +1118,7 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { if err != nil { // Always show help if requested, even if SilenceErrors is in // effect - if err == flag.ErrHelp { + if errors.Is(err, flag.ErrHelp) { cmd.HelpFunc()(cmd, args) return cmd, nil } @@ -969,7 +1126,7 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { // If root command has SilenceErrors flagged, // all subcommands should respect it if !cmd.SilenceErrors && !c.SilenceErrors { - c.PrintErrln("Error:", err.Error()) + c.PrintErrln(cmd.ErrPrefix(), err.Error()) } // If root command has SilenceUsage flagged, @@ -983,12 +1140,13 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { func (c *Command) ValidateArgs(args []string) error { if c.Args == nil { - return nil + return ArbitraryArgs(c, args) } return c.Args(c, args) } -func (c *Command) validateRequiredFlags() error { +// ValidateRequiredFlags validates all required flags are present and returns an error otherwise +func (c *Command) ValidateRequiredFlags() error { if c.DisableFlagParsing { return nil } @@ -1011,6 +1169,19 @@ func (c *Command) validateRequiredFlags() error { return nil } +// checkCommandGroups checks if a command has been added to a group that does not exist. +// If so, we panic because it indicates a coding error that should be corrected. +func (c *Command) checkCommandGroups() { + for _, sub := range c.commands { + // if Group is not defined let the developer know right away + if sub.GroupID != "" && !c.ContainsGroup(sub.GroupID) { + panic(fmt.Sprintf("group id '%s' is not defined for subcommand '%s'", sub.GroupID, sub.CommandPath())) + } + + sub.checkCommandGroups() + } +} + // InitDefaultHelpFlag adds default help flag to c. // It is called automatically by executing the c or by calling help and usage. // If c already has help flag, it will do nothing.
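checkCommandGroups above turns a dangling GroupID into a panic at Execute() time instead of a silently mis-grouped help screen. A sketch of the wiring it validates (the `mgmt` id and title are invented):

```go
package main

import "github.com/spf13/cobra"

func main() {
	root := &cobra.Command{Use: "app"}

	// Every GroupID referenced by a subcommand must be declared on the
	// parent, or ExecuteC panics via checkCommandGroups.
	root.AddGroup(&cobra.Group{ID: "mgmt", Title: "Management Commands:"})

	root.AddCommand(&cobra.Command{
		Use:     "start",
		Short:   "Start the service",
		GroupID: "mgmt",
		Run:     func(cmd *cobra.Command, args []string) {},
	})

	// The built-in help and completion commands can be grouped too.
	root.SetHelpCommandGroupID("mgmt")
	root.SetCompletionCommandGroupID("mgmt")

	_ = root.Execute()
}
```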
@@ -1018,12 +1189,14 @@ func (c *Command) InitDefaultHelpFlag() { c.mergePersistentFlags() if c.Flags().Lookup("help") == nil { usage := "help for " - if c.Name() == "" { + name := c.displayName() + if name == "" { usage += "this command" } else { - usage += c.Name() + usage += name } c.Flags().BoolP("help", "h", false, usage) + _ = c.Flags().SetAnnotation("help", FlagSetByCobraAnnotation, []string{"true"}) } } @@ -1049,6 +1222,7 @@ func (c *Command) InitDefaultVersionFlag() { } else { c.Flags().Bool("version", false, usage) } + _ = c.Flags().SetAnnotation("version", FlagSetByCobraAnnotation, []string{"true"}) } } @@ -1065,7 +1239,7 @@ func (c *Command) InitDefaultHelpCmd() { Use: "help [command]", Short: "Help about any command", Long: `Help provides help for any command in the application. -Simply type ` + c.Name() + ` help [path to command] for full details.`, +Simply type ` + c.displayName() + ` help [path to command] for full details.`, ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]string, ShellCompDirective) { var completions []string cmd, _, e := c.Root().Find(args) @@ -1091,10 +1265,12 @@ Simply type ` + c.Name() + ` help [path to command] for full details.`, c.Printf("Unknown help topic %#q\n", args) CheckErr(c.Root().Usage()) } else { - cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown + cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown + cmd.InitDefaultVersionFlag() // make possible 'version' flag to be shown CheckErr(cmd.Help()) } }, + GroupID: c.helpCommandGroupID, } } c.RemoveCommand(c.helpCommand) @@ -1155,6 +1331,36 @@ func (c *Command) AddCommand(cmds ...*Command) { } } +// Groups returns a slice of child command groups. +func (c *Command) Groups() []*Group { + return c.commandgroups +} + +// AllChildCommandsHaveGroup returns if all subcommands are assigned to a group +func (c *Command) AllChildCommandsHaveGroup() bool { + for _, sub := range c.commands { + if (sub.IsAvailableCommand() || sub == c.helpCommand) && sub.GroupID == "" { + return false + } + } + return true +} + +// ContainsGroup return if groupID exists in the list of command groups. +func (c *Command) ContainsGroup(groupID string) bool { + for _, x := range c.commandgroups { + if x.ID == groupID { + return true + } + } + return false +} + +// AddGroup adds one or more command groups to this parent command. +func (c *Command) AddGroup(groups ...*Group) { + c.commandgroups = append(c.commandgroups, groups...) +} + // RemoveCommand removes one or more commands from a parent command. func (c *Command) RemoveCommand(cmds ...*Command) { commands := []*Command{} @@ -1224,16 +1430,24 @@ func (c *Command) CommandPath() string { if c.HasParent() { return c.Parent().CommandPath() + " " + c.Name() } + return c.displayName() +} + +func (c *Command) displayName() string { + if displayName, ok := c.Annotations[CommandDisplayNameAnnotation]; ok { + return displayName + } return c.Name() } // UseLine puts out the full usage for a given command (including parents). func (c *Command) UseLine() string { var useline string + use := strings.Replace(c.Use, c.Name(), c.displayName(), 1) if c.HasParent() { - useline = c.parent.CommandPath() + " " + c.Use + useline = c.parent.CommandPath() + " " + use } else { - useline = c.Use + useline = use } if c.DisableFlagsInUseLine { return useline @@ -1298,7 +1512,7 @@ func (c *Command) Name() string { // HasAlias determines if a given string is an alias of the command. 
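The c.Name() to c.displayName() substitutions running through this file let a binary present itself under another name in help, usage, and error output, keyed off the new CommandDisplayNameAnnotation. A brief sketch (the display name is invented; the mechanism targets the kubectl-plugin style of invocation):

```go
package main

import "github.com/spf13/cobra"

func main() {
	root := &cobra.Command{
		Use: "app",
		Annotations: map[string]string{
			// Usage and help now render "kubectl app" wherever the bare
			// command name would otherwise appear.
			cobra.CommandDisplayNameAnnotation: "kubectl app",
		},
		Run: func(cmd *cobra.Command, args []string) {},
	}
	_ = root.Execute()
}
```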
func (c *Command) HasAlias(s string) bool { for _, a := range c.Aliases { - if a == s { + if commandNameMatches(a, s) { return true } } @@ -1435,7 +1649,7 @@ func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) f // to this command (local and persistent declared here and by all parents). func (c *Command) Flags() *flag.FlagSet { if c.flags == nil { - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1446,10 +1660,11 @@ func (c *Command) Flags() *flag.FlagSet { } // LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands. +// This function does not modify the flags of the current command, its purpose is to return the current state. func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { persistentFlags := c.PersistentFlags() - out := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + out := flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.LocalFlags().VisitAll(func(f *flag.Flag) { if persistentFlags.Lookup(f.Name) == nil { out.AddFlag(f) @@ -1459,11 +1674,12 @@ func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { } // LocalFlags returns the local FlagSet specifically set in the current command. +// This function does not modify the flags of the current command, its purpose is to return the current state. func (c *Command) LocalFlags() *flag.FlagSet { c.mergePersistentFlags() if c.lflags == nil { - c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.lflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1475,7 +1691,8 @@ func (c *Command) LocalFlags() *flag.FlagSet { } addToLocal := func(f *flag.Flag) { - if c.lflags.Lookup(f.Name) == nil && c.parentsPflags.Lookup(f.Name) == nil { + // Add the flag if it is not a parent PFlag, or it shadows a parent PFlag + if c.lflags.Lookup(f.Name) == nil && f != c.parentsPflags.Lookup(f.Name) { c.lflags.AddFlag(f) } } @@ -1485,11 +1702,12 @@ func (c *Command) LocalFlags() *flag.FlagSet { } // InheritedFlags returns all flags which were inherited from parent commands. +// This function does not modify the flags of the current command, its purpose is to return the current state. func (c *Command) InheritedFlags() *flag.FlagSet { c.mergePersistentFlags() if c.iflags == nil { - c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.iflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1510,6 +1728,7 @@ func (c *Command) InheritedFlags() *flag.FlagSet { } // NonInheritedFlags returns all flags which were not inherited from parent commands. +// This function does not modify the flags of the current command, its purpose is to return the current state. func (c *Command) NonInheritedFlags() *flag.FlagSet { return c.LocalFlags() } @@ -1517,7 +1736,7 @@ func (c *Command) NonInheritedFlags() *flag.FlagSet { // PersistentFlags returns the persistent FlagSet specifically set in the current command.
func (c *Command) PersistentFlags() *flag.FlagSet { if c.pflags == nil { - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.pflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1530,9 +1749,9 @@ func (c *Command) PersistentFlags() *flag.FlagSet { func (c *Command) ResetFlags() { c.flagErrorBuf = new(bytes.Buffer) c.flagErrorBuf.Reset() - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.flags.SetOutput(c.flagErrorBuf) - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.pflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.pflags.SetOutput(c.flagErrorBuf) c.lflags = nil @@ -1649,7 +1868,7 @@ func (c *Command) mergePersistentFlags() { // If c.parentsPflags == nil, it makes new. func (c *Command) updateParentsPflags() { if c.parentsPflags == nil { - c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.parentsPflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.parentsPflags.SetOutput(c.flagErrorBuf) c.parentsPflags.SortFlags = false } @@ -1664,3 +1883,14 @@ func (c *Command) updateParentsPflags() { c.parentsPflags.AddFlagSet(parent.PersistentFlags()) }) } + +// commandNameMatches checks if two command names are equal +// taking into account case sensitivity according to +// EnableCaseInsensitive global configuration. +func commandNameMatches(s string, t string) bool { + if EnableCaseInsensitive { + return strings.EqualFold(s, t) + } + + return s == t +} diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go index 6159c1cc..307f0c12 100644 --- a/vendor/github.com/spf13/cobra/command_notwin.go +++ b/vendor/github.com/spf13/cobra/command_notwin.go @@ -1,3 +1,18 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows // +build !windows package cobra diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go index 8768b173..adbef395 100644 --- a/vendor/github.com/spf13/cobra/command_win.go +++ b/vendor/github.com/spf13/cobra/command_win.go @@ -1,3 +1,18 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
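commandNameMatches, the last addition to command.go above, is the single comparison point behind the new EnableCaseInsensitive global; findNext and HasAlias both route through it, so command names and aliases toggle together. A minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cobra.EnableCaseInsensitive = true // default is false (case-sensitive)

	root := &cobra.Command{Use: "app"}
	root.AddCommand(&cobra.Command{
		Use:     "start",
		Aliases: []string{"run"},
		Run:     func(cmd *cobra.Command, args []string) { fmt.Println("started") },
	})

	// "START", "start", "RUN", and "run" now all resolve to the same command.
	root.SetArgs([]string{"START"})
	_ = root.Execute() // prints "started"
}
```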
+ +//go:build windows // +build windows package cobra diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go new file mode 100644 index 00000000..c0c08b05 --- /dev/null +++ b/vendor/github.com/spf13/cobra/completions.go @@ -0,0 +1,939 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cobra + +import ( + "fmt" + "os" + "regexp" + "strconv" + "strings" + "sync" + + "github.com/spf13/pflag" +) + +const ( + // ShellCompRequestCmd is the name of the hidden command that is used to request + // completion results from the program. It is used by the shell completion scripts. + ShellCompRequestCmd = "__complete" + // ShellCompNoDescRequestCmd is the name of the hidden command that is used to request + // completion results without their description. It is used by the shell completion scripts. + ShellCompNoDescRequestCmd = "__completeNoDesc" +) + +// Global map of flag completion functions. Make sure to use flagCompletionMutex before you try to read and write from it. +var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){} + +// lock for reading and writing from flagCompletionFunctions +var flagCompletionMutex = &sync.RWMutex{} + +// ShellCompDirective is a bit map representing the different behaviors the shell +// can be instructed to have once completions have been provided. +type ShellCompDirective int + +type flagCompError struct { + subCommand string + flagName string +} + +func (e *flagCompError) Error() string { + return "Subcommand '" + e.subCommand + "' does not support flag '" + e.flagName + "'" +} + +const ( + // ShellCompDirectiveError indicates an error occurred and completions should be ignored. + ShellCompDirectiveError ShellCompDirective = 1 << iota + + // ShellCompDirectiveNoSpace indicates that the shell should not add a space + // after the completion even if there is a single completion provided. + ShellCompDirectiveNoSpace + + // ShellCompDirectiveNoFileComp indicates that the shell should not provide + // file completion even when no completion is provided. + ShellCompDirectiveNoFileComp + + // ShellCompDirectiveFilterFileExt indicates that the provided completions + // should be used as file extension filters. + // For flags, using Command.MarkFlagFilename() and Command.MarkPersistentFlagFilename() + // is a shortcut to using this directive explicitly. The BashCompFilenameExt + // annotation can also be used to obtain the same behavior for flags. + ShellCompDirectiveFilterFileExt + + // ShellCompDirectiveFilterDirs indicates that only directory names should + // be provided in file completion. To request directory names within another + // directory, the returned completions should specify the directory within + // which to search. The BashCompSubdirsInDir annotation can be used to + // obtain the same behavior but only for flags. 
+ ShellCompDirectiveFilterDirs + + // ShellCompDirectiveKeepOrder indicates that the shell should preserve the order + // in which the completions are provided + ShellCompDirectiveKeepOrder + + // =========================================================================== + + // All directives using iota should be above this one. + // For internal use. + shellCompDirectiveMaxValue + + // ShellCompDirectiveDefault indicates to let the shell perform its default + // behavior after completions have been provided. + // This one must be last to avoid messing up the iota count. + ShellCompDirectiveDefault ShellCompDirective = 0 +) + +const ( + // Constants for the completion command + compCmdName = "completion" + compCmdNoDescFlagName = "no-descriptions" + compCmdNoDescFlagDesc = "disable completion descriptions" + compCmdNoDescFlagDefault = false +) + +// CompletionOptions are the options to control shell completion +type CompletionOptions struct { + // DisableDefaultCmd prevents Cobra from creating a default 'completion' command + DisableDefaultCmd bool + // DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag + // for shells that support completion descriptions + DisableNoDescFlag bool + // DisableDescriptions turns off all completion descriptions for shells + // that support them + DisableDescriptions bool + // HiddenDefaultCmd makes the default 'completion' command hidden + HiddenDefaultCmd bool +} + +// NoFileCompletions can be used to disable file completion for commands that should +// not trigger file completions. +func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { + return nil, ShellCompDirectiveNoFileComp +} + +// FixedCompletions can be used to create a completion function which always +// returns the same results. +func FixedCompletions(choices []string, directive ShellCompDirective) func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { + return func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { + return choices, directive + } +} + +// RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag. +func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error { + flag := c.Flag(flagName) + if flag == nil { + return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName) + } + flagCompletionMutex.Lock() + defer flagCompletionMutex.Unlock() + + if _, exists := flagCompletionFunctions[flag]; exists { + return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' already registered", flagName) + } + flagCompletionFunctions[flag] = f + return nil +} + +// GetFlagCompletionFunc returns the completion function for the given flag of the command, if available. 
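CompletionOptions, NoFileCompletions, FixedCompletions, and RegisterFlagCompletionFunc above are the program-facing half of the new completion machinery; GetFlagCompletionFunc, next, is its read-side accessor. A sketch of typical wiring (the command, flag, and completion values are invented):

```go
package main

import "github.com/spf13/cobra"

func main() {
	root := &cobra.Command{Use: "app"}
	// Keep the generated 'completion' command functional but out of help output.
	root.CompletionOptions.HiddenDefaultCmd = true

	deploy := &cobra.Command{
		Use: "deploy",
		// Positional completions: a fixed choice list, no file fallback.
		ValidArgsFunction: cobra.FixedCompletions(
			[]string{"staging", "prod"}, cobra.ShellCompDirectiveNoFileComp),
		Run: func(cmd *cobra.Command, args []string) {},
	}
	deploy.Flags().String("region", "", "target region")
	// Flag value completions use the same "value\tdescription" format the
	// shell scripts parse.
	_ = deploy.RegisterFlagCompletionFunc("region",
		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return []string{"us-east-1\tN. Virginia", "eu-west-1\tIreland"}, cobra.ShellCompDirectiveNoFileComp
		})

	root.AddCommand(deploy)
	_ = root.Execute()
}
```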
+func (c *Command) GetFlagCompletionFunc(flagName string) (func(*Command, []string, string) ([]string, ShellCompDirective), bool) { + flag := c.Flag(flagName) + if flag == nil { + return nil, false + } + + flagCompletionMutex.RLock() + defer flagCompletionMutex.RUnlock() + + completionFunc, exists := flagCompletionFunctions[flag] + return completionFunc, exists +} + +// Returns a string listing the different directive enabled in the specified parameter +func (d ShellCompDirective) string() string { + var directives []string + if d&ShellCompDirectiveError != 0 { + directives = append(directives, "ShellCompDirectiveError") + } + if d&ShellCompDirectiveNoSpace != 0 { + directives = append(directives, "ShellCompDirectiveNoSpace") + } + if d&ShellCompDirectiveNoFileComp != 0 { + directives = append(directives, "ShellCompDirectiveNoFileComp") + } + if d&ShellCompDirectiveFilterFileExt != 0 { + directives = append(directives, "ShellCompDirectiveFilterFileExt") + } + if d&ShellCompDirectiveFilterDirs != 0 { + directives = append(directives, "ShellCompDirectiveFilterDirs") + } + if d&ShellCompDirectiveKeepOrder != 0 { + directives = append(directives, "ShellCompDirectiveKeepOrder") + } + if len(directives) == 0 { + directives = append(directives, "ShellCompDirectiveDefault") + } + + if d >= shellCompDirectiveMaxValue { + return fmt.Sprintf("ERROR: unexpected ShellCompDirective value: %d", d) + } + return strings.Join(directives, ", ") +} + +// initCompleteCmd adds a special hidden command that can be used to request custom completions. +func (c *Command) initCompleteCmd(args []string) { + completeCmd := &Command{ + Use: fmt.Sprintf("%s [command-line]", ShellCompRequestCmd), + Aliases: []string{ShellCompNoDescRequestCmd}, + DisableFlagsInUseLine: true, + Hidden: true, + DisableFlagParsing: true, + Args: MinimumNArgs(1), + Short: "Request shell completion choices for the specified command-line", + Long: fmt.Sprintf("%[2]s is a special command that is used by the shell completion logic\n%[1]s", + "to request completion choices for the specified command-line.", ShellCompRequestCmd), + Run: func(cmd *Command, args []string) { + finalCmd, completions, directive, err := cmd.getCompletions(args) + if err != nil { + CompErrorln(err.Error()) + // Keep going for multiple reasons: + // 1- There could be some valid completions even though there was an error + // 2- Even without completions, we need to print the directive + } + + noDescriptions := cmd.CalledAs() == ShellCompNoDescRequestCmd + if !noDescriptions { + if doDescriptions, err := strconv.ParseBool(getEnvConfig(cmd, configEnvVarSuffixDescriptions)); err == nil { + noDescriptions = !doDescriptions + } + } + noActiveHelp := GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable + out := finalCmd.OutOrStdout() + for _, comp := range completions { + if noActiveHelp && strings.HasPrefix(comp, activeHelpMarker) { + // Remove all activeHelp entries if it's disabled. + continue + } + if noDescriptions { + // Remove any description that may be included following a tab character. + comp = strings.SplitN(comp, "\t", 2)[0] + } + + // Make sure we only write the first line to the output. + // This is needed if a description contains a linebreak. + // Otherwise the shell scripts will interpret the other lines as new flags + // and could therefore provide a wrong completion. + comp = strings.SplitN(comp, "\n", 2)[0] + + // Finally trim the completion. This is especially important to get rid + // of a trailing tab when there are no description following it. 
+ // For example, a sub-command without a description should not be completed + // with a tab at the end (or else zsh will show a -- following it + // although there is no description). + comp = strings.TrimSpace(comp) + + // Print each possible completion to the output for the completion script to consume. + fmt.Fprintln(out, comp) + } + + // As the last printout, print the completion directive for the completion script to parse. + // The directive integer must be that last character following a single colon (:). + // The completion script expects :<directive> + fmt.Fprintf(out, ":%d\n", directive) + + // Print some helpful info to stderr for the user to understand. + // Output from stderr must be ignored by the completion script. + fmt.Fprintf(finalCmd.ErrOrStderr(), "Completion ended with directive: %s\n", directive.string()) + }, + } + c.AddCommand(completeCmd) + subCmd, _, err := c.Find(args) + if err != nil || subCmd.Name() != ShellCompRequestCmd { + // Only create this special command if it is actually being called. + // This reduces possible side-effects of creating such a command; + // for example, having this command would cause problems to a + // cobra program that only consists of the root command, since this + // command would cause the root command to suddenly have a subcommand. + c.RemoveCommand(completeCmd) + } +} + +func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) { + // The last argument, which is not completely typed by the user, + // should not be part of the list of arguments + toComplete := args[len(args)-1] + trimmedArgs := args[:len(args)-1] + + var finalCmd *Command + var finalArgs []string + var err error + // Find the real command for which completion must be performed + // check if we need to traverse here to parse local flags on parent commands + if c.Root().TraverseChildren { + finalCmd, finalArgs, err = c.Root().Traverse(trimmedArgs) + } else { + // For Root commands that don't specify any value for their Args fields, when we call + // Find(), if those Root commands don't have any sub-commands, they will accept arguments. + // However, because we have added the __complete sub-command in the current code path, the + // call to Find() -> legacyArgs() will return an error if there are any arguments. + // To avoid this, we first remove the __complete command to get back to having no sub-commands. + rootCmd := c.Root() + if len(rootCmd.Commands()) == 1 { + rootCmd.RemoveCommand(c) + } + + finalCmd, finalArgs, err = rootCmd.Find(trimmedArgs) + } + if err != nil { + // Unable to find the real command. E.g., <program> someInvalidCmd + return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("unable to find a command for arguments: %v", trimmedArgs) + } + finalCmd.ctx = c.ctx + + // These flags are normally added when `execute()` is called on `finalCmd`, + // however, when doing completion, we don't call `finalCmd.execute()`. + // Let's add the --help and --version flag ourselves but only if the finalCmd + // has not disabled flag parsing; if flag parsing is disabled, it is up to the + // finalCmd itself to handle the completion of *all* flags. + if !finalCmd.DisableFlagParsing { + finalCmd.InitDefaultHelpFlag() + finalCmd.InitDefaultVersionFlag() + } + + // Check if we are doing flag value completion before parsing the flags.
+ // This is important because if we are completing a flag value, we need to also + // remove the flag name argument from the list of finalArgs or else the parsing + // could fail due to an invalid value (incomplete) for the flag. + flag, finalArgs, toComplete, flagErr := checkIfFlagCompletion(finalCmd, finalArgs, toComplete) + + // Check if interspersed is false or -- was set on a previous arg. + // This works by counting the arguments. Normally -- is not counted as arg but + // if -- was already set or interspersed is false and there is already one arg then + // the extra added -- is counted as arg. + flagCompletion := true + _ = finalCmd.ParseFlags(append(finalArgs, "--")) + newArgCount := finalCmd.Flags().NArg() + + // Parse the flags early so we can check if required flags are set + if err = finalCmd.ParseFlags(finalArgs); err != nil { + return finalCmd, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error()) + } + + realArgCount := finalCmd.Flags().NArg() + if newArgCount > realArgCount { + // don't do flag completion (see above) + flagCompletion = false + } + // Error while attempting to parse flags + if flagErr != nil { + // If error type is flagCompError and we don't want flagCompletion we should ignore the error + if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) { + return finalCmd, []string{}, ShellCompDirectiveDefault, flagErr + } + } + + // Look for the --help or --version flags. If they are present, + // there should be no further completions. + if helpOrVersionFlagPresent(finalCmd) { + return finalCmd, []string{}, ShellCompDirectiveNoFileComp, nil + } + + // We only remove the flags from the arguments if DisableFlagParsing is not set. + // This is important for commands which have requested to do their own flag completion. + if !finalCmd.DisableFlagParsing { + finalArgs = finalCmd.Flags().Args() + } + + if flag != nil && flagCompletion { + // Check if we are completing a flag value subject to annotations + if validExts, present := flag.Annotations[BashCompFilenameExt]; present { + if len(validExts) != 0 { + // File completion filtered by extensions + return finalCmd, validExts, ShellCompDirectiveFilterFileExt, nil + } + + // The annotation requests simple file completion. There is no reason to do + // that since it is the default behavior anyway. Let's ignore this annotation + // in case the program also registered a completion function for this flag. + // Even though it is a mistake on the program's side, let's be nice when we can. + } + + if subDir, present := flag.Annotations[BashCompSubdirsInDir]; present { + if len(subDir) == 1 { + // Directory completion from within a directory + return finalCmd, subDir, ShellCompDirectiveFilterDirs, nil + } + // Directory completion + return finalCmd, []string{}, ShellCompDirectiveFilterDirs, nil + } + } + + var completions []string + var directive ShellCompDirective + + // Enforce flag groups before doing flag completions + finalCmd.enforceFlagGroupsForCompletion() + + // Note that we want to perform flagname completion even if finalCmd.DisableFlagParsing==true; + // doing this allows for completion of persistent flag names even for commands that disable flag parsing. + // + // When doing completion of a flag name, as soon as an argument starts with + // a '-' we know it is a flag. 
We cannot use isFlagArg() here as it requires + // the flag name to be complete + if flag == nil && len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") && flagCompletion { + // First check for required flags + completions = completeRequireFlags(finalCmd, toComplete) + + // If we have not found any required flags, only then can we show regular flags + if len(completions) == 0 { + doCompleteFlags := func(flag *pflag.Flag) { + if !flag.Changed || + strings.Contains(flag.Value.Type(), "Slice") || + strings.Contains(flag.Value.Type(), "Array") { + // If the flag is not already present, or if it can be specified multiple times (Array or Slice) + // we suggest it as a completion + completions = append(completions, getFlagNameCompletions(flag, toComplete)...) + } + } + + // We cannot use finalCmd.Flags() because we may not have called ParsedFlags() for commands + // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and + // non-inherited flags. + finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { + doCompleteFlags(flag) + }) + // Try to complete non-inherited flags even if DisableFlagParsing==true. + // This allows programs to tell Cobra about flags for completion even + // if the actual parsing of flags is not done by Cobra. + // For instance, Helm uses this to provide flag name completion for + // some of its plugins. + finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + doCompleteFlags(flag) + }) + } + + directive = ShellCompDirectiveNoFileComp + if len(completions) == 1 && strings.HasSuffix(completions[0], "=") { + // If there is a single completion, the shell usually adds a space + // after the completion. We don't want that if the flag ends with an = + directive = ShellCompDirectiveNoSpace + } + + if !finalCmd.DisableFlagParsing { + // If DisableFlagParsing==false, we have completed the flags as known by Cobra; + // we can return what we found. + // If DisableFlagParsing==true, Cobra may not be aware of all flags, so we + // let the logic continue to see if ValidArgsFunction needs to be called. + return finalCmd, completions, directive, nil + } + } else { + directive = ShellCompDirectiveDefault + if flag == nil { + foundLocalNonPersistentFlag := false + // If TraverseChildren is true on the root command we don't check for + // local flags because we can use a local flag on a parent command + if !finalCmd.Root().TraverseChildren { + // Check if there are any local, non-persistent flags on the command-line + localNonPersistentFlags := finalCmd.LocalNonPersistentFlags() + finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + if localNonPersistentFlags.Lookup(flag.Name) != nil && flag.Changed { + foundLocalNonPersistentFlag = true + } + }) + } + + // Complete subcommand names, including the help command + if len(finalArgs) == 0 && !foundLocalNonPersistentFlag { + // We only complete sub-commands if: + // - there are no arguments on the command-line and + // - there are no local, non-persistent flags on the command-line or TraverseChildren is true + for _, subCmd := range finalCmd.Commands() { + if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand { + if strings.HasPrefix(subCmd.Name(), toComplete) { + completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) + } + directive = ShellCompDirectiveNoFileComp + } + } + } + + // Complete required flags even without the '-' prefix + completions = append(completions, completeRequireFlags(finalCmd, toComplete)...) 
+ + // Always complete ValidArgs, even if we are completing a subcommand name. + // This is for commands that have both subcommands and ValidArgs. + if len(finalCmd.ValidArgs) > 0 { + if len(finalArgs) == 0 { + // ValidArgs are only for the first argument + for _, validArg := range finalCmd.ValidArgs { + if strings.HasPrefix(validArg, toComplete) { + completions = append(completions, validArg) + } + } + directive = ShellCompDirectiveNoFileComp + + // If no completions were found within commands or ValidArgs, + // see if there are any ArgAliases that should be completed. + if len(completions) == 0 { + for _, argAlias := range finalCmd.ArgAliases { + if strings.HasPrefix(argAlias, toComplete) { + completions = append(completions, argAlias) + } + } + } + } + + // If there are ValidArgs specified (even if they don't match), we stop completion. + // Only one of ValidArgs or ValidArgsFunction can be used for a single command. + return finalCmd, completions, directive, nil + } + + // Let the logic continue so as to add any ValidArgsFunction completions, + // even if we already found sub-commands. + // This is for commands that have subcommands but also specify a ValidArgsFunction. + } + } + + // Find the completion function for the flag or command + var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) + if flag != nil && flagCompletion { + flagCompletionMutex.RLock() + completionFn = flagCompletionFunctions[flag] + flagCompletionMutex.RUnlock() + } else { + completionFn = finalCmd.ValidArgsFunction + } + if completionFn != nil { + // Go custom completion defined for this flag or command. + // Call the registered completion function to get the completions. + var comps []string + comps, directive = completionFn(finalCmd, finalArgs, toComplete) + completions = append(completions, comps...) + } + + return finalCmd, completions, directive, nil +} + +func helpOrVersionFlagPresent(cmd *Command) bool { + if versionFlag := cmd.Flags().Lookup("version"); versionFlag != nil && + len(versionFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && versionFlag.Changed { + return true + } + if helpFlag := cmd.Flags().Lookup("help"); helpFlag != nil && + len(helpFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && helpFlag.Changed { + return true + } + return false +} + +func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string { + if nonCompletableFlag(flag) { + return []string{} + } + + var completions []string + flagName := "--" + flag.Name + if strings.HasPrefix(flagName, toComplete) { + // Flag without the = + completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + + // Why suggest both long forms: --flag and --flag= ? + // This forces the user to *always* have to type either an = or a space after the flag name. + // Let's be nice and avoid making users have to do that. + // Since boolean flags and shortname flags don't show the = form, let's go that route and never show it. + // The = form will still work, we just won't suggest it. + // This also makes the list of suggested flags shorter as we avoid all the = forms. 
+ // + // if len(flag.NoOptDefVal) == 0 { + // // Flag requires a value, so it can be suffixed with = + // flagName += "=" + // completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + // } + } + + flagName = "-" + flag.Shorthand + if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) { + completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + } + + return completions +} + +func completeRequireFlags(finalCmd *Command, toComplete string) []string { + var completions []string + + doCompleteRequiredFlags := func(flag *pflag.Flag) { + if _, present := flag.Annotations[BashCompOneRequiredFlag]; present { + if !flag.Changed { + // If the flag is not already present, we suggest it as a completion + completions = append(completions, getFlagNameCompletions(flag, toComplete)...) + } + } + } + + // We cannot use finalCmd.Flags() because we may not have called ParsedFlags() for commands + // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and + // non-inherited flags. + finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { + doCompleteRequiredFlags(flag) + }) + finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + doCompleteRequiredFlags(flag) + }) + + return completions +} + +func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*pflag.Flag, []string, string, error) { + if finalCmd.DisableFlagParsing { + // We only do flag completion if we are allowed to parse flags + // This is important for commands which have requested to do their own flag completion. + return nil, args, lastArg, nil + } + + var flagName string + trimmedArgs := args + flagWithEqual := false + orgLastArg := lastArg + + // When doing completion of a flag name, as soon as an argument starts with + // a '-' we know it is a flag. We cannot use isFlagArg() here as that function + // requires the flag name to be complete + if len(lastArg) > 0 && lastArg[0] == '-' { + if index := strings.Index(lastArg, "="); index >= 0 { + // Flag with an = + if strings.HasPrefix(lastArg[:index], "--") { + // Flag has full name + flagName = lastArg[2:index] + } else { + // Flag is shorthand + // We have to get the last shorthand flag name + // e.g. `-asd` => d to provide the correct completion + // https://github.com/spf13/cobra/issues/1257 + flagName = lastArg[index-1 : index] + } + lastArg = lastArg[index+1:] + flagWithEqual = true + } else { + // Normal flag completion + return nil, args, lastArg, nil + } + } + + if len(flagName) == 0 { + if len(args) > 0 { + prevArg := args[len(args)-1] + if isFlagArg(prevArg) { + // Only consider the case where the flag does not contain an =. + // If the flag contains an = it means it has already been fully processed, + // so we don't need to deal with it here. + if index := strings.Index(prevArg, "="); index < 0 { + if strings.HasPrefix(prevArg, "--") { + // Flag has full name + flagName = prevArg[2:] + } else { + // Flag is shorthand + // We have to get the last shorthand flag name + // e.g. 
`-asd` => d to provide the correct completion + // https://github.com/spf13/cobra/issues/1257 + flagName = prevArg[len(prevArg)-1:] + } + // Remove the uncompleted flag or else there could be an error created + // for an invalid value for that flag + trimmedArgs = args[:len(args)-1] + } + } + } + } + + if len(flagName) == 0 { + // Not doing flag completion + return nil, trimmedArgs, lastArg, nil + } + + flag := findFlag(finalCmd, flagName) + if flag == nil { + // Flag not supported by this command, the interspersed option might be set so return the original args + return nil, args, orgLastArg, &flagCompError{subCommand: finalCmd.Name(), flagName: flagName} + } + + if !flagWithEqual { + if len(flag.NoOptDefVal) != 0 { + // We had assumed dealing with a two-word flag but the flag is a boolean flag. + // In that case, there is no value following it, so we are not really doing flag completion. + // Reset everything to do noun completion. + trimmedArgs = args + flag = nil + } + } + + return flag, trimmedArgs, lastArg, nil +} + +// InitDefaultCompletionCmd adds a default 'completion' command to c. +// This function will do nothing if any of the following is true: +// 1- the feature has been explicitly disabled by the program, +// 2- c has no subcommands (to avoid creating one), +// 3- c already has a 'completion' command provided by the program. +func (c *Command) InitDefaultCompletionCmd() { + if c.CompletionOptions.DisableDefaultCmd || !c.HasSubCommands() { + return + } + + for _, cmd := range c.commands { + if cmd.Name() == compCmdName || cmd.HasAlias(compCmdName) { + // A completion command is already available + return + } + } + + haveNoDescFlag := !c.CompletionOptions.DisableNoDescFlag && !c.CompletionOptions.DisableDescriptions + + completionCmd := &Command{ + Use: compCmdName, + Short: "Generate the autocompletion script for the specified shell", + Long: fmt.Sprintf(`Generate the autocompletion script for %[1]s for the specified shell. +See each sub-command's help for details on how to use the generated script. +`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + Hidden: c.CompletionOptions.HiddenDefaultCmd, + GroupID: c.completionCommandGroupID, + } + c.AddCommand(completionCmd) + + out := c.OutOrStdout() + noDesc := c.CompletionOptions.DisableDescriptions + shortDesc := "Generate the autocompletion script for %s" + bash := &Command{ + Use: "bash", + Short: fmt.Sprintf(shortDesc, "bash"), + Long: fmt.Sprintf(`Generate the autocompletion script for the bash shell. + +This script depends on the 'bash-completion' package. +If it is not installed already, you can install it via your OS's package manager. + +To load completions in your current shell session: + + source <(%[1]s completion bash) + +To load completions for every new session, execute once: + +#### Linux: + + %[1]s completion bash > /etc/bash_completion.d/%[1]s + +#### macOS: + + %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s + +You will need to start a new shell for this setup to take effect. 
+`, c.Root().Name()), + Args: NoArgs, + DisableFlagsInUseLine: true, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + return cmd.Root().GenBashCompletionV2(out, !noDesc) + }, + } + if haveNoDescFlag { + bash.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + zsh := &Command{ + Use: "zsh", + Short: fmt.Sprintf(shortDesc, "zsh"), + Long: fmt.Sprintf(`Generate the autocompletion script for the zsh shell. + +If shell completion is not already enabled in your environment you will need +to enable it. You can execute the following once: + + echo "autoload -U compinit; compinit" >> ~/.zshrc + +To load completions in your current shell session: + + source <(%[1]s completion zsh) + +To load completions for every new session, execute once: + +#### Linux: + + %[1]s completion zsh > "${fpath[1]}/_%[1]s" + +#### macOS: + + %[1]s completion zsh > $(brew --prefix)/share/zsh/site-functions/_%[1]s + +You will need to start a new shell for this setup to take effect. +`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + if noDesc { + return cmd.Root().GenZshCompletionNoDesc(out) + } + return cmd.Root().GenZshCompletion(out) + }, + } + if haveNoDescFlag { + zsh.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + fish := &Command{ + Use: "fish", + Short: fmt.Sprintf(shortDesc, "fish"), + Long: fmt.Sprintf(`Generate the autocompletion script for the fish shell. + +To load completions in your current shell session: + + %[1]s completion fish | source + +To load completions for every new session, execute once: + + %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish + +You will need to start a new shell for this setup to take effect. +`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + return cmd.Root().GenFishCompletion(out, !noDesc) + }, + } + if haveNoDescFlag { + fish.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + powershell := &Command{ + Use: "powershell", + Short: fmt.Sprintf(shortDesc, "powershell"), + Long: fmt.Sprintf(`Generate the autocompletion script for powershell. + +To load completions in your current shell session: + + %[1]s completion powershell | Out-String | Invoke-Expression + +To load completions for every new session, add the output of the above command +to your powershell profile. 
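All four shell subcommands built by this constructor are steered by the `CompletionOptions` fields it consults (`DisableDefaultCmd`, `HiddenDefaultCmd`, `DisableNoDescFlag`, `DisableDescriptions`, all visible above). A minimal sketch of tuning them before `Execute()`, with a hypothetical `myapp` program:

```go
package main

import "github.com/spf13/cobra"

func main() {
	rootCmd := &cobra.Command{Use: "myapp"} // hypothetical program name
	// A subcommand is needed, or InitDefaultCompletionCmd bails out early.
	rootCmd.AddCommand(&cobra.Command{Use: "serve", Run: func(*cobra.Command, []string) {}})

	// InitDefaultCompletionCmd runs during Execute(); these options steer it.
	rootCmd.CompletionOptions.HiddenDefaultCmd = true // generate it, but hide it from help
	// rootCmd.CompletionOptions.DisableDefaultCmd = true // would skip generation entirely

	_ = rootCmd.Execute()
}
```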
+`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + if noDesc { + return cmd.Root().GenPowerShellCompletion(out) + } + return cmd.Root().GenPowerShellCompletionWithDesc(out) + + }, + } + if haveNoDescFlag { + powershell.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + completionCmd.AddCommand(bash, zsh, fish, powershell) +} + +func findFlag(cmd *Command, name string) *pflag.Flag { + flagSet := cmd.Flags() + if len(name) == 1 { + // First convert the short flag into a long flag + // as the cmd.Flag() search only accepts long flags + if short := flagSet.ShorthandLookup(name); short != nil { + name = short.Name + } else { + set := cmd.InheritedFlags() + if short = set.ShorthandLookup(name); short != nil { + name = short.Name + } else { + return nil + } + } + } + return cmd.Flag(name) +} + +// CompDebug prints the specified string to the same file as where the +// completion script prints its logs. +// Note that completion printouts should never be on stdout as they would +// be wrongly interpreted as actual completion choices by the completion script. +func CompDebug(msg string, printToStdErr bool) { + msg = fmt.Sprintf("[Debug] %s", msg) + + // Such logs are only printed when the user has set the environment + // variable BASH_COMP_DEBUG_FILE to the path of some file to be used. + if path := os.Getenv("BASH_COMP_DEBUG_FILE"); path != "" { + f, err := os.OpenFile(path, + os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err == nil { + defer f.Close() + WriteStringAndCheck(f, msg) + } + } + + if printToStdErr { + // Must print to stderr for this not to be read by the completion script. + fmt.Fprint(os.Stderr, msg) + } +} + +// CompDebugln prints the specified string with a newline at the end +// to the same file as where the completion script prints its logs. +// Such logs are only printed when the user has set the environment +// variable BASH_COMP_DEBUG_FILE to the path of some file to be used. +func CompDebugln(msg string, printToStdErr bool) { + CompDebug(fmt.Sprintf("%s\n", msg), printToStdErr) +} + +// CompError prints the specified completion message to stderr. +func CompError(msg string) { + msg = fmt.Sprintf("[Error] %s", msg) + CompDebug(msg, true) +} + +// CompErrorln prints the specified completion message to stderr with a newline at the end. +func CompErrorln(msg string) { + CompError(fmt.Sprintf("%s\n", msg)) +} + +// These values should not be changed: users will be using them explicitly. +const ( + configEnvVarGlobalPrefix = "COBRA" + configEnvVarSuffixDescriptions = "COMPLETION_DESCRIPTIONS" +) + +var configEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`) + +// configEnvVar returns the name of the program-specific configuration environment +// variable. It has the format <PROGRAM>_<SUFFIX> where <PROGRAM> is the name of the +// root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`. +func configEnvVar(name, suffix string) string { + // This format should not be changed: users will be using it explicitly. + v := strings.ToUpper(fmt.Sprintf("%s_%s", name, suffix)) + v = configEnvVarPrefixSubstRegexp.ReplaceAllString(v, "_") + return v +} + +// getEnvConfig returns the value of the configuration environment variable +// <PROGRAM>_<SUFFIX> where <PROGRAM> is the name of the root command in upper +// case, with all non-ASCII-alphanumeric characters replaced by `_`. 
+// If the value is empty or not set, the value of the environment variable +// COBRA_<SUFFIX> is returned instead. +func getEnvConfig(cmd *Command, suffix string) string { + v := os.Getenv(configEnvVar(cmd.Root().Name(), suffix)) + if v == "" { + v = os.Getenv(configEnvVar(configEnvVarGlobalPrefix, suffix)) + } + return v +} diff --git a/vendor/github.com/spf13/cobra/custom_completions.go b/vendor/github.com/spf13/cobra/custom_completions.go deleted file mode 100644 index fa060c14..00000000 --- a/vendor/github.com/spf13/cobra/custom_completions.go +++ /dev/null @@ -1,557 +0,0 @@ -package cobra - -import ( - "fmt" - "os" - "strings" - - "github.com/spf13/pflag" -) - -const ( - // ShellCompRequestCmd is the name of the hidden command that is used to request - // completion results from the program. It is used by the shell completion scripts. - ShellCompRequestCmd = "__complete" - // ShellCompNoDescRequestCmd is the name of the hidden command that is used to request - // completion results without their description. It is used by the shell completion scripts. - ShellCompNoDescRequestCmd = "__completeNoDesc" -) - -// Global map of flag completion functions. -var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){} - -// ShellCompDirective is a bit map representing the different behaviors the shell -// can be instructed to have once completions have been provided. -type ShellCompDirective int - -const ( - // ShellCompDirectiveError indicates an error occurred and completions should be ignored. - ShellCompDirectiveError ShellCompDirective = 1 << iota - - // ShellCompDirectiveNoSpace indicates that the shell should not add a space - // after the completion even if there is a single completion provided. - ShellCompDirectiveNoSpace - - // ShellCompDirectiveNoFileComp indicates that the shell should not provide - // file completion even when no completion is provided. - // This currently does not work for zsh or bash < 4 - ShellCompDirectiveNoFileComp - - // ShellCompDirectiveFilterFileExt indicates that the provided completions - // should be used as file extension filters. - // For flags, using Command.MarkFlagFilename() and Command.MarkPersistentFlagFilename() - // is a shortcut to using this directive explicitly. The BashCompFilenameExt - // annotation can also be used to obtain the same behavior for flags. - ShellCompDirectiveFilterFileExt - - // ShellCompDirectiveFilterDirs indicates that only directory names should - // be provided in file completion. To request directory names within another - // directory, the returned completions should specify the directory within - // which to search. The BashCompSubdirsInDir annotation can be used to - // obtain the same behavior but only for flags. - ShellCompDirectiveFilterDirs - - // =========================================================================== - - // All directives using iota should be above this one. - // For internal use. - shellCompDirectiveMaxValue - - // ShellCompDirectiveDefault indicates to let the shell perform its default - // behavior after completions have been provided. - // This one must be last to avoid messing up the iota count. - ShellCompDirectiveDefault ShellCompDirective = 0 -) - -// RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag. 
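This doc comment introduces the registration entry point for flag completions (the new completions.go keeps the same API, now guarded by the flagCompletionMutex seen earlier in this diff). A minimal sketch of registering one; the `deploy` command, `--region` flag, and region values are hypothetical:

```go
package main

import "github.com/spf13/cobra"

func main() {
	rootCmd := &cobra.Command{Use: "myapp"} // hypothetical
	deployCmd := &cobra.Command{Use: "deploy", Run: func(*cobra.Command, []string) {}}
	deployCmd.Flags().String("region", "", "target region")

	// Completions for '--region' come from this callback; the directive
	// tells the shell not to fall back to file completion.
	_ = deployCmd.RegisterFlagCompletionFunc("region",
		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return []string{"us-east-1", "us-west-2", "eu-central-1"}, cobra.ShellCompDirectiveNoFileComp
		})

	rootCmd.AddCommand(deployCmd)
	_ = rootCmd.Execute()
}
```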
-func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error { - flag := c.Flag(flagName) - if flag == nil { - return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName) - } - if _, exists := flagCompletionFunctions[flag]; exists { - return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' already registered", flagName) - } - flagCompletionFunctions[flag] = f - return nil -} - -// Returns a string listing the different directive enabled in the specified parameter -func (d ShellCompDirective) string() string { - var directives []string - if d&ShellCompDirectiveError != 0 { - directives = append(directives, "ShellCompDirectiveError") - } - if d&ShellCompDirectiveNoSpace != 0 { - directives = append(directives, "ShellCompDirectiveNoSpace") - } - if d&ShellCompDirectiveNoFileComp != 0 { - directives = append(directives, "ShellCompDirectiveNoFileComp") - } - if d&ShellCompDirectiveFilterFileExt != 0 { - directives = append(directives, "ShellCompDirectiveFilterFileExt") - } - if d&ShellCompDirectiveFilterDirs != 0 { - directives = append(directives, "ShellCompDirectiveFilterDirs") - } - if len(directives) == 0 { - directives = append(directives, "ShellCompDirectiveDefault") - } - - if d >= shellCompDirectiveMaxValue { - return fmt.Sprintf("ERROR: unexpected ShellCompDirective value: %d", d) - } - return strings.Join(directives, ", ") -} - -// Adds a special hidden command that can be used to request custom completions. -func (c *Command) initCompleteCmd(args []string) { - completeCmd := &Command{ - Use: fmt.Sprintf("%s [command-line]", ShellCompRequestCmd), - Aliases: []string{ShellCompNoDescRequestCmd}, - DisableFlagsInUseLine: true, - Hidden: true, - DisableFlagParsing: true, - Args: MinimumNArgs(1), - Short: "Request shell completion choices for the specified command-line", - Long: fmt.Sprintf("%[2]s is a special command that is used by the shell completion logic\n%[1]s", - "to request completion choices for the specified command-line.", ShellCompRequestCmd), - Run: func(cmd *Command, args []string) { - finalCmd, completions, directive, err := cmd.getCompletions(args) - if err != nil { - CompErrorln(err.Error()) - // Keep going for multiple reasons: - // 1- There could be some valid completions even though there was an error - // 2- Even without completions, we need to print the directive - } - - noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd) - for _, comp := range completions { - if noDescriptions { - // Remove any description that may be included following a tab character. - comp = strings.Split(comp, "\t")[0] - } - - // Make sure we only write the first line to the output. - // This is needed if a description contains a linebreak. - // Otherwise the shell scripts will interpret the other lines as new flags - // and could therefore provide a wrong completion. - comp = strings.Split(comp, "\n")[0] - - // Finally trim the completion. This is especially important to get rid - // of a trailing tab when there are no description following it. - // For example, a sub-command without a description should not be completed - // with a tab at the end (or else zsh will show a -- following it - // although there is no description). - comp = strings.TrimSpace(comp) - - // Print each possible completion to stdout for the completion script to consume. 
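The candidates printed here are followed by a final `:<directive>` line (emitted just below). Since `ShellCompDirective` is a bit map, programs combine values with bitwise OR; a tiny sketch of how the printed integer decomposes, using the values from the iota block above:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	// Directives are bit flags and combine with bitwise OR; the completion
	// script reads the resulting integer from the final ":<directive>" line.
	d := cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp
	fmt.Println(int(d)) // prints 6 (2 | 4)
}
```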
- fmt.Fprintln(finalCmd.OutOrStdout(), comp) - } - - if directive >= shellCompDirectiveMaxValue { - directive = ShellCompDirectiveDefault - } - - // As the last printout, print the completion directive for the completion script to parse. - // The directive integer must be that last character following a single colon (:). - // The completion script expects :<directive> - fmt.Fprintf(finalCmd.OutOrStdout(), ":%d\n", directive) - - // Print some helpful info to stderr for the user to understand. - // Output from stderr must be ignored by the completion script. - fmt.Fprintf(finalCmd.ErrOrStderr(), "Completion ended with directive: %s\n", directive.string()) - }, - } - c.AddCommand(completeCmd) - subCmd, _, err := c.Find(args) - if err != nil || subCmd.Name() != ShellCompRequestCmd { - // Only create this special command if it is actually being called. - // This reduces possible side-effects of creating such a command; - // for example, having this command would cause problems to a - // cobra program that only consists of the root command, since this - // command would cause the root command to suddenly have a subcommand. - c.RemoveCommand(completeCmd) - } -} - -func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) { - // The last argument, which is not completely typed by the user, - // should not be part of the list of arguments - toComplete := args[len(args)-1] - trimmedArgs := args[:len(args)-1] - - var finalCmd *Command - var finalArgs []string - var err error - // Find the real command for which completion must be performed - // check if we need to traverse here to parse local flags on parent commands - if c.Root().TraverseChildren { - finalCmd, finalArgs, err = c.Root().Traverse(trimmedArgs) - } else { - finalCmd, finalArgs, err = c.Root().Find(trimmedArgs) - } - if err != nil { - // Unable to find the real command. E.g., <program> someInvalidCmd <TAB> - return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs) - } - - // Check if we are doing flag value completion before parsing the flags. - // This is important because if we are completing a flag value, we need to also - // remove the flag name argument from the list of finalArgs or else the parsing - // could fail due to an invalid value (incomplete) for the flag. - flag, finalArgs, toComplete, err := checkIfFlagCompletion(finalCmd, finalArgs, toComplete) - if err != nil { - // Error while attempting to parse flags - return finalCmd, []string{}, ShellCompDirectiveDefault, err - } - - // Parse the flags early so we can check if required flags are set - if err = finalCmd.ParseFlags(finalArgs); err != nil { - return finalCmd, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error()) - } - - if flag != nil { - // Check if we are completing a flag value subject to annotations - if validExts, present := flag.Annotations[BashCompFilenameExt]; present { - if len(validExts) != 0 { - // File completion filtered by extensions - return finalCmd, validExts, ShellCompDirectiveFilterFileExt, nil - } - - // The annotation requests simple file completion. There is no reason to do - // that since it is the default behavior anyway. Let's ignore this annotation - // in case the program also registered a completion function for this flag. - // Even though it is a mistake on the program's side, let's be nice when we can. 
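Programs normally set the `BashCompFilenameExt` and `BashCompSubdirsInDir` annotations checked here through helper methods rather than directly. A minimal sketch, assuming the usual cobra helpers `MarkFlagFilename` and `MarkFlagDirname`; the `render` command and its flags are hypothetical:

```go
package main

import "github.com/spf13/cobra"

func main() {
	cmd := &cobra.Command{Use: "render", Run: func(*cobra.Command, []string) {}} // hypothetical
	cmd.Flags().String("config", "", "config file")
	cmd.Flags().String("outdir", "", "output directory")

	// Sets the BashCompFilenameExt annotation read above: completion for
	// --config is limited to *.yaml / *.yml files.
	_ = cmd.MarkFlagFilename("config", "yaml", "yml")

	// Sets the BashCompSubdirsInDir annotation: only directories are suggested.
	_ = cmd.MarkFlagDirname("outdir")

	_ = cmd.Execute()
}
```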
- } - - if subDir, present := flag.Annotations[BashCompSubdirsInDir]; present { - if len(subDir) == 1 { - // Directory completion from within a directory - return finalCmd, subDir, ShellCompDirectiveFilterDirs, nil - } - // Directory completion - return finalCmd, []string{}, ShellCompDirectiveFilterDirs, nil - } - } - - // When doing completion of a flag name, as soon as an argument starts with - // a '-' we know it is a flag. We cannot use isFlagArg() here as it requires - // the flag name to be complete - if flag == nil && len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") { - var completions []string - - // First check for required flags - completions = completeRequireFlags(finalCmd, toComplete) - - // If we have not found any required flags, only then can we show regular flags - if len(completions) == 0 { - doCompleteFlags := func(flag *pflag.Flag) { - if !flag.Changed || - strings.Contains(flag.Value.Type(), "Slice") || - strings.Contains(flag.Value.Type(), "Array") { - // If the flag is not already present, or if it can be specified multiple times (Array or Slice) - // we suggest it as a completion - completions = append(completions, getFlagNameCompletions(flag, toComplete)...) - } - } - - // We cannot use finalCmd.Flags() because we may not have called ParsedFlags() for commands - // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and - // non-inherited flags. - finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { - doCompleteFlags(flag) - }) - finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { - doCompleteFlags(flag) - }) - } - - directive := ShellCompDirectiveNoFileComp - if len(completions) == 1 && strings.HasSuffix(completions[0], "=") { - // If there is a single completion, the shell usually adds a space - // after the completion. We don't want that if the flag ends with an = - directive = ShellCompDirectiveNoSpace - } - return finalCmd, completions, directive, nil - } - - // We only remove the flags from the arguments if DisableFlagParsing is not set. - // This is important for commands which have requested to do their own flag completion. 
- if !finalCmd.DisableFlagParsing { - finalArgs = finalCmd.Flags().Args() - } - - var completions []string - directive := ShellCompDirectiveDefault - if flag == nil { - foundLocalNonPersistentFlag := false - // If TraverseChildren is true on the root command we don't check for - // local flags because we can use a local flag on a parent command - if !finalCmd.Root().TraverseChildren { - // Check if there are any local, non-persistent flags on the command-line - localNonPersistentFlags := finalCmd.LocalNonPersistentFlags() - finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { - if localNonPersistentFlags.Lookup(flag.Name) != nil && flag.Changed { - foundLocalNonPersistentFlag = true - } - }) - } - - // Complete subcommand names, including the help command - if len(finalArgs) == 0 && !foundLocalNonPersistentFlag { - // We only complete sub-commands if: - // - there are no arguments on the command-line and - // - there are no local, non-peristent flag on the command-line or TraverseChildren is true - for _, subCmd := range finalCmd.Commands() { - if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand { - if strings.HasPrefix(subCmd.Name(), toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) - } - directive = ShellCompDirectiveNoFileComp - } - } - } - - // Complete required flags even without the '-' prefix - completions = append(completions, completeRequireFlags(finalCmd, toComplete)...) - - // Always complete ValidArgs, even if we are completing a subcommand name. - // This is for commands that have both subcommands and ValidArgs. - if len(finalCmd.ValidArgs) > 0 { - if len(finalArgs) == 0 { - // ValidArgs are only for the first argument - for _, validArg := range finalCmd.ValidArgs { - if strings.HasPrefix(validArg, toComplete) { - completions = append(completions, validArg) - } - } - directive = ShellCompDirectiveNoFileComp - - // If no completions were found within commands or ValidArgs, - // see if there are any ArgAliases that should be completed. - if len(completions) == 0 { - for _, argAlias := range finalCmd.ArgAliases { - if strings.HasPrefix(argAlias, toComplete) { - completions = append(completions, argAlias) - } - } - } - } - - // If there are ValidArgs specified (even if they don't match), we stop completion. - // Only one of ValidArgs or ValidArgsFunction can be used for a single command. - return finalCmd, completions, directive, nil - } - - // Let the logic continue so as to add any ValidArgsFunction completions, - // even if we already found sub-commands. - // This is for commands that have subcommands but also specify a ValidArgsFunction. - } - - // Find the completion function for the flag or command - var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) - if flag != nil { - completionFn = flagCompletionFunctions[flag] - } else { - completionFn = finalCmd.ValidArgsFunction - } - if completionFn != nil { - // Go custom completion defined for this flag or command. - // Call the registered completion function to get the completions. - var comps []string - comps, directive = completionFn(finalCmd, finalArgs, toComplete) - completions = append(completions, comps...) 
- } - - return finalCmd, completions, directive, nil -} - -func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string { - if nonCompletableFlag(flag) { - return []string{} - } - - var completions []string - flagName := "--" + flag.Name - if strings.HasPrefix(flagName, toComplete) { - // Flag without the = - completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) - - // Why suggest both long forms: --flag and --flag= ? - // This forces the user to *always* have to type either an = or a space after the flag name. - // Let's be nice and avoid making users have to do that. - // Since boolean flags and shortname flags don't show the = form, let's go that route and never show it. - // The = form will still work, we just won't suggest it. - // This also makes the list of suggested flags shorter as we avoid all the = forms. - // - // if len(flag.NoOptDefVal) == 0 { - // // Flag requires a value, so it can be suffixed with = - // flagName += "=" - // completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) - // } - } - - flagName = "-" + flag.Shorthand - if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) - } - - return completions -} - -func completeRequireFlags(finalCmd *Command, toComplete string) []string { - var completions []string - - doCompleteRequiredFlags := func(flag *pflag.Flag) { - if _, present := flag.Annotations[BashCompOneRequiredFlag]; present { - if !flag.Changed { - // If the flag is not already present, we suggest it as a completion - completions = append(completions, getFlagNameCompletions(flag, toComplete)...) - } - } - } - - // We cannot use finalCmd.Flags() because we may not have called ParsedFlags() for commands - // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and - // non-inherited flags. - finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { - doCompleteRequiredFlags(flag) - }) - finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { - doCompleteRequiredFlags(flag) - }) - - return completions -} - -func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*pflag.Flag, []string, string, error) { - if finalCmd.DisableFlagParsing { - // We only do flag completion if we are allowed to parse flags - // This is important for commands which have requested to do their own flag completion. - return nil, args, lastArg, nil - } - - var flagName string - trimmedArgs := args - flagWithEqual := false - - // When doing completion of a flag name, as soon as an argument starts with - // a '-' we know it is a flag. We cannot use isFlagArg() here as that function - // requires the flag name to be complete - if len(lastArg) > 0 && lastArg[0] == '-' { - if index := strings.Index(lastArg, "="); index >= 0 { - // Flag with an = - flagName = strings.TrimLeft(lastArg[:index], "-") - lastArg = lastArg[index+1:] - flagWithEqual = true - } else { - // Normal flag completion - return nil, args, lastArg, nil - } - } - - if len(flagName) == 0 { - if len(args) > 0 { - prevArg := args[len(args)-1] - if isFlagArg(prevArg) { - // Only consider the case where the flag does not contain an =. - // If the flag contains an = it means it has already been fully processed, - // so we don't need to deal with it here. 
- if index := strings.Index(prevArg, "="); index < 0 { - flagName = strings.TrimLeft(prevArg, "-") - - // Remove the uncompleted flag or else there could be an error created - // for an invalid value for that flag - trimmedArgs = args[:len(args)-1] - } - } - } - } - - if len(flagName) == 0 { - // Not doing flag completion - return nil, trimmedArgs, lastArg, nil - } - - flag := findFlag(finalCmd, flagName) - if flag == nil { - // Flag not supported by this command, nothing to complete - err := fmt.Errorf("Subcommand '%s' does not support flag '%s'", finalCmd.Name(), flagName) - return nil, nil, "", err - } - - if !flagWithEqual { - if len(flag.NoOptDefVal) != 0 { - // We had assumed dealing with a two-word flag but the flag is a boolean flag. - // In that case, there is no value following it, so we are not really doing flag completion. - // Reset everything to do noun completion. - trimmedArgs = args - flag = nil - } - } - - return flag, trimmedArgs, lastArg, nil -} - -func findFlag(cmd *Command, name string) *pflag.Flag { - flagSet := cmd.Flags() - if len(name) == 1 { - // First convert the short flag into a long flag - // as the cmd.Flag() search only accepts long flags - if short := flagSet.ShorthandLookup(name); short != nil { - name = short.Name - } else { - set := cmd.InheritedFlags() - if short = set.ShorthandLookup(name); short != nil { - name = short.Name - } else { - return nil - } - } - } - return cmd.Flag(name) -} - -// CompDebug prints the specified string to the same file as where the -// completion script prints its logs. -// Note that completion printouts should never be on stdout as they would -// be wrongly interpreted as actual completion choices by the completion script. -func CompDebug(msg string, printToStdErr bool) { - msg = fmt.Sprintf("[Debug] %s", msg) - - // Such logs are only printed when the user has set the environment - // variable BASH_COMP_DEBUG_FILE to the path of some file to be used. - if path := os.Getenv("BASH_COMP_DEBUG_FILE"); path != "" { - f, err := os.OpenFile(path, - os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err == nil { - defer f.Close() - WriteStringAndCheck(f, msg) - } - } - - if printToStdErr { - // Must print to stderr for this not to be read by the completion script. - fmt.Fprint(os.Stderr, msg) - } -} - -// CompDebugln prints the specified string with a newline at the end -// to the same file as where the completion script prints its logs. -// Such logs are only printed when the user has set the environment -// variable BASH_COMP_DEBUG_FILE to the path of some file to be used. -func CompDebugln(msg string, printToStdErr bool) { - CompDebug(fmt.Sprintf("%s\n", msg), printToStdErr) -} - -// CompError prints the specified completion message to stderr. -func CompError(msg string) { - msg = fmt.Sprintf("[Error] %s", msg) - CompDebug(msg, true) -} - -// CompErrorln prints the specified completion message to stderr with a newline at the end. -func CompErrorln(msg string) { - CompError(fmt.Sprintf("%s\n", msg)) -} diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go index 3e112347..12d61b69 100644 --- a/vendor/github.com/spf13/cobra/fish_completions.go +++ b/vendor/github.com/spf13/cobra/fish_completions.go @@ -1,3 +1,17 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package cobra import ( @@ -11,8 +25,8 @@ import ( func genFishComp(buf io.StringWriter, name string, includeDesc bool) { // Variables should not contain a '-' or ':' character nameForVar := name - nameForVar = strings.Replace(nameForVar, "-", "_", -1) - nameForVar = strings.Replace(nameForVar, ":", "_", -1) + nameForVar = strings.ReplaceAll(nameForVar, "-", "_") + nameForVar = strings.ReplaceAll(nameForVar, ":", "_") compCmd := ShellCompRequestCmd if !includeDesc { @@ -21,44 +35,48 @@ func genFishComp(buf io.StringWriter, name string, includeDesc bool) { WriteStringAndCheck(buf, fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name)) WriteStringAndCheck(buf, fmt.Sprintf(` function __%[1]s_debug - set file "$BASH_COMP_DEBUG_FILE" + set -l file "$BASH_COMP_DEBUG_FILE" if test -n "$file" echo "$argv" >> $file end end function __%[1]s_perform_completion - __%[1]s_debug "Starting __%[1]s_perform_completion with: $argv" + __%[1]s_debug "Starting __%[1]s_perform_completion" - set args (string split -- " " "$argv") - set lastArg "$args[-1]" + # Extract all args except the last one + set -l args (commandline -opc) + # Extract the last arg and escape it in case it is a space + set -l lastArg (string escape -- (commandline -ct)) __%[1]s_debug "args: $args" __%[1]s_debug "last arg: $lastArg" - set emptyArg "" - if test -z "$lastArg" - __%[1]s_debug "Setting emptyArg" - set emptyArg \"\" - end - __%[1]s_debug "emptyArg: $emptyArg" + # Disable ActiveHelp which is not supported for fish shell + set -l requestComp "%[10]s=0 $args[1] %[3]s $args[2..-1] $lastArg" - if not type -q "$args[1]" - # This can happen when "complete --do-complete %[2]s" is called when running this script. - __%[1]s_debug "Cannot find $args[1]. No completions." - return - end - - set requestComp "$args[1] %[3]s $args[2..-1] $emptyArg" __%[1]s_debug "Calling $requestComp" + set -l results (eval $requestComp 2> /dev/null) - set results (eval $requestComp 2> /dev/null) - set comps $results[1..-2] - set directiveLine $results[-1] + # Some programs may output extra empty lines after the directive. + # Let's ignore them or else it will break completion. 
+ # Ref: https://github.com/spf13/cobra/issues/1279 + for line in $results[-1..1] + if test (string trim -- $line) = "" + # Found an empty line, remove it + set results $results[1..-2] + else + # Found non-empty line, we have our proper output + break + end + end + + set -l comps $results[1..-2] + set -l directiveLine $results[-1] # For Fish, when completing a flag with an = (e.g., -n=) # completions must be prefixed with the flag - set flagPrefix (string match -r -- '-.*=' "$lastArg") + set -l flagPrefix (string match -r -- '-.*=' "$lastArg") __%[1]s_debug "Comps: $comps" __%[1]s_debug "DirectiveLine: $directiveLine" @@ -71,120 +89,187 @@ function __%[1]s_perform_completion printf "%%s\n" "$directiveLine" end -# This function does three things: -# 1- Obtain the completions and store them in the global __%[1]s_comp_results -# 2- Set the __%[1]s_comp_do_file_comp flag if file completion should be performed -# and unset it otherwise -# 3- Return true if the completion results are not empty -function __%[1]s_prepare_completions - # Start fresh - set --erase __%[1]s_comp_do_file_comp - set --erase __%[1]s_comp_results +# this function limits calls to __%[1]s_perform_completion, by caching the result behind $__%[1]s_perform_completion_once_result +function __%[1]s_perform_completion_once + __%[1]s_debug "Starting __%[1]s_perform_completion_once" - # Check if the command-line is already provided. This is useful for testing. - if not set --query __%[1]s_comp_commandLine - # Use the -c flag to allow for completion in the middle of the line - set __%[1]s_comp_commandLine (commandline -c) + if test -n "$__%[1]s_perform_completion_once_result" + __%[1]s_debug "Seems like a valid result already exists, skipping __%[1]s_perform_completion" + return 0 end - __%[1]s_debug "commandLine is: $__%[1]s_comp_commandLine" - set results (__%[1]s_perform_completion "$__%[1]s_comp_commandLine") - set --erase __%[1]s_comp_commandLine - __%[1]s_debug "Completion results: $results" - - if test -z "$results" - __%[1]s_debug "No completion, probably due to a failure" - # Might as well do file completion, in case it helps - set --global __%[1]s_comp_do_file_comp 1 + set --global __%[1]s_perform_completion_once_result (__%[1]s_perform_completion) + if test -z "$__%[1]s_perform_completion_once_result" + __%[1]s_debug "No completions, probably due to a failure" return 1 end - set directive (string sub --start 2 $results[-1]) - set --global __%[1]s_comp_results $results[1..-2] + __%[1]s_debug "Performed completions and set __%[1]s_perform_completion_once_result" + return 0 +end + +# this function is used to clear the $__%[1]s_perform_completion_once_result variable after completions are run +function __%[1]s_clear_perform_completion_once_result + __%[1]s_debug "" + __%[1]s_debug "========= clearing previously set __%[1]s_perform_completion_once_result variable ==========" + set --erase __%[1]s_perform_completion_once_result + __%[1]s_debug "Successfully erased the variable __%[1]s_perform_completion_once_result" +end + +function __%[1]s_requires_order_preservation + __%[1]s_debug "" + __%[1]s_debug "========= checking if order preservation is required ==========" + + __%[1]s_perform_completion_once + if test -z "$__%[1]s_perform_completion_once_result" + __%[1]s_debug "Error determining if order preservation is required" + return 1 + end + + set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1]) + __%[1]s_debug "Directive is: $directive" + + set -l shellCompDirectiveKeepOrder %[9]d + set -l 
keeporder (math (math --scale 0 $directive / $shellCompDirectiveKeepOrder) %% 2) + __%[1]s_debug "Keeporder is: $keeporder" + + if test $keeporder -ne 0 + __%[1]s_debug "This does require order preservation" + return 0 + end + + __%[1]s_debug "This doesn't require order preservation" + return 1 +end + + +# This function does two things: +# - Obtain the completions and store them in the global __%[1]s_comp_results +# - Return false if file completion should be performed +function __%[1]s_prepare_completions + __%[1]s_debug "" + __%[1]s_debug "========= starting completion logic ==========" + + # Start fresh + set --erase __%[1]s_comp_results + + __%[1]s_perform_completion_once + __%[1]s_debug "Completion results: $__%[1]s_perform_completion_once_result" + + if test -z "$__%[1]s_perform_completion_once_result" + __%[1]s_debug "No completion, probably due to a failure" + # Might as well do file completion, in case it helps + return 1 + end + + set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1]) + set --global __%[1]s_comp_results $__%[1]s_perform_completion_once_result[1..-2] __%[1]s_debug "Completions are: $__%[1]s_comp_results" __%[1]s_debug "Directive is: $directive" - set shellCompDirectiveError %[4]d - set shellCompDirectiveNoSpace %[5]d - set shellCompDirectiveNoFileComp %[6]d - set shellCompDirectiveFilterFileExt %[7]d - set shellCompDirectiveFilterDirs %[8]d + set -l shellCompDirectiveError %[4]d + set -l shellCompDirectiveNoSpace %[5]d + set -l shellCompDirectiveNoFileComp %[6]d + set -l shellCompDirectiveFilterFileExt %[7]d + set -l shellCompDirectiveFilterDirs %[8]d if test -z "$directive" set directive 0 end - set compErr (math (math --scale 0 $directive / $shellCompDirectiveError) %% 2) + set -l compErr (math (math --scale 0 $directive / $shellCompDirectiveError) %% 2) if test $compErr -eq 1 __%[1]s_debug "Received error directive: aborting." # Might as well do file completion, in case it helps - set --global __%[1]s_comp_do_file_comp 1 return 1 end - set filefilter (math (math --scale 0 $directive / $shellCompDirectiveFilterFileExt) %% 2) - set dirfilter (math (math --scale 0 $directive / $shellCompDirectiveFilterDirs) %% 2) + set -l filefilter (math (math --scale 0 $directive / $shellCompDirectiveFilterFileExt) %% 2) + set -l dirfilter (math (math --scale 0 $directive / $shellCompDirectiveFilterDirs) %% 2) if test $filefilter -eq 1; or test $dirfilter -eq 1 __%[1]s_debug "File extension filtering or directory filtering not supported" # Do full file completion instead - set --global __%[1]s_comp_do_file_comp 1 return 1 end - set nospace (math (math --scale 0 $directive / $shellCompDirectiveNoSpace) %% 2) - set nofiles (math (math --scale 0 $directive / $shellCompDirectiveNoFileComp) %% 2) + set -l nospace (math (math --scale 0 $directive / $shellCompDirectiveNoSpace) %% 2) + set -l nofiles (math (math --scale 0 $directive / $shellCompDirectiveNoFileComp) %% 2) __%[1]s_debug "nospace: $nospace, nofiles: $nofiles" - # Important not to quote the variable for count to work - set numComps (count $__%[1]s_comp_results) - __%[1]s_debug "numComps: $numComps" + # If we want to prevent a space, or if file completion is NOT disabled, + # we need to count the number of valid completions. + # To do so, we will filter on prefix as the completions we have received + # may not already be filtered so as to allow fish to match on different + # criteria than the prefix. 
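The `%[9]d` placeholder decoded in this function is `ShellCompDirectiveKeepOrder` (new in this vendored copy); it drives the `complete -k` branch at the end of the script. A minimal sketch of a program opting in from the Go side, with a hypothetical `steps` command and candidate values:

```go
package main

import "github.com/spf13/cobra"

func main() {
	cmd := &cobra.Command{
		Use: "steps", // hypothetical
		// Returning ShellCompDirectiveKeepOrder asks the shell (fish here,
		// via the 'complete -k' registration below) not to sort candidates.
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return []string{"first", "second", "third"},
				cobra.ShellCompDirectiveKeepOrder | cobra.ShellCompDirectiveNoFileComp
		},
		Run: func(*cobra.Command, []string) {},
	}
	_ = cmd.Execute()
}
```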
+ if test $nospace -ne 0; or test $nofiles -eq 0 + set -l prefix (commandline -t | string escape --style=regex) + __%[1]s_debug "prefix: $prefix" - if test $numComps -eq 1; and test $nospace -ne 0 - # To support the "nospace" directive we trick the shell - # by outputting an extra, longer completion. - __%[1]s_debug "Adding second completion to perform nospace directive" - set --append __%[1]s_comp_results $__%[1]s_comp_results[1]. + set -l completions (string match -r -- "^$prefix.*" $__%[1]s_comp_results) + set --global __%[1]s_comp_results $completions + __%[1]s_debug "Filtered completions are: $__%[1]s_comp_results" + + # Important not to quote the variable for count to work + set -l numComps (count $__%[1]s_comp_results) + __%[1]s_debug "numComps: $numComps" + + if test $numComps -eq 1; and test $nospace -ne 0 + # We must first split on \t to get rid of the descriptions to be + # able to check what the actual completion will be. + # We don't need descriptions anyway since there is only a single + # real completion which the shell will expand immediately. + set -l split (string split --max 1 \t $__%[1]s_comp_results[1]) + + # Fish won't add a space if the completion ends with any + # of the following characters: @=/:., + set -l lastChar (string sub -s -1 -- $split) + if not string match -r -q "[@=/:.,]" -- "$lastChar" + # In other cases, to support the "nospace" directive we trick the shell + # by outputting an extra, longer completion. + __%[1]s_debug "Adding second completion to perform nospace directive" + set --global __%[1]s_comp_results $split[1] $split[1]. + __%[1]s_debug "Completions are now: $__%[1]s_comp_results" + end + end + + if test $numComps -eq 0; and test $nofiles -eq 0 + # To be consistent with bash and zsh, we only trigger file + # completion when there are no other completions + __%[1]s_debug "Requesting file completion" + return 1 + end end - if test $numComps -eq 0; and test $nofiles -eq 0 - __%[1]s_debug "Requesting file completion" - set --global __%[1]s_comp_do_file_comp 1 - end - - # If we don't want file completion, we must return true even if there - # are no completions found. This is because fish will perform the last - # completion command, even if its condition is false, if no other - # completion command was triggered - return (not set --query __%[1]s_comp_do_file_comp) + return 0 end # Since Fish completions are only loaded once the user triggers them, we trigger them ourselves # so we can properly delete any completions provided by another script. -# The space after the the program name is essential to trigger completion for the program -# and not completion of the program name itself. -complete --do-complete "%[2]s " > /dev/null 2>&1 -# Using '> /dev/null 2>&1' since '&>' is not supported in older versions of fish. +# Only do this if the program can be found, or else fish may print some errors; besides, +# the existing completions will only be loaded if the program can be found. +if type -q "%[2]s" + # The space after the program name is essential to trigger completion for the program + # and not completion of the program name itself. + # Also, we use '> /dev/null 2>&1' since '&>' is not supported in older versions of fish. + complete --do-complete "%[2]s " > /dev/null 2>&1 +end # Remove any pre-existing completions for the program since we will be handling all of them. complete -c %[2]s -e -# The order in which the below two lines are defined is very important so that __%[1]s_prepare_completions -# is called first. 
It is __%[1]s_prepare_completions that sets up the __%[1]s_comp_do_file_comp variable. -# -# This completion will be run second as complete commands are added FILO. -# It triggers file completion choices when __%[1]s_comp_do_file_comp is set. -complete -c %[2]s -n 'set --query __%[1]s_comp_do_file_comp' - -# This completion will be run first as complete commands are added FILO. -# The call to __%[1]s_prepare_completions will setup both __%[1]s_comp_results and __%[1]s_comp_do_file_comp. -# It provides the program's completion choices. -complete -c %[2]s -n '__%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' - +# this will get called after the two calls below and clear the $__%[1]s_perform_completion_once_result global +complete -c %[2]s -n '__%[1]s_clear_perform_completion_once_result' +# The call to __%[1]s_prepare_completions will setup __%[1]s_comp_results +# which provides the program's completion choices. +# If this doesn't require order preservation, we don't use the -k flag +complete -c %[2]s -n 'not __%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' +# otherwise we use the -k flag +complete -k -c %[2]s -n '__%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' `, nameForVar, name, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs)) + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name))) } // GenFishCompletion generates fish completion file and writes to the passed writer. diff --git a/vendor/github.com/spf13/cobra/fish_completions.md b/vendor/github.com/spf13/cobra/fish_completions.md deleted file mode 100644 index 19b2ed12..00000000 --- a/vendor/github.com/spf13/cobra/fish_completions.md +++ /dev/null @@ -1,4 +0,0 @@ -## Generating Fish Completions For Your cobra.Command - -Please refer to [Shell Completions](shell_completions.md) for details. - diff --git a/vendor/github.com/spf13/cobra/flag_groups.go b/vendor/github.com/spf13/cobra/flag_groups.go new file mode 100644 index 00000000..560612fd --- /dev/null +++ b/vendor/github.com/spf13/cobra/flag_groups.go @@ -0,0 +1,290 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cobra + +import ( + "fmt" + "sort" + "strings" + + flag "github.com/spf13/pflag" +) + +const ( + requiredAsGroupAnnotation = "cobra_annotation_required_if_others_set" + oneRequiredAnnotation = "cobra_annotation_one_required" + mutuallyExclusiveAnnotation = "cobra_annotation_mutually_exclusive" +) + +// MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors +// if the command is invoked with a subset (but not all) of the given flags. 
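flag_groups.go is entirely new in this vendored copy. A minimal sketch of the three group markers it introduces; the `upload` command and its flags are hypothetical:

```go
package main

import "github.com/spf13/cobra"

func main() {
	cmd := &cobra.Command{Use: "upload", Run: func(*cobra.Command, []string) {}} // hypothetical
	cmd.Flags().String("username", "", "user name")
	cmd.Flags().String("password", "", "password")
	cmd.Flags().Bool("json", false, "JSON output")
	cmd.Flags().Bool("yaml", false, "YAML output")

	// Error if only one of the pair is set.
	cmd.MarkFlagsRequiredTogether("username", "password")
	// Error if neither output format is chosen...
	cmd.MarkFlagsOneRequired("json", "yaml")
	// ...or if both are.
	cmd.MarkFlagsMutuallyExclusive("json", "yaml")

	_ = cmd.Execute()
}
```

ValidateFlagGroups, defined below, then reports the first violated constraint when the command runs, and enforceFlagGroupsForCompletion feeds the same annotations into shell completion.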
+func (c *Command) MarkFlagsRequiredTogether(flagNames ...string) { + c.mergePersistentFlags() + for _, v := range flagNames { + f := c.Flags().Lookup(v) + if f == nil { + panic(fmt.Sprintf("Failed to find flag %q and mark it as being required in a flag group", v)) + } + if err := c.Flags().SetAnnotation(v, requiredAsGroupAnnotation, append(f.Annotations[requiredAsGroupAnnotation], strings.Join(flagNames, " "))); err != nil { + // Only errs if the flag isn't found. + panic(err) + } + } +} + +// MarkFlagsOneRequired marks the given flags with annotations so that Cobra errors +// if the command is invoked without at least one flag from the given set of flags. +func (c *Command) MarkFlagsOneRequired(flagNames ...string) { + c.mergePersistentFlags() + for _, v := range flagNames { + f := c.Flags().Lookup(v) + if f == nil { + panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a one-required flag group", v)) + } + if err := c.Flags().SetAnnotation(v, oneRequiredAnnotation, append(f.Annotations[oneRequiredAnnotation], strings.Join(flagNames, " "))); err != nil { + // Only errs if the flag isn't found. + panic(err) + } + } +} + +// MarkFlagsMutuallyExclusive marks the given flags with annotations so that Cobra errors +// if the command is invoked with more than one flag from the given set of flags. +func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) { + c.mergePersistentFlags() + for _, v := range flagNames { + f := c.Flags().Lookup(v) + if f == nil { + panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a mutually exclusive flag group", v)) + } + // Each time this is called is a single new entry; this allows it to be a member of multiple groups if needed. + if err := c.Flags().SetAnnotation(v, mutuallyExclusiveAnnotation, append(f.Annotations[mutuallyExclusiveAnnotation], strings.Join(flagNames, " "))); err != nil { + panic(err) + } + } +} + +// ValidateFlagGroups validates the mutuallyExclusive/oneRequired/requiredAsGroup logic and returns the +// first error encountered. +func (c *Command) ValidateFlagGroups() error { + if c.DisableFlagParsing { + return nil + } + + flags := c.Flags() + + // groupStatus format is the list of flags as a unique ID, + // then a map of each flag name and whether it is set or not. 
+ groupStatus := map[string]map[string]bool{} + oneRequiredGroupStatus := map[string]map[string]bool{} + mutuallyExclusiveGroupStatus := map[string]map[string]bool{} + flags.VisitAll(func(pflag *flag.Flag) { + processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus) + processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus) + }) + + if err := validateRequiredFlagGroups(groupStatus); err != nil { + return err + } + if err := validateOneRequiredFlagGroups(oneRequiredGroupStatus); err != nil { + return err + } + if err := validateExclusiveFlagGroups(mutuallyExclusiveGroupStatus); err != nil { + return err + } + return nil +} + +func hasAllFlags(fs *flag.FlagSet, flagnames ...string) bool { + for _, fname := range flagnames { + f := fs.Lookup(fname) + if f == nil { + return false + } + } + return true +} + +func processFlagForGroupAnnotation(flags *flag.FlagSet, pflag *flag.Flag, annotation string, groupStatus map[string]map[string]bool) { + groupInfo, found := pflag.Annotations[annotation] + if found { + for _, group := range groupInfo { + if groupStatus[group] == nil { + flagnames := strings.Split(group, " ") + + // Only consider this flag group at all if all the flags are defined. + if !hasAllFlags(flags, flagnames...) { + continue + } + + groupStatus[group] = make(map[string]bool, len(flagnames)) + for _, name := range flagnames { + groupStatus[group][name] = false + } + } + + groupStatus[group][pflag.Name] = pflag.Changed + } + } +} + +func validateRequiredFlagGroups(data map[string]map[string]bool) error { + keys := sortedKeys(data) + for _, flagList := range keys { + flagnameAndStatus := data[flagList] + + unset := []string{} + for flagname, isSet := range flagnameAndStatus { + if !isSet { + unset = append(unset, flagname) + } + } + if len(unset) == len(flagnameAndStatus) || len(unset) == 0 { + continue + } + + // Sort values, so they can be tested/scripted against consistently. + sort.Strings(unset) + return fmt.Errorf("if any flags in the group [%v] are set they must all be set; missing %v", flagList, unset) + } + + return nil +} + +func validateOneRequiredFlagGroups(data map[string]map[string]bool) error { + keys := sortedKeys(data) + for _, flagList := range keys { + flagnameAndStatus := data[flagList] + var set []string + for flagname, isSet := range flagnameAndStatus { + if isSet { + set = append(set, flagname) + } + } + if len(set) >= 1 { + continue + } + + // Sort values, so they can be tested/scripted against consistently. + sort.Strings(set) + return fmt.Errorf("at least one of the flags in the group [%v] is required", flagList) + } + return nil +} + +func validateExclusiveFlagGroups(data map[string]map[string]bool) error { + keys := sortedKeys(data) + for _, flagList := range keys { + flagnameAndStatus := data[flagList] + var set []string + for flagname, isSet := range flagnameAndStatus { + if isSet { + set = append(set, flagname) + } + } + if len(set) == 0 || len(set) == 1 { + continue + } + + // Sort values, so they can be tested/scripted against consistently. 
+ sort.Strings(set) + return fmt.Errorf("if any flags in the group [%v] are set none of the others can be; %v were all set", flagList, set) + } + return nil +} + +func sortedKeys(m map[string]map[string]bool) []string { + keys := make([]string, len(m)) + i := 0 + for k := range m { + keys[i] = k + i++ + } + sort.Strings(keys) + return keys +} + +// enforceFlagGroupsForCompletion will do the following: +// - when a flag in a group is present, other flags in the group will be marked required +// - when none of the flags in a one-required group are present, all flags in the group will be marked required +// - when a flag in a mutually exclusive group is present, other flags in the group will be marked as hidden +// This allows the standard completion logic to behave appropriately for flag groups +func (c *Command) enforceFlagGroupsForCompletion() { + if c.DisableFlagParsing { + return + } + + flags := c.Flags() + groupStatus := map[string]map[string]bool{} + oneRequiredGroupStatus := map[string]map[string]bool{} + mutuallyExclusiveGroupStatus := map[string]map[string]bool{} + c.Flags().VisitAll(func(pflag *flag.Flag) { + processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus) + processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus) + }) + + // If a flag that is part of a group is present, we make all the other flags + // of that group required so that the shell completion suggests them automatically + for flagList, flagnameAndStatus := range groupStatus { + for _, isSet := range flagnameAndStatus { + if isSet { + // One of the flags of the group is set, mark the other ones as required + for _, fName := range strings.Split(flagList, " ") { + _ = c.MarkFlagRequired(fName) + } + } + } + } + + // If none of the flags of a one-required group are present, we make all the flags + // of that group required so that the shell completion suggests them automatically + for flagList, flagnameAndStatus := range oneRequiredGroupStatus { + isSet := false + + for _, isSet = range flagnameAndStatus { + if isSet { + break + } + } + + // None of the flags of the group are set, mark all flags in the group + // as required + if !isSet { + for _, fName := range strings.Split(flagList, " ") { + _ = c.MarkFlagRequired(fName) + } + } + } + + // If a flag that is mutually exclusive to others is present, we hide the other + // flags of that group so the shell completion does not suggest them + for flagList, flagnameAndStatus := range mutuallyExclusiveGroupStatus { + for flagName, isSet := range flagnameAndStatus { + if isSet { + // One of the flags of the mutually exclusive group is set, mark the other ones as hidden + // Don't mark the flag that is already set as hidden because it may be an + // array or slice flag and therefore must continue being suggested + for _, fName := range strings.Split(flagList, " ") { + if fName != flagName { + flag := c.Flags().Lookup(fName) + flag.Hidden = true + } + } + } + } + } +} diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go index c55be71c..a830b7bc 100644 --- a/vendor/github.com/spf13/cobra/powershell_completions.go +++ b/vendor/github.com/spf13/cobra/powershell_completions.go @@ -1,3 +1,17 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file 
except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // The generated scripts require PowerShell v5.0+ (which comes Windows 10, but // can be downloaded separately for windows 7 or 8.1). @@ -8,9 +22,15 @@ import ( "fmt" "io" "os" + "strings" ) func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) { + // Variables should not contain a '-' or ':' character + nameForVar := name + nameForVar = strings.ReplaceAll(nameForVar, "-", "_") + nameForVar = strings.ReplaceAll(nameForVar, ":", "_") + compCmd := ShellCompRequestCmd if !includeDesc { compCmd = ShellCompNoDescRequestCmd @@ -27,7 +47,7 @@ filter __%[1]s_escapeStringWithSpecialChars { `+" $_ -replace '\\s|#|@|\\$|;|,|''|\\{|\\}|\\(|\\)|\"|`|\\||<|>|&','`$&'"+` } -Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock { +[scriptblock]${__%[2]sCompleterBlock} = { param( $WordToComplete, $CommandAst, @@ -50,18 +70,20 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock { if ($Command.Length -gt $CursorPosition) { $Command=$Command.Substring(0,$CursorPosition) } - __%[1]s_debug "Truncated command: $Command" + __%[1]s_debug "Truncated command: $Command" - $ShellCompDirectiveError=%[3]d - $ShellCompDirectiveNoSpace=%[4]d - $ShellCompDirectiveNoFileComp=%[5]d - $ShellCompDirectiveFilterFileExt=%[6]d - $ShellCompDirectiveFilterDirs=%[7]d + $ShellCompDirectiveError=%[4]d + $ShellCompDirectiveNoSpace=%[5]d + $ShellCompDirectiveNoFileComp=%[6]d + $ShellCompDirectiveFilterFileExt=%[7]d + $ShellCompDirectiveFilterDirs=%[8]d + $ShellCompDirectiveKeepOrder=%[9]d - # Prepare the command to request completions for the program. + # Prepare the command to request completions for the program. # Split the command at the first space to separate the program and arguments. $Program,$Arguments = $Command.Split(" ",2) - $RequestComp="$Program %[2]s $Arguments" + + $RequestComp="$Program %[3]s $Arguments" __%[1]s_debug "RequestComp: $RequestComp" # we cannot use $WordToComplete because it @@ -85,16 +107,27 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock { # If the last parameter is complete (there is a space following it) # We add an extra empty parameter so we can indicate this to the go method. 
__%[1]s_debug "Adding extra empty parameter" -`+" # We need to use `\"`\" to pass an empty argument a \"\" or '' does not work!!!"+` -`+" $RequestComp=\"$RequestComp\" + ' `\"`\"' "+` + # PowerShell 7.2+ changed the way how the arguments are passed to executables, + # so for pre-7.2 or when Legacy argument passing is enabled we need to use +`+" # `\"`\" to pass an empty argument, a \"\" or '' does not work!!!"+` + if ($PSVersionTable.PsVersion -lt [version]'7.2.0' -or + ($PSVersionTable.PsVersion -lt [version]'7.3.0' -and -not [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -or + (($PSVersionTable.PsVersion -ge [version]'7.3.0' -or [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -and + $PSNativeCommandArgumentPassing -eq 'Legacy')) { +`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+` + } else { + $RequestComp="$RequestComp" + ' ""' + } } __%[1]s_debug "Calling $RequestComp" + # First disable ActiveHelp which is not supported for Powershell + ${env:%[10]s}=0 + #call the command store the output in $out and redirect stderr and stdout to null # $Out is an array contains each line per element Invoke-Expression -OutVariable out "$RequestComp" 2>&1 | Out-Null - # get directive from last line [int]$Directive = $Out[-1].TrimStart(':') if ($Directive -eq "") { @@ -114,7 +147,7 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock { } $Longest = 0 - $Values = $Out | ForEach-Object { + [Array]$Values = $Out | ForEach-Object { #Split the output in name and description `+" $Name, $Description = $_.Split(\"`t\",2)"+` __%[1]s_debug "Name: $Name Description: $Description" @@ -140,6 +173,30 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock { $Space = "" } + if ((($Directive -band $ShellCompDirectiveFilterFileExt) -ne 0 ) -or + (($Directive -band $ShellCompDirectiveFilterDirs) -ne 0 )) { + __%[1]s_debug "ShellCompDirectiveFilterFileExt ShellCompDirectiveFilterDirs are not supported" + + # return here to prevent the completion of the extensions + return + } + + $Values = $Values | Where-Object { + # filter the result + $_.Name -like "$WordToComplete*" + + # Join the flag back if we have an equal sign flag + if ( $IsEqualFlag ) { + __%[1]s_debug "Join the equal sign flag back to the completion value" + $_.Name = $Flag + "=" + $_.Name + } + } + + # we sort the values in ascending order by name if keep order isn't passed + if (($Directive -band $ShellCompDirectiveKeepOrder) -eq 0 ) { + $Values = $Values | Sort-Object -Property Name + } + if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) { __%[1]s_debug "ShellCompDirectiveNoFileComp is called" @@ -153,32 +210,13 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock { } } - if ((($Directive -band $ShellCompDirectiveFilterFileExt) -ne 0 ) -or - (($Directive -band $ShellCompDirectiveFilterDirs) -ne 0 )) { - __%[1]s_debug "ShellCompDirectiveFilterFileExt ShellCompDirectiveFilterDirs are not supported" - - # return here to prevent the completion of the extensions - return - } - - $Values = $Values | Where-Object { - # filter the result - $_.Name -like "$WordToComplete*" - - # Join the flag back if we have a equal sign flag - if ( $IsEqualFlag ) { - __%[1]s_debug "Join the equal sign flag back to the completion value" - $_.Name = $Flag + "=" + $_.Name - } - } - # Get the current mode $Mode = (Get-PSReadLineKeyHandler | Where-Object {$_.Key -eq "Tab" }).Function __%[1]s_debug "Mode: $Mode" $Values | ForEach-Object { - # store temporay because switch will overwrite $_ + # store temporary 
because switch will overwrite $_ $comp = $_ # PowerShell supports three different completion modes @@ -233,16 +271,18 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock { Default { # Like MenuComplete but we don't want to add a space here because # the user need to press space anyway to get the completion. - # Description will not be shown because thats not possible with TabCompleteNext + # Description will not be shown because that's not possible with TabCompleteNext [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars), "$($comp.Name)", 'ParameterValue', "$($comp.Description)") } } } } -`, name, compCmd, + +Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock ${__%[2]sCompleterBlock} +`, name, nameForVar, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs)) + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name))) } func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error { diff --git a/vendor/github.com/spf13/cobra/powershell_completions.md b/vendor/github.com/spf13/cobra/powershell_completions.md deleted file mode 100644 index c449f1e5..00000000 --- a/vendor/github.com/spf13/cobra/powershell_completions.md +++ /dev/null @@ -1,3 +0,0 @@ -# Generating PowerShell Completions For Your Own cobra.Command - -Please refer to [Shell Completions](shell_completions.md#powershell-completions) for details. diff --git a/vendor/github.com/spf13/cobra/projects_using_cobra.md b/vendor/github.com/spf13/cobra/projects_using_cobra.md deleted file mode 100644 index d98a71e3..00000000 --- a/vendor/github.com/spf13/cobra/projects_using_cobra.md +++ /dev/null @@ -1,38 +0,0 @@ -## Projects using Cobra - -- [Arduino CLI](https://github.com/arduino/arduino-cli) -- [Bleve](http://www.blevesearch.com/) -- [CockroachDB](http://www.cockroachlabs.com/) -- [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) -- [Delve](https://github.com/derekparker/delve) -- [Docker (distribution)](https://github.com/docker/distribution) -- [Etcd](https://etcd.io/) -- [Gardener](https://github.com/gardener/gardenctl) -- [Giant Swarm's gsctl](https://github.com/giantswarm/gsctl) -- [Git Bump](https://github.com/erdaltsksn/git-bump) -- [Github CLI](https://github.com/cli/cli) -- [GitHub Labeler](https://github.com/erdaltsksn/gh-label) -- [Golangci-lint](https://golangci-lint.run) -- [GopherJS](http://www.gopherjs.org/) -- [Helm](https://helm.sh) -- [Hugo](https://gohugo.io) -- [Istio](https://istio.io) -- [Kool](https://github.com/kool-dev/kool) -- [Kubernetes](http://kubernetes.io/) -- [Linkerd](https://linkerd.io/) -- [Mattermost-server](https://github.com/mattermost/mattermost-server) -- [Metal Stack CLI](https://github.com/metal-stack/metalctl) -- [Moby (former Docker)](https://github.com/moby/moby) -- [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) -- [OpenShift](https://www.openshift.com/) -- [Ory Hydra](https://github.com/ory/hydra) -- [Ory Kratos](https://github.com/ory/kratos) -- [Pouch](https://github.com/alibaba/pouch) -- [ProjectAtomic (enterprise)](http://www.projectatomic.io/) -- [Prototool](https://github.com/uber/prototool) -- [Random](https://github.com/erdaltsksn/random) -- [Rclone](https://rclone.org/) -- [Skaffold](https://skaffold.dev/) -- [Tendermint](https://github.com/tendermint/tendermint) -- [Twitch 
CLI](https://github.com/twitchdev/twitch-cli) -- [Werf](https://werf.io/) diff --git a/vendor/github.com/spf13/cobra/shell_completions.go b/vendor/github.com/spf13/cobra/shell_completions.go index d99bf91e..b035742d 100644 --- a/vendor/github.com/spf13/cobra/shell_completions.go +++ b/vendor/github.com/spf13/cobra/shell_completions.go @@ -1,3 +1,17 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package cobra import ( diff --git a/vendor/github.com/spf13/cobra/shell_completions.md b/vendor/github.com/spf13/cobra/shell_completions.md deleted file mode 100644 index cd533ac3..00000000 --- a/vendor/github.com/spf13/cobra/shell_completions.md +++ /dev/null @@ -1,483 +0,0 @@ -# Generating shell completions - -Cobra can generate shell completions for multiple shells. -The currently supported shells are: -- Bash -- Zsh -- fish -- PowerShell - -If you are using the generator, you can create a completion command by running - -```bash -cobra add completion -``` -and then modifying the generated `cmd/completion.go` file to look something like this -(writing the shell script to stdout allows the most flexible use): - -```go -var completionCmd = &cobra.Command{ - Use: "completion [bash|zsh|fish|powershell]", - Short: "Generate completion script", - Long: `To load completions: - -Bash: - - $ source <(yourprogram completion bash) - - # To load completions for each session, execute once: - # Linux: - $ yourprogram completion bash > /etc/bash_completion.d/yourprogram - # macOS: - $ yourprogram completion bash > /usr/local/etc/bash_completion.d/yourprogram - -Zsh: - - # If shell completion is not already enabled in your environment, - # you will need to enable it. You can execute the following once: - - $ echo "autoload -U compinit; compinit" >> ~/.zshrc - - # To load completions for each session, execute once: - $ yourprogram completion zsh > "${fpath[1]}/_yourprogram" - - # You will need to start a new shell for this setup to take effect. - -fish: - - $ yourprogram completion fish | source - - # To load completions for each session, execute once: - $ yourprogram completion fish > ~/.config/fish/completions/yourprogram.fish - -PowerShell: - - PS> yourprogram completion powershell | Out-String | Invoke-Expression - - # To load completions for every new session, run: - PS> yourprogram completion powershell > yourprogram.ps1 - # and source this file from your PowerShell profile. 
-`, - DisableFlagsInUseLine: true, - ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, - Args: cobra.ExactValidArgs(1), - Run: func(cmd *cobra.Command, args []string) { - switch args[0] { - case "bash": - cmd.Root().GenBashCompletion(os.Stdout) - case "zsh": - cmd.Root().GenZshCompletion(os.Stdout) - case "fish": - cmd.Root().GenFishCompletion(os.Stdout, true) - case "powershell": - cmd.Root().GenPowerShellCompletion(os.Stdout) - } - }, -} -``` - -**Note:** The cobra generator may include messages printed to stdout, for example, if the config file is loaded; this will break the auto-completion script so must be removed. - -# Customizing completions - -The generated completion scripts will automatically handle completing commands and flags. However, you can make your completions much more powerful by providing information to complete your program's nouns and flag values. - -## Completion of nouns - -### Static completion of nouns - -Cobra allows you to provide a pre-defined list of completion choices for your nouns using the `ValidArgs` field. -For example, if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. -Some simplified code from `kubectl get` looks like: - -```go -validArgs []string = { "pod", "node", "service", "replicationcontroller" } - -cmd := &cobra.Command{ - Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)", - Short: "Display one or many resources", - Long: get_long, - Example: get_example, - Run: func(cmd *cobra.Command, args []string) { - cobra.CheckErr(RunGet(f, out, cmd, args)) - }, - ValidArgs: validArgs, -} -``` - -Notice we put the `ValidArgs` field on the `get` sub-command. Doing so will give results like: - -```bash -$ kubectl get [tab][tab] -node pod replicationcontroller service -``` - -#### Aliases for nouns - -If your nouns have aliases, you can define them alongside `ValidArgs` using `ArgAliases`: - -```go -argAliases []string = { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" } - -cmd := &cobra.Command{ - ... - ValidArgs: validArgs, - ArgAliases: argAliases -} -``` - -The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by -the completion algorithm if entered manually, e.g. in: - -```bash -$ kubectl get rc [tab][tab] -backend frontend database -``` - -Note that without declaring `rc` as an alias, the completion algorithm would not know to show the list of -replication controllers following `rc`. - -### Dynamic completion of nouns - -In some cases it is not possible to provide a list of completions in advance. Instead, the list of completions must be determined at execution-time. In a similar fashion as for static completions, you can use the `ValidArgsFunction` field to provide a Go function that Cobra will execute when it needs the list of completion choices for the nouns of a command. Note that either `ValidArgs` or `ValidArgsFunction` can be used for a single cobra command, but not both. 
-Simplified code from `helm status` looks like: - -```go -cmd := &cobra.Command{ - Use: "status RELEASE_NAME", - Short: "Display the status of the named release", - Long: status_long, - RunE: func(cmd *cobra.Command, args []string) { - RunGet(args[0]) - }, - ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - if len(args) != 0 { - return nil, cobra.ShellCompDirectiveNoFileComp - } - return getReleasesFromCluster(toComplete), cobra.ShellCompDirectiveNoFileComp - }, -} -``` -Where `getReleasesFromCluster()` is a Go function that obtains the list of current Helm releases running on the Kubernetes cluster. -Notice we put the `ValidArgsFunction` on the `status` sub-command. Let's assume the Helm releases on the cluster are: `harbor`, `notary`, `rook` and `thanos` then this dynamic completion will give results like: - -```bash -$ helm status [tab][tab] -harbor notary rook thanos -``` -You may have noticed the use of `cobra.ShellCompDirective`. These directives are bit fields allowing to control some shell completion behaviors for your particular completion. You can combine them with the bit-or operator such as `cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp` -```go -// Indicates that the shell will perform its default behavior after completions -// have been provided (this implies none of the other directives). -ShellCompDirectiveDefault - -// Indicates an error occurred and completions should be ignored. -ShellCompDirectiveError - -// Indicates that the shell should not add a space after the completion, -// even if there is a single completion provided. -ShellCompDirectiveNoSpace - -// Indicates that the shell should not provide file completion even when -// no completion is provided. -ShellCompDirectiveNoFileComp - -// Indicates that the returned completions should be used as file extension filters. -// For example, to complete only files of the form *.json or *.yaml: -// return []string{"yaml", "json"}, ShellCompDirectiveFilterFileExt -// For flags, using MarkFlagFilename() and MarkPersistentFlagFilename() -// is a shortcut to using this directive explicitly. -// -ShellCompDirectiveFilterFileExt - -// Indicates that only directory names should be provided in file completion. -// For example: -// return nil, ShellCompDirectiveFilterDirs -// For flags, using MarkFlagDirname() is a shortcut to using this directive explicitly. -// -// To request directory names within another directory, the returned completions -// should specify a single directory name within which to search. For example, -// to complete directories within "themes/": -// return []string{"themes"}, ShellCompDirectiveFilterDirs -// -ShellCompDirectiveFilterDirs -``` - -***Note***: When using the `ValidArgsFunction`, Cobra will call your registered function after having parsed all flags and arguments provided in the command-line. You therefore don't need to do this parsing yourself. For example, when a user calls `helm status --namespace my-rook-ns [tab][tab]`, Cobra will call your registered `ValidArgsFunction` after having parsed the `--namespace` flag, as it would have done when calling the `RunE` function. - -#### Debugging - -Cobra achieves dynamic completion through the use of a hidden command called by the completion script. 
To debug your Go completion code, you can call this hidden command directly: -```bash -$ helm __complete status har -harbor -:4 -Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr -``` -***Important:*** If the noun to complete is empty (when the user has not yet typed any letters of that noun), you must pass an empty parameter to the `__complete` command: -```bash -$ helm __complete status "" -harbor -notary -rook -thanos -:4 -Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr -``` -Calling the `__complete` command directly allows you to run the Go debugger to troubleshoot your code. You can also add printouts to your code; Cobra provides the following functions to use for printouts in Go completion code: -```go -// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE -// is set to a file path) and optionally prints to stderr. -cobra.CompDebug(msg string, printToStdErr bool) { -cobra.CompDebugln(msg string, printToStdErr bool) - -// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE -// is set to a file path) and to stderr. -cobra.CompError(msg string) -cobra.CompErrorln(msg string) -``` -***Important:*** You should **not** leave traces that print directly to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging traces functions mentioned above. - -## Completions for flags - -### Mark flags as required - -Most of the time completions will only show sub-commands. But if a flag is required to make a sub-command work, you probably want it to show up when the user types [tab][tab]. You can mark a flag as 'Required' like so: - -```go -cmd.MarkFlagRequired("pod") -cmd.MarkFlagRequired("container") -``` - -and you'll get something like - -```bash -$ kubectl exec [tab][tab] --c --container= -p --pod= -``` - -### Specify dynamic flag completion - -As for nouns, Cobra provides a way of defining dynamic completion of flags. To provide a Go function that Cobra will execute when it needs the list of completion choices for a flag, you must register the function using the `command.RegisterFlagCompletionFunc()` function. - -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"json", "table", "yaml"}, cobra.ShellCompDirectiveDefault -}) -``` -Notice that calling `RegisterFlagCompletionFunc()` is done through the `command` with which the flag is associated. In our example this dynamic completion will give results like so: - -```bash -$ helm status --output [tab][tab] -json table yaml -``` - -#### Debugging - -You can also easily debug your Go completion code for flags: -```bash -$ helm __complete status --output "" -json -table -yaml -:4 -Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr -``` -***Important:*** You should **not** leave traces that print to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging traces functions mentioned further above. 
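A minimal, self-contained sketch (not part of the vendored document or this diff) of how the debugging helpers above pair with `RegisterFlagCompletionFunc()`: the completion function leaves a `CompDebugln` trace while filtering its candidates. The command name `status`, the `--output` flag, and the candidate values are illustrative, not from the source.

```go
package main

import (
	"strings"

	"github.com/spf13/cobra"
)

func main() {
	// Hypothetical command and flag, used only to illustrate the pattern.
	cmd := &cobra.Command{
		Use: "status",
		Run: func(cmd *cobra.Command, args []string) {},
	}
	cmd.Flags().String("output", "table", "output format")

	// The completion function filters candidates against the partial word and
	// leaves a trace via CompDebugln; the trace goes to the file named by
	// BASH_COMP_DEBUG_FILE (if set), never to stdout, which would be
	// interpreted as completion choices by the completion script.
	_ = cmd.RegisterFlagCompletionFunc("output",
		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			cobra.CompDebugln("completing --output, prefix: "+toComplete, false)
			var matches []string
			for _, v := range []string{"json", "table", "yaml"} {
				if strings.HasPrefix(v, toComplete) {
					matches = append(matches, v)
				}
			}
			return matches, cobra.ShellCompDirectiveNoFileComp
		})

	_ = cmd.Execute()
}
```

With `BASH_COMP_DEBUG_FILE` pointing at a file, each completion attempt for `--output` appends the debug message there, which can then be correlated with the directive reported by the hidden `__complete` command shown above.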
- -### Specify valid filename extensions for flags that take a filename - -To limit completions of flag values to file names with certain extensions you can either use the different `MarkFlagFilename()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterFileExt`, like so: -```go -flagName := "output" -cmd.MarkFlagFilename(flagName, "yaml", "json") -``` -or -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"yaml", "json"}, ShellCompDirectiveFilterFileExt}) -``` - -### Limit flag completions to directory names - -To limit completions of flag values to directory names you can either use the `MarkFlagDirname()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs`, like so: -```go -flagName := "output" -cmd.MarkFlagDirname(flagName) -``` -or -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return nil, cobra.ShellCompDirectiveFilterDirs -}) -``` -To limit completions of flag values to directory names *within another directory* you can use a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs` like so: -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"themes"}, cobra.ShellCompDirectiveFilterDirs -}) -``` -### Descriptions for completions - -`zsh`, `fish` and `powershell` allow for descriptions to annotate completion choices. For commands and flags, Cobra will provide the descriptions automatically, based on usage information. For example, using zsh: -``` -$ helm s[tab] -search -- search for a keyword in charts -show -- show information of a chart -status -- displays the status of the named release -``` -while using fish: -``` -$ helm s[tab] -search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release) -``` - -Cobra allows you to add annotations to your own completions. Simply add the annotation text after each completion, following a `\t` separator. This technique applies to completions returned by `ValidArgs`, `ValidArgsFunction` and `RegisterFlagCompletionFunc()`. For example: -```go -ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"harbor\tAn image registry", "thanos\tLong-term metrics"}, cobra.ShellCompDirectiveNoFileComp -}} -``` -or -```go -ValidArgs: []string{"bash\tCompletions for bash", "zsh\tCompletions for zsh"} -``` -## Bash completions - -### Dependencies - -The bash completion script generated by Cobra requires the `bash_completion` package. You should update the help text of your completion command to show how to install the `bash_completion` package ([Kubectl docs](https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion)) - -### Aliases - -You can also configure `bash` aliases for your program and they will also support completions. - -```bash -alias aliasname=origcommand -complete -o default -F __start_origcommand aliasname - -# and now when you run `aliasname` completion will make -# suggestions as it did for `origcommand`. 
- -$ aliasname -completion firstcommand secondcommand -``` -### Bash legacy dynamic completions - -For backward compatibility, Cobra still supports its bash legacy dynamic completion solution. -Please refer to [Bash Completions](bash_completions.md) for details. - -## Zsh completions - -Cobra supports native zsh completion generated from the root `cobra.Command`. -The generated completion script should be put somewhere in your `$fpath` and be named -`_<yourprogram>`. You will need to start a new shell for the completions to become available. - -Zsh supports descriptions for completions. Cobra will provide the description automatically, -based on usage information. Cobra provides a way to completely disable such descriptions by -using `GenZshCompletionNoDesc()` or `GenZshCompletionFileNoDesc()`. You can choose to make -this a configurable option to your users. -``` -# With descriptions -$ helm s[tab] -search -- search for a keyword in charts -show -- show information of a chart -status -- displays the status of the named release - -# Without descriptions -$ helm s[tab] -search show status -``` -*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`. - -### Limitations - -* Custom completions implemented in Bash scripting (legacy) are not supported and will be ignored for `zsh` (including the use of the `BashCompCustom` flag annotation). - * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`). -* The function `MarkFlagCustom()` is not supported and will be ignored for `zsh`. - * You should instead use `RegisterFlagCompletionFunc()`. - -### Zsh completions standardization - -Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backward-compatible, some small changes in behavior were introduced. -Please refer to [Zsh Completions](zsh_completions.md) for details. - -## fish completions - -Cobra supports native fish completions generated from the root `cobra.Command`. You can use the `command.GenFishCompletion()` or `command.GenFishCompletionFile()` functions. You must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users. -``` -# With descriptions -$ helm s[tab] -search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release) - -# Without descriptions -$ helm s[tab] -search show status -``` -*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`. - -### Limitations - -* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `fish` (including the use of the `BashCompCustom` flag annotation). - * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`). -* The function `MarkFlagCustom()` is not supported and will be ignored for `fish`. - * You should instead use `RegisterFlagCompletionFunc()`. 
-* The following flag completion annotations are not supported and will be ignored for `fish`: - * `BashCompFilenameExt` (filtering by file extension) - * `BashCompSubdirsInDir` (filtering by directory) -* The functions corresponding to the above annotations are consequently not supported and will be ignored for `fish`: - * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension) - * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory) -* Similarly, the following completion directives are not supported and will be ignored for `fish`: - * `ShellCompDirectiveFilterFileExt` (filtering by file extension) - * `ShellCompDirectiveFilterDirs` (filtering by directory) - -## PowerShell completions - -Cobra supports native PowerShell completions generated from the root `cobra.Command`. You can use the `command.GenPowerShellCompletion()` or `command.GenPowerShellCompletionFile()` functions. To include descriptions use `command.GenPowerShellCompletionWithDesc()` and `command.GenPowerShellCompletionFileWithDesc()`. Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users. - -The script is designed to support all three PowerShell completion modes: - -* TabCompleteNext (default windows style - on each key press the next option is displayed) -* Complete (works like bash) -* MenuComplete (works like zsh) - -You set the mode with `Set-PSReadLineKeyHandler -Key Tab -Function <mode>`. Descriptions are only displayed when using the `Complete` or `MenuComplete` mode. - -Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$Profile` environment variable. See `Get-Help about_Profiles` for more info about PowerShell profiles. - -``` -# With descriptions and Mode 'Complete' -$ helm s[tab] -search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release) - -# With descriptions and Mode 'MenuComplete' The description of the current selected value will be displayed below the suggestions. -$ helm s[tab] -search show status - -search for a keyword in charts - -# Without descriptions -$ helm s[tab] -search show status -``` - -### Limitations - -* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `powershell` (including the use of the `BashCompCustom` flag annotation). - * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`). -* The function `MarkFlagCustom()` is not supported and will be ignored for `powershell`. - * You should instead use `RegisterFlagCompletionFunc()`. 
-* The following flag completion annotations are not supported and will be ignored for `powershell`: - * `BashCompFilenameExt` (filtering by file extension) - * `BashCompSubdirsInDir` (filtering by directory) -* The functions corresponding to the above annotations are consequently not supported and will be ignored for `powershell`: - * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension) - * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory) -* Similarly, the following completion directives are not supported and will be ignored for `powershell`: - * `ShellCompDirectiveFilterFileExt` (filtering by file extension) - * `ShellCompDirectiveFilterDirs` (filtering by directory) diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go index 2e840285..1856e4c7 100644 --- a/vendor/github.com/spf13/cobra/zsh_completions.go +++ b/vendor/github.com/spf13/cobra/zsh_completions.go @@ -1,3 +1,17 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package cobra import ( @@ -75,7 +89,8 @@ func genZshComp(buf io.StringWriter, name string, includeDesc bool) { if !includeDesc { compCmd = ShellCompNoDescRequestCmd } - WriteStringAndCheck(buf, fmt.Sprintf(`#compdef _%[1]s %[1]s + WriteStringAndCheck(buf, fmt.Sprintf(`#compdef %[1]s +compdef _%[1]s %[1]s # zsh completion for %-36[1]s -*- shell-script -*- @@ -94,8 +109,9 @@ _%[1]s() local shellCompDirectiveNoFileComp=%[5]d local shellCompDirectiveFilterFileExt=%[6]d local shellCompDirectiveFilterDirs=%[7]d + local shellCompDirectiveKeepOrder=%[8]d - local lastParam lastChar flagPrefix requestComp out directive compCount comp lastComp + local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace keepOrder local -a completions __%[1]s_debug "\n========= starting completion logic ==========" @@ -163,8 +179,24 @@ _%[1]s() return fi - compCount=0 + local activeHelpMarker="%[9]s" + local endIndex=${#activeHelpMarker} + local startIndex=$((${#activeHelpMarker}+1)) + local hasActiveHelp=0 while IFS='\n' read -r comp; do + # Check if this is an activeHelp statement (i.e., prefixed with $activeHelpMarker) + if [ "${comp[1,$endIndex]}" = "$activeHelpMarker" ];then + __%[1]s_debug "ActiveHelp found: $comp" + comp="${comp[$startIndex,-1]}" + if [ -n "$comp" ]; then + compadd -x "${comp}" + __%[1]s_debug "ActiveHelp will need delimiter" + hasActiveHelp=1 + fi + + continue + fi + if [ -n "$comp" ]; then # If requested, completions are returned with a description. # The description is preceded by a TAB character. @@ -172,16 +204,36 @@ _%[1]s() # We first need to escape any : as part of the completion itself. 
comp=${comp//:/\\:} - local tab=$(printf '\t') + local tab="$(printf '\t')" comp=${comp//$tab/:} - ((compCount++)) __%[1]s_debug "Adding completion: ${comp}" completions+=${comp} lastComp=$comp fi done < <(printf "%%s\n" "${out[@]}") + # Add a delimiter after the activeHelp statements, but only if: + # - there are completions following the activeHelp statements, or + # - file completion will be performed (so there will be choices after the activeHelp) + if [ $hasActiveHelp -eq 1 ]; then + if [ ${#completions} -ne 0 ] || [ $((directive & shellCompDirectiveNoFileComp)) -eq 0 ]; then + __%[1]s_debug "Adding activeHelp delimiter" + compadd -x "--" + hasActiveHelp=0 + fi + fi + + if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then + __%[1]s_debug "Activating nospace." + noSpace="-S ''" + fi + + if [ $((directive & shellCompDirectiveKeepOrder)) -ne 0 ]; then + __%[1]s_debug "Activating keep order." + keepOrder="-V" + fi + if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then # File extension filtering local filteringCmd @@ -199,7 +251,7 @@ _%[1]s() _arguments '*:filename:'"$filteringCmd" elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then # File completion for directories only - local subDir + local subdir subdir="${completions[1]}" if [ -n "$subdir" ]; then __%[1]s_debug "Listing directories in $subdir" @@ -208,33 +260,49 @@ _%[1]s() __%[1]s_debug "Listing directories in ." fi + local result _arguments '*:dirname:_files -/'" ${flagPrefix}" + result=$? if [ -n "$subdir" ]; then popd >/dev/null 2>&1 fi - elif [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ] && [ ${compCount} -eq 1 ]; then - __%[1]s_debug "Activating nospace." - # We can use compadd here as there is no description when - # there is only one completion. - compadd -S '' "${lastComp}" - elif [ ${compCount} -eq 0 ]; then - if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then - __%[1]s_debug "deactivating file completion" - else - # Perform file completion - __%[1]s_debug "activating file completion" - _arguments '*:filename:_files'" ${flagPrefix}" - fi + return $result else - _describe "completions" completions $(echo $flagPrefix) + __%[1]s_debug "Calling _describe" + if eval _describe $keepOrder "completions" completions $flagPrefix $noSpace; then + __%[1]s_debug "_describe found some completions" + + # Return the success of having called _describe + return 0 + else + __%[1]s_debug "_describe did not find completions." + __%[1]s_debug "Checking if we should do file completion." + if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then + __%[1]s_debug "deactivating file completion" + + # We must return an error code here to let zsh know that there were no + # completions found by _describe; this is what will trigger other + # matching algorithms to attempt to find completions. + # For example zsh can match letters in the middle of words. + return 1 + else + # Perform file completion + __%[1]s_debug "Activating file completion" + + # We must return the result of this command, so it must be the + # last command, or else we must store its result to return it. 
+ _arguments '*:filename:_files'" ${flagPrefix}" + fi + fi fi } # don't run the completion function when being source-ed or eval-ed if [ "$funcstack[1]" = "_%[1]s" ]; then - _%[1]s + _%[1]s fi `, name, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs)) + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, + activeHelpMarker)) } diff --git a/vendor/github.com/spf13/cobra/zsh_completions.md b/vendor/github.com/spf13/cobra/zsh_completions.md deleted file mode 100644 index 7cff6178..00000000 --- a/vendor/github.com/spf13/cobra/zsh_completions.md +++ /dev/null @@ -1,48 +0,0 @@ -## Generating Zsh Completion For Your cobra.Command - -Please refer to [Shell Completions](shell_completions.md) for details. - -## Zsh completions standardization - -Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backwards-compatible, some small changes in behavior were introduced. - -### Deprecation summary - -See further below for more details on these deprecations. - -* `cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` is no longer needed. It is therefore **deprecated** and silently ignored. -* `cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` is **deprecated** and silently ignored. - * Instead use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt`. -* `cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored. - * Instead use `ValidArgsFunction`. - -### Behavioral changes - -**Noun completion** -|Old behavior|New behavior| -|---|---| -|No file completion by default (opposite of bash)|File completion by default; use `ValidArgsFunction` with `ShellCompDirectiveNoFileComp` to turn off file completion on a per-argument basis| -|Completion of flag names without the `-` prefix having been typed|Flag names are only completed if the user has typed the first `-`| -`cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` used to turn on file completion on a per-argument position basis|File completion for all arguments by default; `cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored| -|`cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` used to turn on file completion **with glob filtering** on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored; use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt` for file **extension** filtering (not full glob filtering)| -|`cmd.MarkZshCompPositionalArgumentWords(pos, words[])` used to provide completion choices on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored; use `ValidArgsFunction` to achieve the same behavior| - -**Flag-value completion** - -|Old behavior|New behavior| -|---|---| -|No file completion by default (opposite of bash)|File completion by default; use `RegisterFlagCompletionFunc()` with `ShellCompDirectiveNoFileComp` to turn off file completion| -|`cmd.MarkFlagFilename(flag, []string{})` and similar used to turn on file completion|File completion by default; `cmd.MarkFlagFilename(flag, []string{})` no longer needed in this context and silently ignored| -|`cmd.MarkFlagFilename(flag, glob[])` used to turn on file completion **with glob filtering** (syntax of `[]string{"*.yaml", "*.yml"}` incompatible with bash)|Will continue to 
work, however, support for bash syntax is added and should be used instead so as to work for all shells (`[]string{"yaml", "yml"}`)| -|`cmd.MarkFlagDirname(flag)` only completes directories (zsh-specific)|Has been added for all shells| -|Completion of a flag name does not repeat, unless flag is of type `*Array` or `*Slice` (not supported by bash)|Retained for `zsh` and added to `fish`| -|Completion of a flag name does not provide the `=` form (unlike bash)|Retained for `zsh` and added to `fish`| - -**Improvements** - -* Custom completion support (`ValidArgsFunction` and `RegisterFlagCompletionFunc()`) -* File completion by default if no other completions found -* Handling of required flags -* File extension filtering no longer mutually exclusive with bash usage -* Completion of directory names *within* another directory -* Support for `=` form of flags diff --git a/vendor/github.com/spf13/jwalterweatherman/.gitignore b/vendor/github.com/spf13/jwalterweatherman/.gitignore deleted file mode 100644 index 00268614..00000000 --- a/vendor/github.com/spf13/jwalterweatherman/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/github.com/spf13/jwalterweatherman/BUILD.bazel b/vendor/github.com/spf13/jwalterweatherman/BUILD.bazel deleted file mode 100644 index 10533cd1..00000000 --- a/vendor/github.com/spf13/jwalterweatherman/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "jwalterweatherman", - srcs = [ - "default_notepad.go", - "log_counter.go", - "notepad.go", - ], - importmap = "peridot.resf.org/vendor/github.com/spf13/jwalterweatherman", - importpath = "github.com/spf13/jwalterweatherman", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/spf13/jwalterweatherman/README.md b/vendor/github.com/spf13/jwalterweatherman/README.md deleted file mode 100644 index 932a23fc..00000000 --- a/vendor/github.com/spf13/jwalterweatherman/README.md +++ /dev/null @@ -1,148 +0,0 @@ -jWalterWeatherman -================= - -Seamless printing to the terminal (stdout) and logging to a io.Writer -(file) that’s as easy to use as fmt.Println. - -![and_that__s_why_you_always_leave_a_note_by_jonnyetc-d57q7um](https://cloud.githubusercontent.com/assets/173412/11002937/ccd01654-847d-11e5-828e-12ebaf582eaf.jpg) -Graphic by [JonnyEtc](http://jonnyetc.deviantart.com/art/And-That-s-Why-You-Always-Leave-a-Note-315311422) - -JWW is primarily a wrapper around the excellent standard log library. It -provides a few advantages over using the standard log library alone. - -1. Ready to go out of the box. -2. One library for both printing to the terminal and logging (to files). -3. Really easy to log to either a temp file or a file you specify. - - -I really wanted a very straightforward library that could seamlessly do -the following things. - -1. Replace all the println, printf, etc statements thoughout my code with - something more useful -2. Allow the user to easily control what levels are printed to stdout -3. Allow the user to easily control what levels are logged -4. Provide an easy mechanism (like fmt.Println) to print info to the user - which can be easily logged as well -5. Due to 2 & 3 provide easy verbose mode for output and logs -6. 
Not have any unnecessary initialization cruft. Just use it. - -# Usage - -## Step 1. Use it -Put calls throughout your source based on type of feedback. -No initialization or setup needs to happen. Just start calling things. - -Available Loggers are: - - * TRACE - * DEBUG - * INFO - * WARN - * ERROR - * CRITICAL - * FATAL - -These each are loggers based on the log standard library and follow the -standard usage. Eg. - -```go - import ( - jww "github.com/spf13/jwalterweatherman" - ) - - ... - - if err != nil { - - // This is a pretty serious error and the user should know about - // it. It will be printed to the terminal as well as logged under the - // default thresholds. - - jww.ERROR.Println(err) - } - - if err2 != nil { - // This error isn’t going to materially change the behavior of the - // application, but it’s something that may not be what the user - // expects. Under the default thresholds, Warn will be logged, but - // not printed to the terminal. - - jww.WARN.Println(err2) - } - - // Information that’s relevant to what’s happening, but not very - // important for the user. Under the default thresholds this will be - // discarded. - - jww.INFO.Printf("information %q", response) - -``` - -NOTE: You can also use the library in a non-global setting by creating an instance of a Notebook: - -```go -notepad = jww.NewNotepad(jww.LevelInfo, jww.LevelTrace, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime) -notepad.WARN.Println("Some warning"") -``` - -_Why 7 levels?_ - -Maybe you think that 7 levels are too much for any application... and you -are probably correct. Just because there are seven levels doesn’t mean -that you should be using all 7 levels. Pick the right set for your needs. -Remember they only have to mean something to your project. - -## Step 2. Optionally configure JWW - -Under the default thresholds : - - * Debug, Trace & Info goto /dev/null - * Warn and above is logged (when a log file/io.Writer is provided) - * Error and above is printed to the terminal (stdout) - -### Changing the thresholds - -The threshold can be changed at any time, but will only affect calls that -execute after the change was made. - -This is very useful if your application has a verbose mode. Of course you -can decide what verbose means to you or even have multiple levels of -verbosity. - - -```go - import ( - jww "github.com/spf13/jwalterweatherman" - ) - - if Verbose { - jww.SetLogThreshold(jww.LevelTrace) - jww.SetStdoutThreshold(jww.LevelInfo) - } -``` - -Note that JWW's own internal output uses log levels as well, so set the log -level before making any other calls if you want to see what it's up to. - - -### Setting a log file - -JWW can log to any `io.Writer`: - - -```go - - jww.SetLogOutput(customWriter) - -``` - - -# More information - -This is an early release. I’ve been using it for a while and this is the -third interface I’ve tried. I like this one pretty well, but no guarantees -that it won’t change a bit. - -I wrote this for use in [hugo](https://gohugo.io). If you are looking -for a static website engine that’s super fast please checkout Hugo. diff --git a/vendor/github.com/spf13/jwalterweatherman/default_notepad.go b/vendor/github.com/spf13/jwalterweatherman/default_notepad.go deleted file mode 100644 index bcb76340..00000000 --- a/vendor/github.com/spf13/jwalterweatherman/default_notepad.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright © 2016 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. 
- -package jwalterweatherman - -import ( - "io" - "io/ioutil" - "log" - "os" -) - -var ( - TRACE *log.Logger - DEBUG *log.Logger - INFO *log.Logger - WARN *log.Logger - ERROR *log.Logger - CRITICAL *log.Logger - FATAL *log.Logger - - LOG *log.Logger - FEEDBACK *Feedback - - defaultNotepad *Notepad -) - -func reloadDefaultNotepad() { - TRACE = defaultNotepad.TRACE - DEBUG = defaultNotepad.DEBUG - INFO = defaultNotepad.INFO - WARN = defaultNotepad.WARN - ERROR = defaultNotepad.ERROR - CRITICAL = defaultNotepad.CRITICAL - FATAL = defaultNotepad.FATAL - - LOG = defaultNotepad.LOG - FEEDBACK = defaultNotepad.FEEDBACK -} - -func init() { - defaultNotepad = NewNotepad(LevelError, LevelWarn, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime) - reloadDefaultNotepad() -} - -// SetLogThreshold set the log threshold for the default notepad. Trace by default. -func SetLogThreshold(threshold Threshold) { - defaultNotepad.SetLogThreshold(threshold) - reloadDefaultNotepad() -} - -// SetLogOutput set the log output for the default notepad. Discarded by default. -func SetLogOutput(handle io.Writer) { - defaultNotepad.SetLogOutput(handle) - reloadDefaultNotepad() -} - -// SetStdoutThreshold set the standard output threshold for the default notepad. -// Info by default. -func SetStdoutThreshold(threshold Threshold) { - defaultNotepad.SetStdoutThreshold(threshold) - reloadDefaultNotepad() -} - -// SetPrefix set the prefix for the default logger. Empty by default. -func SetPrefix(prefix string) { - defaultNotepad.SetPrefix(prefix) - reloadDefaultNotepad() -} - -// SetFlags set the flags for the default logger. "log.Ldate | log.Ltime" by default. -func SetFlags(flags int) { - defaultNotepad.SetFlags(flags) - reloadDefaultNotepad() -} - -// Level returns the current global log threshold. -func LogThreshold() Threshold { - return defaultNotepad.logThreshold -} - -// Level returns the current global output threshold. -func StdoutThreshold() Threshold { - return defaultNotepad.stdoutThreshold -} - -// GetStdoutThreshold returns the defined Treshold for the log logger. -func GetLogThreshold() Threshold { - return defaultNotepad.GetLogThreshold() -} - -// GetStdoutThreshold returns the Treshold for the stdout logger. -func GetStdoutThreshold() Threshold { - return defaultNotepad.GetStdoutThreshold() -} - -// LogCountForLevel returns the number of log invocations for a given threshold. -func LogCountForLevel(l Threshold) uint64 { - return defaultNotepad.LogCountForLevel(l) -} - -// LogCountForLevelsGreaterThanorEqualTo returns the number of log invocations -// greater than or equal to a given threshold. -func LogCountForLevelsGreaterThanorEqualTo(threshold Threshold) uint64 { - return defaultNotepad.LogCountForLevelsGreaterThanorEqualTo(threshold) -} - -// ResetLogCounters resets the invocation counters for all levels. -func ResetLogCounters() { - defaultNotepad.ResetLogCounters() -} diff --git a/vendor/github.com/spf13/jwalterweatherman/log_counter.go b/vendor/github.com/spf13/jwalterweatherman/log_counter.go deleted file mode 100644 index 11423ac4..00000000 --- a/vendor/github.com/spf13/jwalterweatherman/log_counter.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright © 2016 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. 
- -package jwalterweatherman - -import ( - "sync/atomic" -) - -type logCounter struct { - counter uint64 -} - -func (c *logCounter) incr() { - atomic.AddUint64(&c.counter, 1) -} - -func (c *logCounter) resetCounter() { - atomic.StoreUint64(&c.counter, 0) -} - -func (c *logCounter) getCount() uint64 { - return atomic.LoadUint64(&c.counter) -} - -func (c *logCounter) Write(p []byte) (n int, err error) { - c.incr() - return len(p), nil -} - -// LogCountForLevel returns the number of log invocations for a given threshold. -func (n *Notepad) LogCountForLevel(l Threshold) uint64 { - return n.logCounters[l].getCount() -} - -// LogCountForLevelsGreaterThanorEqualTo returns the number of log invocations -// greater than or equal to a given threshold. -func (n *Notepad) LogCountForLevelsGreaterThanorEqualTo(threshold Threshold) uint64 { - var cnt uint64 - - for i := int(threshold); i < len(n.logCounters); i++ { - cnt += n.LogCountForLevel(Threshold(i)) - } - - return cnt -} - -// ResetLogCounters resets the invocation counters for all levels. -func (n *Notepad) ResetLogCounters() { - for _, np := range n.logCounters { - np.resetCounter() - } -} diff --git a/vendor/github.com/spf13/jwalterweatherman/notepad.go b/vendor/github.com/spf13/jwalterweatherman/notepad.go deleted file mode 100644 index ae5aaf71..00000000 --- a/vendor/github.com/spf13/jwalterweatherman/notepad.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright © 2016 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -package jwalterweatherman - -import ( - "fmt" - "io" - "log" -) - -type Threshold int - -func (t Threshold) String() string { - return prefixes[t] -} - -const ( - LevelTrace Threshold = iota - LevelDebug - LevelInfo - LevelWarn - LevelError - LevelCritical - LevelFatal -) - -var prefixes map[Threshold]string = map[Threshold]string{ - LevelTrace: "TRACE", - LevelDebug: "DEBUG", - LevelInfo: "INFO", - LevelWarn: "WARN", - LevelError: "ERROR", - LevelCritical: "CRITICAL", - LevelFatal: "FATAL", -} - -// Notepad is where you leave a note! -type Notepad struct { - TRACE *log.Logger - DEBUG *log.Logger - INFO *log.Logger - WARN *log.Logger - ERROR *log.Logger - CRITICAL *log.Logger - FATAL *log.Logger - - LOG *log.Logger - FEEDBACK *Feedback - - loggers [7]**log.Logger - logHandle io.Writer - outHandle io.Writer - logThreshold Threshold - stdoutThreshold Threshold - prefix string - flags int - - // One per Threshold - logCounters [7]*logCounter -} - -// NewNotepad create a new notepad. -func NewNotepad(outThreshold Threshold, logThreshold Threshold, outHandle, logHandle io.Writer, prefix string, flags int) *Notepad { - n := &Notepad{} - - n.loggers = [7]**log.Logger{&n.TRACE, &n.DEBUG, &n.INFO, &n.WARN, &n.ERROR, &n.CRITICAL, &n.FATAL} - n.outHandle = outHandle - n.logHandle = logHandle - n.stdoutThreshold = outThreshold - n.logThreshold = logThreshold - - if len(prefix) != 0 { - n.prefix = "[" + prefix + "] " - } else { - n.prefix = "" - } - - n.flags = flags - - n.LOG = log.New(n.logHandle, - "LOG: ", - n.flags) - n.FEEDBACK = &Feedback{out: log.New(outHandle, "", 0), log: n.LOG} - - n.init() - return n -} - -// init creates the loggers for each level depending on the notepad thresholds. 
-func (n *Notepad) init() { - logAndOut := io.MultiWriter(n.outHandle, n.logHandle) - - for t, logger := range n.loggers { - threshold := Threshold(t) - counter := &logCounter{} - n.logCounters[t] = counter - prefix := n.prefix + threshold.String() + " " - - switch { - case threshold >= n.logThreshold && threshold >= n.stdoutThreshold: - *logger = log.New(io.MultiWriter(counter, logAndOut), prefix, n.flags) - - case threshold >= n.logThreshold: - *logger = log.New(io.MultiWriter(counter, n.logHandle), prefix, n.flags) - - case threshold >= n.stdoutThreshold: - *logger = log.New(io.MultiWriter(counter, n.outHandle), prefix, n.flags) - - default: - // counter doesn't care about prefix and flags, so don't use them - // for performance. - *logger = log.New(counter, "", 0) - } - } -} - -// SetLogThreshold changes the threshold above which messages are written to the -// log file. -func (n *Notepad) SetLogThreshold(threshold Threshold) { - n.logThreshold = threshold - n.init() -} - -// SetLogOutput changes the file where log messages are written. -func (n *Notepad) SetLogOutput(handle io.Writer) { - n.logHandle = handle - n.init() -} - -// GetStdoutThreshold returns the defined Treshold for the log logger. -func (n *Notepad) GetLogThreshold() Threshold { - return n.logThreshold -} - -// SetStdoutThreshold changes the threshold above which messages are written to the -// standard output. -func (n *Notepad) SetStdoutThreshold(threshold Threshold) { - n.stdoutThreshold = threshold - n.init() -} - -// GetStdoutThreshold returns the Treshold for the stdout logger. -func (n *Notepad) GetStdoutThreshold() Threshold { - return n.stdoutThreshold -} - -// SetPrefix changes the prefix used by the notepad. Prefixes are displayed between -// brackets at the beginning of the line. An empty prefix won't be displayed at all. -func (n *Notepad) SetPrefix(prefix string) { - if len(prefix) != 0 { - n.prefix = "[" + prefix + "] " - } else { - n.prefix = "" - } - n.init() -} - -// SetFlags choose which flags the logger will display (after prefix and message -// level). See the package log for more informations on this. -func (n *Notepad) SetFlags(flags int) { - n.flags = flags - n.init() -} - -// Feedback writes plainly to the outHandle while -// logging with the standard extra information (date, file, etc). -type Feedback struct { - out *log.Logger - log *log.Logger -} - -func (fb *Feedback) Println(v ...interface{}) { - fb.output(fmt.Sprintln(v...)) -} - -func (fb *Feedback) Printf(format string, v ...interface{}) { - fb.output(fmt.Sprintf(format, v...)) -} - -func (fb *Feedback) Print(v ...interface{}) { - fb.output(fmt.Sprint(v...)) -} - -func (fb *Feedback) output(s string) { - if fb.out != nil { - fb.out.Output(2, s) - } - if fb.log != nil { - fb.log.Output(2, s) - } -} diff --git a/vendor/github.com/spf13/viper/.editorconfig b/vendor/github.com/spf13/viper/.editorconfig index 63afcbcd..1f664d13 100644 --- a/vendor/github.com/spf13/viper/.editorconfig +++ b/vendor/github.com/spf13/viper/.editorconfig @@ -11,5 +11,8 @@ trim_trailing_whitespace = true [*.go] indent_style = tab -[{Makefile, *.mk}] +[{Makefile,*.mk}] indent_style = tab + +[*.nix] +indent_size = 2 diff --git a/vendor/github.com/spf13/viper/.envrc b/vendor/github.com/spf13/viper/.envrc new file mode 100644 index 00000000..3ce7171a --- /dev/null +++ b/vendor/github.com/spf13/viper/.envrc @@ -0,0 +1,4 @@ +if ! has nix_direnv_version || ! 
nix_direnv_version 2.3.0; then + source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.3.0/direnvrc" "sha256-Dmd+j63L84wuzgyjITIfSxSD57Tx7v51DMxVZOsiUD8=" +fi +use flake . --impure diff --git a/vendor/github.com/spf13/viper/.gitignore b/vendor/github.com/spf13/viper/.gitignore index 89625083..f1bbd428 100644 --- a/vendor/github.com/spf13/viper/.gitignore +++ b/vendor/github.com/spf13/viper/.gitignore @@ -1,4 +1,7 @@ +/.devenv/ +/.direnv/ /.idea/ +/.pre-commit-config.yaml /bin/ /build/ /var/ diff --git a/vendor/github.com/spf13/viper/.golangci.yaml b/vendor/github.com/spf13/viper/.golangci.yaml new file mode 100644 index 00000000..1faeae42 --- /dev/null +++ b/vendor/github.com/spf13/viper/.golangci.yaml @@ -0,0 +1,108 @@ +run: + timeout: 5m + +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/spf13/viper) + gocritic: + # Enable multiple checks by tags. See "Tags" section in https://github.com/go-critic/go-critic#usage. + enabled-tags: + - diagnostic + - experimental + - opinionated + - style + disabled-checks: + - importShadow + - unnamedResult + golint: + min-confidence: 0 + goimports: + local-prefixes: github.com/spf13/viper + +linters: + disable-all: true + enable: + - bodyclose + - dogsled + - dupl + - durationcheck + - exhaustive + - exportloopref + - gci + - gocritic + - godot + - gofmt + - gofumpt + - goimports + - gomoddirectives + - goprintffuncname + - govet + - importas + - ineffassign + - makezero + - misspell + - nakedret + - nilerr + - noctx + - nolintlint + - prealloc + - predeclared + - revive + - rowserrcheck + - sqlclosecheck + - staticcheck + - stylecheck + - tparallel + - typecheck + - unconvert + - unparam + - unused + - wastedassign + - whitespace + + # fixme + # - cyclop + # - errcheck + # - errorlint + # - exhaustivestruct + # - forbidigo + # - forcetypeassert + # - gochecknoglobals + # - gochecknoinits + # - gocognit + # - goconst + # - gocyclo + # - gosec + # - gosimple + # - ifshort + # - lll + # - nlreturn + # - paralleltest + # - scopelint + # - thelper + # - wrapcheck + + # unused + # - depguard + # - goheader + # - gomodguard + + # deprecated + # - deadcode + # - structcheck + # - varcheck + + # don't enable: + # - asciicheck + # - funlen + # - godox + # - goerr113 + # - gomnd + # - interfacer + # - maligned + # - nestif + # - testpackage + # - wsl diff --git a/vendor/github.com/spf13/viper/.golangci.yml b/vendor/github.com/spf13/viper/.golangci.yml deleted file mode 100644 index a0755ce7..00000000 --- a/vendor/github.com/spf13/viper/.golangci.yml +++ /dev/null @@ -1,27 +0,0 @@ -linters-settings: - golint: - min-confidence: 0.1 - goimports: - local-prefixes: github.com/spf13/viper - -linters: - enable-all: true - disable: - - funlen - - maligned - - # TODO: fix me - - wsl - - gochecknoinits - - gosimple - - gochecknoglobals - - errcheck - - lll - - godox - - scopelint - - gocyclo - - gocognit - - gocritic - -service: - golangci-lint-version: 1.21.x diff --git a/vendor/github.com/spf13/viper/.yamlignore b/vendor/github.com/spf13/viper/.yamlignore new file mode 100644 index 00000000..c04c4dea --- /dev/null +++ b/vendor/github.com/spf13/viper/.yamlignore @@ -0,0 +1,2 @@ +# TODO: FIXME +/.github/ diff --git a/vendor/github.com/spf13/viper/.yamllint.yaml b/vendor/github.com/spf13/viper/.yamllint.yaml new file mode 100644 index 00000000..bac19ce1 --- /dev/null +++ b/vendor/github.com/spf13/viper/.yamllint.yaml @@ -0,0 +1,6 @@ +ignore-from-file: [.gitignore, .yamlignore] + +extends: default + +rules: + line-length: 
disable diff --git a/vendor/github.com/spf13/viper/BUILD.bazel b/vendor/github.com/spf13/viper/BUILD.bazel index a2b144ba..62f0babd 100644 --- a/vendor/github.com/spf13/viper/BUILD.bazel +++ b/vendor/github.com/spf13/viper/BUILD.bazel @@ -3,7 +3,9 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "viper", srcs = [ + "file.go", "flags.go", + "logger.go", "util.go", "viper.go", ], @@ -12,17 +14,19 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/fsnotify/fsnotify", - "//vendor/github.com/hashicorp/hcl", - "//vendor/github.com/hashicorp/hcl/hcl/printer", - "//vendor/github.com/magiconair/properties", "//vendor/github.com/mitchellh/mapstructure", - "//vendor/github.com/pelletier/go-toml", + "//vendor/github.com/sagikazarmark/slog-shim", "//vendor/github.com/spf13/afero", "//vendor/github.com/spf13/cast", - "//vendor/github.com/spf13/jwalterweatherman", "//vendor/github.com/spf13/pflag", - "//vendor/github.com/subosito/gotenv", - "//vendor/gopkg.in/ini.v1:ini_v1", - "//vendor/gopkg.in/yaml.v2:yaml_v2", + "//vendor/github.com/spf13/viper/internal/encoding", + "//vendor/github.com/spf13/viper/internal/encoding/dotenv", + "//vendor/github.com/spf13/viper/internal/encoding/hcl", + "//vendor/github.com/spf13/viper/internal/encoding/ini", + "//vendor/github.com/spf13/viper/internal/encoding/javaproperties", + "//vendor/github.com/spf13/viper/internal/encoding/json", + "//vendor/github.com/spf13/viper/internal/encoding/toml", + "//vendor/github.com/spf13/viper/internal/encoding/yaml", + "//vendor/github.com/spf13/viper/internal/features", ], ) diff --git a/vendor/github.com/spf13/viper/Makefile b/vendor/github.com/spf13/viper/Makefile index 1c2cab03..a77b9c81 100644 --- a/vendor/github.com/spf13/viper/Makefile +++ b/vendor/github.com/spf13/viper/Makefile @@ -15,8 +15,8 @@ TEST_FORMAT = short-verbose endif # Dependency versions -GOTESTSUM_VERSION = 0.4.0 -GOLANGCI_VERSION = 1.21.0 +GOTESTSUM_VERSION = 1.9.0 +GOLANGCI_VERSION = 1.53.3 # Add the ability to override some variables # Use with care @@ -29,11 +29,6 @@ clear: ## Clear the working area and the project .PHONY: check check: test lint ## Run tests and linters -bin/gotestsum: bin/gotestsum-${GOTESTSUM_VERSION} - @ln -sf gotestsum-${GOTESTSUM_VERSION} bin/gotestsum -bin/gotestsum-${GOTESTSUM_VERSION}: - @mkdir -p bin - curl -L https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_${OS}_amd64.tar.gz | tar -zOxf - gotestsum > ./bin/gotestsum-${GOTESTSUM_VERSION} && chmod +x ./bin/gotestsum-${GOTESTSUM_VERSION} TEST_PKGS ?= ./... .PHONY: test @@ -44,20 +39,36 @@ test: bin/gotestsum ## Run tests @mkdir -p ${BUILD_DIR} bin/gotestsum --no-summary=skipped --junitfile ${BUILD_DIR}/coverage.xml --format ${TEST_FORMAT} -- -race -coverprofile=${BUILD_DIR}/coverage.txt -covermode=atomic $(filter-out -v,${GOARGS}) $(if ${TEST_PKGS},${TEST_PKGS},./...) 
-bin/golangci-lint: bin/golangci-lint-${GOLANGCI_VERSION} - @ln -sf golangci-lint-${GOLANGCI_VERSION} bin/golangci-lint -bin/golangci-lint-${GOLANGCI_VERSION}: - @mkdir -p bin - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s -- -b ./bin/ v${GOLANGCI_VERSION} - @mv bin/golangci-lint $@ - .PHONY: lint -lint: bin/golangci-lint ## Run linter - bin/golangci-lint run +lint: lint-go lint-yaml +lint: ## Run linters -.PHONY: fix -fix: bin/golangci-lint ## Fix lint violations - bin/golangci-lint run --fix +.PHONY: lint-go +lint-go: + golangci-lint run $(if ${CI},--out-format github-actions,) + +.PHONY: lint-yaml +lint-yaml: + yamllint $(if ${CI},-f github,) --no-warnings . + +.PHONY: fmt +fmt: ## Format code + golangci-lint run --fix + +deps: bin/golangci-lint bin/gotestsum yamllint +deps: ## Install dependencies + +bin/gotestsum: + @mkdir -p bin + curl -L https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_${OS}_amd64.tar.gz | tar -zOxf - gotestsum > ./bin/gotestsum && chmod +x ./bin/gotestsum + +bin/golangci-lint: + @mkdir -p bin + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | bash -s -- v${GOLANGCI_VERSION} + +.PHONY: yamllint +yamllint: + pip3 install --user yamllint # Add custom targets here -include custom.mk diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md index dfd8034f..3fc7d84f 100644 --- a/vendor/github.com/spf13/viper/README.md +++ b/vendor/github.com/spf13/viper/README.md @@ -1,11 +1,18 @@ +> ## Viper v2 feedback +> Viper is heading towards v2 and we would love to hear what _**you**_ would like to see in it. Share your thoughts here: https://forms.gle/R6faU74qPRPAzchZ9 +> +> **Thank you!** + ![Viper](.github/logo.png?raw=true) [![Mentioned in Awesome Go](https://awesome.re/mentioned-badge-flat.svg)](https://github.com/avelino/awesome-go#configuration) +[![run on repl.it](https://repl.it/badge/github/sagikazarmark/Viper-example)](https://repl.it/@sagikazarmark/Viper-example#main.go) -[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/spf13/viper/CI?style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI) +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/viper/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/viper?style=flat-square)](https://goreportcard.com/report/github.com/spf13/viper) -[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/spf13/viper) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.20-61CFDD.svg?style=flat-square) +[![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/viper)](https://pkg.go.dev/mod/github.com/spf13/viper) **Go configuration with fangs!** @@ -20,19 +27,25 @@ Many Go projects are built using Viper including: * [doctl](https://github.com/digitalocean/doctl) * [Clairctl](https://github.com/jgsqware/clairctl) * [Mercure](https://mercure.rocks) +* [Meshery](https://github.com/meshery/meshery) +* [Bearer](https://github.com/bearer/bearer) +* 
[Coder](https://github.com/coder/coder) +* [Vitess](https://vitess.io/) ## Install -```console +```shell go get github.com/spf13/viper ``` +**Note:** Viper uses [Go Modules](https://go.dev/wiki/Modules) to manage dependencies. + ## What is Viper? -Viper is a complete configuration solution for Go applications including 12-Factor apps. It is designed -to work within an application, and can handle all types of configuration needs +Viper is a complete configuration solution for Go applications including [12-Factor apps](https://12factor.net/#the_twelve_factors). +It is designed to work within an application, and can handle all types of configuration needs and formats. It supports: * setting defaults @@ -110,7 +123,7 @@ viper.AddConfigPath("$HOME/.appname") // call multiple times to add many search viper.AddConfigPath(".") // optionally look for config in the working directory err := viper.ReadInConfig() // Find and read the config file if err != nil { // Handle errors reading the config file - panic(fmt.Errorf("Fatal error config file: %s \n", err)) + panic(fmt.Errorf("fatal error config file: %w", err)) } ``` @@ -118,17 +131,17 @@ You can handle the specific case where no config file is found like this: ```go if err := viper.ReadInConfig(); err != nil { - if _, ok := err.(viper.ConfigFileNotFoundError); ok { - // Config file not found; ignore error if desired - } else { - // Config file was found but another error was produced - } + if _, ok := err.(viper.ConfigFileNotFoundError); ok { + // Config file not found; ignore error if desired + } else { + // Config file was found but another error was produced + } } // Config file found and successfully parsed ``` -*NOTE [since 1.6]:* You can also have a file without an extension and specify the format programmaticaly. For those configuration files that lie in the home of the user without any extension like `.bashrc` +*NOTE [since 1.6]:* You can also have a file without an extension and specify the format programmatically. For those configuration files that lie in the home of the user without any extension like `.bashrc` ### Writing Config Files @@ -166,10 +179,10 @@ Optionally you can provide a function for Viper to run each time a change occurs **Make sure you add all of the configPaths prior to calling `WatchConfig()`** ```go -viper.WatchConfig() viper.OnConfigChange(func(e fsnotify.Event) { fmt.Println("Config file changed:", e.Name) }) +viper.WatchConfig() ``` ### Reading Config from io.Reader @@ -209,6 +222,7 @@ These could be from a command line flag, or from your own application logic. ```go viper.Set("Verbose", true) viper.Set("LogFile", LogFile) +viper.Set("host.port", 5899) // set subset ``` ### Registering and Using Aliases @@ -245,9 +259,10 @@ using `SetEnvPrefix`, you can tell Viper to use a prefix while reading from the environment variables. Both `BindEnv` and `AutomaticEnv` will use this prefix. -`BindEnv` takes one or two parameters. The first parameter is the key name, the -second is the name of the environment variable. The name of the environment -variable is case sensitive. If the ENV variable name is not provided, then +`BindEnv` takes one or more parameters. The first parameter is the key name, the +rest are the name of the environment variables to bind to this key. If more than +one are provided, they will take precedence in the specified order. The name of +the environment variable is case sensitive. 
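A runnable sketch of the multi-variable binding described above; the key and variable names here are illustrative:

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()

	// "port" is looked up in MYAPP_PORT first, then PORT, in the order
	// given. Explicit names are used verbatim; no prefix is prepended.
	if err := v.BindEnv("port", "MYAPP_PORT", "PORT"); err != nil {
		panic(err)
	}

	os.Setenv("PORT", "8080")
	fmt.Println(v.GetInt("port")) // 8080

	os.Setenv("MYAPP_PORT", "9090") // the earlier name wins
	fmt.Println(v.GetInt("port"))   // 9090
}
```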
If the ENV variable name is not provided, then Viper will automatically assume that the ENV variable matches the following format: prefix + "_" + the key name in ALL CAPS. When you explicitly provide the ENV variable name (the second parameter), it **does not** automatically add the prefix. For example if the second parameter is "id", Viper will look for the ENV variable "ID". @@ -259,7 +274,7 @@ the `BindEnv` is called. `AutomaticEnv` is a powerful helper especially when combined with `SetEnvPrefix`. When called, Viper will check for an environment variable any time a `viper.Get` request is made. It will apply the following rules. It will -check for a environment variable with a name matching the key uppercased and +check for an environment variable with a name matching the key uppercased and prefixed with the `EnvPrefix` if set. `SetEnvKeyReplacer` allows you to use a `strings.Replacer` object to rewrite Env @@ -344,7 +359,7 @@ func main() { i := viper.GetInt("flagname") // retrieve value from viper - ... + // ... } ``` @@ -403,7 +418,9 @@ in a Key/Value store such as etcd or Consul. These values take precedence over default values, but are overridden by configuration values retrieved from disk, flags, or environment variables. -Viper uses [crypt](https://github.com/bketelsen/crypt) to retrieve +Viper supports multiple hosts. To use, pass a list of endpoints separated by `;`. For example `http://127.0.0.1:4001;http://127.0.0.1:4002`. + +Viper uses [crypt](https://github.com/sagikazarmark/crypt) to retrieve configuration from the K/V store, which means that you can store your configuration values encrypted and have them automatically decrypted if you have the correct gpg keyring. Encryption is optional. @@ -415,7 +432,7 @@ independently of it. K/V store. `crypt` defaults to etcd on http://127.0.0.1:4001. ```bash -$ go get github.com/bketelsen/crypt/bin/crypt +$ go get github.com/sagikazarmark/crypt/bin/crypt $ crypt set -plaintext /config/hugo.json /Users/hugo/settings/config.json ``` @@ -437,6 +454,13 @@ viper.SetConfigType("json") // because there is no file extension in a stream of err := viper.ReadRemoteConfig() ``` +#### etcd3 +```go +viper.AddRemoteProvider("etcd3", "http://127.0.0.1:4001","/config/hugo.json") +viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv" +err := viper.ReadRemoteConfig() +``` + #### Consul You need to set a key to Consul key/value storage with JSON value containing your desired config. 
For example, create a Consul key/value store key `MY_CONSUL_KEY` with value: @@ -467,6 +491,15 @@ err := viper.ReadRemoteConfig() Of course, you're allowed to use `SecureRemoteProvider` also + +#### NATS + +```go +viper.AddRemoteProvider("nats", "nats://127.0.0.1:4222", "myapp.config") +viper.SetConfigType("json") +err := viper.ReadRemoteConfig() +``` + ### Remote Key/Value Store Example - Encrypted ```go @@ -493,18 +526,18 @@ runtime_viper.Unmarshal(&runtime_conf) // open a goroutine to watch remote changes forever go func(){ for { - time.Sleep(time.Second * 5) // delay after each request + time.Sleep(time.Second * 5) // delay after each request - // currently, only tested with etcd support - err := runtime_viper.WatchRemoteConfig() - if err != nil { - log.Errorf("unable to read remote config: %v", err) - continue - } + // currently, only tested with etcd support + err := runtime_viper.WatchRemoteConfig() + if err != nil { + log.Errorf("unable to read remote config: %v", err) + continue + } - // unmarshal new config into our runtime config struct. you can also use channel - // to implement a signal to notify the system of the changes - runtime_viper.Unmarshal(&runtime_conf) + // unmarshal new config into our runtime config struct. you can also use channel + // to implement a signal to notify the system of the changes + runtime_viper.Unmarshal(&runtime_conf) } }() ``` @@ -514,29 +547,32 @@ go func(){ In Viper, there are a few ways to get a value depending on the value’s type. The following functions and methods exist: - * `Get(key string) : interface{}` + * `Get(key string) : any` * `GetBool(key string) : bool` * `GetFloat64(key string) : float64` * `GetInt(key string) : int` * `GetIntSlice(key string) : []int` * `GetString(key string) : string` - * `GetStringMap(key string) : map[string]interface{}` + * `GetStringMap(key string) : map[string]any` * `GetStringMapString(key string) : map[string]string` * `GetStringSlice(key string) : []string` * `GetTime(key string) : time.Time` * `GetDuration(key string) : time.Duration` * `IsSet(key string) : bool` - * `AllSettings() : map[string]interface{}` + * `AllSettings() : map[string]any` One important thing to recognize is that each Get function will return a zero value if it’s not found. To check if a given key exists, the `IsSet()` method has been provided. +The zero value will also be returned if the value is set, but fails to parse +as the requested type. + Example: ```go viper.GetString("logfile") // case-insensitive Setting & Getting if viper.GetBool("verbose") { - fmt.Println("verbose enabled") + fmt.Println("verbose enabled") } ``` ### Accessing nested keys @@ -582,10 +618,37 @@ the `Set()` method, …) with an immediate value, then all sub-keys of `datastore.metric` become undefined, they are “shadowed” by the higher-priority configuration level. +Viper can access array indices by using numbers in the path. For example: + +```jsonc +{ + "host": { + "address": "localhost", + "ports": [ + 5799, + 6029 + ] + }, + "datastore": { + "metric": { + "host": "127.0.0.1", + "port": 3099 + }, + "warehouse": { + "host": "198.0.0.1", + "port": 2112 + } + } +} + +GetInt("host.ports.1") // returns 6029 + +``` + Lastly, if there exists a key that matches the delimited key path, its value will be returned instead. E.g. -```json +```jsonc { "datastore.metric.host": "0.0.0.0", "host": { @@ -607,14 +670,15 @@ will be returned instead. E.g. 
GetString("datastore.metric.host") // returns "0.0.0.0" ``` -### Extract sub-tree +### Extracting a sub-tree -Extract sub-tree from Viper. +When developing reusable modules, it's often useful to extract a subset of the configuration +and pass it to a module. This way the module can be instantiated more than once, with different configurations. -For example, `viper` represents: +For example, an application might use multiple different cache stores for different purposes: -```json -app: +```yaml +cache: cache1: max-items: 100 item-size: 64 @@ -623,35 +687,36 @@ app: item-size: 80 ``` -After executing: +We could pass the cache name to a module (eg. `NewCache("cache1")`), +but it would require weird concatenation for accessing config keys and would be less separated from the global config. + +So instead of doing that let's pass a Viper instance to the constructor that represents a subset of the configuration: ```go -subv := viper.Sub("app.cache1") +cache1Config := viper.Sub("cache.cache1") +if cache1Config == nil { // Sub returns nil if the key cannot be found + panic("cache configuration not found") +} + +cache1 := NewCache(cache1Config) ``` -`subv` represents: +**Note:** Always check the return value of `Sub`. It returns `nil` if a key cannot be found. -```json -max-items: 100 -item-size: 64 -``` - -Suppose we have: +Internally, the `NewCache` function can address `max-items` and `item-size` keys directly: ```go -func NewCache(cfg *Viper) *Cache {...} +func NewCache(v *Viper) *Cache { + return &Cache{ + MaxItems: v.GetInt("max-items"), + ItemSize: v.GetInt("item-size"), + } +} ``` -which creates a cache based on config information formatted as `subv`. -Now it’s easy to create these 2 caches separately as: +The resulting code is easy to test, since it's decoupled from the main config structure, +and easier to reuse (for the same reason). -```go -cfg1 := viper.Sub("app.cache1") -cache1 := NewCache(cfg1) - -cfg2 := viper.Sub("app.cache2") -cache2 := NewCache(cfg2) -``` ### Unmarshaling @@ -660,8 +725,8 @@ etc. There are two methods to do this: - * `Unmarshal(rawVal interface{}) : error` - * `UnmarshalKey(key string, rawVal interface{}) : error` + * `Unmarshal(rawVal any) : error` + * `UnmarshalKey(key string, rawVal any) : error` Example: @@ -686,19 +751,19 @@ you have to change the delimiter: ```go v := viper.NewWithOptions(viper.KeyDelimiter("::")) -v.SetDefault("chart::values", map[string]interface{}{ - "ingress": map[string]interface{}{ - "annotations": map[string]interface{}{ - "traefik.frontend.rule.type": "PathPrefix", - "traefik.ingress.kubernetes.io/ssl-redirect": "true", - }, - }, +v.SetDefault("chart::values", map[string]any{ + "ingress": map[string]any{ + "annotations": map[string]any{ + "traefik.frontend.rule.type": "PathPrefix", + "traefik.ingress.kubernetes.io/ssl-redirect": "true", + }, + }, }) type config struct { Chart struct{ - Values map[string]interface{} - } + Values map[string]any + } } var C config @@ -739,6 +804,15 @@ if err != nil { Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/mapstructure) under the hood for unmarshaling values which uses `mapstructure` tags by default. +### Decoding custom formats + +A frequently requested feature for Viper is adding more value formats and decoders. +For example, parsing character (dot, comma, semicolon, etc) separated strings into slices. + +This is already available in Viper using mapstructure decode hooks. 
+ +Read more about the details in [this blog post](https://sagikazarmark.hu/blog/decoding-custom-formats-with-viper/). + ### Marshalling to string You may need to marshal all the settings held in viper into a string rather than write them to a file. @@ -746,17 +820,17 @@ You can use your favorite format's marshaller with the config returned by `AllSe ```go import ( - yaml "gopkg.in/yaml.v2" - // ... + yaml "gopkg.in/yaml.v2" + // ... ) func yamlStringSettings() string { - c := viper.AllSettings() - bs, err := yaml.Marshal(c) - if err != nil { - log.Fatalf("unable to marshal config to YAML: %v", err) - } - return string(bs) + c := viper.AllSettings() + bs, err := yaml.Marshal(c) + if err != nil { + log.Fatalf("unable to marshal config to YAML: %v", err) + } + return string(bs) } ``` @@ -792,15 +866,63 @@ y.SetDefault("ContentDir", "foobar") When working with multiple vipers, it is up to the user to keep track of the different vipers. + ## Q & A -Q: Why is it called “Viper”? +### Why is it called “Viper”? A: Viper is designed to be a [companion](http://en.wikipedia.org/wiki/Viper_(G.I._Joe)) to [Cobra](https://github.com/spf13/cobra). While both can operate completely independently, together they make a powerful pair to handle much of your application foundation needs. -Q: Why is it called “Cobra”? +### Why is it called “Cobra”? -A: Is there a better name for a [commander](http://en.wikipedia.org/wiki/Cobra_Commander)? +Is there a better name for a [commander](http://en.wikipedia.org/wiki/Cobra_Commander)? + +### Does Viper support case sensitive keys? + +**tl;dr:** No. + +Viper merges configuration from various sources, many of which are either case insensitive or uses different casing than the rest of the sources (eg. env vars). +In order to provide the best experience when using multiple sources, the decision has been made to make all keys case insensitive. + +There has been several attempts to implement case sensitivity, but unfortunately it's not that trivial. We might take a stab at implementing it in [Viper v2](https://github.com/spf13/viper/issues/772), but despite the initial noise, it does not seem to be requested that much. + +You can vote for case sensitivity by filling out this feedback form: https://forms.gle/R6faU74qPRPAzchZ9 + +### Is it safe to concurrently read and write to a viper? + +No, you will need to synchronize access to the viper yourself (for example by using the `sync` package). Concurrent reads and writes can cause a panic. + +## Troubleshooting + +See [TROUBLESHOOTING.md](TROUBLESHOOTING.md). + +## Development + +**For an optimal developer experience, it is recommended to install [Nix](https://nixos.org/download.html) and [direnv](https://direnv.net/docs/installation.html).** + +_Alternatively, install [Go](https://go.dev/dl/) on your computer then run `make deps` to install the rest of the dependencies._ + +Run the test suite: + +```shell +make test +``` + +Run linters: + +```shell +make lint # pass -j option to run them in parallel +``` + +Some linter violations can automatically be fixed: + +```shell +make fmt +``` + +## License + +The project is licensed under the [MIT License](LICENSE). diff --git a/vendor/github.com/spf13/viper/TROUBLESHOOTING.md b/vendor/github.com/spf13/viper/TROUBLESHOOTING.md new file mode 100644 index 00000000..b68993d4 --- /dev/null +++ b/vendor/github.com/spf13/viper/TROUBLESHOOTING.md @@ -0,0 +1,32 @@ +# Troubleshooting + +## Unmarshaling doesn't work + +The most common reason for this issue is improper use of struct tags (eg. 
`yaml` or `json`). Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/mapstructure) under the hood for unmarshaling values which uses `mapstructure` tags by default. Please refer to the library's documentation for using other struct tags. + +## Cannot find package + +Viper installation seems to fail a lot lately with the following (or a similar) error: + +``` +cannot find package "github.com/hashicorp/hcl/tree/hcl1" in any of: +/usr/local/Cellar/go/1.15.7_1/libexec/src/github.com/hashicorp/hcl/tree/hcl1 (from $GOROOT) +/Users/user/go/src/github.com/hashicorp/hcl/tree/hcl1 (from $GOPATH) +``` + +As the error message suggests, Go tries to look up dependencies in `GOPATH` mode (as it's commonly called) from the `GOPATH`. +Viper opted to use [Go Modules](https://go.dev/wiki/Modules) to manage its dependencies. While in many cases the two methods are interchangeable, once a dependency releases new (major) versions, `GOPATH` mode is no longer able to decide which version to use, so it'll either use one that's already present or pick a version (usually the `master` branch). + +The solution is easy: switch to using Go Modules. +Please refer to the [wiki](https://go.dev/wiki/Modules) on how to do that. + +**tl;dr* `export GO111MODULE=on` + +## Unquoted 'y' and 'n' characters get replaced with _true_ and _false_ when reading a YAML file + +This is a YAML 1.1 feature according to [go-yaml/yaml#740](https://github.com/go-yaml/yaml/issues/740). + +Potential solutions are: + +1. Quoting values resolved as boolean +1. Upgrading to YAML v3 (for the time being this is possible by passing the `viper_yaml3` tag to your build) diff --git a/vendor/github.com/spf13/viper/file.go b/vendor/github.com/spf13/viper/file.go new file mode 100644 index 00000000..a54fe5a7 --- /dev/null +++ b/vendor/github.com/spf13/viper/file.go @@ -0,0 +1,56 @@ +//go:build !finder + +package viper + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/spf13/afero" +) + +// Search all configPaths for any config file. +// Returns the first path that exists (and is a config file). +func (v *Viper) findConfigFile() (string, error) { + v.logger.Info("searching for config in paths", "paths", v.configPaths) + + for _, cp := range v.configPaths { + file := v.searchInPath(cp) + if file != "" { + return file, nil + } + } + return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} +} + +func (v *Viper) searchInPath(in string) (filename string) { + v.logger.Debug("searching for config in path", "path", in) + for _, ext := range SupportedExts { + v.logger.Debug("checking if file exists", "file", filepath.Join(in, v.configName+"."+ext)) + if b, _ := exists(v.fs, filepath.Join(in, v.configName+"."+ext)); b { + v.logger.Debug("found file", "file", filepath.Join(in, v.configName+"."+ext)) + return filepath.Join(in, v.configName+"."+ext) + } + } + + if v.configType != "" { + if b, _ := exists(v.fs, filepath.Join(in, v.configName)); b { + return filepath.Join(in, v.configName) + } + } + + return "" +} + +// exists checks if file exists. 
+func exists(fs afero.Fs, path string) (bool, error) { + stat, err := fs.Stat(path) + if err == nil { + return !stat.IsDir(), nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} diff --git a/vendor/github.com/spf13/viper/file_finder.go b/vendor/github.com/spf13/viper/file_finder.go new file mode 100644 index 00000000..d96a1bd2 --- /dev/null +++ b/vendor/github.com/spf13/viper/file_finder.go @@ -0,0 +1,38 @@ +//go:build finder + +package viper + +import ( + "fmt" + + "github.com/sagikazarmark/locafero" +) + +// Search all configPaths for any config file. +// Returns the first path that exists (and is a config file). +func (v *Viper) findConfigFile() (string, error) { + var names []string + + if v.configType != "" { + names = locafero.NameWithOptionalExtensions(v.configName, SupportedExts...) + } else { + names = locafero.NameWithExtensions(v.configName, SupportedExts...) + } + + finder := locafero.Finder{ + Paths: v.configPaths, + Names: names, + Type: locafero.FileTypeFile, + } + + results, err := finder.Find(v.fs) + if err != nil { + return "", err + } + + if len(results) == 0 { + return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} + } + + return results[0], nil +} diff --git a/vendor/github.com/spf13/viper/flags.go b/vendor/github.com/spf13/viper/flags.go index b5ddbf5d..de033ed5 100644 --- a/vendor/github.com/spf13/viper/flags.go +++ b/vendor/github.com/spf13/viper/flags.go @@ -30,8 +30,8 @@ func (p pflagValueSet) VisitAll(fn func(flag FlagValue)) { }) } -// pflagValue is a wrapper aroung *pflag.flag -// that implements FlagValue +// pflagValue is a wrapper around *pflag.flag +// that implements FlagValue. type pflagValue struct { flag *pflag.Flag } diff --git a/vendor/github.com/spf13/viper/flake.lock b/vendor/github.com/spf13/viper/flake.lock new file mode 100644 index 00000000..3840614f --- /dev/null +++ b/vendor/github.com/spf13/viper/flake.lock @@ -0,0 +1,273 @@ +{ + "nodes": { + "devenv": { + "inputs": { + "flake-compat": "flake-compat", + "nix": "nix", + "nixpkgs": "nixpkgs", + "pre-commit-hooks": "pre-commit-hooks" + }, + "locked": { + "lastModified": 1707817777, + "narHash": "sha256-vHyIs1OULQ3/91wD6xOiuayfI71JXALGA5KLnDKAcy0=", + "owner": "cachix", + "repo": "devenv", + "rev": "5a30b9e5ac7c6167e61b1f4193d5130bb9f8defa", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "devenv", + "type": "github" + } + }, + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1673956053, + "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-parts": { + "inputs": { + "nixpkgs-lib": "nixpkgs-lib" + }, + "locked": { + "lastModified": 1706830856, + "narHash": "sha256-a0NYyp+h9hlb7ddVz4LUn1vT/PLwqfrWYcHMvFB1xYg=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "b253292d9c0a5ead9bc98c4e9a26c6312e27d69f", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1685518550, + "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef", + "type": "github" + }, + "original": { + "owner": "numtide", + 
"repo": "flake-utils", + "type": "github" + } + }, + "gitignore": { + "inputs": { + "nixpkgs": [ + "devenv", + "pre-commit-hooks", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1660459072, + "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=", + "owner": "hercules-ci", + "repo": "gitignore.nix", + "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "gitignore.nix", + "type": "github" + } + }, + "lowdown-src": { + "flake": false, + "locked": { + "lastModified": 1633514407, + "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=", + "owner": "kristapsdz", + "repo": "lowdown", + "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8", + "type": "github" + }, + "original": { + "owner": "kristapsdz", + "repo": "lowdown", + "type": "github" + } + }, + "nix": { + "inputs": { + "lowdown-src": "lowdown-src", + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression" + }, + "locked": { + "lastModified": 1676545802, + "narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=", + "owner": "domenkozar", + "repo": "nix", + "rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f", + "type": "github" + }, + "original": { + "owner": "domenkozar", + "ref": "relaxed-flakes", + "repo": "nix", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1678875422, + "narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-lib": { + "locked": { + "dir": "lib", + "lastModified": 1706550542, + "narHash": "sha256-UcsnCG6wx++23yeER4Hg18CXWbgNpqNXcHIo5/1Y+hc=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "97b17f32362e475016f942bbdfda4a4a72a8a652", + "type": "github" + }, + "original": { + "dir": "lib", + "owner": "NixOS", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-regression": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + } + }, + "nixpkgs-stable": { + "locked": { + "lastModified": 1685801374, + "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "c37ca420157f4abc31e26f436c1145f8951ff373", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-23.05", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_2": { + "locked": { + "lastModified": 1707939175, + "narHash": "sha256-D1xan0lgxbmXDyzVqXTiSYHLmAMrMRdD+alKzEO/p3w=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "f7e8132daca31b1e3859ac0fb49741754375ac3d", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "pre-commit-hooks": { + "inputs": { + "flake-compat": [ + "devenv", + "flake-compat" + ], + "flake-utils": "flake-utils", + "gitignore": "gitignore", + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-stable": "nixpkgs-stable" + }, + "locked": { + "lastModified": 1704725188, + "narHash": 
"sha256-qq8NbkhRZF1vVYQFt1s8Mbgo8knj+83+QlL5LBnYGpI=", + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "rev": "ea96f0c05924341c551a797aaba8126334c505d2", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "type": "github" + } + }, + "root": { + "inputs": { + "devenv": "devenv", + "flake-parts": "flake-parts", + "nixpkgs": "nixpkgs_2" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/vendor/github.com/spf13/viper/flake.nix b/vendor/github.com/spf13/viper/flake.nix new file mode 100644 index 00000000..0230668c --- /dev/null +++ b/vendor/github.com/spf13/viper/flake.nix @@ -0,0 +1,57 @@ +{ + description = "Viper"; + + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; + flake-parts.url = "github:hercules-ci/flake-parts"; + devenv.url = "github:cachix/devenv"; + }; + + outputs = inputs@{ flake-parts, ... }: + flake-parts.lib.mkFlake { inherit inputs; } { + imports = [ + inputs.devenv.flakeModule + ]; + + systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ]; + + perSystem = { config, self', inputs', pkgs, system, ... }: rec { + devenv.shells = { + default = { + languages = { + go.enable = true; + go.package = pkgs.go_1_22; + }; + + pre-commit.hooks = { + nixpkgs-fmt.enable = true; + yamllint.enable = true; + }; + + packages = with pkgs; [ + gnumake + + golangci-lint + yamllint + ]; + + scripts = { + versions.exec = '' + go version + golangci-lint version + ''; + }; + + enterShell = '' + versions + ''; + + # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 + containers = pkgs.lib.mkForce { }; + }; + + ci = devenv.shells.default; + }; + }; + }; +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/BUILD.bazel b/vendor/github.com/spf13/viper/internal/encoding/BUILD.bazel new file mode 100644 index 00000000..e172a36a --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "encoding", + srcs = [ + "decoder.go", + "encoder.go", + "error.go", + ], + importmap = "peridot.resf.org/vendor/github.com/spf13/viper/internal/encoding", + importpath = "github.com/spf13/viper/internal/encoding", + visibility = ["//vendor/github.com/spf13/viper:__subpackages__"], +) diff --git a/vendor/github.com/spf13/viper/internal/encoding/decoder.go b/vendor/github.com/spf13/viper/internal/encoding/decoder.go new file mode 100644 index 00000000..8a7b1dbc --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/decoder.go @@ -0,0 +1,61 @@ +package encoding + +import ( + "sync" +) + +// Decoder decodes the contents of b into v. +// It's primarily used for decoding contents of a file into a map[string]any. +type Decoder interface { + Decode(b []byte, v map[string]any) error +} + +const ( + // ErrDecoderNotFound is returned when there is no decoder registered for a format. + ErrDecoderNotFound = encodingError("decoder not found for this format") + + // ErrDecoderFormatAlreadyRegistered is returned when an decoder is already registered for a format. 
+ ErrDecoderFormatAlreadyRegistered = encodingError("decoder already registered for this format") +) + +// DecoderRegistry can choose an appropriate Decoder based on the provided format. +type DecoderRegistry struct { + decoders map[string]Decoder + + mu sync.RWMutex +} + +// NewDecoderRegistry returns a new, initialized DecoderRegistry. +func NewDecoderRegistry() *DecoderRegistry { + return &DecoderRegistry{ + decoders: make(map[string]Decoder), + } +} + +// RegisterDecoder registers a Decoder for a format. +// Registering a Decoder for an already existing format is not supported. +func (e *DecoderRegistry) RegisterDecoder(format string, enc Decoder) error { + e.mu.Lock() + defer e.mu.Unlock() + + if _, ok := e.decoders[format]; ok { + return ErrDecoderFormatAlreadyRegistered + } + + e.decoders[format] = enc + + return nil +} + +// Decode calls the underlying Decoder based on the format. +func (e *DecoderRegistry) Decode(format string, b []byte, v map[string]any) error { + e.mu.RLock() + decoder, ok := e.decoders[format] + e.mu.RUnlock() + + if !ok { + return ErrDecoderNotFound + } + + return decoder.Decode(b, v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/dotenv/BUILD.bazel b/vendor/github.com/spf13/viper/internal/encoding/dotenv/BUILD.bazel new file mode 100644 index 00000000..274824c2 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/dotenv/BUILD.bazel @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "dotenv", + srcs = [ + "codec.go", + "map_utils.go", + ], + importmap = "peridot.resf.org/vendor/github.com/spf13/viper/internal/encoding/dotenv", + importpath = "github.com/spf13/viper/internal/encoding/dotenv", + visibility = ["//vendor/github.com/spf13/viper:__subpackages__"], + deps = [ + "//vendor/github.com/spf13/cast", + "//vendor/github.com/subosito/gotenv", + ], +) diff --git a/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go b/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go new file mode 100644 index 00000000..3ebc76f0 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go @@ -0,0 +1,61 @@ +package dotenv + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "github.com/subosito/gotenv" +) + +const keyDelimiter = "_" + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for encoding data containing environment variables +// (commonly called as dotenv format). 
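These encoding packages are internal to the viper module and cannot be imported from outside it. Purely as an illustration of the behavior: the dotenv encoder flattens nested maps on the `_` delimiter and upper-cases the resulting keys, so a sketch like the following (compilable only inside the viper module) would print flat `KEY=value` pairs:

```go
// Illustration only: github.com/spf13/viper/internal/encoding/dotenv is an
// internal package, so this snippet only compiles inside the viper module.
in := map[string]any{
	"database": map[string]any{
		"host": "localhost",
		"port": 5432,
	},
}

out, err := dotenv.Codec{}.Encode(in)
if err != nil {
	panic(err)
}

fmt.Print(string(out))
// DATABASE_HOST=localhost
// DATABASE_PORT=5432
```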
+type Codec struct{} + +func (Codec) Encode(v map[string]any) ([]byte, error) { + flattened := map[string]any{} + + flattened = flattenAndMergeMap(flattened, v, "", keyDelimiter) + + keys := make([]string, 0, len(flattened)) + + for key := range flattened { + keys = append(keys, key) + } + + sort.Strings(keys) + + var buf bytes.Buffer + + for _, key := range keys { + _, err := buf.WriteString(fmt.Sprintf("%v=%v\n", strings.ToUpper(key), flattened[key])) + if err != nil { + return nil, err + } + } + + return buf.Bytes(), nil +} + +func (Codec) Decode(b []byte, v map[string]any) error { + var buf bytes.Buffer + + _, err := buf.Write(b) + if err != nil { + return err + } + + env, err := gotenv.StrictParse(&buf) + if err != nil { + return err + } + + for key, value := range env { + v[key] = value + } + + return nil +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go b/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go new file mode 100644 index 00000000..8bfe0a9d --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go @@ -0,0 +1,41 @@ +package dotenv + +import ( + "strings" + + "github.com/spf13/cast" +) + +// flattenAndMergeMap recursively flattens the given map into a new map +// Code is based on the function with the same name in the main package. +// TODO: move it to a common place. +func flattenAndMergeMap(shadow, m map[string]any, prefix, delimiter string) map[string]any { + if shadow != nil && prefix != "" && shadow[prefix] != nil { + // prefix is shadowed => nothing more to flatten + return shadow + } + if shadow == nil { + shadow = make(map[string]any) + } + + var m2 map[string]any + if prefix != "" { + prefix += delimiter + } + for k, val := range m { + fullKey := prefix + k + switch val := val.(type) { + case map[string]any: + m2 = val + case map[any]any: + m2 = cast.ToStringMap(val) + default: + // immediate value + shadow[strings.ToLower(fullKey)] = val + continue + } + // recursively merge to shadow map + shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter) + } + return shadow +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/encoder.go b/vendor/github.com/spf13/viper/internal/encoding/encoder.go new file mode 100644 index 00000000..65958596 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/encoder.go @@ -0,0 +1,60 @@ +package encoding + +import ( + "sync" +) + +// Encoder encodes the contents of v into a byte representation. +// It's primarily used for encoding a map[string]any into a file format. +type Encoder interface { + Encode(v map[string]any) ([]byte, error) +} + +const ( + // ErrEncoderNotFound is returned when there is no encoder registered for a format. + ErrEncoderNotFound = encodingError("encoder not found for this format") + + // ErrEncoderFormatAlreadyRegistered is returned when an encoder is already registered for a format. + ErrEncoderFormatAlreadyRegistered = encodingError("encoder already registered for this format") +) + +// EncoderRegistry can choose an appropriate Encoder based on the provided format. +type EncoderRegistry struct { + encoders map[string]Encoder + + mu sync.RWMutex +} + +// NewEncoderRegistry returns a new, initialized EncoderRegistry. +func NewEncoderRegistry() *EncoderRegistry { + return &EncoderRegistry{ + encoders: make(map[string]Encoder), + } +} + +// RegisterEncoder registers an Encoder for a format. +// Registering a Encoder for an already existing format is not supported. 
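The decoder and encoder registries share the same shape: a mutex-guarded map from format name to codec, with registration refused for duplicate formats. A usage sketch (again, these are internal packages, so this only compiles inside the viper module; the `json` codec used here appears later in this diff):

```go
// Sketch: register a codec once, then dispatch by format string.
registry := encoding.NewEncoderRegistry()

if err := registry.RegisterEncoder("json", json.Codec{}); err != nil {
	panic(err) // a second registration for "json" would fail here
}

b, err := registry.Encode("json", map[string]any{"port": 8080})
if err != nil {
	panic(err)
}

fmt.Println(string(b)) // {"port": 8080}, pretty-printed
```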
+func (e *EncoderRegistry) RegisterEncoder(format string, enc Encoder) error { + e.mu.Lock() + defer e.mu.Unlock() + + if _, ok := e.encoders[format]; ok { + return ErrEncoderFormatAlreadyRegistered + } + + e.encoders[format] = enc + + return nil +} + +func (e *EncoderRegistry) Encode(format string, v map[string]any) ([]byte, error) { + e.mu.RLock() + encoder, ok := e.encoders[format] + e.mu.RUnlock() + + if !ok { + return nil, ErrEncoderNotFound + } + + return encoder.Encode(v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/error.go b/vendor/github.com/spf13/viper/internal/encoding/error.go new file mode 100644 index 00000000..e4cde02d --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/error.go @@ -0,0 +1,7 @@ +package encoding + +type encodingError string + +func (e encodingError) Error() string { + return string(e) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/hcl/BUILD.bazel b/vendor/github.com/spf13/viper/internal/encoding/hcl/BUILD.bazel new file mode 100644 index 00000000..82efa9d0 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/hcl/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "hcl", + srcs = ["codec.go"], + importmap = "peridot.resf.org/vendor/github.com/spf13/viper/internal/encoding/hcl", + importpath = "github.com/spf13/viper/internal/encoding/hcl", + visibility = ["//vendor/github.com/spf13/viper:__subpackages__"], + deps = [ + "//vendor/github.com/hashicorp/hcl", + "//vendor/github.com/hashicorp/hcl/hcl/printer", + ], +) diff --git a/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go b/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go new file mode 100644 index 00000000..d7fa8a1b --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go @@ -0,0 +1,40 @@ +package hcl + +import ( + "bytes" + "encoding/json" + + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/printer" +) + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for HCL encoding. +// TODO: add printer config to the codec? +type Codec struct{} + +func (Codec) Encode(v map[string]any) ([]byte, error) { + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + + // TODO: use printer.Format? Is the trailing newline an issue? 
+ + ast, err := hcl.Parse(string(b)) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + + err = printer.Fprint(&buf, ast.Node) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (Codec) Decode(b []byte, v map[string]any) error { + return hcl.Unmarshal(b, &v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/ini/BUILD.bazel b/vendor/github.com/spf13/viper/internal/encoding/ini/BUILD.bazel new file mode 100644 index 00000000..1bf56d1a --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/ini/BUILD.bazel @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "ini", + srcs = [ + "codec.go", + "map_utils.go", + ], + importmap = "peridot.resf.org/vendor/github.com/spf13/viper/internal/encoding/ini", + importpath = "github.com/spf13/viper/internal/encoding/ini", + visibility = ["//vendor/github.com/spf13/viper:__subpackages__"], + deps = [ + "//vendor/github.com/spf13/cast", + "//vendor/gopkg.in/ini.v1:ini_v1", + ], +) diff --git a/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go b/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go new file mode 100644 index 00000000..d91cf59d --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go @@ -0,0 +1,99 @@ +package ini + +import ( + "bytes" + "sort" + "strings" + + "github.com/spf13/cast" + "gopkg.in/ini.v1" +) + +// LoadOptions contains all customized options used for load data source(s). +// This type is added here for convenience: this way consumers can import a single package called "ini". +type LoadOptions = ini.LoadOptions + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for INI encoding. +type Codec struct { + KeyDelimiter string + LoadOptions LoadOptions +} + +func (c Codec) Encode(v map[string]any) ([]byte, error) { + cfg := ini.Empty() + ini.PrettyFormat = false + + flattened := map[string]any{} + + flattened = flattenAndMergeMap(flattened, v, "", c.keyDelimiter()) + + keys := make([]string, 0, len(flattened)) + + for key := range flattened { + keys = append(keys, key) + } + + sort.Strings(keys) + + for _, key := range keys { + sectionName, keyName := "", key + + lastSep := strings.LastIndex(key, ".") + if lastSep != -1 { + sectionName = key[:(lastSep)] + keyName = key[(lastSep + 1):] + } + + // TODO: is this a good idea? + if sectionName == "default" { + sectionName = "" + } + + cfg.Section(sectionName).Key(keyName).SetValue(cast.ToString(flattened[key])) + } + + var buf bytes.Buffer + + _, err := cfg.WriteTo(&buf) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (c Codec) Decode(b []byte, v map[string]any) error { + cfg := ini.Empty(c.LoadOptions) + + err := cfg.Append(b) + if err != nil { + return err + } + + sections := cfg.Sections() + + for i := 0; i < len(sections); i++ { + section := sections[i] + keys := section.Keys() + + for j := 0; j < len(keys); j++ { + key := keys[j] + value := cfg.Section(section.Name()).Key(key.Name()).String() + + deepestMap := deepSearch(v, strings.Split(section.Name(), c.keyDelimiter())) + + // set innermost value + deepestMap[key.Name()] = value + } + } + + return nil +} + +func (c Codec) keyDelimiter() string { + if c.KeyDelimiter == "" { + return "." 
+ } + + return c.KeyDelimiter +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go b/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go new file mode 100644 index 00000000..490ab594 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go @@ -0,0 +1,74 @@ +package ini + +import ( + "strings" + + "github.com/spf13/cast" +) + +// THIS CODE IS COPIED HERE: IT SHOULD NOT BE MODIFIED +// AT SOME POINT IT WILL BE MOVED TO A COMMON PLACE +// deepSearch scans deep maps, following the key indexes listed in the +// sequence "path". +// The last value is expected to be another map, and is returned. +// +// In case intermediate keys do not exist, or map to a non-map value, +// a new map is created and inserted, and the search continues from there: +// the initial map "m" may be modified! +func deepSearch(m map[string]any, path []string) map[string]any { + for _, k := range path { + m2, ok := m[k] + if !ok { + // intermediate key does not exist + // => create it and continue from there + m3 := make(map[string]any) + m[k] = m3 + m = m3 + continue + } + m3, ok := m2.(map[string]any) + if !ok { + // intermediate key is a value + // => replace with a new map + m3 = make(map[string]any) + m[k] = m3 + } + // continue search from here + m = m3 + } + return m +} + +// flattenAndMergeMap recursively flattens the given map into a new map +// Code is based on the function with the same name in the main package. +// TODO: move it to a common place. +func flattenAndMergeMap(shadow, m map[string]any, prefix, delimiter string) map[string]any { + if shadow != nil && prefix != "" && shadow[prefix] != nil { + // prefix is shadowed => nothing more to flatten + return shadow + } + if shadow == nil { + shadow = make(map[string]any) + } + + var m2 map[string]any + if prefix != "" { + prefix += delimiter + } + for k, val := range m { + fullKey := prefix + k + switch val := val.(type) { + case map[string]any: + m2 = val + case map[any]any: + m2 = cast.ToStringMap(val) + default: + // immediate value + shadow[strings.ToLower(fullKey)] = val + continue + } + // recursively merge to shadow map + shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter) + } + return shadow +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/javaproperties/BUILD.bazel b/vendor/github.com/spf13/viper/internal/encoding/javaproperties/BUILD.bazel new file mode 100644 index 00000000..5addbdcd --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/javaproperties/BUILD.bazel @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "javaproperties", + srcs = [ + "codec.go", + "map_utils.go", + ], + importmap = "peridot.resf.org/vendor/github.com/spf13/viper/internal/encoding/javaproperties", + importpath = "github.com/spf13/viper/internal/encoding/javaproperties", + visibility = ["//vendor/github.com/spf13/viper:__subpackages__"], + deps = [ + "//vendor/github.com/magiconair/properties", + "//vendor/github.com/spf13/cast", + ], +) diff --git a/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go b/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go new file mode 100644 index 00000000..e92e5172 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go @@ -0,0 +1,86 @@ +package javaproperties + +import ( + "bytes" + "sort" + "strings" + + "github.com/magiconair/properties" + "github.com/spf13/cast" +) + +// Codec implements the encoding.Encoder and 
encoding.Decoder interfaces for Java properties encoding. +type Codec struct { + KeyDelimiter string + + // Store read properties on the object so that we can write back in order with comments. + // This will only be used if the configuration read is a properties file. + // TODO: drop this feature in v2 + // TODO: make use of the global properties object optional + Properties *properties.Properties +} + +func (c *Codec) Encode(v map[string]any) ([]byte, error) { + if c.Properties == nil { + c.Properties = properties.NewProperties() + } + + flattened := map[string]any{} + + flattened = flattenAndMergeMap(flattened, v, "", c.keyDelimiter()) + + keys := make([]string, 0, len(flattened)) + + for key := range flattened { + keys = append(keys, key) + } + + sort.Strings(keys) + + for _, key := range keys { + _, _, err := c.Properties.Set(key, cast.ToString(flattened[key])) + if err != nil { + return nil, err + } + } + + var buf bytes.Buffer + + _, err := c.Properties.WriteComment(&buf, "#", properties.UTF8) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (c *Codec) Decode(b []byte, v map[string]any) error { + var err error + c.Properties, err = properties.Load(b, properties.UTF8) + if err != nil { + return err + } + + for _, key := range c.Properties.Keys() { + // ignore existence check: we know it's there + value, _ := c.Properties.Get(key) + + // recursively build nested maps + path := strings.Split(key, c.keyDelimiter()) + lastKey := strings.ToLower(path[len(path)-1]) + deepestMap := deepSearch(v, path[0:len(path)-1]) + + // set innermost value + deepestMap[lastKey] = value + } + + return nil +} + +func (c Codec) keyDelimiter() string { + if c.KeyDelimiter == "" { + return "." + } + + return c.KeyDelimiter +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go b/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go new file mode 100644 index 00000000..6e1aff22 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go @@ -0,0 +1,74 @@ +package javaproperties + +import ( + "strings" + + "github.com/spf13/cast" +) + +// THIS CODE IS COPIED HERE: IT SHOULD NOT BE MODIFIED +// AT SOME POINT IT WILL BE MOVED TO A COMMON PLACE +// deepSearch scans deep maps, following the key indexes listed in the +// sequence "path". +// The last value is expected to be another map, and is returned. +// +// In case intermediate keys do not exist, or map to a non-map value, +// a new map is created and inserted, and the search continues from there: +// the initial map "m" may be modified! +func deepSearch(m map[string]any, path []string) map[string]any { + for _, k := range path { + m2, ok := m[k] + if !ok { + // intermediate key does not exist + // => create it and continue from there + m3 := make(map[string]any) + m[k] = m3 + m = m3 + continue + } + m3, ok := m2.(map[string]any) + if !ok { + // intermediate key is a value + // => replace with a new map + m3 = make(map[string]any) + m[k] = m3 + } + // continue search from here + m = m3 + } + return m +} + +// flattenAndMergeMap recursively flattens the given map into a new map +// Code is based on the function with the same name in the main package. +// TODO: move it to a common place. 
+func flattenAndMergeMap(shadow, m map[string]any, prefix, delimiter string) map[string]any { + if shadow != nil && prefix != "" && shadow[prefix] != nil { + // prefix is shadowed => nothing more to flatten + return shadow + } + if shadow == nil { + shadow = make(map[string]any) + } + + var m2 map[string]any + if prefix != "" { + prefix += delimiter + } + for k, val := range m { + fullKey := prefix + k + switch val := val.(type) { + case map[string]any: + m2 = val + case map[any]any: + m2 = cast.ToStringMap(val) + default: + // immediate value + shadow[strings.ToLower(fullKey)] = val + continue + } + // recursively merge to shadow map + shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter) + } + return shadow +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/json/BUILD.bazel b/vendor/github.com/spf13/viper/internal/encoding/json/BUILD.bazel new file mode 100644 index 00000000..e74fa56d --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/json/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "json", + srcs = ["codec.go"], + importmap = "peridot.resf.org/vendor/github.com/spf13/viper/internal/encoding/json", + importpath = "github.com/spf13/viper/internal/encoding/json", + visibility = ["//vendor/github.com/spf13/viper:__subpackages__"], +) diff --git a/vendor/github.com/spf13/viper/internal/encoding/json/codec.go b/vendor/github.com/spf13/viper/internal/encoding/json/codec.go new file mode 100644 index 00000000..da7546b5 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/json/codec.go @@ -0,0 +1,17 @@ +package json + +import ( + "encoding/json" +) + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for JSON encoding. +type Codec struct{} + +func (Codec) Encode(v map[string]any) ([]byte, error) { + // TODO: expose prefix and indent in the Codec as setting? + return json.MarshalIndent(v, "", " ") +} + +func (Codec) Decode(b []byte, v map[string]any) error { + return json.Unmarshal(b, &v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/toml/BUILD.bazel b/vendor/github.com/spf13/viper/internal/encoding/toml/BUILD.bazel new file mode 100644 index 00000000..c5f79bb0 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/toml/BUILD.bazel @@ -0,0 +1,10 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "toml", + srcs = ["codec.go"], + importmap = "peridot.resf.org/vendor/github.com/spf13/viper/internal/encoding/toml", + importpath = "github.com/spf13/viper/internal/encoding/toml", + visibility = ["//vendor/github.com/spf13/viper:__subpackages__"], + deps = ["//vendor/github.com/pelletier/go-toml/v2:go-toml"], +) diff --git a/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go b/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go new file mode 100644 index 00000000..c70aa8d2 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go @@ -0,0 +1,16 @@ +package toml + +import ( + "github.com/pelletier/go-toml/v2" +) + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for TOML encoding. 
+type Codec struct{} + +func (Codec) Encode(v map[string]any) ([]byte, error) { + return toml.Marshal(v) +} + +func (Codec) Decode(b []byte, v map[string]any) error { + return toml.Unmarshal(b, &v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/yaml/BUILD.bazel b/vendor/github.com/spf13/viper/internal/encoding/yaml/BUILD.bazel new file mode 100644 index 00000000..b0739d1c --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/yaml/BUILD.bazel @@ -0,0 +1,10 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "yaml", + srcs = ["codec.go"], + importmap = "peridot.resf.org/vendor/github.com/spf13/viper/internal/encoding/yaml", + importpath = "github.com/spf13/viper/internal/encoding/yaml", + visibility = ["//vendor/github.com/spf13/viper:__subpackages__"], + deps = ["//vendor/gopkg.in/yaml.v3:yaml_v3"], +) diff --git a/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go b/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go new file mode 100644 index 00000000..03687924 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go @@ -0,0 +1,14 @@ +package yaml + +import "gopkg.in/yaml.v3" + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for YAML encoding. +type Codec struct{} + +func (Codec) Encode(v map[string]any) ([]byte, error) { + return yaml.Marshal(v) +} + +func (Codec) Decode(b []byte, v map[string]any) error { + return yaml.Unmarshal(b, &v) +} diff --git a/vendor/github.com/spf13/viper/internal/features/BUILD.bazel b/vendor/github.com/spf13/viper/internal/features/BUILD.bazel new file mode 100644 index 00000000..0a29349d --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/features/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "features", + srcs = ["bind_struct_default.go"], + importmap = "peridot.resf.org/vendor/github.com/spf13/viper/internal/features", + importpath = "github.com/spf13/viper/internal/features", + visibility = ["//vendor/github.com/spf13/viper:__subpackages__"], +) diff --git a/vendor/github.com/spf13/viper/internal/features/bind_struct.go b/vendor/github.com/spf13/viper/internal/features/bind_struct.go new file mode 100644 index 00000000..89302c21 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/features/bind_struct.go @@ -0,0 +1,5 @@ +//go:build viper_bind_struct + +package features + +const BindStruct = true diff --git a/vendor/github.com/spf13/viper/internal/features/bind_struct_default.go b/vendor/github.com/spf13/viper/internal/features/bind_struct_default.go new file mode 100644 index 00000000..edfaf73b --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/features/bind_struct_default.go @@ -0,0 +1,5 @@ +//go:build !viper_bind_struct + +package features + +const BindStruct = false diff --git a/vendor/github.com/spf13/viper/logger.go b/vendor/github.com/spf13/viper/logger.go new file mode 100644 index 00000000..8938053b --- /dev/null +++ b/vendor/github.com/spf13/viper/logger.go @@ -0,0 +1,68 @@ +package viper + +import ( + "context" + + slog "github.com/sagikazarmark/slog-shim" +) + +// Logger is a unified interface for various logging use cases and practices, including: +// - leveled logging +// - structured logging +// +// Deprecated: use `log/slog` instead. +type Logger interface { + // Trace logs a Trace event. + // + // Even more fine-grained information than Debug events. + // Loggers not supporting this level should fall back to Debug. 
+ Trace(msg string, keyvals ...any) + + // Debug logs a Debug event. + // + // A verbose series of information events. + // They are useful when debugging the system. + Debug(msg string, keyvals ...any) + + // Info logs an Info event. + // + // General information about what's happening inside the system. + Info(msg string, keyvals ...any) + + // Warn logs a Warn(ing) event. + // + // Non-critical events that should be looked at. + Warn(msg string, keyvals ...any) + + // Error logs an Error event. + // + // Critical events that require immediate attention. + // Loggers commonly provide Fatal and Panic levels above Error level, + // but exiting and panicking is out of scope for a logging library. + Error(msg string, keyvals ...any) +} + +// WithLogger sets a custom logger. +func WithLogger(l *slog.Logger) Option { + return optionFunc(func(v *Viper) { + v.logger = l + }) +} + +type discardHandler struct{} + +func (n *discardHandler) Enabled(_ context.Context, _ slog.Level) bool { + return false +} + +func (n *discardHandler) Handle(_ context.Context, _ slog.Record) error { + return nil +} + +func (n *discardHandler) WithAttrs(_ []slog.Attr) slog.Handler { + return n +} + +func (n *discardHandler) WithGroup(_ string) slog.Handler { + return n +} diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go index cee6b242..117c6ac3 100644 --- a/vendor/github.com/spf13/viper/util.go +++ b/vendor/github.com/spf13/viper/util.go @@ -18,9 +18,8 @@ import ( "strings" "unicode" - "github.com/spf13/afero" + slog "github.com/sagikazarmark/slog-shim" "github.com/spf13/cast" - jww "github.com/spf13/jwalterweatherman" ) // ConfigParseError denotes failing to parse configuration file. @@ -33,13 +32,18 @@ func (pe ConfigParseError) Error() string { return fmt.Sprintf("While parsing config: %s", pe.err.Error()) } +// Unwrap returns the wrapped error. +func (pe ConfigParseError) Unwrap() error { + return pe.err +} + // toCaseInsensitiveValue checks if the value is a map; // if so, create a copy and lower-case the keys recursively. -func toCaseInsensitiveValue(value interface{}) interface{} { +func toCaseInsensitiveValue(value any) any { switch v := value.(type) { - case map[interface{}]interface{}: + case map[any]any: value = copyAndInsensitiviseMap(cast.ToStringMap(v)) - case map[string]interface{}: + case map[string]any: value = copyAndInsensitiviseMap(v) } @@ -48,15 +52,15 @@ func toCaseInsensitiveValue(value interface{}) interface{} { // copyAndInsensitiviseMap behaves like insensitiviseMap, but creates a copy of // any map it makes case insensitive. 
-func copyAndInsensitiviseMap(m map[string]interface{}) map[string]interface{} { - nm := make(map[string]interface{}) +func copyAndInsensitiviseMap(m map[string]any) map[string]any { + nm := make(map[string]any) for key, val := range m { lkey := strings.ToLower(key) switch v := val.(type) { - case map[interface{}]interface{}: + case map[any]any: nm[lkey] = copyAndInsensitiviseMap(cast.ToStringMap(v)) - case map[string]interface{}: + case map[string]any: nm[lkey] = copyAndInsensitiviseMap(v) default: nm[lkey] = v @@ -66,18 +70,25 @@ func copyAndInsensitiviseMap(m map[string]interface{}) map[string]interface{} { return nm } -func insensitiviseMap(m map[string]interface{}) { - for key, val := range m { - switch val.(type) { - case map[interface{}]interface{}: - // nested map: cast and recursively insensitivise - val = cast.ToStringMap(val) - insensitiviseMap(val.(map[string]interface{})) - case map[string]interface{}: - // nested map: recursively insensitivise - insensitiviseMap(val.(map[string]interface{})) - } +func insensitiviseVal(val any) any { + switch v := val.(type) { + case map[any]any: + // nested map: cast and recursively insensitivise + val = cast.ToStringMap(val) + insensitiviseMap(val.(map[string]any)) + case map[string]any: + // nested map: recursively insensitivise + insensitiviseMap(v) + case []any: + // nested array: recursively insensitivise + insensitiveArray(v) + } + return val +} +func insensitiviseMap(m map[string]any) { + for key, val := range m { + val = insensitiviseVal(val) lower := strings.ToLower(key) if key != lower { // remove old key (not lower-cased) @@ -88,26 +99,20 @@ func insensitiviseMap(m map[string]interface{}) { } } -func absPathify(inPath string) string { - jww.INFO.Println("Trying to resolve absolute path to", inPath) +func insensitiveArray(a []any) { + for i, val := range a { + a[i] = insensitiviseVal(val) + } +} + +func absPathify(logger *slog.Logger, inPath string) string { + logger.Info("trying to resolve absolute path", "path", inPath) if inPath == "$HOME" || strings.HasPrefix(inPath, "$HOME"+string(os.PathSeparator)) { inPath = userHomeDir() + inPath[5:] } - if strings.HasPrefix(inPath, "$") { - end := strings.Index(inPath, string(os.PathSeparator)) - - var value, suffix string - if end == -1 { - value = os.Getenv(inPath[1:]) - } else { - value = os.Getenv(inPath[1:end]) - suffix = inPath[end:] - } - - inPath = value + suffix - } + inPath = os.ExpandEnv(inPath) if filepath.IsAbs(inPath) { return filepath.Clean(inPath) @@ -118,21 +123,9 @@ func absPathify(inPath string) string { return filepath.Clean(p) } - jww.ERROR.Println("Couldn't discover absolute path") - jww.ERROR.Println(err) - return "" -} + logger.Error(fmt.Errorf("could not discover absolute path: %w", err).Error()) -// Check if file Exists -func exists(fs afero.Fs, path string) (bool, error) { - stat, err := fs.Stat(path) - if err == nil { - return !stat.IsDir(), nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err + return "" } func stringInSlice(a string, list []string) bool { @@ -163,7 +156,7 @@ func safeMul(a, b uint) uint { return c } -// parseSizeInBytes converts strings like 1GB or 12 mb into an unsigned integer number of bytes +// parseSizeInBytes converts strings like 1GB or 12 mb into an unsigned integer number of bytes. 
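// Illustrative sketch, not part of the patch: the reworked insensitivise
// helpers above now lower-case keys recursively inside nested []any slices
// as well, which is observable through viper's public API.
package main

import (
	"bytes"
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.SetConfigType("yaml")
	_ = v.ReadConfig(bytes.NewBufferString("Servers:\n  - Name: Alpha\n"))
	// Keys were lower-cased on read, including inside the slice element;
	// values such as "Alpha" are left untouched.
	fmt.Println(v.Get("servers")) // [map[name:Alpha]]
}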
func parseSizeInBytes(sizeStr string) uint { sizeStr = strings.TrimSpace(sizeStr) lastChar := len(sizeStr) - 1 @@ -205,22 +198,22 @@ func parseSizeInBytes(sizeStr string) uint { // In case intermediate keys do not exist, or map to a non-map value, // a new map is created and inserted, and the search continues from there: // the initial map "m" may be modified! -func deepSearch(m map[string]interface{}, path []string) map[string]interface{} { +func deepSearch(m map[string]any, path []string) map[string]any { for _, k := range path { m2, ok := m[k] if !ok { // intermediate key does not exist // => create it and continue from there - m3 := make(map[string]interface{}) + m3 := make(map[string]any) m[k] = m3 m = m3 continue } - m3, ok := m2.(map[string]interface{}) + m3, ok := m2.(map[string]any) if !ok { // intermediate key is a value // => replace with a new map - m3 = make(map[string]interface{}) + m3 = make(map[string]any) m[k] = m3 } // continue search from here diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go index 405dc20f..da68d994 100644 --- a/vendor/github.com/spf13/viper/viper.go +++ b/vendor/github.com/spf13/viper/viper.go @@ -22,31 +22,33 @@ package viper import ( "bytes" "encoding/csv" - "encoding/json" "errors" "fmt" "io" - "log" "os" "path/filepath" "reflect" + "strconv" "strings" "sync" "time" "github.com/fsnotify/fsnotify" - "github.com/hashicorp/hcl" - "github.com/hashicorp/hcl/hcl/printer" - "github.com/magiconair/properties" "github.com/mitchellh/mapstructure" - "github.com/pelletier/go-toml" + slog "github.com/sagikazarmark/slog-shim" "github.com/spf13/afero" "github.com/spf13/cast" - jww "github.com/spf13/jwalterweatherman" "github.com/spf13/pflag" - "github.com/subosito/gotenv" - "gopkg.in/ini.v1" - "gopkg.in/yaml.v2" + + "github.com/spf13/viper/internal/encoding" + "github.com/spf13/viper/internal/encoding/dotenv" + "github.com/spf13/viper/internal/encoding/hcl" + "github.com/spf13/viper/internal/encoding/ini" + "github.com/spf13/viper/internal/encoding/javaproperties" + "github.com/spf13/viper/internal/encoding/json" + "github.com/spf13/viper/internal/encoding/toml" + "github.com/spf13/viper/internal/encoding/yaml" + "github.com/spf13/viper/internal/features" ) // ConfigMarshalError happens when failing to marshal the configuration. @@ -76,7 +78,7 @@ type remoteConfigFactory interface { WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool) } -// RemoteConfig is optional, see the remote package +// RemoteConfig is optional, see the remote package. var RemoteConfig remoteConfigFactory // UnsupportedConfigError denotes encountering an unsupported @@ -101,7 +103,7 @@ func (str UnsupportedRemoteProviderError) Error() string { // pull the configuration from the remote provider. type RemoteConfigError string -// Error returns the formatted remote provider error +// Error returns the formatted remote provider error. func (rce RemoteConfigError) Error() string { return fmt.Sprintf("Remote Configurations Error: %s", string(rce)) } @@ -125,16 +127,16 @@ func (faee ConfigFileAlreadyExistsError) Error() string { } // A DecoderConfigOption can be passed to viper.Unmarshal to configure -// mapstructure.DecoderConfig options +// mapstructure.DecoderConfig options. 
type DecoderConfigOption func(*mapstructure.DecoderConfig) // DecodeHook returns a DecoderConfigOption which overrides the default // DecoderConfig.DecodeHook value, the default is: // -// mapstructure.ComposeDecodeHookFunc( -// mapstructure.StringToTimeDurationHookFunc(), -// mapstructure.StringToSliceHookFunc(","), -// ) +// mapstructure.ComposeDecodeHookFunc( +// mapstructure.StringToTimeDurationHookFunc(), +// mapstructure.StringToSliceHookFunc(","), +// ) func DecodeHook(hook mapstructure.DecodeHookFunc) DecoderConfigOption { return func(c *mapstructure.DecoderConfig) { c.DecodeHook = hook @@ -155,18 +157,18 @@ func DecodeHook(hook mapstructure.DecodeHookFunc) DecoderConfigOption { // // For example, if values from the following sources were loaded: // -// Defaults : { -// "secret": "", -// "user": "default", -// "endpoint": "https://localhost" -// } -// Config : { -// "user": "root" -// "secret": "defaultsecret" -// } -// Env : { -// "secret": "somesecretkey" -// } +// Defaults : { +// "secret": "", +// "user": "default", +// "endpoint": "https://localhost" +// } +// Config : { +// "user": "root" +// "secret": "defaultsecret" +// } +// Env : { +// "secret": "somesecretkey" +// } // // The resulting config will have the following values: // @@ -175,6 +177,8 @@ func DecodeHook(hook mapstructure.DecodeHookFunc) DecoderConfigOption { // "user": "root", // "endpoint": "https://localhost" // } +// +// Note: Vipers are not safe for concurrent Get() and Set() operations. type Viper struct { // Delimiter that separates a list of keys // used to access a nested value in one go @@ -196,24 +200,30 @@ type Viper struct { configPermissions os.FileMode envPrefix string + // Specific commands for ini parsing + iniLoadOptions ini.LoadOptions + automaticEnvApplied bool envKeyReplacer StringReplacer allowEmptyEnv bool - config map[string]interface{} - override map[string]interface{} - defaults map[string]interface{} - kvstore map[string]interface{} + parents []string + config map[string]any + override map[string]any + defaults map[string]any + kvstore map[string]any pflags map[string]FlagValue - env map[string]string + env map[string][]string aliases map[string]string typeByDefValue bool - // Store read properties on the object so that we can write back in order with comments. - // This will only be used if the configuration read is a properties file. - properties *properties.Properties - onConfigChange func(fsnotify.Event) + + logger *slog.Logger + + // TODO: should probably be protected with a mutex + encoderRegistry *encoding.EncoderRegistry + decoderRegistry *encoding.DecoderRegistry } // New returns an initialized Viper instance. @@ -221,16 +231,20 @@ func New() *Viper { v := new(Viper) v.keyDelim = "." 
v.configName = "config" - v.configPermissions = os.FileMode(0644) + v.configPermissions = os.FileMode(0o644) v.fs = afero.NewOsFs() - v.config = make(map[string]interface{}) - v.override = make(map[string]interface{}) - v.defaults = make(map[string]interface{}) - v.kvstore = make(map[string]interface{}) + v.config = make(map[string]any) + v.parents = []string{} + v.override = make(map[string]any) + v.defaults = make(map[string]any) + v.kvstore = make(map[string]any) v.pflags = make(map[string]FlagValue) - v.env = make(map[string]string) + v.env = make(map[string][]string) v.aliases = make(map[string]string) v.typeByDefValue = false + v.logger = slog.New(&discardHandler{}) + + v.resetEncoding() return v } @@ -278,6 +292,8 @@ func NewWithOptions(opts ...Option) *Viper { opt.apply(v) } + v.resetEncoding() + return v } @@ -286,8 +302,86 @@ func NewWithOptions(opts ...Option) *Viper { // can use it in their testing as well. func Reset() { v = New() - SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "dotenv", "env", "ini"} - SupportedRemoteProviders = []string{"etcd", "consul", "firestore"} + SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"} + SupportedRemoteProviders = []string{"etcd", "etcd3", "consul", "firestore", "nats"} +} + +// TODO: make this lazy initialization instead. +func (v *Viper) resetEncoding() { + encoderRegistry := encoding.NewEncoderRegistry() + decoderRegistry := encoding.NewDecoderRegistry() + + { + codec := yaml.Codec{} + + encoderRegistry.RegisterEncoder("yaml", codec) + decoderRegistry.RegisterDecoder("yaml", codec) + + encoderRegistry.RegisterEncoder("yml", codec) + decoderRegistry.RegisterDecoder("yml", codec) + } + + { + codec := json.Codec{} + + encoderRegistry.RegisterEncoder("json", codec) + decoderRegistry.RegisterDecoder("json", codec) + } + + { + codec := toml.Codec{} + + encoderRegistry.RegisterEncoder("toml", codec) + decoderRegistry.RegisterDecoder("toml", codec) + } + + { + codec := hcl.Codec{} + + encoderRegistry.RegisterEncoder("hcl", codec) + decoderRegistry.RegisterDecoder("hcl", codec) + + encoderRegistry.RegisterEncoder("tfvars", codec) + decoderRegistry.RegisterDecoder("tfvars", codec) + } + + { + codec := ini.Codec{ + KeyDelimiter: v.keyDelim, + LoadOptions: v.iniLoadOptions, + } + + encoderRegistry.RegisterEncoder("ini", codec) + decoderRegistry.RegisterDecoder("ini", codec) + } + + { + codec := &javaproperties.Codec{ + KeyDelimiter: v.keyDelim, + } + + encoderRegistry.RegisterEncoder("properties", codec) + decoderRegistry.RegisterDecoder("properties", codec) + + encoderRegistry.RegisterEncoder("props", codec) + decoderRegistry.RegisterDecoder("props", codec) + + encoderRegistry.RegisterEncoder("prop", codec) + decoderRegistry.RegisterDecoder("prop", codec) + } + + { + codec := &dotenv.Codec{} + + encoderRegistry.RegisterEncoder("dotenv", codec) + decoderRegistry.RegisterDecoder("dotenv", codec) + + encoderRegistry.RegisterEncoder("env", codec) + decoderRegistry.RegisterDecoder("env", codec) + } + + v.encoderRegistry = encoderRegistry + v.decoderRegistry = decoderRegistry } type defaultRemoteProvider struct { @@ -325,31 +419,37 @@ type RemoteProvider interface { } // SupportedExts are universally supported extensions. 
-var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "dotenv", "env", "ini"} +var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"} // SupportedRemoteProviders are universally supported remote providers. -var SupportedRemoteProviders = []string{"etcd", "consul", "firestore"} +var SupportedRemoteProviders = []string{"etcd", "etcd3", "consul", "firestore", "nats"} +// OnConfigChange sets the event handler that is called when a config file changes. func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) } + +// OnConfigChange sets the event handler that is called when a config file changes. func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) { v.onConfigChange = run } +// WatchConfig starts watching a config file for changes. func WatchConfig() { v.WatchConfig() } +// WatchConfig starts watching a config file for changes. func (v *Viper) WatchConfig() { initWG := sync.WaitGroup{} initWG.Add(1) go func() { watcher, err := fsnotify.NewWatcher() if err != nil { - log.Fatal(err) + v.logger.Error(fmt.Sprintf("failed to create watcher: %s", err)) + os.Exit(1) } defer watcher.Close() // we have to watch the entire directory to pick up renames/atomic saves in a cross-platform way filename, err := v.getConfigFile() if err != nil { - log.Printf("error: %v\n", err) + v.logger.Error(fmt.Sprintf("get config file: %s", err)) initWG.Done() return } @@ -372,27 +472,25 @@ func (v *Viper) WatchConfig() { // we only care about the config file with the following cases: // 1 - if the config file was modified or created // 2 - if the real path to the config file changed (eg: k8s ConfigMap replacement) - const writeOrCreateMask = fsnotify.Write | fsnotify.Create if (filepath.Clean(event.Name) == configFile && - event.Op&writeOrCreateMask != 0) || + (event.Has(fsnotify.Write) || event.Has(fsnotify.Create))) || (currentConfigFile != "" && currentConfigFile != realConfigFile) { realConfigFile = currentConfigFile err := v.ReadInConfig() if err != nil { - log.Printf("error reading config file: %v\n", err) + v.logger.Error(fmt.Sprintf("read config file: %s", err)) } if v.onConfigChange != nil { v.onConfigChange(event) } - } else if filepath.Clean(event.Name) == configFile && - event.Op&fsnotify.Remove&fsnotify.Remove != 0 { + } else if filepath.Clean(event.Name) == configFile && event.Has(fsnotify.Remove) { eventsWG.Done() return } case err, ok := <-watcher.Errors: if ok { // 'Errors' channel is not closed - log.Printf("watcher error: %v\n", err) + v.logger.Error(fmt.Sprintf("watcher error: %s", err)) } eventsWG.Done() return @@ -409,6 +507,7 @@ func (v *Viper) WatchConfig() { // SetConfigFile explicitly defines the path, name and extension of the config file. // Viper will use this and not check any of the config paths. func SetConfigFile(in string) { v.SetConfigFile(in) } + func (v *Viper) SetConfigFile(in string) { if in != "" { v.configFile = in @@ -419,12 +518,19 @@ func (v *Viper) SetConfigFile(in string) { // E.g. if your prefix is "spf", the env registry will look for env // variables that start with "SPF_". 
func SetEnvPrefix(in string) { v.SetEnvPrefix(in) } + func (v *Viper) SetEnvPrefix(in string) { if in != "" { v.envPrefix = in } } +func GetEnvPrefix() string { return v.GetEnvPrefix() } + +func (v *Viper) GetEnvPrefix() string { + return v.envPrefix +} + func (v *Viper) mergeWithEnvPrefix(in string) string { if v.envPrefix != "" { return strings.ToUpper(v.envPrefix + "_" + in) @@ -437,6 +543,7 @@ func (v *Viper) mergeWithEnvPrefix(in string) string { // but empty environment variables as valid values instead of falling back. // For backward compatibility reasons this is false by default. func AllowEmptyEnv(allowEmptyEnv bool) { v.AllowEmptyEnv(allowEmptyEnv) } + func (v *Viper) AllowEmptyEnv(allowEmptyEnv bool) { v.allowEmptyEnv = allowEmptyEnv } @@ -465,10 +572,12 @@ func (v *Viper) ConfigFileUsed() string { return v.configFile } // AddConfigPath adds a path for Viper to search for the config file in. // Can be called multiple times to define multiple search paths. func AddConfigPath(in string) { v.AddConfigPath(in) } + func (v *Viper) AddConfigPath(in string) { if in != "" { - absin := absPathify(in) - jww.INFO.Println("adding", absin, "to paths to search") + absin := absPathify(v.logger, in) + + v.logger.Info("adding path to search paths", "path", absin) if !stringInSlice(absin, v.configPaths) { v.configPaths = append(v.configPaths, absin) } @@ -477,21 +586,23 @@ func (v *Viper) AddConfigPath(in string) { // AddRemoteProvider adds a remote configuration source. // Remote Providers are searched in the order they are added. -// provider is a string value: "etcd", "consul" or "firestore" are currently supported. -// endpoint is the url. etcd requires http://ip:port consul requires ip:port +// provider is a string value: "etcd", "etcd3", "consul", "firestore" or "nats" are currently supported. +// endpoint is the url. etcd requires http://ip:port, consul requires ip:port, nats requires nats://ip:port // path is the path in the k/v store to retrieve configuration // To retrieve a config file called myapp.json from /configs/myapp.json // you should set path to /configs and set config name (SetConfigName()) to -// "myapp" +// "myapp". func AddRemoteProvider(provider, endpoint, path string) error { return v.AddRemoteProvider(provider, endpoint, path) } + func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error { if !stringInSlice(provider, SupportedRemoteProviders) { return UnsupportedRemoteProviderError(provider) } if provider != "" && endpoint != "" { - jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint) + v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint) + rp := &defaultRemoteProvider{ endpoint: endpoint, provider: provider, @@ -506,14 +617,14 @@ func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error { // AddSecureRemoteProvider adds a remote configuration source. // Secure Remote Providers are searched in the order they are added. -// provider is a string value: "etcd", "consul" or "firestore" are currently supported. +// provider is a string value: "etcd", "etcd3", "consul", "firestore" or "nats" are currently supported. // endpoint is the url. etcd requires http://ip:port consul requires ip:port // secretkeyring is the filepath to your openpgp secret keyring. e.g. 
/etc/secrets/myring.gpg // path is the path in the k/v store to retrieve configuration // To retrieve a config file called myapp.json from /configs/myapp.json // you should set path to /configs and set config name (SetConfigName()) to -// "myapp" -// Secure Remote Providers are implemented with github.com/bketelsen/crypt +// "myapp". +// Secure Remote Providers are implemented with github.com/sagikazarmark/crypt. func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring) } @@ -523,7 +634,8 @@ func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring return UnsupportedRemoteProviderError(provider) } if provider != "" && endpoint != "" { - jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint) + v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint) + rp := &defaultRemoteProvider{ endpoint: endpoint, provider: provider, @@ -549,7 +661,7 @@ func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool { // searchMap recursively searches for a value for path in source map. // Returns nil if not found. // Note: This assumes that the path entries and map keys are lower cased. -func (v *Viper) searchMap(source map[string]interface{}, path []string) interface{} { +func (v *Viper) searchMap(source map[string]any, path []string) any { if len(path) == 0 { return source } @@ -562,13 +674,13 @@ func (v *Viper) searchMap(source map[string]interface{}, path []string) interfac } // Nested case - switch next.(type) { - case map[interface{}]interface{}: + switch next := next.(type) { + case map[any]any: return v.searchMap(cast.ToStringMap(next), path[1:]) - case map[string]interface{}: + case map[string]any: // Type assertion is safe here since it is only reached // if the type of `next` is the same as the type being asserted - return v.searchMap(next.(map[string]interface{}), path[1:]) + return v.searchMap(next, path[1:]) default: // got a value but nested key expected, return "nil" for not found return nil @@ -577,9 +689,9 @@ func (v *Viper) searchMap(source map[string]interface{}, path []string) interfac return nil } -// searchMapWithPathPrefixes recursively searches for a value for path in source map. +// searchIndexableWithPathPrefixes recursively searches for a value for path in source map/slice. // -// While searchMap() considers each path element as a single map key, this +// While searchMap() considers each path element as a single map key or slice index, this // function searches for, and prioritizes, merged path elements. // e.g., if in the source, "foo" is defined with a sub-key "bar", and "foo.bar" // is also defined, this latter value is returned for path ["foo", "bar"]. @@ -588,7 +700,7 @@ func (v *Viper) searchMap(source map[string]interface{}, path []string) interfac // in their keys). // // Note: This assumes that the path entries and map keys are lower cased. 
-func (v *Viper) searchMapWithPathPrefixes(source map[string]interface{}, path []string) interface{} { +func (v *Viper) searchIndexableWithPathPrefixes(source any, path []string) any { if len(path) == 0 { return source } @@ -597,29 +709,86 @@ func (v *Viper) searchMapWithPathPrefixes(source map[string]interface{}, path [] for i := len(path); i > 0; i-- { prefixKey := strings.ToLower(strings.Join(path[0:i], v.keyDelim)) - next, ok := source[prefixKey] - if ok { - // Fast path - if i == len(path) { - return next - } - - // Nested case - var val interface{} - switch next.(type) { - case map[interface{}]interface{}: - val = v.searchMapWithPathPrefixes(cast.ToStringMap(next), path[i:]) - case map[string]interface{}: - // Type assertion is safe here since it is only reached - // if the type of `next` is the same as the type being asserted - val = v.searchMapWithPathPrefixes(next.(map[string]interface{}), path[i:]) - default: - // got a value but nested key expected, do nothing and look for next prefix - } - if val != nil { - return val - } + var val any + switch sourceIndexable := source.(type) { + case []any: + val = v.searchSliceWithPathPrefixes(sourceIndexable, prefixKey, i, path) + case map[string]any: + val = v.searchMapWithPathPrefixes(sourceIndexable, prefixKey, i, path) } + if val != nil { + return val + } + } + + // not found + return nil +} + +// searchSliceWithPathPrefixes searches for a value for path in sourceSlice +// +// This function is part of the searchIndexableWithPathPrefixes recurring search and +// should not be called directly from functions other than searchIndexableWithPathPrefixes. +func (v *Viper) searchSliceWithPathPrefixes( + sourceSlice []any, + prefixKey string, + pathIndex int, + path []string, +) any { + // if the prefixKey is not a number or it is out of bounds of the slice + index, err := strconv.Atoi(prefixKey) + if err != nil || len(sourceSlice) <= index { + return nil + } + + next := sourceSlice[index] + + // Fast path + if pathIndex == len(path) { + return next + } + + switch n := next.(type) { + case map[any]any: + return v.searchIndexableWithPathPrefixes(cast.ToStringMap(n), path[pathIndex:]) + case map[string]any, []any: + return v.searchIndexableWithPathPrefixes(n, path[pathIndex:]) + default: + // got a value but nested key expected, do nothing and look for next prefix + } + + // not found + return nil +} + +// searchMapWithPathPrefixes searches for a value for path in sourceMap +// +// This function is part of the searchIndexableWithPathPrefixes recurring search and +// should not be called directly from functions other than searchIndexableWithPathPrefixes. +func (v *Viper) searchMapWithPathPrefixes( + sourceMap map[string]any, + prefixKey string, + pathIndex int, + path []string, +) any { + next, ok := sourceMap[prefixKey] + if !ok { + return nil + } + + // Fast path + if pathIndex == len(path) { + return next + } + + // Nested case + switch n := next.(type) { + case map[any]any: + return v.searchIndexableWithPathPrefixes(cast.ToStringMap(n), path[pathIndex:]) + case map[string]any, []any: + return v.searchIndexableWithPathPrefixes(n, path[pathIndex:]) + default: + // got a value but nested key expected, do nothing and look for next prefix } // not found @@ -629,9 +798,10 @@ func (v *Viper) searchMapWithPathPrefixes(source map[string]interface{}, path [] // isPathShadowedInDeepMap makes sure the given path is not shadowed somewhere // on its path in the map. 
// e.g., if "foo.bar" has a value in the given map, it “shadows” -// "foo.bar.baz" in a lower-priority map -func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]interface{}) string { - var parentVal interface{} +// +// "foo.bar.baz" in a lower-priority map +func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]any) string { + var parentVal any for i := 1; i < len(path); i++ { parentVal = v.searchMap(m, path[0:i]) if parentVal == nil { @@ -639,9 +809,9 @@ func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]interface{}) return "" } switch parentVal.(type) { - case map[interface{}]interface{}: + case map[any]any: continue - case map[string]interface{}: + case map[string]any: continue default: // parentVal is a regular value which shadows "path" @@ -654,13 +824,16 @@ func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]interface{}) // isPathShadowedInFlatMap makes sure the given path is not shadowed somewhere // in a sub-path of the map. // e.g., if "foo.bar" has a value in the given map, it “shadows” -// "foo.bar.baz" in a lower-priority map -func (v *Viper) isPathShadowedInFlatMap(path []string, mi interface{}) string { +// +// "foo.bar.baz" in a lower-priority map +func (v *Viper) isPathShadowedInFlatMap(path []string, mi any) string { // unify input map var m map[string]interface{} - switch mi.(type) { - case map[string]string, map[string]FlagValue: - m = cast.ToStringMap(mi) + switch miv := mi.(type) { + case map[string]string: + m = castMapStringToMapInterface(miv) + case map[string]FlagValue: + m = castMapFlagToMapInterface(miv) default: return "" } @@ -679,7 +852,8 @@ func (v *Viper) isPathShadowedInFlatMap(path []string, mi interface{}) string { // isPathShadowedInAutoEnv makes sure the given path is not shadowed somewhere // in the environment, when automatic env is on. // e.g., if "foo.bar" has a value in the environment, it “shadows” -// "foo.bar.baz" in a lower-priority map +// +// "foo.bar.baz" in a lower-priority map func (v *Viper) isPathShadowedInAutoEnv(path []string) string { var parentKey string for i := 1; i < len(path); i++ { @@ -700,12 +874,13 @@ func (v *Viper) isPathShadowedInAutoEnv(path []string) string { // would return a string slice for the key if the key's type is inferred by // the default value and the Get function would return: // -// []string {"a", "b", "c"} +// []string {"a", "b", "c"} // // Otherwise the Get function would return: // -// "a b c" +// "a b c" func SetTypeByDefaultValue(enable bool) { v.SetTypeByDefaultValue(enable) } + func (v *Viper) SetTypeByDefaultValue(enable bool) { v.typeByDefValue = enable } @@ -722,8 +897,9 @@ func GetViper() *Viper { // override, flag, env, config file, key/value store, default // // Get returns an interface. For a specific value use one of the Get____ methods. -func Get(key string) interface{} { return v.Get(key) } -func (v *Viper) Get(key string) interface{} { +func Get(key string) any { return v.Get(key) } + +func (v *Viper) Get(key string) any { lcaseKey := strings.ToLower(key) val := v.find(lcaseKey, true) if val == nil { @@ -764,6 +940,8 @@ func (v *Viper) Get(key string) interface{} { return cast.ToStringSlice(val) case []int: return cast.ToIntSlice(val) + case []time.Duration: + return cast.ToDurationSlice(val) } } @@ -773,6 +951,7 @@ func (v *Viper) Get(key string) interface{} { // Sub returns new Viper instance representing a sub tree of this instance. // Sub is case-insensitive for a key. 
func Sub(key string) *Viper { return v.Sub(key) } + func (v *Viper) Sub(key string) *Viper { subv := New() data := v.Get(key) @@ -781,6 +960,11 @@ func (v *Viper) Sub(key string) *Viper { } if reflect.TypeOf(data).Kind() == reflect.Map { + subv.parents = append([]string(nil), v.parents...) + subv.parents = append(subv.parents, strings.ToLower(key)) + subv.automaticEnvApplied = v.automaticEnvApplied + subv.envPrefix = v.envPrefix + subv.envKeyReplacer = v.envKeyReplacer subv.config = cast.ToStringMap(data) return subv } @@ -789,96 +973,119 @@ func (v *Viper) Sub(key string) *Viper { // GetString returns the value associated with the key as a string. func GetString(key string) string { return v.GetString(key) } + func (v *Viper) GetString(key string) string { return cast.ToString(v.Get(key)) } // GetBool returns the value associated with the key as a boolean. func GetBool(key string) bool { return v.GetBool(key) } + func (v *Viper) GetBool(key string) bool { return cast.ToBool(v.Get(key)) } // GetInt returns the value associated with the key as an integer. func GetInt(key string) int { return v.GetInt(key) } + func (v *Viper) GetInt(key string) int { return cast.ToInt(v.Get(key)) } // GetInt32 returns the value associated with the key as an integer. func GetInt32(key string) int32 { return v.GetInt32(key) } + func (v *Viper) GetInt32(key string) int32 { return cast.ToInt32(v.Get(key)) } // GetInt64 returns the value associated with the key as an integer. func GetInt64(key string) int64 { return v.GetInt64(key) } + func (v *Viper) GetInt64(key string) int64 { return cast.ToInt64(v.Get(key)) } // GetUint returns the value associated with the key as an unsigned integer. func GetUint(key string) uint { return v.GetUint(key) } + func (v *Viper) GetUint(key string) uint { return cast.ToUint(v.Get(key)) } +// GetUint16 returns the value associated with the key as an unsigned integer. +func GetUint16(key string) uint16 { return v.GetUint16(key) } + +func (v *Viper) GetUint16(key string) uint16 { + return cast.ToUint16(v.Get(key)) +} + // GetUint32 returns the value associated with the key as an unsigned integer. func GetUint32(key string) uint32 { return v.GetUint32(key) } + func (v *Viper) GetUint32(key string) uint32 { return cast.ToUint32(v.Get(key)) } // GetUint64 returns the value associated with the key as an unsigned integer. func GetUint64(key string) uint64 { return v.GetUint64(key) } + func (v *Viper) GetUint64(key string) uint64 { return cast.ToUint64(v.Get(key)) } // GetFloat64 returns the value associated with the key as a float64. func GetFloat64(key string) float64 { return v.GetFloat64(key) } + func (v *Viper) GetFloat64(key string) float64 { return cast.ToFloat64(v.Get(key)) } // GetTime returns the value associated with the key as time. func GetTime(key string) time.Time { return v.GetTime(key) } + func (v *Viper) GetTime(key string) time.Time { return cast.ToTime(v.Get(key)) } // GetDuration returns the value associated with the key as a duration. func GetDuration(key string) time.Duration { return v.GetDuration(key) } + func (v *Viper) GetDuration(key string) time.Duration { return cast.ToDuration(v.Get(key)) } // GetIntSlice returns the value associated with the key as a slice of int values. func GetIntSlice(key string) []int { return v.GetIntSlice(key) } + func (v *Viper) GetIntSlice(key string) []int { return cast.ToIntSlice(v.Get(key)) } // GetStringSlice returns the value associated with the key as a slice of strings. 
func GetStringSlice(key string) []string { return v.GetStringSlice(key) } + func (v *Viper) GetStringSlice(key string) []string { return cast.ToStringSlice(v.Get(key)) } // GetStringMap returns the value associated with the key as a map of interfaces. -func GetStringMap(key string) map[string]interface{} { return v.GetStringMap(key) } -func (v *Viper) GetStringMap(key string) map[string]interface{} { +func GetStringMap(key string) map[string]any { return v.GetStringMap(key) } + +func (v *Viper) GetStringMap(key string) map[string]any { return cast.ToStringMap(v.Get(key)) } // GetStringMapString returns the value associated with the key as a map of strings. func GetStringMapString(key string) map[string]string { return v.GetStringMapString(key) } + func (v *Viper) GetStringMapString(key string) map[string]string { return cast.ToStringMapString(v.Get(key)) } // GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings. func GetStringMapStringSlice(key string) map[string][]string { return v.GetStringMapStringSlice(key) } + func (v *Viper) GetStringMapStringSlice(key string) map[string][]string { return cast.ToStringMapStringSlice(v.Get(key)) } @@ -886,31 +1093,65 @@ func (v *Viper) GetStringMapStringSlice(key string) map[string][]string { // GetSizeInBytes returns the size of the value associated with the given key // in bytes. func GetSizeInBytes(key string) uint { return v.GetSizeInBytes(key) } + func (v *Viper) GetSizeInBytes(key string) uint { sizeStr := cast.ToString(v.Get(key)) return parseSizeInBytes(sizeStr) } // UnmarshalKey takes a single key and unmarshals it into a Struct. -func UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error { +func UnmarshalKey(key string, rawVal any, opts ...DecoderConfigOption) error { return v.UnmarshalKey(key, rawVal, opts...) } -func (v *Viper) UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error { + +func (v *Viper) UnmarshalKey(key string, rawVal any, opts ...DecoderConfigOption) error { return decode(v.Get(key), defaultDecoderConfig(rawVal, opts...)) } // Unmarshal unmarshals the config into a Struct. Make sure that the tags // on the fields of the structure are properly set. -func Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error { +func Unmarshal(rawVal any, opts ...DecoderConfigOption) error { return v.Unmarshal(rawVal, opts...) } -func (v *Viper) Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error { - return decode(v.AllSettings(), defaultDecoderConfig(rawVal, opts...)) + +func (v *Viper) Unmarshal(rawVal any, opts ...DecoderConfigOption) error { + keys := v.AllKeys() + + if features.BindStruct { + // TODO: make this optional? + structKeys, err := v.decodeStructKeys(rawVal, opts...) + if err != nil { + return err + } + + keys = append(keys, structKeys...) + } + + // TODO: struct keys should be enough? 
+ return decode(v.getSettings(keys), defaultDecoderConfig(rawVal, opts...)) } -// defaultDecoderConfig returns default mapsstructure.DecoderConfig with suppot -// of time.Duration values & string slices -func defaultDecoderConfig(output interface{}, opts ...DecoderConfigOption) *mapstructure.DecoderConfig { +func (v *Viper) decodeStructKeys(input any, opts ...DecoderConfigOption) ([]string, error) { + var structKeyMap map[string]any + + err := decode(input, defaultDecoderConfig(&structKeyMap, opts...)) + if err != nil { + return nil, err + } + + flattenedStructKeyMap := v.flattenAndMergeMap(map[string]bool{}, structKeyMap, "") + + r := make([]string, 0, len(flattenedStructKeyMap)) + for v := range flattenedStructKeyMap { + r = append(r, v) + } + + return r, nil +} + +// defaultDecoderConfig returns default mapstructure.DecoderConfig with support +// of time.Duration values & string slices. +func defaultDecoderConfig(output any, opts ...DecoderConfigOption) *mapstructure.DecoderConfig { c := &mapstructure.DecoderConfig{ Metadata: nil, Result: output, @@ -926,8 +1167,8 @@ func defaultDecoderConfig(output interface{}, opts ...DecoderConfigOption) *maps return c } -// A wrapper around mapstructure.Decode that mimics the WeakDecode functionality -func decode(input interface{}, config *mapstructure.DecoderConfig) error { +// decode is a wrapper around mapstructure.Decode that mimics the WeakDecode functionality. +func decode(input any, config *mapstructure.DecoderConfig) error { decoder, err := mapstructure.NewDecoder(config) if err != nil { return err @@ -937,19 +1178,34 @@ func decode(input interface{}, config *mapstructure.DecoderConfig) error { // UnmarshalExact unmarshals the config into a Struct, erroring if a field is nonexistent // in the destination struct. -func UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption) error { +func UnmarshalExact(rawVal any, opts ...DecoderConfigOption) error { return v.UnmarshalExact(rawVal, opts...) } -func (v *Viper) UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption) error { + +func (v *Viper) UnmarshalExact(rawVal any, opts ...DecoderConfigOption) error { config := defaultDecoderConfig(rawVal, opts...) config.ErrorUnused = true - return decode(v.AllSettings(), config) + keys := v.AllKeys() + + if features.BindStruct { + // TODO: make this optional? + structKeys, err := v.decodeStructKeys(rawVal, opts...) + if err != nil { + return err + } + + keys = append(keys, structKeys...) + } + + // TODO: struct keys should be enough? + return decode(v.getSettings(keys), config) } // BindPFlags binds a full flag set to the configuration, using each flag's long // name as the config key. func BindPFlags(flags *pflag.FlagSet) error { return v.BindPFlags(flags) } + func (v *Viper) BindPFlags(flags *pflag.FlagSet) error { return v.BindFlagValues(pflagValueSet{flags}) } @@ -957,17 +1213,21 @@ func (v *Viper) BindPFlags(flags *pflag.FlagSet) error { // BindPFlag binds a specific key to a pflag (as used by cobra). 
// Example (where serverCmd is a Cobra instance): // -// serverCmd.Flags().Int("port", 1138, "Port to run Application server on") -// Viper.BindPFlag("port", serverCmd.Flags().Lookup("port")) -// +// serverCmd.Flags().Int("port", 1138, "Port to run Application server on") +// Viper.BindPFlag("port", serverCmd.Flags().Lookup("port")) func BindPFlag(key string, flag *pflag.Flag) error { return v.BindPFlag(key, flag) } + func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error { + if flag == nil { + return fmt.Errorf("flag for %q is nil", key) + } return v.BindFlagValue(key, pflagValue{flag}) } // BindFlagValues binds a full FlagValue set to the configuration, using each flag's long // name as the config key. func BindFlagValues(flags FlagValueSet) error { return v.BindFlagValues(flags) } + func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) { flags.VisitAll(func(flag FlagValue) { if err = v.BindFlagValue(flag.Name(), flag); err != nil { @@ -979,6 +1239,7 @@ func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) { // BindFlagValue binds a specific key to a FlagValue. func BindFlagValue(key string, flag FlagValue) error { return v.BindFlagValue(key, flag) } + func (v *Viper) BindFlagValue(key string, flag FlagValue) error { if flag == nil { return fmt.Errorf("flag for %q is nil", key) @@ -990,27 +1251,38 @@ func (v *Viper) BindFlagValue(key string, flag FlagValue) error { // BindEnv binds a Viper key to a ENV variable. // ENV variables are case sensitive. // If only a key is provided, it will use the env key matching the key, uppercased. +// If more arguments are provided, they will represent the env variable names that +// should bind to this key and will be taken in the specified order. // EnvPrefix will be used when set when env name is not provided. func BindEnv(input ...string) error { return v.BindEnv(input...) } + func (v *Viper) BindEnv(input ...string) error { - var key, envkey string if len(input) == 0 { return fmt.Errorf("missing key to bind to") } - key = strings.ToLower(input[0]) + key := strings.ToLower(input[0]) if len(input) == 1 { - envkey = v.mergeWithEnvPrefix(key) + v.env[key] = append(v.env[key], v.mergeWithEnvPrefix(key)) } else { - envkey = input[1] + v.env[key] = append(v.env[key], input[1:]...) } - v.env[key] = envkey - return nil } +// MustBindEnv wraps BindEnv in a panic. +// If there is an error binding an environment variable, MustBindEnv will +// panic. +func MustBindEnv(input ...string) { v.MustBindEnv(input...) } + +func (v *Viper) MustBindEnv(input ...string) { + if err := v.BindEnv(input...); err != nil { + panic(fmt.Sprintf("error while binding environment variable: %v", err)) + } +} + // Given a key, find the value. // // Viper will check to see if an alias exists first. @@ -1020,9 +1292,9 @@ func (v *Viper) BindEnv(input ...string) error { // corresponds to a flag, the flag's default value is returned. // // Note: this assumes a lower-cased key given. 
-func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} { +func (v *Viper) find(lcaseKey string, flagDefault bool) any { var ( - val interface{} + val any exists bool path = strings.Split(lcaseKey, v.keyDelim) nested = len(path) > 1 @@ -1055,7 +1327,7 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} { return cast.ToInt(flag.ValueString()) case "bool": return cast.ToBool(flag.ValueString()) - case "stringSlice": + case "stringSlice", "stringArray": s := strings.TrimPrefix(flag.ValueString(), "[") s = strings.TrimSuffix(s, "]") res, _ := readAsCSV(s) @@ -1065,8 +1337,15 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} { s = strings.TrimSuffix(s, "]") res, _ := readAsCSV(s) return cast.ToIntSlice(res) + case "durationSlice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + slice := strings.Split(s, ",") + return cast.ToDurationSlice(slice) case "stringToString": return stringToStringConv(flag.ValueString()) + case "stringToInt": + return stringToIntConv(flag.ValueString()) default: return flag.ValueString() } @@ -1077,19 +1356,22 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} { // Env override next if v.automaticEnvApplied { + envKey := strings.Join(append(v.parents, lcaseKey), ".") // even if it hasn't been registered, if automaticEnv is used, // check any Get request - if val, ok := v.getEnv(v.mergeWithEnvPrefix(lcaseKey)); ok { + if val, ok := v.getEnv(v.mergeWithEnvPrefix(envKey)); ok { return val } if nested && v.isPathShadowedInAutoEnv(path) != "" { return nil } } - envkey, exists := v.env[lcaseKey] + envkeys, exists := v.env[lcaseKey] if exists { - if val, ok := v.getEnv(envkey); ok { - return val + for _, envkey := range envkeys { + if val, ok := v.getEnv(envkey); ok { + return val + } } } if nested && v.isPathShadowedInFlatMap(path, v.env) != "" { @@ -1097,7 +1379,7 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} { } // Config file next - val = v.searchMapWithPathPrefixes(v.config, path) + val = v.searchIndexableWithPathPrefixes(v.config, path) if val != nil { return val } @@ -1132,7 +1414,7 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} { return cast.ToInt(flag.ValueString()) case "bool": return cast.ToBool(flag.ValueString()) - case "stringSlice": + case "stringSlice", "stringArray": s := strings.TrimPrefix(flag.ValueString(), "[") s = strings.TrimSuffix(s, "]") res, _ := readAsCSV(s) @@ -1144,6 +1426,13 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} { return cast.ToIntSlice(res) case "stringToString": return stringToStringConv(flag.ValueString()) + case "stringToInt": + return stringToIntConv(flag.ValueString()) + case "durationSlice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + slice := strings.Split(s, ",") + return cast.ToDurationSlice(slice) default: return flag.ValueString() } @@ -1164,25 +1453,49 @@ func readAsCSV(val string) ([]string, error) { } // mostly copied from pflag's implementation of this operation here https://github.com/spf13/pflag/blob/master/string_to_string.go#L79 -// alterations are: errors are swallowed, map[string]interface{} is returned in order to enable cast.ToStringMap -func stringToStringConv(val string) interface{} { +// alterations are: errors are swallowed, map[string]any is returned in order to enable cast.ToStringMap. 
+func stringToStringConv(val string) any { val = strings.Trim(val, "[]") // An empty string would cause an empty map - if len(val) == 0 { - return map[string]interface{}{} + if val == "" { + return map[string]any{} } r := csv.NewReader(strings.NewReader(val)) ss, err := r.Read() if err != nil { return nil } - out := make(map[string]interface{}, len(ss)) + out := make(map[string]any, len(ss)) for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { + k, vv, found := strings.Cut(pair, "=") + if !found { + return nil + } + out[k] = vv + } + return out +} + +// mostly copied from pflag's implementation of this operation here https://github.com/spf13/pflag/blob/d5e0c0615acee7028e1e2740a11102313be88de1/string_to_int.go#L68 +// alterations are: errors are swallowed, map[string]any is returned in order to enable cast.ToStringMap. +func stringToIntConv(val string) any { + val = strings.Trim(val, "[]") + // An empty string would cause an empty map + if val == "" { + return map[string]any{} + } + ss := strings.Split(val, ",") + out := make(map[string]any, len(ss)) + for _, pair := range ss { + k, vv, found := strings.Cut(pair, "=") + if !found { + return nil + } + var err error + out[k], err = strconv.Atoi(vv) + if err != nil { return nil } - out[kv[0]] = kv[1] } return out } @@ -1190,15 +1503,17 @@ func stringToStringConv(val string) interface{} { // IsSet checks to see if the key has been set in any of the data locations. // IsSet is case-insensitive for a key. func IsSet(key string) bool { return v.IsSet(key) } + func (v *Viper) IsSet(key string) bool { lcaseKey := strings.ToLower(key) val := v.find(lcaseKey, false) return val != nil } -// AutomaticEnv has Viper check ENV variables for all. -// keys set in config, default & flags +// AutomaticEnv makes Viper check if environment variables match any of the existing keys +// (config, default or flags). If matching env vars are found, they are loaded into Viper. func AutomaticEnv() { v.AutomaticEnv() } + func (v *Viper) AutomaticEnv() { v.automaticEnvApplied = true } @@ -1207,18 +1522,20 @@ func (v *Viper) AutomaticEnv() { // Useful for mapping an environmental variable to a key that does // not match it. func SetEnvKeyReplacer(r *strings.Replacer) { v.SetEnvKeyReplacer(r) } + func (v *Viper) SetEnvKeyReplacer(r *strings.Replacer) { v.envKeyReplacer = r } // RegisterAlias creates an alias that provides another accessor for the same key. // This enables one to change a name without breaking the application. 
-func RegisterAlias(alias string, key string) { v.RegisterAlias(alias, key) } -func (v *Viper) RegisterAlias(alias string, key string) { +func RegisterAlias(alias, key string) { v.RegisterAlias(alias, key) } + +func (v *Viper) RegisterAlias(alias, key string) { v.registerAlias(alias, strings.ToLower(key)) } -func (v *Viper) registerAlias(alias string, key string) { +func (v *Viper) registerAlias(alias, key string) { alias = strings.ToLower(alias) if alias != key && alias != v.realKey(key) { _, exists := v.aliases[alias] @@ -1246,14 +1563,15 @@ func (v *Viper) registerAlias(alias string, key string) { v.aliases[alias] = key } } else { - jww.WARN.Println("Creating circular reference alias", alias, key, v.realKey(key)) + v.logger.Warn("creating circular reference alias", "alias", alias, "key", key, "real_key", v.realKey(key)) } } func (v *Viper) realKey(key string) string { newkey, exists := v.aliases[key] if exists { - jww.DEBUG.Println("Alias", key, "to", newkey) + v.logger.Debug("key is an alias", "alias", key, "to", newkey) + return v.realKey(newkey) } return key @@ -1261,19 +1579,23 @@ func (v *Viper) realKey(key string) string { // InConfig checks to see if the given key (or an alias) is in the config file. func InConfig(key string) bool { return v.InConfig(key) } -func (v *Viper) InConfig(key string) bool { - // if the requested key is an alias, then return the proper key - key = v.realKey(key) - _, exists := v.config[key] - return exists +func (v *Viper) InConfig(key string) bool { + lcaseKey := strings.ToLower(key) + + // if the requested key is an alias, then return the proper key + lcaseKey = v.realKey(lcaseKey) + path := strings.Split(lcaseKey, v.keyDelim) + + return v.searchIndexableWithPathPrefixes(v.config, path) != nil } // SetDefault sets the default value for this key. // SetDefault is case-insensitive for a key. // Default only used when no value is provided by the user via flag, config or ENV. -func SetDefault(key string, value interface{}) { v.SetDefault(key, value) } -func (v *Viper) SetDefault(key string, value interface{}) { +func SetDefault(key string, value any) { v.SetDefault(key, value) } + +func (v *Viper) SetDefault(key string, value any) { // If alias passed in, then set the proper default key = v.realKey(strings.ToLower(key)) value = toCaseInsensitiveValue(value) @@ -1290,8 +1612,9 @@ func (v *Viper) SetDefault(key string, value interface{}) { // Set is case-insensitive for a key. // Will be used instead of values obtained via // flags, config file, ENV, default, or key/value store. -func Set(key string, value interface{}) { v.Set(key, value) } -func (v *Viper) Set(key string, value interface{}) { +func Set(key string, value any) { v.Set(key, value) } + +func (v *Viper) Set(key string, value any) { // If alias passed in, then set the proper override key = v.realKey(strings.ToLower(key)) value = toCaseInsensitiveValue(value) @@ -1307,8 +1630,9 @@ func (v *Viper) Set(key string, value interface{}) { // ReadInConfig will discover and load the configuration file from disk // and key/value stores, searching in one of the defined paths. 
func ReadInConfig() error { return v.ReadInConfig() } + func (v *Viper) ReadInConfig() error { - jww.INFO.Println("Attempting to read in config file") + v.logger.Info("attempting to read in config file") filename, err := v.getConfigFile() if err != nil { return err @@ -1318,13 +1642,13 @@ func (v *Viper) ReadInConfig() error { return UnsupportedConfigError(v.getConfigType()) } - jww.DEBUG.Println("Reading file: ", filename) + v.logger.Debug("reading file", "file", filename) file, err := afero.ReadFile(v.fs, filename) if err != nil { return err } - config := make(map[string]interface{}) + config := make(map[string]any) err = v.unmarshalReader(bytes.NewReader(file), config) if err != nil { @@ -1337,8 +1661,9 @@ func (v *Viper) ReadInConfig() error { // MergeInConfig merges a new configuration with an existing config. func MergeInConfig() error { return v.MergeInConfig() } + func (v *Viper) MergeInConfig() error { - jww.INFO.Println("Attempting to merge in config file") + v.logger.Info("attempting to merge in config file") filename, err := v.getConfigFile() if err != nil { return err @@ -1359,15 +1684,17 @@ func (v *Viper) MergeInConfig() error { // ReadConfig will read a configuration file, setting existing keys to nil if the // key does not exist in the file. func ReadConfig(in io.Reader) error { return v.ReadConfig(in) } + func (v *Viper) ReadConfig(in io.Reader) error { - v.config = make(map[string]interface{}) + v.config = make(map[string]any) return v.unmarshalReader(in, v.config) } // MergeConfig merges a new configuration with an existing config. func MergeConfig(in io.Reader) error { return v.MergeConfig(in) } + func (v *Viper) MergeConfig(in io.Reader) error { - cfg := make(map[string]interface{}) + cfg := make(map[string]any) if err := v.unmarshalReader(in, cfg); err != nil { return err } @@ -1376,10 +1703,11 @@ func (v *Viper) MergeConfig(in io.Reader) error { // MergeConfigMap merges the configuration from the map given with an existing config. // Note that the map given may be modified. -func MergeConfigMap(cfg map[string]interface{}) error { return v.MergeConfigMap(cfg) } -func (v *Viper) MergeConfigMap(cfg map[string]interface{}) error { +func MergeConfigMap(cfg map[string]any) error { return v.MergeConfigMap(cfg) } + +func (v *Viper) MergeConfigMap(cfg map[string]any) error { if v.config == nil { - v.config = make(map[string]interface{}) + v.config = make(map[string]any) } insensitiviseMap(cfg) mergeMaps(cfg, v.config, nil) @@ -1388,6 +1716,7 @@ func (v *Viper) MergeConfigMap(cfg map[string]interface{}) error { // WriteConfig writes the current configuration to a file. func WriteConfig() error { return v.WriteConfig() } + func (v *Viper) WriteConfig() error { filename, err := v.getConfigFile() if err != nil { @@ -1398,6 +1727,7 @@ func (v *Viper) WriteConfig() error { // SafeWriteConfig writes current configuration to file only if the file does not exist. func SafeWriteConfig() error { return v.SafeWriteConfig() } + func (v *Viper) SafeWriteConfig() error { if len(v.configPaths) < 1 { return errors.New("missing configuration for 'configPath'") @@ -1407,12 +1737,14 @@ func (v *Viper) SafeWriteConfig() error { // WriteConfigAs writes current configuration to a given filename. func WriteConfigAs(filename string) error { return v.WriteConfigAs(filename) } + func (v *Viper) WriteConfigAs(filename string) error { return v.writeConfig(filename, true) } // SafeWriteConfigAs writes current configuration to a given filename if it does not exist. 
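ReadConfig replaces the in-memory config from a reader, while the Merge* variants layer on top of what is already loaded. A sketch of the difference (YAML chosen arbitrarily):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.SetConfigType("yaml") // ReadConfig needs the format up front

	base := []byte("app:\n  name: demo\n  port: 8080\n")
	if err := v.ReadConfig(bytes.NewReader(base)); err != nil {
		panic(err)
	}

	// MergeConfigMap layers new values without discarding existing ones.
	if err := v.MergeConfigMap(map[string]any{
		"app": map[string]any{"port": 9090},
	}); err != nil {
		panic(err)
	}

	fmt.Println(v.GetString("app.name"), v.GetInt("app.port")) // demo 9090
}
```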
func SafeWriteConfigAs(filename string) error { return v.SafeWriteConfigAs(filename) } + func (v *Viper) SafeWriteConfigAs(filename string) error { alreadyExists, err := afero.Exists(v.fs, filename) if alreadyExists && err == nil { @@ -1422,11 +1754,12 @@ func (v *Viper) SafeWriteConfigAs(filename string) error { } func (v *Viper) writeConfig(filename string, force bool) error { - jww.INFO.Println("Attempting to write configuration to file.") + v.logger.Info("attempting to write configuration to file") + var configType string ext := filepath.Ext(filename) - if ext != "" { + if ext != "" && ext != filepath.Base(filename) { configType = ext[1:] } else { configType = v.configType @@ -1439,7 +1772,7 @@ func (v *Viper) writeConfig(filename string, force bool) error { return UnsupportedConfigError(configType) } if v.config == nil { - v.config = make(map[string]interface{}) + v.config = make(map[string]any) } flags := os.O_CREATE | os.O_TRUNC | os.O_WRONLY if !force { @@ -1458,86 +1791,16 @@ func (v *Viper) writeConfig(filename string, force bool) error { return f.Sync() } -// Unmarshal a Reader into a map. -// Should probably be an unexported function. -func unmarshalReader(in io.Reader, c map[string]interface{}) error { - return v.unmarshalReader(in, c) -} -func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error { +func (v *Viper) unmarshalReader(in io.Reader, c map[string]any) error { buf := new(bytes.Buffer) buf.ReadFrom(in) - switch strings.ToLower(v.getConfigType()) { - case "yaml", "yml": - if err := yaml.Unmarshal(buf.Bytes(), &c); err != nil { - return ConfigParseError{err} - } - - case "json": - if err := json.Unmarshal(buf.Bytes(), &c); err != nil { - return ConfigParseError{err} - } - - case "hcl": - obj, err := hcl.Parse(buf.String()) + switch format := strings.ToLower(v.getConfigType()); format { + case "yaml", "yml", "json", "toml", "hcl", "tfvars", "ini", "properties", "props", "prop", "dotenv", "env": + err := v.decoderRegistry.Decode(format, buf.Bytes(), c) if err != nil { return ConfigParseError{err} } - if err = hcl.DecodeObject(&c, obj); err != nil { - return ConfigParseError{err} - } - - case "toml": - tree, err := toml.LoadReader(buf) - if err != nil { - return ConfigParseError{err} - } - tmap := tree.ToMap() - for k, v := range tmap { - c[k] = v - } - - case "dotenv", "env": - env, err := gotenv.StrictParse(buf) - if err != nil { - return ConfigParseError{err} - } - for k, v := range env { - c[k] = v - } - - case "properties", "props", "prop": - v.properties = properties.NewProperties() - var err error - if v.properties, err = properties.Load(buf.Bytes(), properties.UTF8); err != nil { - return ConfigParseError{err} - } - for _, key := range v.properties.Keys() { - value, _ := v.properties.Get(key) - // recursively build nested maps - path := strings.Split(key, ".") - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(c, path[0:len(path)-1]) - // set innermost value - deepestMap[lastKey] = value - } - - case "ini": - cfg := ini.Empty() - err := cfg.Append(buf.Bytes()) - if err != nil { - return ConfigParseError{err} - } - sections := cfg.Sections() - for i := 0; i < len(sections); i++ { - section := sections[i] - keys := section.Keys() - for j := 0; j < len(keys); j++ { - key := keys[j] - value := cfg.Section(section.Name()).Key(key.Name()).String() - c[section.Name()+"."+key.Name()] = value - } - } } insensitiviseMap(c) @@ -1548,97 +1811,21 @@ func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error { func 
(v *Viper) marshalWriter(f afero.File, configType string) error { c := v.AllSettings() switch configType { - case "json": - b, err := json.MarshalIndent(c, "", " ") + case "yaml", "yml", "json", "toml", "hcl", "tfvars", "ini", "prop", "props", "properties", "dotenv", "env": + b, err := v.encoderRegistry.Encode(configType, c) if err != nil { return ConfigMarshalError{err} } + _, err = f.WriteString(string(b)) if err != nil { return ConfigMarshalError{err} } - - case "hcl": - b, err := json.Marshal(c) - if err != nil { - return ConfigMarshalError{err} - } - ast, err := hcl.Parse(string(b)) - if err != nil { - return ConfigMarshalError{err} - } - err = printer.Fprint(f, ast.Node) - if err != nil { - return ConfigMarshalError{err} - } - - case "prop", "props", "properties": - if v.properties == nil { - v.properties = properties.NewProperties() - } - p := v.properties - for _, key := range v.AllKeys() { - _, _, err := p.Set(key, v.GetString(key)) - if err != nil { - return ConfigMarshalError{err} - } - } - _, err := p.WriteComment(f, "#", properties.UTF8) - if err != nil { - return ConfigMarshalError{err} - } - - case "dotenv", "env": - lines := []string{} - for _, key := range v.AllKeys() { - envName := strings.ToUpper(strings.Replace(key, ".", "_", -1)) - val := v.Get(key) - lines = append(lines, fmt.Sprintf("%v=%v", envName, val)) - } - s := strings.Join(lines, "\n") - if _, err := f.WriteString(s); err != nil { - return ConfigMarshalError{err} - } - - case "toml": - t, err := toml.TreeFromMap(c) - if err != nil { - return ConfigMarshalError{err} - } - s := t.String() - if _, err := f.WriteString(s); err != nil { - return ConfigMarshalError{err} - } - - case "yaml", "yml": - b, err := yaml.Marshal(c) - if err != nil { - return ConfigMarshalError{err} - } - if _, err = f.WriteString(string(b)); err != nil { - return ConfigMarshalError{err} - } - - case "ini": - keys := v.AllKeys() - cfg := ini.Empty() - ini.PrettyFormat = false - for i := 0; i < len(keys); i++ { - key := keys[i] - lastSep := strings.LastIndex(key, ".") - sectionName := key[:(lastSep)] - keyName := key[(lastSep + 1):] - if sectionName == "default" { - sectionName = "" - } - cfg.Section(sectionName).Key(keyName).SetValue(v.Get(key).(string)) - } - cfg.WriteTo(f) } return nil } -func keyExists(k string, m map[string]interface{}) string { +func keyExists(k string, m map[string]any) string { lk := strings.ToLower(k) for mk := range m { lmk := strings.ToLower(mk) @@ -1650,24 +1837,33 @@ func keyExists(k string, m map[string]interface{}) string { } func castToMapStringInterface( - src map[interface{}]interface{}) map[string]interface{} { - tgt := map[string]interface{}{} + src map[any]any, +) map[string]any { + tgt := map[string]any{} for k, v := range src { tgt[fmt.Sprintf("%v", k)] = v } return tgt } -func castMapStringToMapInterface(src map[string]string) map[string]interface{} { - tgt := map[string]interface{}{} +func castMapStringSliceToMapInterface(src map[string][]string) map[string]any { + tgt := map[string]any{} for k, v := range src { tgt[k] = v } return tgt } -func castMapFlagToMapInterface(src map[string]FlagValue) map[string]interface{} { - tgt := map[string]interface{}{} +func castMapStringToMapInterface(src map[string]string) map[string]any { + tgt := map[string]any{} + for k, v := range src { + tgt[k] = v + } + return tgt +} + +func castMapFlagToMapInterface(src map[string]FlagValue) map[string]any { + tgt := map[string]any{} for k, v := range src { tgt[k] = v } @@ -1675,16 +1871,15 @@ func 
castMapFlagToMapInterface(src map[string]FlagValue) map[string]interface{} } // mergeMaps merges two maps. The `itgt` parameter is for handling go-yaml's -// insistence on parsing nested structures as `map[interface{}]interface{}` +// insistence on parsing nested structures as `map[any]any` // instead of using a `string` as the key for nest structures beyond one level // deep. Both map types are supported as there is a go-yaml fork that uses -// `map[string]interface{}` instead. -func mergeMaps( - src, tgt map[string]interface{}, itgt map[interface{}]interface{}) { +// `map[string]any` instead. +func mergeMaps(src, tgt map[string]any, itgt map[any]any) { for sk, sv := range src { tk := keyExists(sk, tgt) if tk == "" { - jww.TRACE.Printf("tk=\"\", tgt[%s]=%v", sk, sv) + v.logger.Debug("", "tk", "\"\"", fmt.Sprintf("tgt[%s]", sk), sv) tgt[sk] = sv if itgt != nil { itgt[sk] = sv @@ -1694,7 +1889,7 @@ func mergeMaps( tv, ok := tgt[tk] if !ok { - jww.TRACE.Printf("tgt[%s] != ok, tgt[%s]=%v", tk, sk, sv) + v.logger.Debug("", fmt.Sprintf("ok[%s]", tk), false, fmt.Sprintf("tgt[%s]", sk), sv) tgt[sk] = sv if itgt != nil { itgt[sk] = sv @@ -1704,28 +1899,52 @@ func mergeMaps( svType := reflect.TypeOf(sv) tvType := reflect.TypeOf(tv) - if svType != tvType { - jww.ERROR.Printf( - "svType != tvType; key=%s, st=%v, tt=%v, sv=%v, tv=%v", - sk, svType, tvType, sv, tv) - continue - } - jww.TRACE.Printf("processing key=%s, st=%v, tt=%v, sv=%v, tv=%v", - sk, svType, tvType, sv, tv) + v.logger.Debug( + "processing", + "key", sk, + "st", svType, + "tt", tvType, + "sv", sv, + "tv", tv, + ) switch ttv := tv.(type) { - case map[interface{}]interface{}: - jww.TRACE.Printf("merging maps (must convert)") - tsv := sv.(map[interface{}]interface{}) + case map[any]any: + v.logger.Debug("merging maps (must convert)") + tsv, ok := sv.(map[any]any) + if !ok { + v.logger.Error( + "Could not cast sv to map[any]any", + "key", sk, + "st", svType, + "tt", tvType, + "sv", sv, + "tv", tv, + ) + continue + } + ssv := castToMapStringInterface(tsv) stv := castToMapStringInterface(ttv) mergeMaps(ssv, stv, ttv) - case map[string]interface{}: - jww.TRACE.Printf("merging maps") - mergeMaps(sv.(map[string]interface{}), ttv, nil) + case map[string]any: + v.logger.Debug("merging maps") + tsv, ok := sv.(map[string]any) + if !ok { + v.logger.Error( + "Could not cast sv to map[string]any", + "key", sk, + "st", svType, + "tt", tvType, + "sv", sv, + "tv", tv, + ) + continue + } + mergeMaps(tsv, ttv, nil) default: - jww.TRACE.Printf("setting value") + v.logger.Debug("setting value") tgt[tk] = sv if itgt != nil { itgt[tk] = sv @@ -1737,6 +1956,7 @@ func mergeMaps( // ReadRemoteConfig attempts to get configuration from a remote source // and read it in the remote configuration registry. 
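The map[any]any branches above exist because go-yaml v2 decodes nested mappings with interface{} keys. A stand-alone sketch of the key-stringifying cast (toStringKeyed is a hypothetical name):

```go
package main

import "fmt"

// toStringKeyed mirrors castToMapStringInterface above: every key is
// rendered with %v so the two map shapes can be merged uniformly.
func toStringKeyed(src map[any]any) map[string]any {
	tgt := make(map[string]any, len(src))
	for k, v := range src {
		tgt[fmt.Sprintf("%v", k)] = v
	}
	return tgt
}

func main() {
	nested := map[any]any{"port": 8080, 42: "numeric key"}
	fmt.Println(toStringKeyed(nested)) // map[42:numeric key port:8080]
}
```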
func ReadRemoteConfig() error { return v.ReadRemoteConfig() } + func (v *Viper) ReadRemoteConfig() error { return v.getKeyValueConfig() } @@ -1756,18 +1976,26 @@ func (v *Viper) getKeyValueConfig() error { return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'") } + if len(v.remoteProviders) == 0 { + return RemoteConfigError("No Remote Providers") + } + for _, rp := range v.remoteProviders { val, err := v.getRemoteConfig(rp) if err != nil { + v.logger.Error(fmt.Errorf("get remote config: %w", err).Error()) + continue } + v.kvstore = val + return nil } return RemoteConfigError("No Files Found") } -func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) { +func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]any, error) { reader, err := RemoteConfig.Get(provider) if err != nil { return nil, err @@ -1778,6 +2006,10 @@ func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]interface{} // Retrieve the first found remote configuration. func (v *Viper) watchKeyValueConfigOnChannel() error { + if len(v.remoteProviders) == 0 { + return RemoteConfigError("No Remote Providers") + } + for _, rp := range v.remoteProviders { respc, _ := RemoteConfig.WatchChannel(rp) // Todo: Add quit channel @@ -1795,9 +2027,15 @@ func (v *Viper) watchKeyValueConfigOnChannel() error { // Retrieve the first found remote configuration. func (v *Viper) watchKeyValueConfig() error { + if len(v.remoteProviders) == 0 { + return RemoteConfigError("No Remote Providers") + } + for _, rp := range v.remoteProviders { val, err := v.watchRemoteConfig(rp) if err != nil { + v.logger.Error(fmt.Errorf("watch remote config: %w", err).Error()) + continue } v.kvstore = val @@ -1806,7 +2044,7 @@ func (v *Viper) watchKeyValueConfig() error { return RemoteConfigError("No Files Found") } -func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) { +func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]any, error) { reader, err := RemoteConfig.Watch(provider) if err != nil { return nil, err @@ -1816,15 +2054,16 @@ func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]interface } // AllKeys returns all keys holding a value, regardless of where they are set. -// Nested keys are returned with a v.keyDelim separator +// Nested keys are returned with a v.keyDelim separator. func AllKeys() []string { return v.AllKeys() } + func (v *Viper) AllKeys() []string { m := map[string]bool{} // add all paths, by order of descending priority to ensure correct shadowing m = v.flattenAndMergeMap(m, castMapStringToMapInterface(v.aliases), "") m = v.flattenAndMergeMap(m, v.override, "") m = v.mergeFlatMap(m, castMapFlagToMapInterface(v.pflags)) - m = v.mergeFlatMap(m, castMapStringToMapInterface(v.env)) + m = v.mergeFlatMap(m, castMapStringSliceToMapInterface(v.env)) m = v.flattenAndMergeMap(m, v.config, "") m = v.flattenAndMergeMap(m, v.kvstore, "") m = v.flattenAndMergeMap(m, v.defaults, "") @@ -1839,11 +2078,12 @@ func (v *Viper) AllKeys() []string { // flattenAndMergeMap recursively flattens the given map into a map[string]bool // of key paths (used as a set, easier to manipulate than a []string): -// - each path is merged into a single key string, delimited with v.keyDelim -// - if a path is shadowed by an earlier value in the initial shadow map, -// it is skipped. 
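AllKeys flattens every source, in priority order, into dot-delimited key paths; a quick usage sketch:

```go
package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.SetDefault("db.host", "localhost")
	v.Set("db.port", 5432)
	fmt.Println(v.AllKeys()) // [db.host db.port], order not guaranteed
}
```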
+// - each path is merged into a single key string, delimited with v.keyDelim +// - if a path is shadowed by an earlier value in the initial shadow map, +// it is skipped. +// // The resulting set of paths is merged to the given shadow set at the same time. -func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]interface{}, prefix string) map[string]bool { +func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]any, prefix string) map[string]bool { if shadow != nil && prefix != "" && shadow[prefix] { // prefix is shadowed => nothing more to flatten return shadow @@ -1852,16 +2092,16 @@ func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]interfac shadow = make(map[string]bool) } - var m2 map[string]interface{} + var m2 map[string]any if prefix != "" { prefix += v.keyDelim } for k, val := range m { fullKey := prefix + k - switch val.(type) { - case map[string]interface{}: - m2 = val.(map[string]interface{}) - case map[interface{}]interface{}: + switch val := val.(type) { + case map[string]any: + m2 = val + case map[any]any: m2 = cast.ToStringMap(val) default: // immediate value @@ -1876,7 +2116,7 @@ func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]interfac // mergeFlatMap merges the given maps, excluding values of the second map // shadowed by values from the first map. -func (v *Viper) mergeFlatMap(shadow map[string]bool, m map[string]interface{}) map[string]bool { +func (v *Viper) mergeFlatMap(shadow map[string]bool, m map[string]any) map[string]bool { // scan keys outer: for k := range m { @@ -1896,12 +2136,17 @@ outer: return shadow } -// AllSettings merges all settings and returns them as a map[string]interface{}. -func AllSettings() map[string]interface{} { return v.AllSettings() } -func (v *Viper) AllSettings() map[string]interface{} { - m := map[string]interface{}{} +// AllSettings merges all settings and returns them as a map[string]any. +func AllSettings() map[string]any { return v.AllSettings() } + +func (v *Viper) AllSettings() map[string]any { + return v.getSettings(v.AllKeys()) +} + +func (v *Viper) getSettings(keys []string) map[string]any { + m := map[string]any{} // start from the list of keys, and construct the map one value at a time - for _, k := range v.AllKeys() { + for _, k := range keys { value := v.Get(k) if value == nil { // should not happen, since AllKeys() returns only keys holding a value, @@ -1919,6 +2164,7 @@ func (v *Viper) AllSettings() map[string]interface{} { // SetFs sets the filesystem to use to read configuration. func SetFs(fs afero.Fs) { v.SetFs(fs) } + func (v *Viper) SetFs(fs afero.Fs) { v.fs = fs } @@ -1926,6 +2172,7 @@ func (v *Viper) SetFs(fs afero.Fs) { // SetConfigName sets name for the config file. // Does not include extension. func SetConfigName(in string) { v.SetConfigName(in) } + func (v *Viper) SetConfigName(in string) { if in != "" { v.configName = in @@ -1936,6 +2183,7 @@ func (v *Viper) SetConfigName(in string) { // SetConfigType sets the type of the configuration returned by the // remote source, e.g. "json". func SetConfigType(in string) { v.SetConfigType(in) } + func (v *Viper) SetConfigType(in string) { if in != "" { v.configType = in @@ -1944,10 +2192,18 @@ func (v *Viper) SetConfigType(in string) { // SetConfigPermissions sets the permissions for the config file. 
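A reduced sketch of the flattening recursion above (flatten is a hypothetical helper; the real method also handles map[any]any values and shadowed prefixes):

```go
package main

import "fmt"

// flatten collapses nested maps into dot-delimited key paths, the same
// shape flattenAndMergeMap produces for AllKeys.
func flatten(m map[string]any, prefix string, out map[string]bool) {
	for k, v := range m {
		key := k
		if prefix != "" {
			key = prefix + "." + k
		}
		if nested, ok := v.(map[string]any); ok {
			flatten(nested, key, out)
			continue
		}
		out[key] = true
	}
}

func main() {
	out := map[string]bool{}
	flatten(map[string]any{
		"db": map[string]any{"host": "localhost", "port": 5432},
	}, "", out)
	fmt.Println(out) // map[db.host:true db.port:true]
}
```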
func SetConfigPermissions(perm os.FileMode) { v.SetConfigPermissions(perm) } + func (v *Viper) SetConfigPermissions(perm os.FileMode) { v.configPermissions = perm.Perm() } +// IniLoadOptions sets the load options for ini parsing. +func IniLoadOptions(in ini.LoadOptions) Option { + return optionFunc(func(v *Viper) { + v.iniLoadOptions = in + }) +} + func (v *Viper) getConfigType() string { if v.configType != "" { return v.configType @@ -1978,48 +2234,19 @@ func (v *Viper) getConfigFile() (string, error) { return v.configFile, nil } -func (v *Viper) searchInPath(in string) (filename string) { - jww.DEBUG.Println("Searching for config in ", in) - for _, ext := range SupportedExts { - jww.DEBUG.Println("Checking for", filepath.Join(in, v.configName+"."+ext)) - if b, _ := exists(v.fs, filepath.Join(in, v.configName+"."+ext)); b { - jww.DEBUG.Println("Found: ", filepath.Join(in, v.configName+"."+ext)) - return filepath.Join(in, v.configName+"."+ext) - } - } - - if v.configType != "" { - if b, _ := exists(v.fs, filepath.Join(in, v.configName)); b { - return filepath.Join(in, v.configName) - } - } - - return "" -} - -// Search all configPaths for any config file. -// Returns the first path that exists (and is a config file). -func (v *Viper) findConfigFile() (string, error) { - jww.INFO.Println("Searching for config in ", v.configPaths) - - for _, cp := range v.configPaths { - file := v.searchInPath(cp) - if file != "" { - return file, nil - } - } - return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} -} - // Debug prints all configuration registries for debugging // purposes. -func Debug() { v.Debug() } -func (v *Viper) Debug() { - fmt.Printf("Aliases:\n%#v\n", v.aliases) - fmt.Printf("Override:\n%#v\n", v.override) - fmt.Printf("PFlags:\n%#v\n", v.pflags) - fmt.Printf("Env:\n%#v\n", v.env) - fmt.Printf("Key/Value Store:\n%#v\n", v.kvstore) - fmt.Printf("Config:\n%#v\n", v.config) - fmt.Printf("Defaults:\n%#v\n", v.defaults) +func Debug() { v.Debug() } +func DebugTo(w io.Writer) { v.DebugTo(w) } + +func (v *Viper) Debug() { v.DebugTo(os.Stdout) } + +func (v *Viper) DebugTo(w io.Writer) { + fmt.Fprintf(w, "Aliases:\n%#v\n", v.aliases) + fmt.Fprintf(w, "Override:\n%#v\n", v.override) + fmt.Fprintf(w, "PFlags:\n%#v\n", v.pflags) + fmt.Fprintf(w, "Env:\n%#v\n", v.env) + fmt.Fprintf(w, "Key/Value Store:\n%#v\n", v.kvstore) + fmt.Fprintf(w, "Config:\n%#v\n", v.config) + fmt.Fprintf(w, "Defaults:\n%#v\n", v.defaults) } diff --git a/vendor/github.com/stretchr/objx/README.md b/vendor/github.com/stretchr/objx/README.md index 246660b2..78dc1f8b 100644 --- a/vendor/github.com/stretchr/objx/README.md +++ b/vendor/github.com/stretchr/objx/README.md @@ -4,20 +4,20 @@ [![Maintainability](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/maintainability)](https://codeclimate.com/github/stretchr/objx/maintainability) [![Test Coverage](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/test_coverage)](https://codeclimate.com/github/stretchr/objx/test_coverage) [![Sourcegraph](https://sourcegraph.com/github.com/stretchr/objx/-/badge.svg)](https://sourcegraph.com/github.com/stretchr/objx) -[![GoDoc](https://godoc.org/github.com/stretchr/objx?status.svg)](https://godoc.org/github.com/stretchr/objx) +[![GoDoc](https://pkg.go.dev/badge/github.com/stretchr/objx?utm_source=godoc)](https://pkg.go.dev/github.com/stretchr/objx) Objx - Go package for dealing with maps, slices, JSON and other data. 
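Before the diff moves on to the vendored objx files: the new IniLoadOptions above follows viper's functional-option pattern, consumed at construction time by NewWithOptions. A sketch using KeyDelimiter, an existing option from the same family:

```go
package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	// Options are applied when the Viper instance is built, exactly as
	// IniLoadOptions in the hunk above would be.
	v := viper.NewWithOptions(viper.KeyDelimiter("::"))
	v.Set("db::host", "localhost")
	fmt.Println(v.GetString("db::host")) // localhost
}
```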
Get started: - Install Objx with [one line of code](#installation), or [update it with another](#staying-up-to-date) -- Check out the API Documentation http://godoc.org/github.com/stretchr/objx +- Check out the API Documentation http://pkg.go.dev/github.com/stretchr/objx ## Overview Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes a powerful `Get` method (among others) that allows you to easily and quickly get access to data within the map, without having to worry too much about type assertions, missing data, default values etc. ### Pattern -Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` to get going: +Objx uses a predictable pattern to make access data from within `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` to get going: m, err := objx.FromJSON(json) @@ -74,7 +74,7 @@ To update Objx to the latest version, run: go get -u github.com/stretchr/objx ### Supported go versions -We support the lastest three major Go versions, which are 1.10, 1.11 and 1.12 at the moment. +We currently support the three recent major Go versions. ## Contributing Please feel free to submit issues, fork the repository and send pull requests! diff --git a/vendor/github.com/stretchr/objx/Taskfile.yml b/vendor/github.com/stretchr/objx/Taskfile.yml index 7746f516..8a79e8d6 100644 --- a/vendor/github.com/stretchr/objx/Taskfile.yml +++ b/vendor/github.com/stretchr/objx/Taskfile.yml @@ -1,7 +1,4 @@ -version: '2' - -env: - GOFLAGS: -mod=vendor +version: '3' tasks: default: diff --git a/vendor/github.com/stretchr/objx/accessors.go b/vendor/github.com/stretchr/objx/accessors.go index 4c604558..72f1d1c1 100644 --- a/vendor/github.com/stretchr/objx/accessors.go +++ b/vendor/github.com/stretchr/objx/accessors.go @@ -14,17 +14,17 @@ const ( // For example, `location.address.city` PathSeparator string = "." - // arrayAccesRegexString is the regex used to extract the array number + // arrayAccessRegexString is the regex used to extract the array number // from the access path - arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` + arrayAccessRegexString = `^(.+)\[([0-9]+)\]$` // mapAccessRegexString is the regex used to extract the map key // from the access path mapAccessRegexString = `^([^\[]*)\[([^\]]+)\](.*)$` ) -// arrayAccesRegex is the compiled arrayAccesRegexString -var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) +// arrayAccessRegex is the compiled arrayAccessRegexString +var arrayAccessRegex = regexp.MustCompile(arrayAccessRegexString) // mapAccessRegex is the compiled mapAccessRegexString var mapAccessRegex = regexp.MustCompile(mapAccessRegexString) @@ -37,11 +37,11 @@ var mapAccessRegex = regexp.MustCompile(mapAccessRegexString) // // Get can only operate directly on map[string]interface{} and []interface. 
// -// Example +// # Example // // To access the title of the third chapter of the second book, do: // -// o.Get("books[1].chapters[2].title") +// o.Get("books[1].chapters[2].title") func (m Map) Get(selector string) *Value { rawObj := access(m, selector, nil, false) return &Value{data: rawObj} @@ -52,26 +52,26 @@ func (m Map) Get(selector string) *Value { // // Set can only operate directly on map[string]interface{} and []interface // -// Example +// # Example // // To set the title of the third chapter of the second book, do: // -// o.Set("books[1].chapters[2].title","Time to Go") +// o.Set("books[1].chapters[2].title","Time to Go") func (m Map) Set(selector string, value interface{}) Map { access(m, selector, value, true) return m } -// getIndex returns the index, which is hold in s by two braches. -// It also returns s withour the index part, e.g. name[1] will return (1, name). +// getIndex returns the index, which is hold in s by two branches. +// It also returns s without the index part, e.g. name[1] will return (1, name). // If no index is found, -1 is returned func getIndex(s string) (int, string) { - arrayMatches := arrayAccesRegex.FindStringSubmatch(s) + arrayMatches := arrayAccessRegex.FindStringSubmatch(s) if len(arrayMatches) > 0 { // Get the key into the map selector := arrayMatches[1] // Get the index into the array at the key - // We know this cannt fail because arrayMatches[2] is an int for sure + // We know this can't fail because arrayMatches[2] is an int for sure index, _ := strconv.Atoi(arrayMatches[2]) return index, selector } diff --git a/vendor/github.com/stretchr/objx/conversions.go b/vendor/github.com/stretchr/objx/conversions.go index 080aa46e..01c63d7d 100644 --- a/vendor/github.com/stretchr/objx/conversions.go +++ b/vendor/github.com/stretchr/objx/conversions.go @@ -15,7 +15,7 @@ import ( const SignatureSeparator = "_" // URLValuesSliceKeySuffix is the character that is used to -// specify a suffic for slices parsed by URLValues. +// specify a suffix for slices parsed by URLValues. // If the suffix is set to "[i]", then the index of the slice // is used in place of i // Ex: Suffix "[]" would have the form a[]=b&a[]=c @@ -30,7 +30,7 @@ const ( ) // SetURLValuesSliceKeySuffix sets the character that is used to -// specify a suffic for slices parsed by URLValues. +// specify a suffix for slices parsed by URLValues. // If the suffix is set to "[i]", then the index of the slice // is used in place of i // Ex: Suffix "[]" would have the form a[]=b&a[]=c diff --git a/vendor/github.com/stretchr/objx/doc.go b/vendor/github.com/stretchr/objx/doc.go index 6d6af1a8..b170af74 100644 --- a/vendor/github.com/stretchr/objx/doc.go +++ b/vendor/github.com/stretchr/objx/doc.go @@ -1,19 +1,19 @@ /* -Objx - Go package for dealing with maps, slices, JSON and other data. +Package objx provides utilities for dealing with maps, slices, JSON and other data. -Overview +# Overview Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes a powerful `Get` method (among others) that allows you to easily and quickly get access to data within the map, without having to worry too much about type assertions, missing data, default values etc. -Pattern +# Pattern -Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. +Objx uses a predictable pattern to make access data from within `map[string]interface{}` easy. 
Call one of the `objx.` functions to create your `objx.Map` to get going: - m, err := objx.FromJSON(json) + m, err := objx.FromJSON(json) NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, the rest will be optimistic and try to figure things out without panicking. @@ -21,46 +21,46 @@ the rest will be optimistic and try to figure things out without panicking. Use `Get` to access the value you're interested in. You can use dot and array notation too: - m.Get("places[0].latlng") + m.Get("places[0].latlng") Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type. - if m.Get("code").IsStr() { // Your code... } + if m.Get("code").IsStr() { // Your code... } Or you can just assume the type, and use one of the strong type methods to extract the real value: - m.Get("code").Int() + m.Get("code").Int() If there's no value there (or if it's the wrong type) then a default value will be returned, or you can be explicit about the default value. - Get("code").Int(-1) + Get("code").Int(-1) If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, manipulating and selecting that data. You can find out more by exploring the index below. -Reading data +# Reading data A simple example of how to use Objx: - // Use MustFromJSON to make an objx.Map from some JSON - m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) + // Use MustFromJSON to make an objx.Map from some JSON + m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) - // Get the details - name := m.Get("name").Str() - age := m.Get("age").Int() + // Get the details + name := m.Get("name").Str() + age := m.Get("age").Int() - // Get their nickname (or use their name if they don't have one) - nickname := m.Get("nickname").Str(name) + // Get their nickname (or use their name if they don't have one) + nickname := m.Get("nickname").Str(name) -Ranging +# Ranging Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For example, to `range` the data, do what you would expect: - m := objx.MustFromJSON(json) - for key, value := range m { - // Your code... - } + m := objx.MustFromJSON(json) + for key, value := range m { + // Your code... + } */ package objx diff --git a/vendor/github.com/stretchr/objx/map.go b/vendor/github.com/stretchr/objx/map.go index a64712a0..ab9f9ae6 100644 --- a/vendor/github.com/stretchr/objx/map.go +++ b/vendor/github.com/stretchr/objx/map.go @@ -47,17 +47,16 @@ func New(data interface{}) Map { // // The arguments follow a key, value pattern. // -// // Returns nil if any key argument is non-string or if there are an odd number of arguments. 
// -// Example +// # Example // // To easily create Maps: // -// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) +// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) // -// // creates an Map equivalent to -// m := objx.Map{"name": "Mat", "age": 29, "subobj": objx.Map{"active": true}} +// // creates an Map equivalent to +// m := objx.Map{"name": "Mat", "age": 29, "subobj": objx.Map{"active": true}} func MSI(keyAndValuePairs ...interface{}) Map { newMap := Map{} keyAndValuePairsLen := len(keyAndValuePairs) diff --git a/vendor/github.com/stretchr/testify/assert/BUILD.bazel b/vendor/github.com/stretchr/testify/assert/BUILD.bazel index 982dc1c4..62d7a4e0 100644 --- a/vendor/github.com/stretchr/testify/assert/BUILD.bazel +++ b/vendor/github.com/stretchr/testify/assert/BUILD.bazel @@ -4,8 +4,6 @@ go_library( name = "assert", srcs = [ "assertion_compare.go", - "assertion_compare_can_convert.go", - "assertion_compare_legacy.go", "assertion_format.go", "assertion_forward.go", "assertion_order.go", diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index b774da88..4d4b4aad 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -28,6 +28,8 @@ var ( uint32Type = reflect.TypeOf(uint32(1)) uint64Type = reflect.TypeOf(uint64(1)) + uintptrType = reflect.TypeOf(uintptr(1)) + float32Type = reflect.TypeOf(float32(1)) float64Type = reflect.TypeOf(float64(1)) @@ -308,11 +310,11 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { case reflect.Struct: { // All structs enter here. We're not interested in most types. - if !canConvert(obj1Value, timeType) { + if !obj1Value.CanConvert(timeType) { break } - // time.Time can compared! + // time.Time can be compared! timeObj1, ok := obj1.(time.Time) if !ok { timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time) @@ -328,7 +330,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { case reflect.Slice: { // We only care about the []byte type. 
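A usage sketch of the objx accessors documented above (standard objx API):

```go
package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	m := objx.MustFromJSON(`{"books": [{"title": "A"}, {"title": "B"}]}`)
	// Dot-and-index selectors, as documented in accessors.go above.
	fmt.Println(m.Get("books[1].title").Str()) // B

	// MSI builds a Map from key/value pairs.
	p := objx.MSI("name", "Mat", "age", 29)
	fmt.Println(p.Get("name").Str(), p.Get("age").Int()) // Mat 29
}
```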
- if !canConvert(obj1Value, bytesType) { + if !obj1Value.CanConvert(bytesType) { break } @@ -345,6 +347,26 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true } + case reflect.Uintptr: + { + uintptrObj1, ok := obj1.(uintptr) + if !ok { + uintptrObj1 = obj1Value.Convert(uintptrType).Interface().(uintptr) + } + uintptrObj2, ok := obj2.(uintptr) + if !ok { + uintptrObj2 = obj2Value.Convert(uintptrType).Interface().(uintptr) + } + if uintptrObj1 > uintptrObj2 { + return compareGreater, true + } + if uintptrObj1 == uintptrObj2 { + return compareEqual, true + } + if uintptrObj1 < uintptrObj2 { + return compareLess, true + } + } } return compareEqual, false diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go deleted file mode 100644 index da867903..00000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build go1.17 -// +build go1.17 - -// TODO: once support for Go 1.16 is dropped, this file can be -// merged/removed with assertion_compare_go1.17_test.go and -// assertion_compare_legacy.go - -package assert - -import "reflect" - -// Wrapper around reflect.Value.CanConvert, for compatibility -// reasons. -func canConvert(value reflect.Value, to reflect.Type) bool { - return value.CanConvert(to) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go deleted file mode 100644 index 1701af2a..00000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !go1.17 -// +build !go1.17 - -// TODO: once support for Go 1.16 is dropped, this file can be -// merged/removed with assertion_compare_go1.17_test.go and -// assertion_compare_can_convert.go - -package assert - -import "reflect" - -// Older versions of Go does not have the reflect.Value.CanConvert -// method. -func canConvert(value reflect.Value, to reflect.Type) bool { - return false -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 84dbd6c7..3ddab109 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package assert @@ -107,7 +104,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") @@ -616,6 +613,16 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...) } +// NotImplementsf asserts that an object does not implement the specified interface. 
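With the new reflect.Uintptr case above, testify's ordering assertions accept uintptr operands. A test-file sketch (demo_test is a hypothetical package name):

```go
package demo_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestUintptrOrdering(t *testing.T) {
	assert.Greater(t, uintptr(2), uintptr(1))
	assert.Less(t, uintptr(1), uintptr(2))
}
```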
+// +// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotImplements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) +} + // NotNilf asserts that the specified object is not nil. // // assert.NotNilf(t, err, "error message %s", "formatted") @@ -660,10 +667,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -747,10 +756,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg return Same(t, expected, actual, append([]interface{}{msg}, args...)...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index b1d94aec..a84e09bd 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package assert @@ -189,7 +186,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface return EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValues(uint32(123), int32(123)) @@ -200,7 +197,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn return EqualValues(a.t, expected, actual, msgAndArgs...) 
} -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") @@ -1221,6 +1218,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in return NotErrorIsf(a.t, err, target, msg, args...) } +// NotImplements asserts that an object does not implement the specified interface. +// +// a.NotImplements((*MyInterface)(nil), new(MyObject)) +func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotImplements(a.t, interfaceObject, object, msgAndArgs...) +} + +// NotImplementsf asserts that an object does not implement the specified interface. +// +// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotImplementsf(a.t, interfaceObject, object, msg, args...) +} + // NotNil asserts that the specified object is not nil. // // a.NotNil(err) @@ -1309,10 +1326,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri return NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// a.NotSubset([1, 3, 4], [1, 2]) +// a.NotSubset({"x": 1, "y": 2}, {"z": 3}) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1320,10 +1339,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs return NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1483,10 +1504,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, return Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. 
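The NotImplements family added above is the negative counterpart of Implements; a test-file sketch (plainStruct is a hypothetical type):

```go
package demo_test

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

type plainStruct struct{}

func TestNotImplements(t *testing.T) {
	// plainStruct has no String() method, so it is not a fmt.Stringer.
	assert.NotImplements(t, (*fmt.Stringer)(nil), plainStruct{})
}
```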
// -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// a.Subset([1, 2, 3], [1, 2]) +// a.Subset({"x": 1, "y": 2}, {"x": 1}) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1494,10 +1516,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... return Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index a55d1bba..0b7570f2 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -19,7 +19,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" - yaml "gopkg.in/yaml.v3" + "gopkg.in/yaml.v3" ) //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" @@ -110,7 +110,12 @@ func copyExportedFields(expected interface{}) interface{} { return result.Interface() case reflect.Array, reflect.Slice: - result := reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) + var result reflect.Value + if expectedKind == reflect.Array { + result = reflect.New(reflect.ArrayOf(expectedValue.Len(), expectedType.Elem())).Elem() + } else { + result = reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) + } for i := 0; i < expectedValue.Len(); i++ { index := expectedValue.Index(i) if isNil(index) { @@ -140,6 +145,8 @@ func copyExportedFields(expected interface{}) interface{} { // structures. // // This function does no assertion of any kind. +// +// Deprecated: Use [EqualExportedValues] instead. 
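The rewritten Subset/NotSubset docs above reflect that maps are now accepted alongside lists; for example:

```go
package demo_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSubsetWithMaps(t *testing.T) {
	assert.Subset(t, map[string]int{"x": 1, "y": 2}, map[string]int{"x": 1})
	assert.NotSubset(t, map[string]int{"x": 1, "y": 2}, map[string]int{"z": 3})
}
```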
func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool { expectedCleaned := copyExportedFields(expected) actualCleaned := copyExportedFields(actual) @@ -153,17 +160,40 @@ func ObjectsAreEqualValues(expected, actual interface{}) bool { return true } - actualType := reflect.TypeOf(actual) - if actualType == nil { + expectedValue := reflect.ValueOf(expected) + actualValue := reflect.ValueOf(actual) + if !expectedValue.IsValid() || !actualValue.IsValid() { return false } - expectedValue := reflect.ValueOf(expected) - if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { - // Attempt comparison after type conversion - return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) + + expectedType := expectedValue.Type() + actualType := actualValue.Type() + if !expectedType.ConvertibleTo(actualType) { + return false } - return false + if !isNumericType(expectedType) || !isNumericType(actualType) { + // Attempt comparison after type conversion + return reflect.DeepEqual( + expectedValue.Convert(actualType).Interface(), actual, + ) + } + + // If BOTH values are numeric, there are chances of false positives due + // to overflow or underflow. So, we need to make sure to always convert + // the smaller type to a larger type before comparing. + if expectedType.Size() >= actualType.Size() { + return actualValue.Convert(expectedType).Interface() == expected + } + + return expectedValue.Convert(actualType).Interface() == actual +} + +// isNumericType returns true if the type is one of: +// int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, +// float32, float64, complex64, complex128 +func isNumericType(t reflect.Type) bool { + return t.Kind() >= reflect.Int && t.Kind() <= reflect.Complex128 } /* CallerInfo is necessary because the assert functions use the testing object @@ -266,7 +296,7 @@ func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { // Aligns the provided message so that all lines after the first line start at the same location as the first line. // Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). -// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the +// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the // basis on which the alignment occurs). func indentMessageLines(message string, longestLabelLen int) string { outBuf := new(bytes.Buffer) @@ -382,6 +412,25 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg return true } +// NotImplements asserts that an object does not implement the specified interface. +// +// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) +func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + interfaceType := reflect.TypeOf(interfaceObject).Elem() + + if object == nil { + return Fail(t, fmt.Sprintf("Cannot check if nil does not implement %v", interfaceType), msgAndArgs...) + } + if reflect.TypeOf(object).Implements(interfaceType) { + return Fail(t, fmt.Sprintf("%T implements %v", object, interfaceType), msgAndArgs...) + } + + return true +} + // IsType asserts that the specified objects are of the same type. 
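The ObjectsAreEqualValues rework above closes a conversion-overflow false positive: converting the larger numeric type down to the smaller one could wrap to a matching value. A test-file sketch of the now-visible behavior:

```go
package demo_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestEqualValuesOverflow(t *testing.T) {
	// Still equal: same numeric value, mutually convertible types.
	assert.True(t, assert.ObjectsAreEqualValues(uint32(123), int32(123)))
	// No longer equal: int(270) truncated to int8 wraps to 14, which the
	// smaller-to-larger conversion rule above now catches.
	assert.False(t, assert.ObjectsAreEqualValues(int(270), int8(14)))
}
```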
func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -496,7 +545,7 @@ func samePointers(first, second interface{}) bool { // representations appropriate to be presented to the user. // // If the values are not of like type, the returned strings will be prefixed -// with the type name, and the value will be enclosed in parenthesis similar +// with the type name, and the value will be enclosed in parentheses similar // to a type conversion in the Go grammar. func formatUnequalValues(expected, actual interface{}) (e string, a string) { if reflect.TypeOf(expected) != reflect.TypeOf(actual) { @@ -523,7 +572,7 @@ func truncatingFormat(data interface{}) string { return value } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValues(t, uint32(123), int32(123)) @@ -566,12 +615,19 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs .. return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) } + if aType.Kind() == reflect.Ptr { + aType = aType.Elem() + } + if bType.Kind() == reflect.Ptr { + bType = bType.Elem() + } + if aType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) + return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) } if bType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) + return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) } expected = copyExportedFields(expected) @@ -620,17 +676,6 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { return Fail(t, "Expected value not to be nil.", msgAndArgs...) } -// containsKind checks if a specified kind in the slice of kinds. -func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool { - for i := 0; i < len(kinds); i++ { - if kind == kinds[i] { - return true - } - } - - return false -} - // isNil checks if a specified object is nil or not, without Failing. func isNil(object interface{}) bool { if object == nil { @@ -638,16 +683,13 @@ func isNil(object interface{}) bool { } value := reflect.ValueOf(object) - kind := value.Kind() - isNilableKind := containsKind( - []reflect.Kind{ - reflect.Chan, reflect.Func, - reflect.Interface, reflect.Map, - reflect.Ptr, reflect.Slice, reflect.UnsafePointer}, - kind) + switch value.Kind() { + case + reflect.Chan, reflect.Func, + reflect.Interface, reflect.Map, + reflect.Ptr, reflect.Slice, reflect.UnsafePointer: - if isNilableKind && value.IsNil() { - return true + return value.IsNil() } return false @@ -731,16 +773,14 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } -// getLen try to get length of object. -// return (false, 0) if impossible. -func getLen(x interface{}) (ok bool, length int) { +// getLen tries to get the length of an object. +// It returns (0, false) if impossible. 
+func getLen(x interface{}) (length int, ok bool) { v := reflect.ValueOf(x) defer func() { - if e := recover(); e != nil { - ok = false - } + ok = recover() == nil }() - return true, v.Len() + return v.Len(), true } // Len asserts that the specified object has specific length. @@ -751,13 +791,13 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - ok, l := getLen(object) + l, ok := getLen(object) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%v\" could not be applied builtin len()", object), msgAndArgs...) } if l != length { - return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%v\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) } return true } @@ -919,10 +959,11 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// assert.Subset(t, [1, 2, 3], [1, 2]) +// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -975,10 +1016,12 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// assert.NotSubset(t, [1, 3, 4], [1, 2]) +// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1439,7 +1482,7 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd h.Helper() } if math.IsNaN(epsilon) { - return Fail(t, "epsilon must not be NaN") + return Fail(t, "epsilon must not be NaN", msgAndArgs...) } actualEpsilon, err := calcRelativeError(expected, actual) if err != nil { @@ -1458,19 +1501,26 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m if h, ok := t.(tHelper); ok { h.Helper() } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { + + if expected == nil || actual == nil { return Fail(t, "Parameters must be slice", msgAndArgs...) } - actualSlice := reflect.ValueOf(actual) expectedSlice := reflect.ValueOf(expected) + actualSlice := reflect.ValueOf(actual) - for i := 0; i < actualSlice.Len(); i++ { - result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) - if !result { - return result + if expectedSlice.Type().Kind() != reflect.Slice { + return Fail(t, "Expected value must be slice", msgAndArgs...) 
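getLen's new (length, ok) return order feeds Len, and InEpsilon now forwards msgAndArgs on its NaN check; basic usage for reference:

```go
package demo_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestLenAndInEpsilon(t *testing.T) {
	assert.Len(t, []int{1, 2, 3}, 3)
	// Relative error: |expected-actual| / |expected| must be <= epsilon.
	assert.InEpsilon(t, 100.0, 100.5, 0.01)
}
```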
+ } + + expectedLen := expectedSlice.Len() + if !IsType(t, expected, actual) || !Len(t, actual, expectedLen) { + return false + } + + for i := 0; i < expectedLen; i++ { + if !InEpsilon(t, expectedSlice.Index(i).Interface(), actualSlice.Index(i).Interface(), epsilon, "at index %d", i) { + return false } } @@ -1870,23 +1920,18 @@ func (c *CollectT) Errorf(format string, args ...interface{}) { } // FailNow panics. -func (c *CollectT) FailNow() { +func (*CollectT) FailNow() { panic("Assertion failed") } -// Reset clears the collected errors. -func (c *CollectT) Reset() { - c.errors = nil +// Deprecated: That was a method for internal usage that should not have been published. Now just panics. +func (*CollectT) Reset() { + panic("Reset() is deprecated") } -// Copy copies the collected errors to the supplied t. -func (c *CollectT) Copy(t TestingT) { - if tt, ok := t.(tHelper); ok { - tt.Helper() - } - for _, err := range c.errors { - t.Errorf("%v", err) - } +// Deprecated: That was a method for internal usage that should not have been published. Now just panics. +func (*CollectT) Copy(TestingT) { + panic("Copy() is deprecated") } // EventuallyWithT asserts that given condition will be met in waitFor time, @@ -1912,8 +1957,8 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time h.Helper() } - collect := new(CollectT) - ch := make(chan bool, 1) + var lastFinishedTickErrs []error + ch := make(chan []error, 1) timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1924,19 +1969,25 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time for tick := ticker.C; ; { select { case <-timer.C: - collect.Copy(t) + for _, err := range lastFinishedTickErrs { + t.Errorf("%v", err) + } return Fail(t, "Condition never satisfied", msgAndArgs...) case <-tick: tick = nil - collect.Reset() go func() { + collect := new(CollectT) + defer func() { + ch <- collect.errors + }() condition(collect) - ch <- len(collect.errors) == 0 }() - case v := <-ch: - if v { + case errs := <-ch: + if len(errs) == 0 { return true } + // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. + lastFinishedTickErrs = errs tick = ticker.C } } diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go index d8038c28..861ed4b7 100644 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -12,7 +12,7 @@ import ( // an error if building a new request fails. func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { w := httptest.NewRecorder() - req, err := http.NewRequest(method, url, nil) + req, err := http.NewRequest(method, url, http.NoBody) if err != nil { return -1, err } @@ -32,12 +32,12 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent if !isSuccessCode { - Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) 
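For context on the EventuallyWithT rework above: each tick now runs the condition in its own goroutine against a fresh CollectT, and only the errors from the last tick that finished are copied to t on timeout; CollectT.Reset and CollectT.Copy now panic and must not be called. A hedged usage sketch, with made-up durations and readiness check:

```go
package example

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestEventuallyWithT(t *testing.T) {
	start := time.Now()

	assert.EventuallyWithT(t, func(c *assert.CollectT) {
		// Errors recorded on c stay buffered per tick; they only reach
		// t if the 500ms waitFor budget below runs out.
		assert.True(c, time.Since(start) > 50*time.Millisecond, "not ready yet")
	}, 500*time.Millisecond, 10*time.Millisecond)
}
```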
} return isSuccessCode @@ -54,12 +54,12 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect if !isRedirectCode { - Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isRedirectCode @@ -76,12 +76,12 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isErrorCode := code >= http.StatusBadRequest if !isErrorCode { - Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isErrorCode @@ -98,12 +98,12 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } successful := code == statuscode if !successful { - Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code), msgAndArgs...) } return successful @@ -113,7 +113,10 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va // empty string if building a new request fails. func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { w := httptest.NewRecorder() - req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if len(values) > 0 { + url += "?" + values.Encode() + } + req, err := http.NewRequest(method, url, http.NoBody) if err != nil { return "" } @@ -135,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, contains := strings.Contains(body, fmt.Sprint(str)) if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) } return contains @@ -155,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin contains := strings.Contains(body, fmt.Sprint(str)) if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) 
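All of the HTTP assertion hunks above make the same fix: user-supplied msgAndArgs now reach the Fail call instead of being silently dropped. A small sketch of what that enables; the handler and route are placeholders, not from this repository:

```go
package example

import (
	"net/http"
	"net/url"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestHealthEndpoint(t *testing.T) {
	handler := func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}

	// The trailing message now shows up in the failure output, including
	// on the "Failed to build test request" path.
	assert.HTTPStatusCode(t, handler, http.MethodGet, "/healthz", url.Values{},
		http.StatusOK, "health check should return 200")
}
```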
} return !contains diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go index f4b42e44..213bde2e 100644 --- a/vendor/github.com/stretchr/testify/mock/mock.go +++ b/vendor/github.com/stretchr/testify/mock/mock.go @@ -18,6 +18,9 @@ import ( "github.com/stretchr/testify/assert" ) +// regex for GCCGO functions +var gccgoRE = regexp.MustCompile(`\.pN\d+_`) + // TestingT is an interface wrapper around *testing.T type TestingT interface { Logf(format string, args ...interface{}) @@ -111,7 +114,7 @@ func (c *Call) Return(returnArguments ...interface{}) *Call { return c } -// Panic specifies if the functon call should fail and the panic message +// Panic specifies if the function call should fail and the panic message // // Mock.On("DoSomething").Panic("test panic") func (c *Call) Panic(msg string) *Call { @@ -123,21 +126,21 @@ func (c *Call) Panic(msg string) *Call { return c } -// Once indicates that that the mock should only return the value once. +// Once indicates that the mock should only return the value once. // // Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() func (c *Call) Once() *Call { return c.Times(1) } -// Twice indicates that that the mock should only return the value twice. +// Twice indicates that the mock should only return the value twice. // // Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() func (c *Call) Twice() *Call { return c.Times(2) } -// Times indicates that that the mock should only return the indicated number +// Times indicates that the mock should only return the indicated number // of times. // // Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) @@ -455,9 +458,8 @@ func (m *Mock) Called(arguments ...interface{}) Arguments { // For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock // uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree // With GCCGO we need to remove interface information starting from pN
. - re := regexp.MustCompile("\\.pN\\d+_") - if re.MatchString(functionPath) { - functionPath = re.Split(functionPath, -1)[0] + if gccgoRE.MatchString(functionPath) { + functionPath = gccgoRE.Split(functionPath, -1)[0] } parts := strings.Split(functionPath, ".") functionName := parts[len(parts)-1] @@ -474,7 +476,7 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen found, call := m.findExpectedCall(methodName, arguments...) if found < 0 { - // expected call found but it has already been called with repeatable times + // expected call found, but it has already been called with repeatable times if call != nil { m.mutex.Unlock() m.fail("\nassert: mock: The method has been called over %d times.\n\tEither do one more Mock.On(\"%s\").Return(...), or remove extra call.\n\tThis call was unexpected:\n\t\t%s\n\tat: %s", call.totalCalls, methodName, callString(methodName, arguments, true), assert.CallerInfo()) @@ -563,7 +565,7 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen Assertions */ -type assertExpectationser interface { +type assertExpectationiser interface { AssertExpectations(TestingT) bool } @@ -580,7 +582,7 @@ func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { t.Logf("Deprecated mock.AssertExpectationsForObjects(myMock.Mock) use mock.AssertExpectationsForObjects(myMock)") obj = m } - m := obj.(assertExpectationser) + m := obj.(assertExpectationiser) if !m.AssertExpectations(t) { t.Logf("Expectations didn't match for Mock: %+v", reflect.TypeOf(m)) return false @@ -592,6 +594,9 @@ func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { // AssertExpectations asserts that everything specified with On and Return was // in fact called as expected. Calls may have occurred in any order. func (m *Mock) AssertExpectations(t TestingT) bool { + if s, ok := t.(interface{ Skipped() bool }); ok && s.Skipped() { + return true + } if h, ok := t.(tHelper); ok { h.Helper() } @@ -606,8 +611,8 @@ func (m *Mock) AssertExpectations(t TestingT) bool { satisfied, reason := m.checkExpectation(expectedCall) if !satisfied { failedExpectations++ + t.Logf(reason) } - t.Logf(reason) } if failedExpectations != 0 { @@ -758,25 +763,33 @@ const ( Anything = "mock.Anything" ) -// AnythingOfTypeArgument is a string that contains the type of an argument +// AnythingOfTypeArgument contains the type of an argument // for use when type checking. Used in Diff and Assert. -type AnythingOfTypeArgument string +// +// Deprecated: this is an implementation detail that must not be used. Use [AnythingOfType] instead. +type AnythingOfTypeArgument = anythingOfTypeArgument -// AnythingOfType returns an AnythingOfTypeArgument object containing the -// name of the type to check for. Used in Diff and Assert. +// anythingOfTypeArgument is a string that contains the type of an argument +// for use when type checking. Used in Diff and Assert. +type anythingOfTypeArgument string + +// AnythingOfType returns a special value containing the +// name of the type to check for. The type name will be matched against the type name returned by [reflect.Type.String]. +// +// Used in Diff and Assert. // // For example: // // Assert(t, AnythingOfType("string"), AnythingOfType("int")) func AnythingOfType(t string) AnythingOfTypeArgument { - return AnythingOfTypeArgument(t) + return anythingOfTypeArgument(t) } // IsTypeArgument is a struct that contains the type of an argument // for use when type checking. 
This is an alternative to AnythingOfType. // Used in Diff and Assert. type IsTypeArgument struct { - t interface{} + t reflect.Type } // IsType returns an IsTypeArgument object containing the type to check for. @@ -786,7 +799,7 @@ type IsTypeArgument struct { // For example: // Assert(t, IsType(""), IsType(0)) func IsType(t interface{}) *IsTypeArgument { - return &IsTypeArgument{t: t} + return &IsTypeArgument{t: reflect.TypeOf(t)} } // FunctionalOptionsArgument is a struct that contains the type and value of an functional option argument @@ -950,53 +963,55 @@ func (args Arguments) Diff(objects []interface{}) (string, int) { differences++ output = fmt.Sprintf("%s\t%d: FAIL: %s not matched by %s\n", output, i, actualFmt, matcher) } - } else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() { - // type checking - if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) { - // not match - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt) - } - } else if reflect.TypeOf(expected) == reflect.TypeOf((*IsTypeArgument)(nil)) { - t := expected.(*IsTypeArgument).t - if reflect.TypeOf(t) != reflect.TypeOf(actual) { - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, reflect.TypeOf(t).Name(), reflect.TypeOf(actual).Name(), actualFmt) - } - } else if reflect.TypeOf(expected) == reflect.TypeOf((*FunctionalOptionsArgument)(nil)) { - t := expected.(*FunctionalOptionsArgument).value + } else { + switch expected := expected.(type) { + case anythingOfTypeArgument: + // type checking + if reflect.TypeOf(actual).Name() != string(expected) && reflect.TypeOf(actual).String() != string(expected) { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt) + } + case *IsTypeArgument: + actualT := reflect.TypeOf(actual) + if actualT != expected.t { + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected.t.Name(), actualT.Name(), actualFmt) + } + case *FunctionalOptionsArgument: + t := expected.value - var name string - tValue := reflect.ValueOf(t) - if tValue.Len() > 0 { - name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String() - } + var name string + tValue := reflect.ValueOf(t) + if tValue.Len() > 0 { + name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String() + } - tName := reflect.TypeOf(t).Name() - if name != reflect.TypeOf(actual).String() && tValue.Len() != 0 { - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, tName, reflect.TypeOf(actual).Name(), actualFmt) - } else { - if ef, af := assertOpts(t, actual); ef == "" && af == "" { + tName := reflect.TypeOf(t).Name() + if name != reflect.TypeOf(actual).String() && tValue.Len() != 0 { + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, tName, reflect.TypeOf(actual).Name(), actualFmt) + } else { + if ef, af := assertOpts(t, actual); ef == "" && af == "" { + // match + output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, tName, tName) + } else { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, af, ef) + } + } + + default: + if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, 
Anything) || assert.ObjectsAreEqual(actual, expected) { // match - output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, tName, tName) + output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, actualFmt, expectedFmt) } else { // not match differences++ - output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, af, ef) + output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, actualFmt, expectedFmt) } } - } else { - // normal checking - - if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { - // match - output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, actualFmt, expectedFmt) - } else { - // not match - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, actualFmt, expectedFmt) - } } } diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index 63f85214..506a82f8 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package require @@ -235,7 +232,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, t.FailNow() } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValues(t, uint32(123), int32(123)) @@ -249,7 +246,7 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg t.FailNow() } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") @@ -1546,6 +1543,32 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf t.FailNow() } +// NotImplements asserts that an object does not implement the specified interface. +// +// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) +func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotImplements(t, interfaceObject, object, msgAndArgs...) { + return + } + t.FailNow() +} + +// NotImplementsf asserts that an object does not implement the specified interface. +// +// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotImplementsf(t, interfaceObject, object, msg, args...) { + return + } + t.FailNow() +} + // NotNil asserts that the specified object is not nil. // // assert.NotNil(t, err) @@ -1658,10 +1681,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, t.FailNow() } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) 
or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// assert.NotSubset(t, [1, 3, 4], [1, 2]) +// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1672,10 +1697,12 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i t.FailNow() } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1880,10 +1907,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg t.FailNow() } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// assert.Subset(t, [1, 2, 3], [1, 2]) +// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1894,10 +1922,11 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte t.FailNow() } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index 3b5b0933..eee8310a 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package require @@ -190,7 +187,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface EqualExportedValuesf(a.t, expected, actual, msg, args...) 
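The require package also gains NotImplements/NotImplementsf wrappers (added a few hunks above). A brief sketch of both directions; plainStruct is a hypothetical type used only for illustration:

```go
package example

import (
	"io"
	"strings"
	"testing"

	"github.com/stretchr/testify/require"
)

// plainStruct is a hypothetical type used only for this sketch.
type plainStruct struct{}

func TestNotImplements(t *testing.T) {
	// Fails (and stops the test) if plainStruct ever implements io.Reader.
	require.NotImplements(t, (*io.Reader)(nil), new(plainStruct))

	// The long-standing positive counterpart, for contrast.
	require.Implements(t, (*io.Reader)(nil), strings.NewReader("x"))
}
```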
} -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValues(uint32(123), int32(123)) @@ -201,7 +198,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn EqualValues(a.t, expected, actual, msgAndArgs...) } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") @@ -1222,6 +1219,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in NotErrorIsf(a.t, err, target, msg, args...) } +// NotImplements asserts that an object does not implement the specified interface. +// +// a.NotImplements((*MyInterface)(nil), new(MyObject)) +func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotImplements(a.t, interfaceObject, object, msgAndArgs...) +} + +// NotImplementsf asserts that an object does not implement the specified interface. +// +// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotImplementsf(a.t, interfaceObject, object, msg, args...) +} + // NotNil asserts that the specified object is not nil. // // a.NotNil(err) @@ -1310,10 +1327,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// a.NotSubset([1, 3, 4], [1, 2]) +// a.NotSubset({"x": 1, "y": 2}, {"z": 3}) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1321,10 +1340,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1484,10 +1505,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) 
contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// a.Subset([1, 2, 3], [1, 2]) +// a.Subset({"x": 1, "y": 2}, {"x": 1}) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1495,10 +1517,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/subosito/gotenv/.gitignore b/vendor/github.com/subosito/gotenv/.gitignore index 2b8d4561..7db37c1d 100644 --- a/vendor/github.com/subosito/gotenv/.gitignore +++ b/vendor/github.com/subosito/gotenv/.gitignore @@ -1,3 +1,4 @@ *.test *.out annotate.json +profile.cov diff --git a/vendor/github.com/subosito/gotenv/.golangci.yaml b/vendor/github.com/subosito/gotenv/.golangci.yaml new file mode 100644 index 00000000..8c82a762 --- /dev/null +++ b/vendor/github.com/subosito/gotenv/.golangci.yaml @@ -0,0 +1,7 @@ +# Options for analysis running. 
+run: + timeout: 1m + +linters-settings: + gofmt: + simplify: true diff --git a/vendor/github.com/subosito/gotenv/.travis.yml b/vendor/github.com/subosito/gotenv/.travis.yml deleted file mode 100644 index 3370d5f4..00000000 --- a/vendor/github.com/subosito/gotenv/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go: - - 1.x -os: - - linux - - osx -script: - - go test -test.v -coverprofile=coverage.out -covermode=count -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/subosito/gotenv/BUILD.bazel b/vendor/github.com/subosito/gotenv/BUILD.bazel index 1f581d96..cc354958 100644 --- a/vendor/github.com/subosito/gotenv/BUILD.bazel +++ b/vendor/github.com/subosito/gotenv/BUILD.bazel @@ -6,4 +6,8 @@ go_library( importmap = "peridot.resf.org/vendor/github.com/subosito/gotenv", importpath = "github.com/subosito/gotenv", visibility = ["//visibility:public"], + deps = [ + "//vendor/golang.org/x/text/encoding/unicode", + "//vendor/golang.org/x/text/transform", + ], ) diff --git a/vendor/github.com/subosito/gotenv/CHANGELOG.md b/vendor/github.com/subosito/gotenv/CHANGELOG.md index 67f68738..c4fe7d32 100644 --- a/vendor/github.com/subosito/gotenv/CHANGELOG.md +++ b/vendor/github.com/subosito/gotenv/CHANGELOG.md @@ -1,5 +1,63 @@ # Changelog +## [1.5.0] - 2023-08-15 + +### Fixed + +- Use io.Reader instead of custom Reader + +## [1.5.0] - 2023-08-15 + +### Added + +- Support for reading UTF16 files + +### Fixed + +- Scanner error handling +- Reader error handling + +## [1.4.2] - 2023-01-11 + +### Fixed + +- Env var initialization + +### Changed + +- More consistent line splitting + +## [1.4.1] - 2022-08-23 + +### Fixed + +- Missing file close + +### Changed + +- Updated dependencies + +## [1.4.0] - 2022-06-02 + +### Added + +- Add `Marshal` and `Unmarshal` helpers + +### Changed + +- The CI will now run a linter and the tests on PRs. + +## [1.3.0] - 2022-05-23 + +### Added + +- Support = within double-quoted strings + +- Add support for multiline values + +### Changed + +- `OverLoad` prefers environment variables over local variables + ## [1.2.0] - 2019-08-03 ### Added @@ -30,7 +88,7 @@ ### Added - Supports carriage return in env -- Handle files with UTF-8 BOM +- Handle files with UTF-8 BOM ### Changed diff --git a/vendor/github.com/subosito/gotenv/README.md b/vendor/github.com/subosito/gotenv/README.md index d610cdf0..fc9616e3 100644 --- a/vendor/github.com/subosito/gotenv/README.md +++ b/vendor/github.com/subosito/gotenv/README.md @@ -1,12 +1,11 @@ # gotenv -[![Build Status](https://travis-ci.org/subosito/gotenv.svg?branch=master)](https://travis-ci.org/subosito/gotenv) -[![Build status](https://ci.appveyor.com/api/projects/status/wb2e075xkfl0m0v2/branch/master?svg=true)](https://ci.appveyor.com/project/subosito/gotenv/branch/master) +[![Build Status](https://github.com/subosito/gotenv/workflows/Go%20workflow/badge.svg)](https://github.com/subosito/gotenv/actions) [![Coverage Status](https://badgen.net/codecov/c/github/subosito/gotenv)](https://codecov.io/gh/subosito/gotenv) [![Go Report Card](https://goreportcard.com/badge/github.com/subosito/gotenv)](https://goreportcard.com/report/github.com/subosito/gotenv) [![GoDoc](https://godoc.org/github.com/subosito/gotenv?status.svg)](https://godoc.org/github.com/subosito/gotenv) -Load environment variables dynamically in Go. +Load environment variables from `.env` or `io.Reader` in Go. ## Usage @@ -29,7 +28,7 @@ Once loaded you can use `os.Getenv()` to get the value of the variable.
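Putting those two calls together, a minimal program might look like this (a sketch; the error handling shown is one reasonable choice, not mandated by the library):

```go
package main

import (
	"fmt"
	"os"

	"github.com/subosito/gotenv"
)

func main() {
	// Load .env from the current directory; variables already present in
	// the environment are left untouched.
	if err := gotenv.Load(); err != nil {
		fmt.Fprintln(os.Stderr, "no .env loaded:", err)
	}

	fmt.Println(os.Getenv("APP_ID")) // "1234567" with the .env shown below
}
```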
Let's say you have `.env` file: -``` +```sh APP_ID=1234567 APP_SECRET=abcdef ``` @@ -79,7 +78,6 @@ Besides above functions, `gotenv` also provides another functions that overrides - `gotenv.OverLoad` - `gotenv.OverApply` - Here's the example of this overrides behavior: ```go @@ -120,7 +118,7 @@ Just in case you want to parse environment variables from any `io.Reader`, goten pairs := gotenv.Parse(strings.NewReader("FOO=test\nBAR=$FOO")) // gotenv.Env{"FOO": "test", "BAR": "test"} -err, pairs = gotenv.StrictParse(strings.NewReader(`FOO="bar"`)) +pairs, err := gotenv.StrictParse(strings.NewReader(`FOO="bar"`)) // gotenv.Env{"FOO": "bar"} ``` diff --git a/vendor/github.com/subosito/gotenv/appveyor.yml b/vendor/github.com/subosito/gotenv/appveyor.yml deleted file mode 100644 index 33b4c404..00000000 --- a/vendor/github.com/subosito/gotenv/appveyor.yml +++ /dev/null @@ -1,9 +0,0 @@ -build: off -clone_folder: c:\gopath\src\github.com\subosito\gotenv -environment: - GOPATH: c:\gopath -stack: go 1.10 -before_test: - - go get -t -test_script: - - go test -v -cover -race diff --git a/vendor/github.com/subosito/gotenv/gotenv.go b/vendor/github.com/subosito/gotenv/gotenv.go index 745a3448..1191d358 100644 --- a/vendor/github.com/subosito/gotenv/gotenv.go +++ b/vendor/github.com/subosito/gotenv/gotenv.go @@ -3,11 +3,18 @@ package gotenv import ( "bufio" + "bytes" "fmt" "io" "os" + "path/filepath" "regexp" + "sort" + "strconv" "strings" + + "golang.org/x/text/encoding/unicode" + "golang.org/x/text/transform" ) const ( @@ -18,44 +25,41 @@ const ( variablePattern = `(\\)?(\$)(\{?([A-Z0-9_]+)?\}?)` ) +// Byte order mark character +var ( + bomUTF8 = []byte("\xEF\xBB\xBF") + bomUTF16LE = []byte("\xFF\xFE") + bomUTF16BE = []byte("\xFE\xFF") +) + // Env holds key/value pair of valid environment variable type Env map[string]string -/* -Load is a function to load a file or multiple files and then export the valid variables into environment variables if they do not exist. -When it's called with no argument, it will load `.env` file on the current path and set the environment variables. -Otherwise, it will loop over the filenames parameter and set the proper environment variables. -*/ +// Load is a function to load a file or multiple files and then export the valid variables into environment variables if they do not exist. +// When it's called with no argument, it will load `.env` file on the current path and set the environment variables. +// Otherwise, it will loop over the filenames parameter and set the proper environment variables. func Load(filenames ...string) error { return loadenv(false, filenames...) } -/* -OverLoad is a function to load a file or multiple files and then export and override the valid variables into environment variables. -*/ +// OverLoad is a function to load a file or multiple files and then export and override the valid variables into environment variables. func OverLoad(filenames ...string) error { return loadenv(true, filenames...) } -/* -Must is wrapper function that will panic when supplied function returns an error. -*/ +// Must is wrapper function that will panic when supplied function returns an error. func Must(fn func(filenames ...string) error, filenames ...string) { if err := fn(filenames...); err != nil { panic(err.Error()) } } -/* -Apply is a function to load an io Reader then export the valid variables into environment variables if they do not exist. 
-*/ +// Apply is a function to load an io Reader then export the valid variables into environment variables if they do not exist. func Apply(r io.Reader) error { return parset(r, false) } -/* -OverApply is a function to load an io Reader then export and override the valid variables into environment variables. -*/ +// OverApply is a function to load an io Reader then export and override the valid variables into environment variables. func OverApply(r io.Reader) error { return parset(r, true) } @@ -72,11 +76,10 @@ func loadenv(override bool, filenames ...string) error { } err = parset(f, override) + f.Close() if err != nil { return err } - - f.Close() } return nil @@ -84,7 +87,7 @@ // parse and set :) func parset(r io.Reader, override bool) error { - env, err := StrictParse(r) + env, err := strictParse(r, override) if err != nil { return err } @@ -110,7 +113,7 @@ func setenv(key, val string, override bool) { // It expands the value of a variable from the environment variable but does not set the value to the environment itself. // This function is skipping any invalid lines and only processing the valid one. func Parse(r io.Reader) Env { - env, _ := StrictParse(r) + env, _ := strictParse(r, false) return env } @@ -118,71 +121,226 @@ // It expands the value of a variable from the environment variable but does not set the value to the environment itself. // This function is returning an error if there are any invalid lines. func StrictParse(r io.Reader) (Env, error) { - env := make(Env) - scanner := bufio.NewScanner(r) + return strictParse(r, false) +} - i := 1 - bom := string([]byte{239, 187, 191}) +// Read is a function to parse a file line by line and returns the valid Env key/value pair of valid variables. +// It expands the value of a variable from the environment variable but does not set the value to the environment itself. +// This function is skipping any invalid lines and only processing the valid one. +func Read(filename string) (Env, error) { + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + return strictParse(f, false) +} + +// Unmarshal reads a string line by line and returns the valid Env key/value pair of valid variables. +// It expands the value of a variable from the environment variable but does not set the value to the environment itself. +// This function is returning an error if there are any invalid lines. +func Unmarshal(str string) (Env, error) { + return strictParse(strings.NewReader(str), false) +} + +// Marshal outputs the given environment as an env file. +// Variables will be sorted by name.
+func Marshal(env Env) (string, error) { + lines := make([]string, 0, len(env)) + for k, v := range env { + if d, err := strconv.Atoi(v); err == nil { + lines = append(lines, fmt.Sprintf(`%s=%d`, k, d)) + } else { + lines = append(lines, fmt.Sprintf(`%s=%q`, k, v)) + } + } + sort.Strings(lines) + return strings.Join(lines, "\n"), nil +} + +// Write serializes the given environment and writes it to a file +func Write(env Env, filename string) error { + content, err := Marshal(env) + if err != nil { + return err + } + // ensure the path exists + if err := os.MkdirAll(filepath.Dir(filename), 0o775); err != nil { + return err + } + // create or truncate the file + file, err := os.Create(filename) + if err != nil { + return err + } + defer file.Close() + _, err = file.WriteString(content + "\n") + if err != nil { + return err + } + + return file.Sync() +} + +// splitLines is a valid SplitFunc for a bufio.Scanner. It will split lines on CR ('\r'), LF ('\n') or CRLF (any of the three sequences). +// If a CR is immediately followed by a LF, it is treated as a CRLF (one single line break). +func splitLines(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, bufio.ErrFinalToken + } + + idx := bytes.IndexAny(data, "\r\n") + switch { + case atEOF && idx < 0: + return len(data), data, bufio.ErrFinalToken + + case idx < 0: + return 0, nil, nil + } + + // consume CR or LF + eol := idx + 1 + // detect CRLF + if len(data) > eol && data[eol-1] == '\r' && data[eol] == '\n' { + eol++ + } + + return eol, data[:idx], nil +} + +func strictParse(r io.Reader, override bool) (Env, error) { + env := make(Env) + + buf := new(bytes.Buffer) + tee := io.TeeReader(r, buf) + + // There can be a maximum of 3 BOM bytes. + bomByteBuffer := make([]byte, 3) + _, err := tee.Read(bomByteBuffer) + if err != nil && err != io.EOF { + return env, err + } + + z := io.MultiReader(buf, r) + + // We choose a different scanner depending on file encoding.
+ var scanner *bufio.Scanner + + if bytes.HasPrefix(bomByteBuffer, bomUTF8) { + scanner = bufio.NewScanner(transform.NewReader(z, unicode.UTF8BOM.NewDecoder())) + } else if bytes.HasPrefix(bomByteBuffer, bomUTF16LE) { + scanner = bufio.NewScanner(transform.NewReader(z, unicode.UTF16(unicode.LittleEndian, unicode.ExpectBOM).NewDecoder())) + } else if bytes.HasPrefix(bomByteBuffer, bomUTF16BE) { + scanner = bufio.NewScanner(transform.NewReader(z, unicode.UTF16(unicode.BigEndian, unicode.ExpectBOM).NewDecoder())) + } else { + scanner = bufio.NewScanner(z) + } + + scanner.Split(splitLines) for scanner.Scan() { - line := scanner.Text() - - if i == 1 { - line = strings.TrimPrefix(line, bom) + if err := scanner.Err(); err != nil { + return env, err } - i++ + line := strings.TrimSpace(scanner.Text()) + if line == "" || line[0] == '#' { + continue + } - err := parseLine(line, env) + quote := "" + // look for the delimiter character + idx := strings.Index(line, "=") + if idx == -1 { + idx = strings.Index(line, ":") + } + // look for a quote character + if idx > 0 && idx < len(line)-1 { + val := strings.TrimSpace(line[idx+1:]) + if val[0] == '"' || val[0] == '\'' { + quote = val[:1] + // look for the closing quote character within the same line + idx = strings.LastIndex(strings.TrimSpace(val[1:]), quote) + if idx >= 0 && val[idx] != '\\' { + quote = "" + } + } + } + // look for the closing quote character + for quote != "" && scanner.Scan() { + l := scanner.Text() + line += "\n" + l + idx := strings.LastIndex(l, quote) + if idx > 0 && l[idx-1] == '\\' { + // found a matching quote character but it's escaped + continue + } + if idx >= 0 { + // found a matching quote + quote = "" + } + } + + if quote != "" { + return env, fmt.Errorf("missing quotes") + } + + err := parseLine(line, env, override) if err != nil { return env, err } } - return env, nil + return env, scanner.Err() } -func parseLine(s string, env Env) error { - rl := regexp.MustCompile(linePattern) - rm := rl.FindStringSubmatch(s) +var ( + lineRgx = regexp.MustCompile(linePattern) + unescapeRgx = regexp.MustCompile(`\\([^$])`) + varRgx = regexp.MustCompile(variablePattern) +) + +func parseLine(s string, env Env, override bool) error { + rm := lineRgx.FindStringSubmatch(s) if len(rm) == 0 { return checkFormat(s, env) } - key := rm[1] - val := rm[2] + key := strings.TrimSpace(rm[1]) + val := strings.TrimSpace(rm[2]) - // determine if string has quote prefix - hdq := strings.HasPrefix(val, `"`) + var hsq, hdq bool - // determine if string has single quote prefix - hsq := strings.HasPrefix(val, `'`) + // check if the value is quoted + if l := len(val); l >= 2 { + l -= 1 + // has double quotes + hdq = val[0] == '"' && val[l] == '"' + // has single quotes + hsq = val[0] == '\'' && val[l] == '\'' - // trim whitespace - val = strings.Trim(val, " ") - - // remove quotes '' or "" - rq := regexp.MustCompile(`\A(['"])(.*)(['"])\z`) - val = rq.ReplaceAllString(val, "$2") + // remove quotes '' or "" + if hsq || hdq { + val = val[1:l] + } + } if hdq { - val = strings.Replace(val, `\n`, "\n", -1) - val = strings.Replace(val, `\r`, "\r", -1) + val = strings.ReplaceAll(val, `\n`, "\n") + val = strings.ReplaceAll(val, `\r`, "\r") // Unescape all characters except $ so variables can be escaped properly - re := regexp.MustCompile(`\\([^$])`) - val = re.ReplaceAllString(val, "$1") + val = unescapeRgx.ReplaceAllString(val, "$1") } - rv := regexp.MustCompile(variablePattern) - fv := func(s string) string { - return varReplacement(s, hsq, env) + if !hsq { + fv :=
func(s string) string { + return varReplacement(s, hsq, env, override) + } + val = varRgx.ReplaceAllStringFunc(val, fv) } - val = rv.ReplaceAllStringFunc(val, fv) - val = parseVal(val, env) - env[key] = val return nil } @@ -201,18 +359,23 @@ func parseExport(st string, env Env) error { return nil } -func varReplacement(s string, hsq bool, env Env) string { - if strings.HasPrefix(s, "\\") { - return strings.TrimPrefix(s, "\\") +var varNameRgx = regexp.MustCompile(`(\$)(\{?([A-Z0-9_]+)\}?)`) + +func varReplacement(s string, hsq bool, env Env, override bool) string { + if s == "" { + return s + } + + if s[0] == '\\' { + // the dollar sign is escaped + return s[1:] } if hsq { return s } - sn := `(\$)(\{?([A-Z0-9_]+)\}?)` - rn := regexp.MustCompile(sn) - mn := rn.FindStringSubmatch(s) + mn := varNameRgx.FindStringSubmatch(s) if len(mn) == 0 { return s @@ -220,18 +383,21 @@ func varReplacement(s string, hsq bool, env Env) string { v := mn[3] - replace, ok := env[v] - if !ok { - replace = os.Getenv(v) + if replace, ok := os.LookupEnv(v); ok && !override { + return replace } - return replace + if replace, ok := env[v]; ok { + return replace + } + + return os.Getenv(v) } func checkFormat(s string, env Env) error { st := strings.TrimSpace(s) - if (st == "") || strings.HasPrefix(st, "#") { + if st == "" || st[0] == '#' { return nil } @@ -241,25 +407,3 @@ func checkFormat(s string, env Env) error { return fmt.Errorf("line `%s` doesn't match format", s) } - -func parseVal(val string, env Env) string { - if strings.Contains(val, "=") { - if !(val == "\n" || val == "\r") { - kv := strings.Split(val, "\n") - - if len(kv) == 1 { - kv = strings.Split(val, "\r") - } - - if len(kv) > 1 { - val = kv[0] - - for i := 1; i < len(kv); i++ { - parseLine(kv[i], env) - } - } - } - } - - return val -} diff --git a/vendor/github.com/xanzy/ssh-agent/BUILD.bazel b/vendor/github.com/xanzy/ssh-agent/BUILD.bazel index 53c25345..2cccf020 100644 --- a/vendor/github.com/xanzy/ssh-agent/BUILD.bazel +++ b/vendor/github.com/xanzy/ssh-agent/BUILD.bazel @@ -51,7 +51,9 @@ go_library( "//vendor/golang.org/x/crypto/ssh/agent", ], "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/github.com/Microsoft/go-winio", "//vendor/golang.org/x/crypto/ssh/agent", + "//vendor/golang.org/x/sys/windows", ], "//conditions:default": [], }), diff --git a/vendor/github.com/xanzy/ssh-agent/README.md b/vendor/github.com/xanzy/ssh-agent/README.md index d93af40a..e2dfcedc 100644 --- a/vendor/github.com/xanzy/ssh-agent/README.md +++ b/vendor/github.com/xanzy/ssh-agent/README.md @@ -16,7 +16,7 @@ If you have an issue: report it on the [issue tracker](https://github.com/xanzy/ ## Author -Sander van Harmelen () +Sander van Harmelen () ## License diff --git a/vendor/github.com/xanzy/ssh-agent/pageant_windows.go b/vendor/github.com/xanzy/ssh-agent/pageant_windows.go index 62956079..1608e54c 100644 --- a/vendor/github.com/xanzy/ssh-agent/pageant_windows.go +++ b/vendor/github.com/xanzy/ssh-agent/pageant_windows.go @@ -17,6 +17,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
// +//go:build windows // +build windows package sshagent @@ -31,6 +32,8 @@ import ( "sync" "syscall" "unsafe" + + "golang.org/x/sys/windows" ) // Maximum size of message can be sent to pageant @@ -59,19 +62,19 @@ type copyData struct { var ( lock sync.Mutex - winFindWindow = winAPI("user32.dll", "FindWindowW") - winGetCurrentThreadID = winAPI("kernel32.dll", "GetCurrentThreadId") - winSendMessage = winAPI("user32.dll", "SendMessageW") + user32dll = windows.NewLazySystemDLL("user32.dll") + winFindWindow = winAPI(user32dll, "FindWindowW") + winSendMessage = winAPI(user32dll, "SendMessageW") + + kernel32dll = windows.NewLazySystemDLL("kernel32.dll") + winGetCurrentThreadID = winAPI(kernel32dll, "GetCurrentThreadId") ) -func winAPI(dllName, funcName string) func(...uintptr) (uintptr, uintptr, error) { - proc := syscall.MustLoadDLL(dllName).MustFindProc(funcName) +func winAPI(dll *windows.LazyDLL, funcName string) func(...uintptr) (uintptr, uintptr, error) { + proc := dll.NewProc(funcName) return func(a ...uintptr) (uintptr, uintptr, error) { return proc.Call(a...) } } -// Available returns true if Pageant is running -func Available() bool { return pageantWindow() != 0 } - // Query sends message msg to Pageant and returns response or error. // 'msg' is raw agent request with length prefix // Response is raw agent response with length prefix diff --git a/vendor/github.com/xanzy/ssh-agent/sshagent.go b/vendor/github.com/xanzy/ssh-agent/sshagent.go index 259fea2b..4a4ee30c 100644 --- a/vendor/github.com/xanzy/ssh-agent/sshagent.go +++ b/vendor/github.com/xanzy/ssh-agent/sshagent.go @@ -14,6 +14,7 @@ // limitations under the License. // +//go:build !windows // +build !windows package sshagent diff --git a/vendor/github.com/xanzy/ssh-agent/sshagent_windows.go b/vendor/github.com/xanzy/ssh-agent/sshagent_windows.go index c46710e8..175d1619 100644 --- a/vendor/github.com/xanzy/ssh-agent/sshagent_windows.go +++ b/vendor/github.com/xanzy/ssh-agent/sshagent_windows.go @@ -17,6 +17,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
// +//go:build windows // +build windows package sshagent @@ -27,17 +28,40 @@ import ( "net" "sync" + "github.com/Microsoft/go-winio" "golang.org/x/crypto/ssh/agent" ) +const ( + sshAgentPipe = `\\.\pipe\openssh-ssh-agent` +) + +// Available returns true if Pageant is running +func Available() bool { + if pageantWindow() != 0 { + return true + } + conn, err := winio.DialPipe(sshAgentPipe, nil) + if err != nil { + return false + } + conn.Close() + return true +} + // New returns a new agent.Agent and the (custom) connection it uses // to communicate with a running pagent.exe instance (see README.md) func New() (agent.Agent, net.Conn, error) { - if !Available() { - return nil, nil, errors.New("SSH agent requested but Pageant not running") + if pageantWindow() != 0 { + return agent.NewClient(&conn{}), nil, nil } - - return agent.NewClient(&conn{}), nil, nil + conn, err := winio.DialPipe(sshAgentPipe, nil) + if err != nil { + return nil, nil, errors.New( + "SSH agent requested, but could not detect Pageant or Windows native SSH agent", + ) + } + return agent.NewClient(conn), nil, nil } type conn struct { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/BUILD.bazel b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/BUILD.bazel index b51778b1..7b249a36 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/BUILD.bazel +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/BUILD.bazel @@ -26,7 +26,7 @@ go_library( "//vendor/go.opentelemetry.io/otel/propagation", "//vendor/go.opentelemetry.io/otel/semconv/v1.17.0:v1_17_0", "//vendor/go.opentelemetry.io/otel/trace", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//metadata", "@org_golang_google_grpc//peer", diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go index 06282ce7..ab091cf6 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go @@ -1,20 +1,11 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" import ( + "google.golang.org/grpc/stats" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" @@ -31,18 +22,26 @@ const ( GRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ) -// Filter is a predicate used to determine whether a given request in -// interceptor info should be traced. 
A Filter must return true if +// InterceptorFilter is a predicate used to determine whether a given request in +// interceptor info should be instrumented. An InterceptorFilter must return true if // the request should be traced. -type Filter func(*InterceptorInfo) bool +// +// Deprecated: Use stats handlers instead. +type InterceptorFilter func(*InterceptorInfo) bool + +// Filter is a predicate used to determine whether a given request +// should be instrumented by the attached RPC tag info. +// A Filter must return true if the request should be instrumented. +type Filter func(*stats.RPCTagInfo) bool // config is a group of options for this instrumentation. type config struct { - Filter Filter - Propagators propagation.TextMapPropagator - TracerProvider trace.TracerProvider - MeterProvider metric.MeterProvider - SpanStartOptions []trace.SpanStartOption + Filter Filter + InterceptorFilter InterceptorFilter + Propagators propagation.TextMapPropagator + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider + SpanStartOptions []trace.SpanStartOption ReceivedEvent bool SentEvent bool @@ -163,15 +162,30 @@ func (o tracerProviderOption) apply(c *config) { // WithInterceptorFilter returns an Option to use the request filter. // // Deprecated: Use stats handlers instead. -func WithInterceptorFilter(f Filter) Option { +func WithInterceptorFilter(f InterceptorFilter) Option { return interceptorFilterOption{f: f} } type interceptorFilterOption struct { - f Filter + f InterceptorFilter } func (o interceptorFilterOption) apply(c *config) { + if o.f != nil { + c.InterceptorFilter = o.f + } +} + +// WithFilter returns an Option to use the request filter. +func WithFilter(f Filter) Option { + return filterOption{f: f} +} + +type filterOption struct { + f Filter +} + +func (o filterOption) apply(c *config) { if o.f != nil { c.Filter = o.f } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go index 958dcd87..b8b836b0 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package otelgrpc is the instrumentation library for [google.golang.org/grpc].
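The new Filter type above hooks into the stats-handler path rather than the deprecated interceptors, so it sees a *stats.RPCTagInfo instead of interceptor info. A sketch of client wiring under that API; the health-check exclusion, target handling, and insecure credentials are assumptions for illustration, not from this diff:

```go
package example

import (
	"strings"

	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/stats"
)

// newInstrumentedClient wires the stats-handler path that replaces the
// deprecated interceptor filters. Skipping health checks is an assumed policy.
func newInstrumentedClient(target string) (*grpc.ClientConn, error) {
	filter := func(info *stats.RPCTagInfo) bool {
		return !strings.HasPrefix(info.FullMethodName, "/grpc.health.v1.Health/")
	}

	return grpc.NewClient(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(otelgrpc.NewClientHandler(otelgrpc.WithFilter(filter))),
	)
}
```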
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go index 3b487a93..7d5ed058 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" @@ -18,6 +7,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md import ( "context" + "errors" "io" "net" "strconv" @@ -59,7 +49,7 @@ var ( ) // UnaryClientInterceptor returns a grpc.UnaryClientInterceptor suitable -// for use in a grpc.Dial call. +// for use in a grpc.NewClient call. // // Deprecated: Use [NewClientHandler] instead. func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { @@ -81,7 +71,7 @@ func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { Method: method, Type: UnaryClient, } - if cfg.Filter != nil && !cfg.Filter(i) { + if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { return invoker(ctx, method, req, reply, cc, callOpts...) } @@ -147,7 +137,7 @@ func (w *clientStream) RecvMsg(m interface{}) error { if err == nil && !w.desc.ServerStreams { w.endSpan(nil) - } else if err == io.EOF { + } else if errors.Is(err, io.EOF) { w.endSpan(nil) } else if err != nil { w.endSpan(err) @@ -196,7 +186,7 @@ func (w *clientStream) CloseSend() error { return err } -func wrapClientStream(ctx context.Context, s grpc.ClientStream, desc *grpc.StreamDesc, span trace.Span, cfg *config) *clientStream { +func wrapClientStream(s grpc.ClientStream, desc *grpc.StreamDesc, span trace.Span, cfg *config) *clientStream { return &clientStream{ ClientStream: s, span: span, @@ -219,7 +209,7 @@ func (w *clientStream) endSpan(err error) { } // StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable -// for use in a grpc.Dial call. +// for use in a grpc.NewClient call. // // Deprecated: Use [NewClientHandler] instead. func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { @@ -241,7 +231,7 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { Method: method, Type: StreamClient, } - if cfg.Filter != nil && !cfg.Filter(i) { + if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { return streamer(ctx, desc, cc, method, callOpts...) 
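Since the interceptor entry points in this file are all deprecated in favor of stats handlers, the server side reduces to a single option. A minimal sketch, assuming otelgrpc.NewServerHandler as the documented counterpart of NewClientHandler in this version:

```go
package example

import (
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"google.golang.org/grpc"
)

// newInstrumentedServer shows the stats-handler replacement for the
// deprecated unary and stream server interceptors.
func newInstrumentedServer() *grpc.Server {
	return grpc.NewServer(
		grpc.StatsHandler(otelgrpc.NewServerHandler()),
	)
}
```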
} @@ -270,7 +260,7 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { span.End() return s, err } - stream := wrapClientStream(ctx, s, desc, span, cfg) + stream := wrapClientStream(s, desc, span, cfg) return stream, nil } } @@ -296,7 +286,7 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { UnaryServerInfo: info, Type: UnaryServer, } - if cfg.Filter != nil && !cfg.Filter(i) { + if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { return handler(ctx, req) } @@ -344,7 +334,7 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { elapsedTime := float64(time.Since(before)) / float64(time.Millisecond) metricAttrs = append(metricAttrs, grpcStatusCodeAttr) - cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...)) + cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) return resp, err } @@ -422,7 +412,7 @@ func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { StreamServerInfo: info, Type: StreamServer, } - if cfg.Filter != nil && !cfg.Filter(i) { + if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { return handler(srv, wrapServerStream(ctx, ss, cfg)) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go index f6116946..b62f7cd7 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go index cf32a9e9..bef07b7a 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go index f585fb6a..3aa37915 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go index b65fab30..409c621b 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go index 73d2b8b6..201867a8 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" @@ -38,6 +27,7 @@ type gRPCContext struct { messagesReceived int64 messagesSent int64 metricAttrs []attribute.KeyValue + record bool } type serverHandler struct { @@ -77,6 +67,10 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont gctx := gRPCContext{ metricAttrs: attrs, + record: true, + } + if h.config.Filter != nil { + gctx.record = h.config.Filter(info) } return context.WithValue(ctx, gRPCContextKey{}, &gctx) } @@ -113,6 +107,10 @@ func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont gctx := gRPCContext{ metricAttrs: attrs, + record: true, + } + if h.config.Filter != nil { + gctx.record = h.config.Filter(info) } return inject(context.WithValue(ctx, gRPCContextKey{}, &gctx), h.config.Propagators) @@ -141,6 +139,9 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext) if gctx != nil { + if !gctx.record { + return + } metricAttrs = make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1) metricAttrs = append(metricAttrs, gctx.metricAttrs...) } @@ -150,7 +151,7 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool case *stats.InPayload: if gctx != nil { messageId = atomic.AddInt64(&gctx.messagesReceived, 1) - c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) + c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) } if c.ReceivedEvent { @@ -166,7 +167,7 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool case *stats.OutPayload: if gctx != nil { messageId = atomic.AddInt64(&gctx.messagesSent, 1) - c.rpcResponseSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) + c.rpcResponseSize.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) } if c.SentEvent { @@ -203,14 +204,17 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool span.End() metricAttrs = append(metricAttrs, rpcStatusAttr) + // Allocate vararg slice once. + recordOpts := []metric.RecordOption{metric.WithAttributeSet(attribute.NewSet(metricAttrs...))} // Use floating point division here for higher precision (instead of Millisecond method). + // Measure right before calling Record() to capture as much elapsed time as possible. elapsedTime := float64(rs.EndTime.Sub(rs.BeginTime)) / float64(time.Millisecond) - c.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...)) + c.rpcDuration.Record(ctx, elapsedTime, recordOpts...) if gctx != nil { - c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), metric.WithAttributes(metricAttrs...)) - c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), metric.WithAttributes(metricAttrs...)) + c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), recordOpts...) 
+ c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), recordOpts...) } default: return diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index d633c4be..a15d06cb 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -1,22 +1,11 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" // Version is the current release version of the gRPC instrumentation. func Version() string { - return "0.49.0" + return "0.53.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/BUILD.bazel b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/BUILD.bazel index 0bbc3723..ac95c218 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/BUILD.bazel +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/BUILD.bazel @@ -18,6 +18,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/felixge/httpsnoop", + "//vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv", "//vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil", "//vendor/go.opentelemetry.io/otel", "//vendor/go.opentelemetry.io/otel/attribute", diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index 92b8cf73..6aae83bf 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -23,7 +12,7 @@ import ( ) // DefaultClient is the default Client and is used by Get, Head, Post and PostForm. 
-// Please be careful of intitialization order - for example, if you change +// Please be careful of initialization order - for example, if you change // the global propagator, the DefaultClient might still be using the old one. var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go index cabf645a..214acaf5 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index a1b5b5e5..f0a9bb9e 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -111,7 +100,7 @@ func WithPublicEndpoint() Option { }) } -// WithPublicEndpointFn runs with every request, and allows conditionnally +// WithPublicEndpointFn runs with every request, and allows conditionally // configuring the Handler to link the span with an incoming span context. If // this option is not provided or returns false, then the association is a // child association instead of a link. diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go index 38c7f01c..56b24b98 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package otelhttp provides an http.Handler and functions that are intended // to be used to add tracing by wrapping existing handlers (with Handler) and diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index 1fc15019..d01bdccf 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -1,32 +1,20 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" import ( - "io" "net/http" "time" "github.com/felixge/httpsnoop" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" "go.opentelemetry.io/otel/trace" ) @@ -46,6 +34,7 @@ type middleware struct { publicEndpoint bool publicEndpointFn func(*http.Request) bool + traceSemconv semconv.HTTPServer requestBytesCounter metric.Int64Counter responseBytesCounter metric.Int64Counter serverLatencyMeasure metric.Float64Histogram @@ -67,6 +56,8 @@ func NewHandler(handler http.Handler, operation string, opts ...Option) http.Han func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler { h := middleware{ operation: operation, + + traceSemconv: semconv.NewHTTPServer(), } defaultOpts := []Option{ @@ -143,12 +134,9 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) opts := []trace.SpanStartOption{ - trace.WithAttributes(semconvutil.HTTPServerRequest(h.server, r)...), - } - if h.server != "" { - hostAttr := semconv.NetHostName(h.server) - opts = append(opts, trace.WithAttributes(hostAttr)) + trace.WithAttributes(h.traceSemconv.RequestTraceAttrs(h.server, r)...), } + opts = append(opts, h.spanStartOptions...) 
if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) { opts = append(opts, trace.WithNewRoot()) @@ -217,23 +205,36 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { return rww.WriteHeader }, + Flush: func(httpsnoop.FlushFunc) httpsnoop.FlushFunc { + return rww.Flush + }, }) - labeler := &Labeler{} - ctx = injectLabeler(ctx, labeler) + labeler, found := LabelerFromContext(ctx) + if !found { + ctx = ContextWithLabeler(ctx, labeler) + } next.ServeHTTP(w, r.WithContext(ctx)) - setAfterServeAttributes(span, bw.read.Load(), rww.written, rww.statusCode, bw.err, rww.err) + span.SetStatus(semconv.ServerStatus(rww.statusCode)) + span.SetAttributes(h.traceSemconv.ResponseTraceAttrs(semconv.ResponseTelemetry{ + StatusCode: rww.statusCode, + ReadBytes: bw.read.Load(), + ReadError: bw.err, + WriteBytes: rww.written, + WriteError: rww.err, + })...) // Add metrics attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...) if rww.statusCode > 0 { attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) } - o := metric.WithAttributes(attributes...) - h.requestBytesCounter.Add(ctx, bw.read.Load(), o) - h.responseBytesCounter.Add(ctx, rww.written, o) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + addOpts := []metric.AddOption{o} // Allocate vararg slice once. + h.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...) + h.responseBytesCounter.Add(ctx, rww.written, addOpts...) // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) @@ -241,37 +242,11 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http h.serverLatencyMeasure.Record(ctx, elapsedTime, o) } -func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, rerr, werr error) { - attributes := []attribute.KeyValue{} - - // TODO: Consider adding an event after each read and write, possibly as an - // option (defaulting to off), so as to not create needlessly verbose spans. - if read > 0 { - attributes = append(attributes, ReadBytesKey.Int64(read)) - } - if rerr != nil && rerr != io.EOF { - attributes = append(attributes, ReadErrorKey.String(rerr.Error())) - } - if wrote > 0 { - attributes = append(attributes, WroteBytesKey.Int64(wrote)) - } - if statusCode > 0 { - attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) - } - span.SetStatus(semconvutil.HTTPServerStatus(statusCode)) - - if werr != nil && werr != io.EOF { - attributes = append(attributes, WriteErrorKey.String(werr.Error())) - } - span.SetAttributes(attributes...) -} - // WithRouteTag annotates spans and metrics with the provided route name // with HTTP route attribute. 
func WithRouteTag(route string, h http.Handler) http.Handler { + attr := semconv.NewHTTPServer().Route(route) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - attr := semconv.HTTPRouteKey.String(route) - span := trace.SpanFromContext(r.Context()) span.SetAttributes(attr) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/BUILD.bazel b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/BUILD.bazel new file mode 100644 index 00000000..fdff91e6 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/BUILD.bazel @@ -0,0 +1,21 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "semconv", + srcs = [ + "env.go", + "util.go", + "v1.20.0.go", + "v1.24.0.go", + ], + importmap = "peridot.resf.org/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv", + importpath = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv", + visibility = ["//vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp:__subpackages__"], + deps = [ + "//vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil", + "//vendor/go.opentelemetry.io/otel/attribute", + "//vendor/go.opentelemetry.io/otel/codes", + "//vendor/go.opentelemetry.io/otel/semconv/v1.20.0:v1_20_0", + "//vendor/go.opentelemetry.io/otel/semconv/v1.24.0:v1_24_0", + ], +) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go new file mode 100644 index 00000000..3ec0ad00 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -0,0 +1,82 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "fmt" + "net/http" + "os" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" +) + +type ResponseTelemetry struct { + StatusCode int + ReadBytes int64 + ReadError error + WriteBytes int64 + WriteError error +} + +type HTTPServer struct { + duplicate bool +} + +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + if s.duplicate { + return append(oldHTTPServer{}.RequestTraceAttrs(server, req), newHTTPServer{}.RequestTraceAttrs(server, req)...) 
+ } + return oldHTTPServer{}.RequestTraceAttrs(server, req) +} + +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. +func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + if s.duplicate { + return append(oldHTTPServer{}.ResponseTraceAttrs(resp), newHTTPServer{}.ResponseTraceAttrs(resp)...) + } + return oldHTTPServer{}.ResponseTraceAttrs(resp) +} + +// Route returns the attribute for the route. +func (s HTTPServer) Route(route string) attribute.KeyValue { + return oldHTTPServer{}.Route(route) +} + +func NewHTTPServer() HTTPServer { + env := strings.ToLower(os.Getenv("OTEL_HTTP_CLIENT_COMPATIBILITY_MODE")) + return HTTPServer{duplicate: env == "http/dup"} +} + +// ServerStatus returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. +func ServerStatus(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 500 { + return codes.Error, "" + } + return codes.Unset, "" +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go new file mode 100644 index 00000000..e7f29376 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -0,0 +1,91 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "net" + "net/http" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0" +) + +// splitHostPort splits a network address hostport of the form "host", +// "host%zone", "[host]", "[host%zone]", "host:port", "host%zone:port", +// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and +// port. +// +// An empty host is returned if it is not provided or unparsable. A negative +// port is returned if it is not provided or unparsable. +func splitHostPort(hostport string) (host string, port int) { + port = -1 + + if strings.HasPrefix(hostport, "[") { + addrEnd := strings.LastIndex(hostport, "]") + if addrEnd < 0 { + // Invalid hostport.
+ return + } + if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 { + host = hostport[1:addrEnd] + return + } + } else { + if i := strings.LastIndex(hostport, ":"); i < 0 { + host = hostport + return + } + } + + host, pStr, err := net.SplitHostPort(hostport) + if err != nil { + return + } + + p, err := strconv.ParseUint(pStr, 10, 16) + if err != nil { + return + } + return host, int(p) +} + +func requiredHTTPPort(https bool, port int) int { // nolint:revive + if https { + if port > 0 && port != 443 { + return port + } + } else { + if port > 0 && port != 80 { + return port + } + } + return -1 +} + +func serverClientIP(xForwardedFor string) string { + if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + xForwardedFor = xForwardedFor[:idx] + } + return xForwardedFor +} + +func netProtocol(proto string) (name string, version string) { + name, version, _ = strings.Cut(proto, "/") + name = strings.ToLower(name) + return name, version +} + +var methodLookup = map[string]attribute.KeyValue{ + http.MethodConnect: semconvNew.HTTPRequestMethodConnect, + http.MethodDelete: semconvNew.HTTPRequestMethodDelete, + http.MethodGet: semconvNew.HTTPRequestMethodGet, + http.MethodHead: semconvNew.HTTPRequestMethodHead, + http.MethodOptions: semconvNew.HTTPRequestMethodOptions, + http.MethodPatch: semconvNew.HTTPRequestMethodPatch, + http.MethodPost: semconvNew.HTTPRequestMethodPost, + http.MethodPut: semconvNew.HTTPRequestMethodPut, + http.MethodTrace: semconvNew.HTTPRequestMethodTrace, +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go new file mode 100644 index 00000000..c3e838aa --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -0,0 +1,74 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "errors" + "io" + "net/http" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.20.0" +) + +type oldHTTPServer struct{} + +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (o oldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + return semconvutil.HTTPServerRequest(server, req) +} + +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. 
+// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. +func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + attributes := []attribute.KeyValue{} + + if resp.ReadBytes > 0 { + attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes))) + } + if resp.ReadError != nil && !errors.Is(resp.ReadError, io.EOF) { + // This is not in the semantic conventions, but is historically provided + attributes = append(attributes, attribute.String("http.read_error", resp.ReadError.Error())) + } + if resp.WriteBytes > 0 { + attributes = append(attributes, semconv.HTTPResponseContentLength(int(resp.WriteBytes))) + } + if resp.StatusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(resp.StatusCode)) + } + if resp.WriteError != nil && !errors.Is(resp.WriteError, io.EOF) { + // This is not in the semantic conventions, but is historically provided + attributes = append(attributes, attribute.String("http.write_error", resp.WriteError.Error())) + } + + return attributes +} + +// Route returns the attribute for the route. +func (o oldHTTPServer) Route(route string) attribute.KeyValue { + return semconv.HTTPRoute(route) +} + +// HTTPStatusCode returns the attribute for the HTTP status code. +// This is a temporary function needed by metrics. This will be removed when MetricsRequest is added. +func HTTPStatusCode(status int) attribute.KeyValue { + return semconv.HTTPStatusCode(status) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go new file mode 100644 index 00000000..0c5d4c46 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go @@ -0,0 +1,197 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "net/http" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0" +) + +type newHTTPServer struct{} + +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + count := 3 // ServerAddress, Method, Scheme + + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name.
+ host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + count++ + } + + method, methodOriginal := n.method(req.Method) + if methodOriginal != (attribute.KeyValue{}) { + count++ + } + + scheme := n.scheme(req.TLS != nil) + + if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port"; this will not be a + // file-path that would be interpreted with a sock family. + count++ + if peerPort > 0 { + count++ + } + } + + useragent := req.UserAgent() + if useragent != "" { + count++ + } + + clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) + if clientIP != "" { + count++ + } + + if req.URL != nil && req.URL.Path != "" { + count++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + count++ + } + if protoVersion != "" { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + attrs = append(attrs, + semconvNew.ServerAddress(host), + method, + scheme, + ) + + if hostPort > 0 { + attrs = append(attrs, semconvNew.ServerPort(hostPort)) + } + if methodOriginal != (attribute.KeyValue{}) { + attrs = append(attrs, methodOriginal) + } + + if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port"; this will not be a + // file-path that would be interpreted with a sock family. + attrs = append(attrs, semconvNew.NetworkPeerAddress(peer)) + if peerPort > 0 { + attrs = append(attrs, semconvNew.NetworkPeerPort(peerPort)) + } + } + + if useragent := req.UserAgent(); useragent != "" { + attrs = append(attrs, semconvNew.UserAgentOriginal(useragent)) + } + + if clientIP != "" { + attrs = append(attrs, semconvNew.ClientAddress(clientIP)) + } + + if req.URL != nil && req.URL.Path != "" { + attrs = append(attrs, semconvNew.URLPath(req.URL.Path)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + return attrs +} + +func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconvNew.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconvNew.HTTPRequestMethodGet, orig +} + +func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconvNew.URLScheme("https") + } + return semconvNew.URLScheme("http") +} + +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted.
+func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + var count int + + if resp.ReadBytes > 0 { + count++ + } + if resp.WriteBytes > 0 { + count++ + } + if resp.StatusCode > 0 { + count++ + } + + attributes := make([]attribute.KeyValue, 0, count) + + if resp.ReadBytes > 0 { + attributes = append(attributes, + semconvNew.HTTPRequestBodySize(int(resp.ReadBytes)), + ) + } + if resp.WriteBytes > 0 { + attributes = append(attributes, + semconvNew.HTTPResponseBodySize(int(resp.WriteBytes)), + ) + } + if resp.StatusCode > 0 { + attributes = append(attributes, + semconvNew.HTTPResponseStatusCode(resp.StatusCode), + ) + } + + return attributes +} + +// Route returns the attribute for the route. +func (n newHTTPServer) Route(route string) attribute.KeyValue { + return semconvNew.HTTPRoute(route) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go index edf4ce3d..7aa5f99e 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go index 0efd5261..a73bb06e 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go @@ -2,18 +2,7 @@ // source: internal/shared/semconvutil/httpconv.go.tmpl // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go index d3a06e0c..a9a9226b 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -2,17 +2,7 @@ // source: internal/shared/semconvutil/netconv.go.tmpl // Copyright The OpenTelemetry Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" @@ -102,7 +92,7 @@ func (c *netConv) Host(address string) []attribute.KeyValue { attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.HostName(h)) if p > 0 { - attrs = append(attrs, c.HostPort(int(p))) + attrs = append(attrs, c.HostPort(p)) } return attrs } @@ -148,7 +138,7 @@ func (c *netConv) Peer(address string) []attribute.KeyValue { attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.PeerName(h)) if p > 0 { - attrs = append(attrs, c.PeerPort(int(p))) + attrs = append(attrs, c.PeerPort(p)) } return attrs } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go index 26a51a18..ea504e39 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -48,8 +37,12 @@ type labelerContextKeyType int const lablelerContextKey labelerContextKeyType = 0 -func injectLabeler(ctx context.Context, l *Labeler) context.Context { - return context.WithValue(ctx, lablelerContextKey, l) +// ContextWithLabeler returns a new context with the provided Labeler instance. +// Attributes added to the specified labeler will be injected into metrics +// emitted by the instrumentation. 
Only one labeler can be injected into the +// context. Injecting it multiple times will override the previous calls. +func ContextWithLabeler(parent context.Context, l *Labeler) context.Context { + return context.WithValue(parent, lablelerContextKey, l) } // LabelerFromContext retrieves a Labeler instance from the provided context if diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index 43e937a6..0d3cb2e4 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -22,15 +11,14 @@ import ( "sync/atomic" "time" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" + "go.opentelemetry.io/otel/trace" ) // Transport implements the http.RoundTripper interface and wraps @@ -148,8 +136,10 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx)) } - labeler := &Labeler{} - ctx = injectLabeler(ctx, labeler) + labeler, found := LabelerFromContext(ctx) + if !found { + ctx = ContextWithLabeler(ctx, labeler) + } r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the original request. @@ -181,11 +171,12 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { if res.StatusCode > 0 { metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) } - o := metric.WithAttributes(metricAttrs...) - t.requestBytesCounter.Add(ctx, bw.read.Load(), o) + o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) + addOpts := []metric.AddOption{o} // Allocate vararg slice once. + t.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...) // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { - t.responseBytesCounter.Add(ctx, n, o) + t.responseBytesCounter.Add(ctx, n, addOpts...)
} // traces diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 35254e88..b0957f28 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -1,22 +1,11 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.49.0" + return "0.53.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go index 2852ec97..948f8406 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -98,3 +87,13 @@ func (w *respWriterWrapper) WriteHeader(statusCode int) { } w.ResponseWriter.WriteHeader(statusCode) } + +func (w *respWriterWrapper) Flush() { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + + if f, ok := w.ResponseWriter.(http.Flusher); ok { + f.Flush() + } +} diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore index 120b63a9..6bf3abc4 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellignore +++ b/vendor/go.opentelemetry.io/otel/.codespellignore @@ -5,3 +5,5 @@ collison consequentially ans nam +valu +thirdparty diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc index 4afbb1fb..e2cb3ea9 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellrc +++ b/vendor/go.opentelemetry.io/otel/.codespellrc @@ -5,6 +5,6 @@ check-filenames = check-hidden = ignore-words = .codespellignore interactive = 1 -skip = .git,go.mod,go.sum,semconv,venv,.tools +skip = .git,go.mod,go.sum,go.work,go.work.sum,semconv,venv,.tools uri-ignore-words-list = * write = diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules deleted file mode 100644 index 38a1f569..00000000 --- a/vendor/go.opentelemetry.io/otel/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "opentelemetry-proto"] - path = exporters/otlp/internal/opentelemetry-proto - url = https://github.com/open-telemetry/opentelemetry-proto diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index a62511f3..6d9c8b64 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -11,6 +11,7 @@ linters: enable: - depguard - errcheck + - errorlint - godot - gofumpt - goimports @@ -21,8 +22,11 @@ linters: - misspell - revive - staticcheck + - tenv - typecheck + - unconvert - unused + - unparam issues: # Maximum issues count per one linter. @@ -124,6 +128,8 @@ linters-settings: - "**/example/**/*.go" - "**/trace/*.go" - "**/trace/**/*.go" + - "**/log/*.go" + - "**/log/**/*.go" deny: - pkg: "go.opentelemetry.io/otel/internal$" desc: Do not use cross-module internal packages. diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 98f2d204..c01e6998 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,159 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +## [1.28.0/0.50.0/0.4.0] 2024-07-02 + +### Added + +- The `IsEmpty` method is added to the `Instrument` type in `go.opentelemetry.io/otel/sdk/metric`. + This method is used to check if an `Instrument` instance is a zero-value. (#5431) +- Store and provide the emitted `context.Context` in `ScopeRecords` of `go.opentelemetry.io/otel/sdk/log/logtest`. (#5468) +- The `go.opentelemetry.io/otel/semconv/v1.26.0` package. + The package contains semantic conventions from the `v1.26.0` version of the OpenTelemetry Semantic Conventions. (#5476) +- The `AssertRecordEqual` method to `go.opentelemetry.io/otel/log/logtest` to allow comparison of two log records in tests. (#5499) +- The `WithHeaders` option to `go.opentelemetry.io/otel/exporters/zipkin` to allow configuring custom http headers while exporting spans. 
(#5530) + +### Changed + +- `Tracer.Start` in `go.opentelemetry.io/otel/trace/noop` no longer allocates a span for empty span context. (#5457) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/otel-collector`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/zipkin`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#5490) + - The exporter no longer exports the deprecated "otel.library.name" or "otel.library.version" attributes. +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/resource`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/trace`. (#5490) +- `SimpleProcessor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` no longer allocates a slice, which makes zero-allocation log processing possible with `SimpleProcessor`. (#5493) +- Use non-generic functions in the `Start` method of `"go.opentelemetry.io/otel/sdk/trace".Tracer` to reduce memory allocation. (#5497) +- `service.instance.id` is populated for a `Resource` created with `"go.opentelemetry.io/otel/sdk/resource".Default` with a default value when `OTEL_GO_X_RESOURCE` is set. (#5520) +- Improve performance of metric instruments in `go.opentelemetry.io/otel/sdk/metric` by removing unnecessary calls to `time.Now`. (#5545) + +### Fixed + +- Log a warning to the OpenTelemetry internal logger when a `Record` in `go.opentelemetry.io/otel/sdk/log` drops an attribute due to a limit being reached. (#5376) +- Identify the `Tracer` returned from the global `TracerProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426) +- Identify the `Meter` returned from the global `MeterProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426) +- Log a warning to the OpenTelemetry internal logger when a `Span` in `go.opentelemetry.io/otel/sdk/trace` drops an attribute, event, or link due to a limit being reached. (#5434) +- Document instrument name requirements in `go.opentelemetry.io/otel/metric`. (#5435) +- Prevent random number generation data-race for experimental rand exemplars in `go.opentelemetry.io/otel/sdk/metric`. (#5456) +- Fix counting number of dropped attributes of `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5464) +- Fix panic in baggage creation when a member contains `0x80` char in key or value. (#5494) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5508) +- Retry trace and span ID generation if it generated an invalid one in `go.opentelemetry.io/otel/sdk/trace`. (#5514) +- Fix stale timestamps reported by the last-value aggregation. (#5517) +- Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521) +- Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/otel/attribute` by reducing the number of allocations. (#5549) + +## [1.27.0/0.49.0/0.3.0] 2024-05-21 + +### Added + +- Add example for `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`.
(#5242) +- Add `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest` to facilitate testing exporter and processor implementations. (#5258) +- Add `RecordFactory` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing bridge implementations. (#5263) +- The count of dropped records from the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is logged. (#5276) +- Add metrics in the `otel-collector` example. (#5283) +- Add the synchronous gauge instrument to `go.opentelemetry.io/otel/metric` (a usage sketch follows below). (#5304) + - An `int64` or `float64` synchronous gauge instrument can now be created from a `Meter`. + - All implementations of the API (`go.opentelemetry.io/otel/metric/noop`, `go.opentelemetry.io/otel/sdk/metric`) are updated to support this instrument. +- Add logs to `go.opentelemetry.io/otel/example/dice`. (#5349) + +### Changed + +- The `Shutdown` method of `Exporter` in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` ignores the context cancellation and always returns `nil`. (#5189) +- The `ForceFlush` and `Shutdown` methods of the exporter returned by `New` in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` ignore the context cancellation and always return `nil`. (#5189) +- Apply the value length limits to `Record` attributes in `go.opentelemetry.io/otel/sdk/log`. (#5230) +- De-duplicate map attributes added to a `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5230) +- `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` won't print timestamps when the `WithoutTimestamps` option is set. (#5241) +- The `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` exporter no longer prints the `AttributeValueLengthLimit` and `AttributeCountLimit` fields; instead, it prints the `DroppedAttributes` field. (#5272) +- Improved performance in the `Stringer` implementation of `go.opentelemetry.io/otel/baggage.Member` by reducing the number of allocations. (#5286) +- Set the start time for last-value aggregates in `go.opentelemetry.io/otel/sdk/metric`. (#5305) +- The `Span` in `go.opentelemetry.io/otel/sdk/trace` will record links without span context if either a non-empty `TraceState` or attributes are provided. (#5315) +- Upgrade all dependencies of `go.opentelemetry.io/otel/semconv/v1.24.0` to `go.opentelemetry.io/otel/semconv/v1.25.0`. (#5374) + +### Fixed + +- Comparison of unordered maps for `go.opentelemetry.io/otel/log.KeyValue` and `go.opentelemetry.io/otel/log.Value`. (#5306) +- Fix the empty output of `go.opentelemetry.io/otel/log.Value` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5311) +- Split the behavior of `Recorder` in `go.opentelemetry.io/otel/log/logtest` so it behaves as a `LoggerProvider` only. (#5365) +- Fix the wrong package name in the error message when parsing an endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5371) +- Identify the `Logger` returned from the global `LoggerProvider` in `go.opentelemetry.io/otel/log/global` with its schema URL. (#5375) + +## [1.26.0/0.48.0/0.2.0-alpha] 2024-04-24 + +### Added + +- Add `Recorder` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing the log bridge implementations. (#5134) +- Add span flags to OTLP spans and links exported by `go.opentelemetry.io/otel/exporters/otlp/otlptrace`. (#5194) +- Make the initial alpha release of `go.opentelemetry.io/otel/sdk/log`. + This new module contains the Go implementation of the OpenTelemetry Logs SDK. + This module is unstable and breaking changes may be introduced.
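The 1.27.0 entry above adds a synchronous gauge to the metric API. A minimal usage sketch, assuming the global meter provider is configured elsewhere; the meter and instrument names here are illustrative, not from the source:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
)

func main() {
	// A synchronous gauge can be created directly from a Meter since
	// metric API v1.27.0; no observable callback registration is needed.
	meter := otel.Meter("example/queue")
	gauge, err := meter.Int64Gauge("queue.depth")
	if err != nil {
		panic(err)
	}
	// Record the current value at the moment of measurement.
	gauge.Record(context.Background(), 42)
}
```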
+ See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240) +- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. + This new module contains an OTLP exporter that transmits log telemetry using HTTP. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240) +- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. + This new module contains an exporter that prints log records to STDOUT. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240) +- The `go.opentelemetry.io/otel/semconv/v1.25.0` package. + The package contains semantic conventions from the `v1.25.0` version of the OpenTelemetry Semantic Conventions. (#5254) + +### Changed + +- Update `go.opentelemetry.io/proto/otlp` from v1.1.0 to v1.2.0. (#5177) +- Improve performance of baggage member character validation in `go.opentelemetry.io/otel/baggage`. (#5214) +- The `otel-collector` example now uses Docker Compose to bring up services instead of Kubernetes. (#5244) + +### Fixed + +- Slice attribute values in `go.opentelemetry.io/otel/attribute` are now emitted as their JSON representation. (#5159) + +## [1.25.0/0.47.0/0.0.8/0.1.0-alpha] 2024-04-05 + +### Added + +- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4906) +- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4906) +- Add `AddLink` method to the `Span` interface in `go.opentelemetry.io/otel/trace`. (#5032) +- The `Enabled` method is added to the `Logger` interface in `go.opentelemetry.io/otel/log`. + This method is used to notify users whether a log record will be emitted. (#5071) +- Add `SeverityUndefined` `const` to `go.opentelemetry.io/otel/log`. + This value represents an unset severity level. (#5072) +- Add `Empty` function in `go.opentelemetry.io/otel/log` to return a `KeyValue` for an empty value. (#5076) +- Add `go.opentelemetry.io/otel/log/global` to manage the global `LoggerProvider`. + This package is provided with the anticipation that all functionality will be migrated to `go.opentelemetry.io/otel` when `go.opentelemetry.io/otel/log` stabilizes. + At that point, users will be required to migrate their code, and this package will be deprecated and then removed. (#5085) +- Add support for `Summary` metrics in the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` exporters. (#5100) +- Add `otel.scope.name` and `otel.scope.version` tags to spans exported by `go.opentelemetry.io/otel/exporters/zipkin`. (#5108) +- Add support for `AddLink` to `go.opentelemetry.io/otel/bridge/opencensus`. (#5116) +- Add `String` method to `Value` and `KeyValue` in `go.opentelemetry.io/otel/log`. (#5117) +- Add Exemplar support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5111) +- Add metric semantic conventions to `go.opentelemetry.io/otel/semconv/v1.24.0`. Future `semconv` packages will include metric semantic conventions as well. (#4528) + +### Changed + +- `SpanFromContext` and `SpanContextFromContext` in `go.opentelemetry.io/otel/trace` no longer make a heap allocation when the passed context has no span.
(#5049) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otltracegrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now create a gRPC client in idle mode and with "dns" as the default resolver using [`grpc.NewClient`](https://pkg.go.dev/google.golang.org/grpc#NewClient). (#5151) + Because of that, `WithDialOption` ignores [`grpc.WithBlock`](https://pkg.go.dev/google.golang.org/grpc#WithBlock), [`grpc.WithTimeout`](https://pkg.go.dev/google.golang.org/grpc#WithTimeout), and [`grpc.WithReturnConnectionError`](https://pkg.go.dev/google.golang.org/grpc#WithReturnConnectionError). + Notice that [`grpc.DialContext`](https://pkg.go.dev/google.golang.org/grpc#DialContext), which was used before, is now deprecated. + +### Fixed + +- Clarify the documentation about equivalence guarantees for the `Set` and `Distinct` types in `go.opentelemetry.io/otel/attribute`. (#5027) +- Prevent default `ErrorHandler` self-delegation. (#5137) +- Update all dependencies to address [GO-2024-2687]. (#5139) + +### Removed + +- Drop support for [Go 1.20]. (#4967) + +### Deprecated + +- Deprecate `go.opentelemetry.io/otel/attribute.Sortable` type. (#4734) +- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortable` function. (#4734) +- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortableFiltered` function. (#4734) + ## [1.24.0/0.46.0/0.0.1-alpha] 2024-02-23 This release is the last to support [Go 1.20]. @@ -22,6 +175,7 @@ The next release will require at least [Go 1.21]. This module includes OpenTelemetry Go's implementation of the Logs Bridge API. This module is in an alpha state; it is subject to breaking changes. See our [versioning policy](./VERSIONING.md) for more info. (#4961) +- ARM64 platform to the compatibility testing suite. (#4994) ### Fixed @@ -138,7 +292,7 @@ See our [versioning policy](VERSIONING.md) for more information about these stab ## [1.20.0/0.43.0] 2023-11-10 -This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. +This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementers need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. ### Added @@ -170,15 +324,15 @@ This release brings a breaking change for custom trace API implementations. Some - `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583) - The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type. This extends the `TracerProvider` interface and is a breaking change for any existing implementation. - Implementors need to update their implementations based on what they want the default behavior of the interface to be. + Implementers need to update their implementations based on what they want the default behavior of the interface to be.
See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) - The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type. This extends the `Tracer` interface and is a breaking change for any existing implementation. - Implementors need to update their implementations based on what they want the default behavior of the interface to be. + Implementers need to update their implementations based on what they want the default behavior of the interface to be. See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) - The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type. This extends the `Span` interface and is a breaking change for any existing implementation. - Implementors need to update their implementations based on what they want the default behavior of the interface to be. + Implementers need to update their implementations based on what they want the default behavior of the interface to be. See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) - `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) - `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) @@ -814,7 +968,7 @@ The next release will require at least [Go 1.19]. - Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340) - `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436) - Re-enabled Attribute Filters in the Metric SDK. (#3396) -- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggragation. (#3408) +- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408) - Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432) - Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440) - Prevent duplicate Prometheus description, unit, and type. (#3469) @@ -1859,7 +2013,7 @@ with major version 0. - `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369) - Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369) - Unify the endpoint API related to the OTel exporter. (#1401) -- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435) +- Optimize metric histogram aggregator to reuse its slice of buckets. (#1435) - Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (#1430) - Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434) - `SamplingResult` is now passed a `TraceState` from the parent `SpanContext` (#1432) @@ -2849,7 +3003,11 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project.
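The repeated 1.20.0 notes above describe the `embedded` types pattern for custom trace API implementations. A minimal sketch of one such implementation; the `Provider` type and its noop fallback are hypothetical, not from the source:

```go
package customtrace

import (
	"go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/embedded"
	"go.opentelemetry.io/otel/trace/noop"
)

// Provider embeds embedded.TracerProvider, so it keeps compiling if the
// trace API interface grows; any method this sketch does not override
// would panic at runtime through the nil embedded interface, which is the
// "choose your default behavior" decision the changelog refers to.
type Provider struct {
	embedded.TracerProvider
}

// Tracer is the one method this sketch implements itself, delegating to a
// no-op tracer as placeholder behavior.
func (Provider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
	return noop.NewTracerProvider().Tracer(name, opts...)
}

// Compile-time check that Provider satisfies the extended interface.
var _ trace.TracerProvider = Provider{}
```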
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.24.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.28.0...HEAD +[1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 +[1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 +[1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0 +[1.25.0/0.47.0/0.0.8/0.1.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.25.0 [1.24.0/0.46.0/0.0.1-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.24.0 [1.23.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.1 [1.23.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0 @@ -2937,3 +3095,5 @@ It contains api and sdk for trace and meter. [metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric [metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric [trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace + +[GO-2024-2687]: https://pkg.go.dev/vuln/GO-2024-2687 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 31d336d9..20255493 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -12,6 +12,6 @@ # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu +* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu -CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole \ No newline at end of file +CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index c9f2bac5..b86572f5 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -201,6 +201,16 @@ You can install and run a "local Go Doc site" in the following way: [`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric) is an example of a very well-documented package. +### README files + +Each (non-internal, non-test, non-documentation) package must contain a +`README.md` file containing at least a title, and a `pkg.go.dev` badge. + +The README should not be a repetition of Go doc comments. + +You can verify the presence of all README files with the `make verify-readmes` +command. + ## Style Guide One of the primary goals of this project is that it is actually used by @@ -560,6 +570,9 @@ functionality should be added, each one will need their own super-set interfaces and will duplicate the pattern. For this reason, the simple targeted interface that defines the specific functionality should be preferred. +See also: +[Keeping Your Modules Compatible: Working with interfaces](https://go.dev/blog/module-compatibility#working-with-interfaces). + ### Testing The tests should never leak goroutines. @@ -615,17 +628,15 @@ should be canceled. 
### Approvers -- [Evan Torrie](https://github.com/evantorrie), Verizon Media -- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [Chester Cheung](https://github.com/hanyuancheung), Tencent -- [Damien Mathieu](https://github.com/dmathieu), Elastic -- [Anthony Mirabella](https://github.com/Aneurysm9), AWS ### Maintainers -- [David Ashpole](https://github.com/dashpole), Google - [Aaron Clawson](https://github.com/MadVikingGod), LightStep +- [Damien Mathieu](https://github.com/dmathieu), Elastic +- [David Ashpole](https://github.com/dashpole), Google - [Robert Pająk](https://github.com/pellared), Splunk +- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [Tyler Yahn](https://github.com/MrAlias), Splunk ### Emeritus @@ -633,6 +644,8 @@ should be canceled. - [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb - [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep - [Josh MacDonald](https://github.com/jmacd), LightStep +- [Anthony Mirabella](https://github.com/Aneurysm9), AWS +- [Evan Torrie](https://github.com/evantorrie), Yahoo ### Become an Approver or a Maintainer diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index 6de95219..f33619f7 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -1,16 +1,5 @@ # Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
+# SPDX-License-Identifier: Apache-2.0 TOOLS_MOD_DIR := ./internal/tools @@ -25,8 +14,8 @@ TIMEOUT = 60 .DEFAULT_GOAL := precommit .PHONY: precommit ci -precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix test-default -ci: generate dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage +precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default +ci: generate license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage # Tools @@ -34,7 +23,7 @@ TOOLS = $(CURDIR)/.tools $(TOOLS): @mkdir -p $@ -$(TOOLS)/%: | $(TOOLS) +$(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS) cd $(TOOLS_MOD_DIR) && \ $(GO) build -o $@ $(PACKAGE) @@ -50,9 +39,6 @@ $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink SEMCONVKIT = $(TOOLS)/semconvkit $(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit -DBOTCONF = $(TOOLS)/dbotconf -$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf - GOLANGCI_LINT = $(TOOLS)/golangci-lint $(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint @@ -81,7 +67,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker @@ -110,7 +96,7 @@ $(PYTOOLS): @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip" # Install python packages into the virtual environment. -$(PYTOOLS)/%: | $(PYTOOLS) +$(PYTOOLS)/%: $(PYTOOLS) @$(DOCKERPY) $(PIP) install -r requirements.txt CODESPELL = $(PYTOOLS)/codespell @@ -124,18 +110,18 @@ generate: go-generate vanity-import-fix .PHONY: go-generate go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%) go-generate/%: DIR=$* -go-generate/%: | $(STRINGER) $(GOTMPL) +go-generate/%: $(STRINGER) $(GOTMPL) @echo "$(GO) generate $(DIR)/..." \ && cd $(DIR) \ && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... .PHONY: vanity-import-fix -vanity-import-fix: | $(PORTO) +vanity-import-fix: $(PORTO) @$(PORTO) --include-internal -w . # Generate go.work file for local development. 
.PHONY: go-work -go-work: | $(CROSSLINK) +go-work: $(CROSSLINK) $(CROSSLINK) work --root=$(shell pwd) # Build @@ -178,7 +164,7 @@ test/%: COVERAGE_MODE = atomic COVERAGE_PROFILE = coverage.out .PHONY: test-coverage -test-coverage: | $(GOCOVMERGE) +test-coverage: $(GOCOVMERGE) @set -e; \ printf "" > coverage.txt; \ for dir in $(ALL_COVERAGE_MOD_DIRS); do \ @@ -209,23 +195,23 @@ golangci-lint-fix: ARGS=--fix golangci-lint-fix: golangci-lint golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%) golangci-lint/%: DIR=$* -golangci-lint/%: | $(GOLANGCI_LINT) +golangci-lint/%: $(GOLANGCI_LINT) @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \ && cd $(DIR) \ && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS) .PHONY: crosslink -crosslink: | $(CROSSLINK) +crosslink: $(CROSSLINK) @echo "Updating intra-repository dependencies in all go modules" \ && $(CROSSLINK) --root=$(shell pwd) --prune .PHONY: go-mod-tidy go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) go-mod-tidy/%: DIR=$* -go-mod-tidy/%: | crosslink +go-mod-tidy/%: crosslink @echo "$(GO) mod tidy in $(DIR)" \ && cd $(DIR) \ - && $(GO) mod tidy -compat=1.20 + && $(GO) mod tidy -compat=1.21 .PHONY: lint-modules lint-modules: go-mod-tidy @@ -234,23 +220,23 @@ lint-modules: go-mod-tidy lint: misspell lint-modules golangci-lint govulncheck .PHONY: vanity-import-check -vanity-import-check: | $(PORTO) +vanity-import-check: $(PORTO) @$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 ) .PHONY: misspell -misspell: | $(MISSPELL) +misspell: $(MISSPELL) @$(MISSPELL) -w $(ALL_DOCS) .PHONY: govulncheck govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%) govulncheck/%: DIR=$* -govulncheck/%: | $(GOVULNCHECK) +govulncheck/%: $(GOVULNCHECK) @echo "govulncheck ./... in $(DIR)" \ && cd $(DIR) \ && $(GOVULNCHECK) ./... .PHONY: codespell -codespell: | $(CODESPELL) +codespell: $(CODESPELL) @$(DOCKERPY) $(CODESPELL) .PHONY: license-check @@ -263,15 +249,6 @@ license-check: exit 1; \ fi -DEPENDABOT_CONFIG = .github/dependabot.yml -.PHONY: dependabot-check -dependabot-check: | $(DBOTCONF) - @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 ) - -.PHONY: dependabot-generate -dependabot-generate: | $(DBOTCONF) - @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG) - .PHONY: check-clean-work-tree check-clean-work-tree: @if ! git diff --quiet; then \ @@ -284,13 +261,11 @@ check-clean-work-tree: SEMCONVPKG ?= "semconv/" .PHONY: semconv-generate -semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT) +semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." 
--only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" .PHONY: gorelease @@ -302,17 +277,25 @@ gorelease/%:| $(GORELEASE) && $(GORELEASE) \ || echo "" +.PHONY: verify-mods +verify-mods: $(MULTIMOD) + $(MULTIMOD) verify + .PHONY: prerelease -prerelease: | $(MULTIMOD) +prerelease: verify-mods @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET} + $(MULTIMOD) prerelease -m ${MODSET} COMMIT ?= "HEAD" .PHONY: add-tags -add-tags: | $(MULTIMOD) +add-tags: verify-mods @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} + $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} .PHONY: lint-markdown -lint-markdown: +lint-markdown: docker run -v "$(CURDIR):$(WORKDIR)" avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md + +.PHONY: verify-readmes +verify-readmes: + ./verify_readmes.sh diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 7766259a..5a890931 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -15,7 +15,7 @@ It provides a set of APIs to directly measure performance and behavior of your s |---------|--------------------| | Traces | Stable | | Metrics | Stable | -| Logs | In development[^1] | +| Logs | Beta[^1] | Progress and status specific to this repository is tracked in our [project boards](https://github.com/open-telemetry/opentelemetry-go/projects) @@ -51,19 +51,16 @@ Currently, this project supports the following environments. |---------|------------|--------------| | Ubuntu | 1.22 | amd64 | | Ubuntu | 1.21 | amd64 | -| Ubuntu | 1.20 | amd64 | | Ubuntu | 1.22 | 386 | | Ubuntu | 1.21 | 386 | -| Ubuntu | 1.20 | 386 | +| Linux | 1.22 | arm64 | +| Linux | 1.21 | arm64 | | MacOS | 1.22 | amd64 | | MacOS | 1.21 | amd64 | -| MacOS | 1.20 | amd64 | | Windows | 1.22 | amd64 | | Windows | 1.21 | amd64 | -| Windows | 1.20 | amd64 | | Windows | 1.22 | 386 | | Windows | 1.21 | 386 | -| Windows | 1.20 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. @@ -100,12 +97,12 @@ export pipeline to send that telemetry to an observability platform. All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters). -| Exporter | Metrics | Traces | -|---------------------------------------|:-------:|:------:| -| [OTLP](./exporters/otlp/) | ✓ | ✓ | -| [Prometheus](./exporters/prometheus/) | ✓ | | -| [stdout](./exporters/stdout/) | ✓ | ✓ | -| [Zipkin](./exporters/zipkin/) | | ✓ | +| Exporter | Logs | Metrics | Traces | +|---------------------------------------|:----:|:-------:|:------:| +| [OTLP](./exporters/otlp/) | ✓ | ✓ | ✓ | +| [Prometheus](./exporters/prometheus/) | | ✓ | | +| [stdout](./exporters/stdout/) | ✓ | ✓ | ✓ | +| [Zipkin](./exporters/zipkin/) | | | ✓ | ## Contributing diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index d2691d0b..940f57f3 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -27,6 +27,12 @@ You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org You can check/report problems with `gorelease` [here](https://golang.org/issues/26420). 
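The exporter support matrix above pairs traces with the OTLP, stdout, and Zipkin exporters. A minimal wiring sketch for the stdout trace exporter, assuming default SDK settings; the meter-of-record names ("example", "hello") are illustrative:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()

	// Print finished spans to STDOUT, one of the trace-capable exporters
	// listed in the table above.
	exp, err := stdouttrace.New(stdouttrace.WithPrettyPrint())
	if err != nil {
		panic(err)
	}

	// Batch spans before export and flush them on shutdown.
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	defer func() { _ = tp.Shutdown(ctx) }()
	otel.SetTracerProvider(tp)

	_, span := otel.Tracer("example").Start(ctx, "hello")
	span.End()
}
```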
+## Verify changes for contrib repository + +If the changes in the main repository are going to affect the contrib repository, it is important to verify that the changes are compatible with the contrib repository. + +Follow [the steps](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md#verify-otel-changes) in the contrib repository to verify OTel changes. + ## Pre-Release First, decide which module sets will be released and update their versions diff --git a/vendor/go.opentelemetry.io/otel/attribute/README.md b/vendor/go.opentelemetry.io/otel/attribute/README.md new file mode 100644 index 00000000..5b3da8f1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/README.md @@ -0,0 +1,3 @@ +# Attribute + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/attribute)](https://pkg.go.dev/go.opentelemetry.io/otel/attribute) diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go index dafe7424..eef51ebc 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/doc.go +++ b/vendor/go.opentelemetry.io/otel/attribute/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package attribute provides key and value attributes. package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go index fe2bc576..318e42fc 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go index 638c213d..be9cd922 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/filter.go +++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go index 841b271f..f2ba89ce 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go +++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go index 0656a04e..d9a22c65 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/key.go +++ b/vendor/go.opentelemetry.io/otel/attribute/key.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go index 1ddf3ce0..3028f9a4 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/kv.go +++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" import ( + "cmp" "encoding/json" "reflect" + "slices" "sort" - "sync" ) type ( @@ -26,23 +16,33 @@ type ( // immutable set of attributes, with an internal cache for storing // attribute encodings. // - // This type supports the Equivalent method of comparison using values of - // type Distinct. + // This type will remain comparable for backwards compatibility. The + // equivalence of Sets across versions is not guaranteed to be stable. + // Prior versions may find two Sets to be equal or not when compared + // directly (i.e. ==), but subsequent versions may not. Users should use + // the Equals method to ensure stable equivalence checking. + // + // Users should also use the Distinct returned from Equivalent as a map key + // instead of a Set directly. In addition to that type providing guarantees + // on stable equivalence, it may also provide performance improvements. Set struct { equivalent Distinct } - // Distinct wraps a variable-size array of KeyValue, constructed with keys - // in sorted order. This can be used as a map key or for equality checking - // between Sets. + // Distinct is a unique identifier of a Set. + // + // Distinct is designed to ensure equivalence stability: comparisons + // will return the same value across versions. For this reason, Distinct + // should always be used as a map key instead of a Set. Distinct struct { iface interface{} } - // Sortable implements sort.Interface, used for sorting KeyValue. This is - // an exported type to support a memory optimization. A pointer to one of - // these is needed for the call to sort.Stable(), which the caller may - // provide in order to avoid an allocation. See NewSetWithSortable(). + // Sortable implements sort.Interface, used for sorting KeyValue. + // + // Deprecated: This type is no longer used. It was added as a performance + // optimization for Go < 1.21 that is no longer needed (Go < 1.21 is no + // longer supported by the module). Sortable []KeyValue ) @@ -56,12 +56,6 @@ var ( iface: [0]KeyValue{}, }, } - - // sortables is a pool of Sortables used to create Sets with a user does - // not provide one. - sortables = sync.Pool{ - New: func() interface{} { return new(Sortable) }, - } ) // EmptySet returns a reference to a Set with no elements. @@ -187,13 +181,7 @@ func empty() Set { // Except for empty sets, this method adds an additional allocation compared // with calls that include a Sortable.
func NewSet(kvs ...KeyValue) Set { - // Check for empty set. - if len(kvs) == 0 { - return empty() - } - srt := sortables.Get().(*Sortable) - s, _ := NewSetWithSortableFiltered(kvs, srt, nil) - sortables.Put(srt) + s, _ := NewSetWithFiltered(kvs, nil) return s } @@ -201,12 +189,10 @@ func NewSet(kvs ...KeyValue) Set { // NewSetWithSortableFiltered for more details. // // This call includes a Sortable option as a memory optimization. -func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set { - // Check for empty set. - if len(kvs) == 0 { - return empty() - } - s, _ := NewSetWithSortableFiltered(kvs, tmp, nil) +// +// Deprecated: Use [NewSet] instead. +func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set { + s, _ := NewSetWithFiltered(kvs, nil) return s } @@ -220,48 +206,12 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { if len(kvs) == 0 { return empty(), nil } - srt := sortables.Get().(*Sortable) - s, filtered := NewSetWithSortableFiltered(kvs, srt, filter) - sortables.Put(srt) - return s, filtered -} - -// NewSetWithSortableFiltered returns a new Set. -// -// Duplicate keys are eliminated by taking the last value. This -// re-orders the input slice so that unique last-values are contiguous -// at the end of the slice. -// -// This ensures the following: -// -// - Last-value-wins semantics -// - Caller sees the reordering, but doesn't lose values -// - Repeated call preserve last-value wins. -// -// Note that methods are defined on Set, although this returns Set. Callers -// can avoid memory allocations by: -// -// - allocating a Sortable for use as a temporary in this method -// - allocating a Set for storing the return value of this constructor. -// -// The result maintains a cache of encoded attributes, by attribute.EncoderID. -// This value should not be copied after its first use. -// -// The second []KeyValue return value is a list of attributes that were -// excluded by the Filter (if non-nil). -func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) { - // Check for empty set. - if len(kvs) == 0 { - return empty(), nil - } - - *tmp = kvs // Stable sort so the following de-duplication can implement // last-value-wins semantics. - sort.Stable(tmp) - - *tmp = nil + slices.SortStableFunc(kvs, func(a, b KeyValue) int { + return cmp.Compare(a.Key, b.Key) + }) position := len(kvs) - 1 offset := position - 1 @@ -289,6 +239,35 @@ func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (S return Set{equivalent: computeDistinct(kvs)}, nil } +// NewSetWithSortableFiltered returns a new Set. +// +// Duplicate keys are eliminated by taking the last value. This +// re-orders the input slice so that unique last-values are contiguous +// at the end of the slice. +// +// This ensures the following: +// +// - Last-value-wins semantics +// - Caller sees the reordering, but doesn't lose values +// - Repeated calls preserve last-value-wins. +// +// Note that methods are defined on Set, although this returns Set. Callers +// can avoid memory allocations by: +// +// - allocating a Sortable for use as a temporary in this method +// - allocating a Set for storing the return value of this constructor. +// +// The result maintains a cache of encoded attributes, by attribute.EncoderID. +// This value should not be copied after its first use. +// +// The second []KeyValue return value is a list of attributes that were +// excluded by the Filter (if non-nil). +// +// Deprecated: Use [NewSetWithFiltered] instead.
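The `Set` and `Distinct` doc comments above recommend keying maps by the `Distinct` returned from `Equivalent` rather than by a `Set`. A minimal sketch of that usage; the counter map is illustrative:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	counts := make(map[attribute.Distinct]int)

	// Two sets with the same attributes in different input order.
	s1 := attribute.NewSet(attribute.String("b", "2"), attribute.String("a", "1"))
	s2 := attribute.NewSet(attribute.String("a", "1"), attribute.String("b", "2"))

	counts[s1.Equivalent()]++
	counts[s2.Equivalent()]++ // same Distinct key: input order does not matter

	fmt.Println(len(counts))             // 1
	fmt.Println(counts[s1.Equivalent()]) // 2
}
```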
+func NewSetWithSortableFiltered(kvs []KeyValue, _ *Sortable, filter Filter) (Set, []KeyValue) { + return NewSetWithFiltered(kvs, filter) +} + // filteredToFront filters slice in-place using keep function. All KeyValues that need to // be removed are moved to the front. All KeyValues that need to be kept are // moved (in-order) to the back. The index for the first KeyValue to be kept is diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go index cb21dd5c..9ea0ecbb 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" @@ -242,15 +231,27 @@ func (v Value) Emit() string { case BOOL: return strconv.FormatBool(v.AsBool()) case INT64SLICE: - return fmt.Sprint(v.asInt64Slice()) + j, err := json.Marshal(v.asInt64Slice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asInt64Slice()) + } + return string(j) case INT64: return strconv.FormatInt(v.AsInt64(), 10) case FLOAT64SLICE: - return fmt.Sprint(v.asFloat64Slice()) + j, err := json.Marshal(v.asFloat64Slice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asFloat64Slice()) + } + return string(j) case FLOAT64: return fmt.Sprint(v.AsFloat64()) case STRINGSLICE: - return fmt.Sprint(v.asStringSlice()) + j, err := json.Marshal(v.asStringSlice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asStringSlice()) + } + return string(j) case STRING: return v.stringly default: diff --git a/vendor/go.opentelemetry.io/otel/baggage/README.md b/vendor/go.opentelemetry.io/otel/baggage/README.md new file mode 100644 index 00000000..7d798435 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/README.md @@ -0,0 +1,3 @@ +# Baggage + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/baggage)](https://pkg.go.dev/go.opentelemetry.io/otel/baggage) diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 7d27cf77..c40c896c 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
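The `Emit` hunk above switches slice attribute values to their JSON representation. A minimal sketch of the observable difference; the key and values are illustrative:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	kv := attribute.StringSlice("cmd", []string{"ls", "-l"})
	// With the change above this prints the JSON form ["ls","-l"];
	// previously fmt.Sprint produced [ls -l].
	fmt.Println(kv.Value.Emit())
}
```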
+// SPDX-License-Identifier: Apache-2.0 package baggage // import "go.opentelemetry.io/otel/baggage" @@ -19,6 +8,7 @@ import ( "fmt" "net/url" "strings" + "unicode/utf8" "go.opentelemetry.io/otel/internal/baggage" ) @@ -67,10 +57,10 @@ func NewKeyProperty(key string) (Property, error) { // NewKeyValueProperty returns a new Property for key with value. // // The passed key must be compliant with W3C Baggage specification. -// The passed value must be precent-encoded as defined in W3C Baggage specification. +// The passed value must be percent-encoded as defined in W3C Baggage specification. // // Notice: Consider using [NewKeyValuePropertyRaw] instead -// that does not require precent-encoding of the value. +// that does not require percent-encoding of the value. func NewKeyValueProperty(key, value string) (Property, error) { if !validateValue(value) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) @@ -232,13 +222,13 @@ type Member struct { hasData bool } -// NewMemberRaw returns a new Member from the passed arguments. +// NewMember returns a new Member from the passed arguments. // // The passed key must be compliant with W3C Baggage specification. -// The passed value must be precent-encoded as defined in W3C Baggage specification. +// The passed value must be percent-encoded as defined in W3C Baggage specification. // // Notice: Consider using [NewMemberRaw] instead -// that does not require precent-encoding of the value. +// that does not require percent-encoding of the value. func NewMember(key, value string, props ...Property) (Member, error) { if !validateValue(value) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) @@ -309,10 +299,10 @@ func parseMember(member string) (Member, error) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v) } - // Decode a precent-encoded value. + // Decode a percent-encoded value. value, err := url.PathUnescape(val) if err != nil { - return newInvalidMember(), fmt.Errorf("%w: %v", errInvalidValue, err) + return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err) } return Member{key: key, value: value, properties: props, hasData: true}, nil } @@ -345,9 +335,9 @@ func (m Member) String() string { // A key is just an ASCII string. A value is restricted to be // US-ASCII characters excluding CTLs, whitespace, // DQUOTE, comma, semicolon, and backslash. - s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, valueEscape(m.value)) + s := m.key + keyValueDelimiter + valueEscape(m.value) if len(m.properties) > 0 { - s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String()) + s += propertyDelimiter + m.properties.String() } return s } @@ -616,7 +606,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) { return } - // Decode a precent-encoded value. + // Decode a percent-encoded value. 
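The doc comments above distinguish `NewMember`, which expects a percent-encoded value, from `NewMemberRaw`, which takes the raw value. A minimal sketch; the key and value are illustrative:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// NewMemberRaw accepts the raw value; with NewMember the same value
	// would have to be passed as "hello%20world".
	m, err := baggage.NewMemberRaw("greeting", "hello world")
	if err != nil {
		panic(err)
	}
	b, err := baggage.New(m)
	if err != nil {
		panic(err)
	}
	ctx := baggage.ContextWithBaggage(context.Background(), b)
	fmt.Println(baggage.FromContext(ctx).Member("greeting").Value()) // hello world
}
```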
value, err := url.PathUnescape(s[valueStart:valueEnd]) if err != nil { return @@ -641,6 +631,95 @@ func skipSpace(s string, offset int) int { return i } +var safeKeyCharset = [utf8.RuneSelf]bool{ + // 0x23 to 0x27 + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + + // 0x30 to 0x39 + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + + // 0x41 to 0x5a + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'V': true, + 'W': true, + 'X': true, + 'Y': true, + 'Z': true, + + // 0x5e to 0x7a + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + + // remainder + '!': true, + '*': true, + '+': true, + '-': true, + '.': true, + '|': true, + '~': true, +} + func validateKey(s string) bool { if len(s) == 0 { return false @@ -656,17 +735,7 @@ func validateKey(s string) bool { } func validateKeyChar(c int32) bool { - return (c >= 0x23 && c <= 0x27) || - (c >= 0x30 && c <= 0x39) || - (c >= 0x41 && c <= 0x5a) || - (c >= 0x5e && c <= 0x7a) || - c == 0x21 || - c == 0x2a || - c == 0x2b || - c == 0x2d || - c == 0x2e || - c == 0x7c || - c == 0x7e + return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c] } func validateValue(s string) bool { @@ -679,12 +748,109 @@ func validateValue(s string) bool { return true } +var safeValueCharset = [utf8.RuneSelf]bool{ + '!': true, // 0x21 + + // 0x23 to 0x2b + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '(': true, + ')': true, + '*': true, + '+': true, + + // 0x2d to 0x3a + '-': true, + '.': true, + '/': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + ':': true, + + // 0x3c to 0x5b + '<': true, // 0x3C + '=': true, // 0x3D + '>': true, // 0x3E + '?': true, // 0x3F + '@': true, // 0x40 + 'A': true, // 0x41 + 'B': true, // 0x42 + 'C': true, // 0x43 + 'D': true, // 0x44 + 'E': true, // 0x45 + 'F': true, // 0x46 + 'G': true, // 0x47 + 'H': true, // 0x48 + 'I': true, // 0x49 + 'J': true, // 0x4A + 'K': true, // 0x4B + 'L': true, // 0x4C + 'M': true, // 0x4D + 'N': true, // 0x4E + 'O': true, // 0x4F + 'P': true, // 0x50 + 'Q': true, // 0x51 + 'R': true, // 0x52 + 'S': true, // 0x53 + 'T': true, // 0x54 + 'U': true, // 0x55 + 'V': true, // 0x56 + 'W': true, // 0x57 + 'X': true, // 0x58 + 'Y': true, // 0x59 + 'Z': true, // 0x5A + '[': true, // 0x5B + + // 0x5d to 0x7e + ']': true, // 0x5D + '^': true, // 0x5E + '_': true, // 0x5F + '`': true, // 0x60 + 'a': true, // 0x61 + 'b': true, // 0x62 + 'c': true, // 0x63 + 'd': true, // 0x64 + 'e': true, // 0x65 + 'f': true, // 0x66 + 'g': true, // 0x67 + 'h': true, // 0x68 + 'i': true, // 0x69 + 'j': true, // 0x6A + 'k': true, // 0x6B + 'l': true, // 0x6C + 'm': true, // 0x6D + 'n': true, // 0x6E + 'o': true, // 0x6F + 'p': true, // 0x70 + 'q': true, // 0x71 + 'r': true, // 0x72 + 's': true, // 0x73 + 't': true, // 0x74 + 'u': true, // 0x75 + 'v': true, // 0x76 + 'w': true, // 0x77 + 'x': 
true, // 0x78 + 'y': true, // 0x79 + 'z': true, // 0x7A + '{': true, // 0x7B + '|': true, // 0x7C + '}': true, // 0x7D + '~': true, // 0x7E +} + func validateValueChar(c int32) bool { - return c == 0x21 || - (c >= 0x23 && c <= 0x2b) || - (c >= 0x2d && c <= 0x3a) || - (c >= 0x3c && c <= 0x5b) || - (c >= 0x5d && c <= 0x7e) + return c >= 0 && c < int32(utf8.RuneSelf) && safeValueCharset[c] } // valueEscape escapes the string so it can be safely placed inside a baggage value, diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go index 24b34b75..a572461a 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/context.go +++ b/vendor/go.opentelemetry.io/otel/baggage/context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package baggage // import "go.opentelemetry.io/otel/baggage" diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go index 4545100d..b51d87ca 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/doc.go +++ b/vendor/go.opentelemetry.io/otel/baggage/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package baggage provides functionality for storing and retrieving diff --git a/vendor/go.opentelemetry.io/otel/codes/README.md b/vendor/go.opentelemetry.io/otel/codes/README.md new file mode 100644 index 00000000..24c52b38 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/codes/README.md @@ -0,0 +1,3 @@ +# Codes + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/codes)](https://pkg.go.dev/go.opentelemetry.io/otel/codes) diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index 587ebae4..df29d96a 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package codes // import "go.opentelemetry.io/otel/codes" diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go index 4e328fbb..ee8db448 100644 --- a/vendor/go.opentelemetry.io/otel/codes/doc.go +++ b/vendor/go.opentelemetry.io/otel/codes/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package codes defines the canonical error codes used by OpenTelemetry. diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go index 36d7c24e..441c5950 100644 --- a/vendor/go.opentelemetry.io/otel/doc.go +++ b/vendor/go.opentelemetry.io/otel/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package otel provides global access to the OpenTelemetry API. The subpackages of diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go index 72fad854..67414c71 100644 --- a/vendor/go.opentelemetry.io/otel/error_handler.go +++ b/vendor/go.opentelemetry.io/otel/error_handler.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh index 9a58fb1d..93e80ea3 100644 --- a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh +++ b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh @@ -1,18 +1,7 @@ #!/usr/bin/env bash # Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# SPDX-License-Identifier: Apache-2.0 set -euo pipefail diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go index 4115fe3b..07623b67 100644 --- a/vendor/go.opentelemetry.io/otel/handler.go +++ b/vendor/go.opentelemetry.io/otel/handler.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" @@ -18,12 +7,8 @@ import ( "go.opentelemetry.io/otel/internal/global" ) -var ( - // Compile-time check global.ErrDelegator implements ErrorHandler. - _ ErrorHandler = (*global.ErrDelegator)(nil) - // Compile-time check global.ErrLogger implements ErrorHandler. - _ ErrorHandler = (*global.ErrLogger)(nil) -) +// Compile-time check global.ErrDelegator implements ErrorHandler. +var _ ErrorHandler = (*global.ErrDelegator)(nil) // GetErrorHandler returns the global ErrorHandler instance. // @@ -44,5 +29,5 @@ func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() } // delegate errors to h. func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) } -// Handle is a convenience function for ErrorHandler().Handle(err). -func Handle(err error) { global.Handle(err) } +// Handle is a convenience function for GetErrorHandler().Handle(err). +func Handle(err error) { global.GetErrorHandler().Handle(err) } diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go index 622c3ee3..822d8479 100644 --- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package attribute provide several helper functions for some commonly used @@ -25,33 +14,33 @@ import ( // BoolSliceValue converts a bool slice into an array with same elements as slice. func BoolSliceValue(v []bool) interface{} { var zero bool - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // Int64SliceValue converts an int64 slice into an array with same elements as slice. func Int64SliceValue(v []int64) interface{} { var zero int64 - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // Float64SliceValue converts a float64 slice into an array with same elements as slice. func Float64SliceValue(v []float64) interface{} { var zero float64 - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // StringSliceValue converts a string slice into an array with same elements as slice. func StringSliceValue(v []string) interface{} { var zero string - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // AsBoolSlice converts a bool array into a slice into with same elements as array. diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go index b96e5408..b4f85f44 100644 --- a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
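The SliceValue helpers above now copy with reflect.Copy into the freshly built array instead of re-slicing it and type-asserting. A standalone sketch for one element type, assuming (as the surrounding package suggests) the goal is a comparable array value isolated from the caller's slice:

package main

import (
	"fmt"
	"reflect"
)

// int64SliceValue returns v's contents as a [len(v)]int64 array value.
func int64SliceValue(v []int64) interface{} {
	var zero int64
	// reflect.Copy accepts an addressable array destination directly, so no
	// intermediate slice header or interface conversion is needed.
	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
	reflect.Copy(cp, reflect.ValueOf(v))
	return cp.Interface()
}

func main() {
	arr := int64SliceValue([]int64{1, 2, 3})
	fmt.Printf("%T %v\n", arr, arr) // [3]int64 [1 2 3]
}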
+// SPDX-License-Identifier: Apache-2.0 /* Package baggage provides base types and functionality to store and retrieve diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go index 4469700d..3aea9c49 100644 --- a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package baggage // import "go.opentelemetry.io/otel/internal/baggage" diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go index f532f07e..4259f032 100644 --- a/vendor/go.opentelemetry.io/otel/internal/gen.go +++ b/vendor/go.opentelemetry.io/otel/internal/gen.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/otel/internal" diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go index 5e9b8304..c657ff8e 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/handler.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go @@ -1,38 +1,13 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" import ( "log" - "os" "sync/atomic" ) -var ( - // GlobalErrorHandler provides an ErrorHandler that can be used - // throughout an OpenTelemetry instrumented project. When a user - // specified ErrorHandler is registered (`SetErrorHandler`) all calls to - // `Handle` and will be delegated to the registered ErrorHandler. 
- GlobalErrorHandler = defaultErrorHandler() - - // Compile-time check that delegator implements ErrorHandler. - _ ErrorHandler = (*ErrDelegator)(nil) - // Compile-time check that errLogger implements ErrorHandler. - _ ErrorHandler = (*ErrLogger)(nil) -) - // ErrorHandler handles irremediable events. type ErrorHandler interface { // Handle handles any error deemed irremediable by an OpenTelemetry @@ -44,59 +19,18 @@ type ErrDelegator struct { delegate atomic.Pointer[ErrorHandler] } -func (d *ErrDelegator) Handle(err error) { - d.getDelegate().Handle(err) -} +// Compile-time check that delegator implements ErrorHandler. +var _ ErrorHandler = (*ErrDelegator)(nil) -func (d *ErrDelegator) getDelegate() ErrorHandler { - return *d.delegate.Load() +func (d *ErrDelegator) Handle(err error) { + if eh := d.delegate.Load(); eh != nil { + (*eh).Handle(err) + return + } + log.Print(err) } // setDelegate sets the ErrorHandler delegate. func (d *ErrDelegator) setDelegate(eh ErrorHandler) { d.delegate.Store(&eh) } - -func defaultErrorHandler() *ErrDelegator { - d := &ErrDelegator{} - d.setDelegate(&ErrLogger{l: log.New(os.Stderr, "", log.LstdFlags)}) - return d -} - -// ErrLogger logs errors if no delegate is set, otherwise they are delegated. -type ErrLogger struct { - l *log.Logger -} - -// Handle logs err if no delegate is set, otherwise it is delegated. -func (h *ErrLogger) Handle(err error) { - h.l.Print(err) -} - -// GetErrorHandler returns the global ErrorHandler instance. -// -// The default ErrorHandler instance returned will log all errors to STDERR -// until an override ErrorHandler is set with SetErrorHandler. All -// ErrorHandler returned prior to this will automatically forward errors to -// the set instance instead of logging. -// -// Subsequent calls to SetErrorHandler after the first will not forward errors -// to the new ErrorHandler for prior returned instances. -func GetErrorHandler() ErrorHandler { - return GlobalErrorHandler -} - -// SetErrorHandler sets the global ErrorHandler to h. -// -// The first time this is called all ErrorHandler previously returned from -// GetErrorHandler will send errors to h instead of the default logging -// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not -// delegate errors to h. -func SetErrorHandler(h ErrorHandler) { - GlobalErrorHandler.setDelegate(h) -} - -// Handle is a convenience function for ErrorHandler().Handle(err). -func Handle(err error) { - GetErrorHandler().Handle(err) -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go index ebb13c20..3a0cc42f 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
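ErrDelegator.Handle above now nil-checks the atomic pointer and falls back to log.Print itself, which is what lets the default ErrLogger type disappear. A minimal sketch of that delegation shape, with hypothetical names:

package main

import (
	"errors"
	"log"
	"sync/atomic"
)

type errorHandler interface{ Handle(error) }

type delegator struct {
	delegate atomic.Pointer[errorHandler]
}

// Handle forwards to the registered delegate when one exists; otherwise it
// logs, so no default handler has to be allocated up front.
func (d *delegator) Handle(err error) {
	if h := d.delegate.Load(); h != nil {
		(*h).Handle(err)
		return
	}
	log.Print(err)
}

func (d *delegator) setDelegate(h errorHandler) { d.delegate.Store(&h) }

type tagged struct{}

func (tagged) Handle(err error) { log.Print("tagged: ", err) }

func main() {
	var d delegator
	d.Handle(errors.New("no delegate yet, logged directly"))
	d.setDelegate(tagged{})
	d.Handle(errors.New("now routed through the delegate"))
}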
+// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -292,6 +281,32 @@ func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.Reco } } +type sfGauge struct { + embedded.Float64Gauge + + name string + opts []metric.Float64GaugeOption + + delegate atomic.Value // metric.Float64Gauge +} + +var _ metric.Float64Gauge = (*sfGauge)(nil) + +func (i *sfGauge) setDelegate(m metric.Meter) { + ctr, err := m.Float64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfGauge) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Gauge).Record(ctx, x, opts...) + } +} + type siCounter struct { embedded.Int64Counter @@ -369,3 +384,29 @@ func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.Record ctr.(metric.Int64Histogram).Record(ctx, x, opts...) } } + +type siGauge struct { + embedded.Int64Gauge + + name string + opts []metric.Int64GaugeOption + + delegate atomic.Value // metric.Int64Gauge +} + +var _ metric.Int64Gauge = (*siGauge)(nil) + +func (i *siGauge) setDelegate(m metric.Meter) { + ctr, err := m.Int64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siGauge) Record(ctx context.Context, x int64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Gauge).Record(ctx, x, opts...) + } +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go index c6f305a2..adbca7d3 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -23,17 +12,20 @@ import ( "github.com/go-logr/stdr" ) -// globalLogger is the logging interface used within the otel api and sdk provide details of the internals. +// globalLogger holds a reference to the [logr.Logger] used within +// go.opentelemetry.io/otel. // // The default logger uses stdr which is backed by the standard `log.Logger` // interface. This logger will only show messages at the Error Level. -var globalLogger atomic.Pointer[logr.Logger] +var globalLogger = func() *atomic.Pointer[logr.Logger] { + l := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) -func init() { - SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) -} + p := new(atomic.Pointer[logr.Logger]) + p.Store(&l) + return p +}() -// SetLogger overrides the globalLogger with l. +// SetLogger sets the global Logger to l. 
// // To see Warn messages use a logger with `l.V(1).Enabled() == true` // To see Info messages use a logger with `l.V(4).Enabled() == true` @@ -42,28 +34,29 @@ func SetLogger(l logr.Logger) { globalLogger.Store(&l) } -func getLogger() logr.Logger { +// GetLogger returns the global logger. +func GetLogger() logr.Logger { return *globalLogger.Load() } // Info prints messages about the general state of the API or SDK. // This should usually be less than 5 messages a minute. func Info(msg string, keysAndValues ...interface{}) { - getLogger().V(4).Info(msg, keysAndValues...) + GetLogger().V(4).Info(msg, keysAndValues...) } // Error prints messages about exceptional states of the API or SDK. func Error(err error, msg string, keysAndValues ...interface{}) { - getLogger().Error(err, msg, keysAndValues...) + GetLogger().Error(err, msg, keysAndValues...) } // Debug prints messages about all internal changes in the API or SDK. func Debug(msg string, keysAndValues ...interface{}) { - getLogger().V(8).Info(msg, keysAndValues...) + GetLogger().V(8).Info(msg, keysAndValues...) } // Warn prints messages about warnings in the API or SDK. // Not an error but is likely more important than an informational event. func Warn(msg string, keysAndValues ...interface{}) { - getLogger().V(1).Info(msg, keysAndValues...) + GetLogger().V(1).Info(msg, keysAndValues...) } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index 7ed61c0e..cfd1df9b 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -76,6 +65,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me key := il{ name: name, version: c.InstrumentationVersion(), + schema: c.SchemaURL(), } if p.meters == nil { @@ -175,6 +165,17 @@ func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOpti return i, nil } +func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Gauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Int64ObservableCounter(name, options...) 
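Int64Gauge here (and Float64Gauge in the next hunk) joins the meter's lazy-delegation scheme: a gauge requested before any SDK is installed is the siGauge/sfGauge placeholder above, and its Record calls are dropped until setDelegate wires in a real instrument. A usage sketch against the vendored API; the instrument name and unit are illustrative only:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example/queue")

	// With no MeterProvider registered yet, this returns a delegating
	// placeholder; Record below is a no-op until an SDK takes over.
	depth, err := meter.Int64Gauge("queue.depth",
		metric.WithDescription("Current number of queued items."),
		metric.WithUnit("{item}"),
	)
	if err != nil {
		otel.Handle(err)
		return
	}
	depth.Record(context.Background(), 17)
}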
@@ -241,6 +242,17 @@ func (m *meter) Float64Histogram(name string, options ...metric.Float64Histogram return i, nil } +func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64Gauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Float64ObservableCounter(name, options...) diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go index 06bac35c..38560ff9 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go index 386c8bfd..204ea142 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/state.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -25,6 +14,10 @@ import ( ) type ( + errorHandlerHolder struct { + eh ErrorHandler + } + tracerProviderHolder struct { tp trace.TracerProvider } @@ -39,15 +32,59 @@ type ( ) var ( + globalErrorHandler = defaultErrorHandler() globalTracer = defaultTracerValue() globalPropagators = defaultPropagatorsValue() globalMeterProvider = defaultMeterProvider() + delegateErrorHandlerOnce sync.Once delegateTraceOnce sync.Once delegateTextMapPropagatorOnce sync.Once delegateMeterOnce sync.Once ) +// GetErrorHandler returns the global ErrorHandler instance. +// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. 
All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. +func GetErrorHandler() ErrorHandler { + return globalErrorHandler.Load().(errorHandlerHolder).eh +} + +// SetErrorHandler sets the global ErrorHandler to h. +// +// The first time this is called all ErrorHandler previously returned from +// GetErrorHandler will send errors to h instead of the default logging +// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not +// delegate errors to h. +func SetErrorHandler(h ErrorHandler) { + current := GetErrorHandler() + + if _, cOk := current.(*ErrDelegator); cOk { + if _, ehOk := h.(*ErrDelegator); ehOk && current == h { + // Do not assign to the delegate of the default ErrDelegator to be + // itself. + Error( + errors.New("no ErrorHandler delegate configured"), + "ErrorHandler remains its current value.", + ) + return + } + } + + delegateErrorHandlerOnce.Do(func() { + if def, ok := current.(*ErrDelegator); ok { + def.setDelegate(h) + } + }) + globalErrorHandler.Store(errorHandlerHolder{eh: h}) +} + // TracerProvider is the internal implementation for global.TracerProvider. func TracerProvider() trace.TracerProvider { return globalTracer.Load().(tracerProviderHolder).tp @@ -137,6 +174,12 @@ func SetMeterProvider(mp metric.MeterProvider) { globalMeterProvider.Store(meterProviderHolder{mp: mp}) } +func defaultErrorHandler() *atomic.Value { + v := &atomic.Value{} + v.Store(errorHandlerHolder{eh: &ErrDelegator{}}) + return v +} + func defaultTracerValue() *atomic.Value { v := &atomic.Value{} v.Store(tracerProviderHolder{tp: &tracerProvider{}}) diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index 3f61ec12..e31f442b 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -97,6 +86,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T key := il{ name: name, version: c.InstrumentationVersion(), + schema: c.SchemaURL(), } if p.tracers == nil { @@ -112,10 +102,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T return t } -type il struct { - name string - version string -} +type il struct{ name, version, schema string } // tracer is a placeholder for a trace.Tracer. // @@ -193,6 +180,9 @@ func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} // AddEvent does nothing. func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} +// AddLink does nothing. 
+func (nonRecordingSpan) AddLink(trace.Link) {} + // SetName does nothing. func (nonRecordingSpan) SetName(string) {} diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go index e07e7940..3e7bb3b3 100644 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/otel/internal" diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go index c4f8acd5..6de7f2e4 100644 --- a/vendor/go.opentelemetry.io/otel/internal_logging.go +++ b/vendor/go.opentelemetry.io/otel/internal_logging.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go index f9551719..1e6473b3 100644 --- a/vendor/go.opentelemetry.io/otel/metric.go +++ b/vendor/go.opentelemetry.io/otel/metric.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
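Seen from the public otel package, the error-handler rework above (ErrDelegator in handler.go, the holder-based registration in state.go) behaves like this; printHandler is a made-up example type:

package main

import (
	"errors"
	"fmt"

	"go.opentelemetry.io/otel"
)

type printHandler struct{}

func (printHandler) Handle(err error) { fmt.Println("otel error:", err) }

func main() {
	// Before registration, errors take the default logging path.
	otel.Handle(errors.New("default path"))

	// After registration, Handle forwards to printHandler, as do delegator
	// instances handed out earlier.
	otel.SetErrorHandler(printHandler{})
	otel.Handle(errors.New("custom path"))
}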
+// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/metric/README.md b/vendor/go.opentelemetry.io/otel/metric/README.md new file mode 100644 index 00000000..0cf902e0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/README.md @@ -0,0 +1,3 @@ +# Metric API + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric)](https://pkg.go.dev/go.opentelemetry.io/otel/metric) diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go index 072baa8e..cf23db77 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -50,7 +39,7 @@ type Float64ObservableCounter interface { } // Float64ObservableCounterConfig contains options for asynchronous counter -// instruments that record int64 values. +// instruments that record float64 values. type Float64ObservableCounterConfig struct { description string unit string @@ -108,7 +97,7 @@ type Float64ObservableUpDownCounter interface { } // Float64ObservableUpDownCounterConfig contains options for asynchronous -// counter instruments that record int64 values. +// counter instruments that record float64 values. type Float64ObservableUpDownCounterConfig struct { description string unit string @@ -165,7 +154,7 @@ type Float64ObservableGauge interface { } // Float64ObservableGaugeConfig contains options for asynchronous counter -// instruments that record int64 values. +// instruments that record float64 values. type Float64ObservableGaugeConfig struct { description string unit string diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go index 9bd6ebf0..c82ba532 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go index 778ad2d7..d9e3b13e 100644 --- a/vendor/go.opentelemetry.io/otel/metric/config.go +++ b/vendor/go.opentelemetry.io/otel/metric/config.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go index 54716e13..f153745b 100644 --- a/vendor/go.opentelemetry.io/otel/metric/doc.go +++ b/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package metric provides the OpenTelemetry API used to measure metrics about @@ -68,6 +57,23 @@ asynchronous measurement, a Gauge ([Int64ObservableGauge] and See the [OpenTelemetry documentation] for more information about instruments and their intended use. +# Instrument Name + +OpenTelemetry defines an [instrument name syntax] that restricts what +instrument names are allowed. + +Instrument names should ... + + - Not be empty. + - Have an alphabetic character as their first letter. + - Have any letter after the first be an alphanumeric character, ‘_’, ‘.’, + ‘-’, or ‘/’. + - Have a maximum length of 255 letters. + +To ensure compatibility with observability platforms, all instruments created +need to conform to this syntax. Not all implementations of the API will validate +these names; it is the caller's responsibility to ensure compliance. + # Measurements Measurements are made by recording values and information about the values with @@ -164,6 +170,7 @@ It is strongly recommended that authors only embed That implementation is the only one OpenTelemetry authors can guarantee will fully implement all the API interfaces when a user updates their API. 
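The naming rules in the doc comment reduce to a small regular expression. A hypothetical up-front checker for callers, worth having since the text warns that not every API implementation validates names:

package main

import (
	"fmt"
	"regexp"
)

// instrumentNameRE mirrors the documented syntax: an alphabetic first
// character followed by at most 254 alphanumerics, '_', '.', '-', or '/'.
var instrumentNameRE = regexp.MustCompile(`^[A-Za-z][A-Za-z0-9_./-]{0,254}$`)

func main() {
	for _, name := range []string{"http.server.duration", "9starts-with-digit", ""} {
		fmt.Printf("%q valid: %v\n", name, instrumentNameRE.MatchString(name))
	}
}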
+[instrument name syntax]: https://opentelemetry.io/docs/specs/otel/metrics/api/#instrument-name-syntax [OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/ [GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider */ diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/README.md b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md new file mode 100644 index 00000000..1f6e0efa --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md @@ -0,0 +1,3 @@ +# Metric Embedded + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/embedded) diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go index ae0bdbd2..1a9dc680 100644 --- a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go +++ b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package embedded provides interfaces embedded within the [OpenTelemetry // metric API]. @@ -113,6 +102,16 @@ type Float64Counter interface{ float64Counter() } // the API package). type Float64Histogram interface{ float64Histogram() } +// Float64Gauge is embedded in [go.opentelemetry.io/otel/metric.Float64Gauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Gauge] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Float64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64Gauge interface{ float64Gauge() } + // Float64ObservableCounter is embedded in // [go.opentelemetry.io/otel/metric.Float64ObservableCounter]. // @@ -185,6 +184,16 @@ type Int64Counter interface{ int64Counter() } // the API package). type Int64Histogram interface{ int64Histogram() } +// Int64Gauge is embedded in [go.opentelemetry.io/otel/metric.Int64Gauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Gauge] if you want users to experience +// a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Int64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64Gauge interface{ int64Gauge() } + // Int64ObservableCounter is embedded in // [go.opentelemetry.io/otel/metric.Int64ObservableCounter]. 
// diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go index be89cd53..ea52e402 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -27,6 +16,7 @@ type InstrumentOption interface { Int64CounterOption Int64UpDownCounterOption Int64HistogramOption + Int64GaugeOption Int64ObservableCounterOption Int64ObservableUpDownCounterOption Int64ObservableGaugeOption @@ -34,6 +24,7 @@ type InstrumentOption interface { Float64CounterOption Float64UpDownCounterOption Float64HistogramOption + Float64GaugeOption Float64ObservableCounterOption Float64ObservableUpDownCounterOption Float64ObservableGaugeOption @@ -62,6 +53,11 @@ func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64Histogra return c } +func (o descOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.description = string(o) + return c +} + func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { c.description = string(o) return c @@ -92,6 +88,11 @@ func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfi return c } +func (o descOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.description = string(o) + return c +} + func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { c.description = string(o) return c @@ -127,6 +128,11 @@ func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64Histogra return c } +func (o unitOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.unit = string(o) + return c +} + func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { c.unit = string(o) return c @@ -157,6 +163,11 @@ func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfi return c } +func (o unitOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.unit = string(o) + return c +} + func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { c.unit = string(o) return c diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go index 2520bc74..6a7991e0 100644 --- a/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
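Because WithDescription and WithUnit yield the catch-all InstrumentOption, teaching descOpt and unitOpt the applyInt64Gauge/applyFloat64Gauge methods above is all it takes for existing options to configure the new gauges as well. A sketch using the NewInt64GaugeConfig constructor this change adds further down:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/metric"
)

func main() {
	// The same option values serve counters, histograms, and now gauges.
	cfg := metric.NewInt64GaugeConfig(
		metric.WithDescription("Items currently resident in the cache."),
		metric.WithUnit("{item}"),
	)
	fmt.Println(cfg.Description(), cfg.Unit())
}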
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -58,17 +47,37 @@ type Meter interface { // Int64Counter returns a new Int64Counter instrument identified by name // and configured with options. The instrument is used to synchronously // record increasing int64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) // Int64UpDownCounter returns a new Int64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record int64 measurements during a computational // operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) // Int64Histogram returns a new Int64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of int64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) + // Int64Gauge returns a new Int64Gauge instrument identified by name and + // configured with options. The instrument is used to synchronously record + // instantaneous int64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error) // Int64ObservableCounter returns a new Int64ObservableCounter identified // by name and configured with options. The instrument is used to // asynchronously record increasing int64 measurements once per a @@ -78,6 +87,10 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter // instrument identified by name and configured with options. 
The @@ -88,6 +101,10 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) // Int64ObservableGauge returns a new Int64ObservableGauge instrument // identified by name and configured with options. The instrument is used @@ -98,23 +115,47 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error) // Float64Counter returns a new Float64Counter instrument identified by // name and configured with options. The instrument is used to // synchronously record increasing float64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) // Float64UpDownCounter returns a new Float64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record float64 measurements during a computational // operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) // Float64Histogram returns a new Float64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of float64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) + // Float64Gauge returns a new Float64Gauge instrument identified by name and + // configured with options. The instrument is used to synchronously record + // instantaneous float64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error) // Float64ObservableCounter returns a new Float64ObservableCounter // instrument identified by name and configured with options. 
The // instrument is used to asynchronously record increasing float64 @@ -124,6 +165,10 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) // Float64ObservableUpDownCounter returns a new // Float64ObservableUpDownCounter instrument identified by name and @@ -134,6 +179,10 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) // Float64ObservableGauge returns a new Float64ObservableGauge instrument // identified by name and configured with options. The instrument is used @@ -144,6 +193,10 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error) // RegisterCallback registers f to be called during the collection of a diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/README.md b/vendor/go.opentelemetry.io/otel/metric/noop/README.md new file mode 100644 index 00000000..bb896943 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/noop/README.md @@ -0,0 +1,3 @@ +# Metric Noop + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/noop) diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go index acc9a670..ca6fcbdc 100644 --- a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go +++ b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 // Package noop provides an implementation of the OpenTelemetry metric API that // produces no telemetry and minimizes used computation resources. @@ -43,6 +32,8 @@ var ( _ metric.Float64UpDownCounter = Float64UpDownCounter{} _ metric.Int64Histogram = Int64Histogram{} _ metric.Float64Histogram = Float64Histogram{} + _ metric.Int64Gauge = Int64Gauge{} + _ metric.Float64Gauge = Float64Gauge{} _ metric.Int64ObservableCounter = Int64ObservableCounter{} _ metric.Float64ObservableCounter = Float64ObservableCounter{} _ metric.Int64ObservableGauge = Int64ObservableGauge{} @@ -87,6 +78,12 @@ func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int6 return Int64Histogram{}, nil } +// Int64Gauge returns a Gauge used to record int64 measurements that +// produces no telemetry. +func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + return Int64Gauge{}, nil +} + // Int64ObservableCounter returns an ObservableCounter used to record int64 // measurements that produces no telemetry. func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { @@ -123,6 +120,12 @@ func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric. return Float64Histogram{}, nil } +// Float64Gauge returns a Gauge used to record float64 measurements that +// produces no telemetry. +func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { + return Float64Gauge{}, nil +} + // Float64ObservableCounter returns an ObservableCounter used to record int64 // measurements that produces no telemetry. func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { @@ -208,6 +211,20 @@ type Float64Histogram struct{ embedded.Float64Histogram } // Record performs no operation. func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {} +// Int64Gauge is an OpenTelemetry Gauge used to record instantaneous int64 +// measurements. It produces no telemetry. +type Int64Gauge struct{ embedded.Int64Gauge } + +// Record performs no operation. +func (Int64Gauge) Record(context.Context, int64, ...metric.RecordOption) {} + +// Float64Gauge is an OpenTelemetry Gauge used to record instantaneous float64 +// measurements. It produces no telemetry. +type Float64Gauge struct{ embedded.Float64Gauge } + +// Record performs no operation. +func (Float64Gauge) Record(context.Context, float64, ...metric.RecordOption) {} + // Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record // int64 measurements. It produces no telemetry. type Int64ObservableCounter struct { diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go index 0a4825ae..8403a4ba 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
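With the no-op gauges added above, the noop package keeps satisfying the widened Meter interface and remains usable as a zero-cost test double. A sketch:

package main

import (
	"context"

	"go.opentelemetry.io/otel/metric/noop"
)

func main() {
	meter := noop.NewMeterProvider().Meter("test")

	// Both calls succeed and do nothing, which is typically what a unit test
	// exercising instrumented code wants.
	g, _ := meter.Float64Gauge("anything")
	g.Record(context.Background(), 3.14)
}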
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -39,7 +28,7 @@ type Float64Counter interface { } // Float64CounterConfig contains options for synchronous counter instruments that -// record int64 values. +// record float64 values. type Float64CounterConfig struct { description string unit string @@ -92,7 +81,7 @@ type Float64UpDownCounter interface { } // Float64UpDownCounterConfig contains options for synchronous counter -// instruments that record int64 values. +// instruments that record float64 values. type Float64UpDownCounterConfig struct { description string unit string @@ -144,8 +133,8 @@ type Float64Histogram interface { Record(ctx context.Context, incr float64, options ...RecordOption) } -// Float64HistogramConfig contains options for synchronous counter instruments -// that record int64 values. +// Float64HistogramConfig contains options for synchronous histogram +// instruments that record float64 values. type Float64HistogramConfig struct { description string unit string @@ -183,3 +172,55 @@ func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 { type Float64HistogramOption interface { applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig } + +// Float64Gauge is an instrument that records instantaneous float64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Gauge + + // Record records the instantaneous value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, value float64, options ...RecordOption) +} + +// Float64GaugeConfig contains options for synchronous gauge instruments that +// record float64 values. +type Float64GaugeConfig struct { + description string + unit string +} + +// NewFloat64GaugeConfig returns a new [Float64GaugeConfig] with all opts +// applied. +func NewFloat64GaugeConfig(opts ...Float64GaugeOption) Float64GaugeConfig { + var config Float64GaugeConfig + for _, o := range opts { + config = o.applyFloat64Gauge(config) + } + return config +} + +// Description returns the configured description. +func (c Float64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64GaugeConfig) Unit() string { + return c.unit +} + +// Float64GaugeOption applies options to a [Float64GaugeConfig]. See +// [InstrumentOption] for other options that can be used as a +// Float64GaugeOption. 
+type Float64GaugeOption interface { + applyFloat64Gauge(Float64GaugeConfig) Float64GaugeConfig +} diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go index 56667d32..783fdfba 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -144,7 +133,7 @@ type Int64Histogram interface { Record(ctx context.Context, incr int64, options ...RecordOption) } -// Int64HistogramConfig contains options for synchronous counter instruments +// Int64HistogramConfig contains options for synchronous histogram instruments // that record int64 values. type Int64HistogramConfig struct { description string @@ -183,3 +172,55 @@ func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 { type Int64HistogramOption interface { applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig } + +// Int64Gauge is an instrument that records instantaneous int64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64Gauge + + // Record records the instantaneous value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, value int64, options ...RecordOption) +} + +// Int64GaugeConfig contains options for synchronous gauge instruments that +// record int64 values. +type Int64GaugeConfig struct { + description string + unit string +} + +// NewInt64GaugeConfig returns a new [Int64GaugeConfig] with all opts +// applied. +func NewInt64GaugeConfig(opts ...Int64GaugeOption) Int64GaugeConfig { + var config Int64GaugeConfig + for _, o := range opts { + config = o.applyInt64Gauge(config) + } + return config +} + +// Description returns the configured description. +func (c Int64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64GaugeConfig) Unit() string { + return c.unit +} + +// Int64GaugeOption applies options to a [Int64GaugeConfig]. See +// [InstrumentOption] for other options that can be used as a +// Int64GaugeOption. 
+type Int64GaugeOption interface { + applyInt64Gauge(Int64GaugeConfig) Int64GaugeConfig +} diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go index d29aaa32..2fd94973 100644 --- a/vendor/go.opentelemetry.io/otel/propagation.go +++ b/vendor/go.opentelemetry.io/otel/propagation.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/propagation/README.md b/vendor/go.opentelemetry.io/otel/propagation/README.md new file mode 100644 index 00000000..e2959ac7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation/README.md @@ -0,0 +1,3 @@ +# Propagation + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/propagation)](https://pkg.go.dev/go.opentelemetry.io/otel/propagation) diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go index 303cdf1c..552263ba 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/baggage.go +++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package propagation // import "go.opentelemetry.io/otel/propagation" diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go index c119eb28..33a3baf1 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/doc.go +++ b/vendor/go.opentelemetry.io/otel/propagation/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package propagation contains OpenTelemetry context propagators. 
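The metric API changes above add synchronous Int64Gauge/Float64Gauge instruments alongside the existing observable ones. A minimal sketch of both styles, under stated assumptions: it uses the noop provider from noop.go so it runs without an SDK, and the instrument names ("requests.total", "cpu.temperature"), unit, and recorded values are illustrative, not taken from this repository.

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/noop"
)

func main() {
	// noop.MeterProvider satisfies the whole metric API while recording
	// nothing, which is why it must also gain the Gauge methods in this diff.
	meter := noop.NewMeterProvider().Meter("example")

	// Asynchronous: the callback is registered at creation time via
	// WithFloat64Callback and is invoked once per collection cycle.
	if _, err := meter.Float64ObservableCounter(
		"requests.total", // name must follow the instrument name syntax
		metric.WithFloat64Callback(func(_ context.Context, o metric.Float64Observer) error {
			o.Observe(1)
			return nil
		}),
	); err != nil {
		panic(err)
	}

	// Synchronous: the new Gauge records an instantaneous value directly.
	temp, err := meter.Float64Gauge(
		"cpu.temperature",
		metric.WithDescription("current CPU temperature"),
		metric.WithUnit("Cel"),
	)
	if err != nil {
		panic(err)
	}
	temp.Record(context.Background(), 42.5)
}
```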
diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go index c94438f7..8c8286aa 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/propagation.go +++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package propagation // import "go.opentelemetry.io/otel/propagation" diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go index 63e5d622..6870e316 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package propagation // import "go.opentelemetry.io/otel/propagation" @@ -46,7 +35,7 @@ var ( versionPart = fmt.Sprintf("%.2X", supportedVersion) ) -// Inject set tracecontext from the Context into the carrier. +// Inject injects the trace context from ctx into carrier. 
func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { sc := trace.SpanContextFromContext(ctx) if !sc.IsValid() { diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json new file mode 100644 index 00000000..8c5ac55c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -0,0 +1,24 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "config:recommended" + ], + "ignorePaths": [], + "labels": ["Skip Changelog", "dependencies"], + "postUpdateOptions" : [ + "gomodTidy" + ], + "packageRules": [ + { + "matchManagers": ["gomod"], + "matchDepTypes": ["indirect"], + "enabled": true + }, + { + "matchFileNames": ["internal/tools/**"], + "matchManagers": ["gomod"], + "matchDepTypes": ["indirect"], + "enabled": false + } + ] +} diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt index e0a43e13..ab09daf9 100644 --- a/vendor/go.opentelemetry.io/otel/requirements.txt +++ b/vendor/go.opentelemetry.io/otel/requirements.txt @@ -1 +1 @@ -codespell==2.2.6 +codespell==2.3.0 diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md new file mode 100644 index 00000000..87b842c5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.17.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.17.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.17.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go index 71a1f774..e087c9c0 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package semconv implements OpenTelemetry semantic conventions. // diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go index 679c40c4..c7b804bb 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go index 9b8c559d..137acc67 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go index d5c4b5c1..d318221e 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go index 39a2eab3..7e365e82 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. 
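Aside from the license-header swaps, the one documentation change with behavioral relevance in the propagation package above is the clarified TraceContext.Inject comment. For context, a minimal sketch of how that propagator is typically composed and invoked; the helper name injectHeaders and the HTTP usage around it are illustrative assumptions, not code from this diff.

```go
package main

import (
	"context"
	"net/http"

	"go.opentelemetry.io/otel/propagation"
)

// injectHeaders writes the active trace context and baggage from ctx into
// the outgoing request's headers.
func injectHeaders(ctx context.Context, req *http.Request) {
	prop := propagation.NewCompositeTextMapPropagator(
		propagation.TraceContext{}, // W3C traceparent/tracestate
		propagation.Baggage{},      // W3C baggage
	)
	// HeaderCarrier adapts http.Header to the TextMapCarrier interface
	// that Inject expects.
	prop.Inject(ctx, propagation.HeaderCarrier(req.Header))
}
```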
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go index 42fc525d..634a1dce 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go index 8c4a7299..21497bb6 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md new file mode 100644 index 00000000..82e1f46b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.20.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.20.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.20.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go index 67d1d4c4..6685c392 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. 
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go index 359c5a69..0d1f55a8 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package semconv implements OpenTelemetry semantic conventions. // diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go index 8ac9350d..63776393 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go index 09ff4dfd..f40c9782 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go index 342aede9..9c184063 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go index a2b90674..3d44dae2 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go index e449e5c3..95d0210e 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go index 85177414..90b1b045 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/BUILD.bazel b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/BUILD.bazel new file mode 100644 index 00000000..e03dd60b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/BUILD.bazel @@ -0,0 +1,19 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "v1_24_0", + srcs = [ + "attribute_group.go", + "doc.go", + "event.go", + "exception.go", + "metric.go", + "resource.go", + "schema.go", + "trace.go", + ], + importmap = "peridot.resf.org/vendor/go.opentelemetry.io/otel/semconv/v1.24.0", + importpath = "go.opentelemetry.io/otel/semconv/v1.24.0", + visibility = ["//visibility:public"], + deps = ["//vendor/go.opentelemetry.io/otel/attribute"], +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md new file mode 100644 index 00000000..0b6cbe96 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.24.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.24.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.24.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go new file mode 100644 index 00000000..6e688345 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go @@ -0,0 +1,4387 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +import "go.opentelemetry.io/otel/attribute" + +// Describes FaaS attributes. +const ( + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. Since it's + // part of the endpoint being called, the region is always known to + // clients. In these cases, `faas.invoked_region` MUST be set accordingly. + // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) 
+ // Stability: experimental + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") + + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function invocation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + FaaSTriggerKey = attribute.Key("faas.trigger") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// Attributes for Events represented using Log Records. +const ( + // EventNameKey is the attribute Key conforming to the "event.name" + // semantic conventions. It represents the identifies the class / type of + // event. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'browser.mouse.click', 'device.app.lifecycle' + // Note: Event names are subject to the same rules as [attribute + // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.26.0/specification/common/attribute-naming.md). + // Notably, event names are namespaced to avoid collisions and provide a + // clean separation of semantics for events in separate domains like + // browser, mobile, and kubernetes. + EventNameKey = attribute.Key("event.name") +) + +// EventName returns an attribute KeyValue conforming to the "event.name" +// semantic conventions. It represents the identifies the class / type of +// event. +func EventName(val string) attribute.KeyValue { + return EventNameKey.String(val) +} + +// The attributes described in this section are rather generic. They may be +// used in any Log Record they apply to. +const ( + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log + // Record. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means, that two + // distinguishable log records MUST have different values. + // The id MAY be an [Universally Unique Lexicographically Sortable + // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers + // (e.g. UUID) may be used as needed. + LogRecordUIDKey = attribute.Key("log.record.uid") +) + +// LogRecordUID returns an attribute KeyValue conforming to the +// "log.record.uid" semantic conventions. It represents a unique identifier for +// the Log Record. +func LogRecordUID(val string) attribute.KeyValue { + return LogRecordUIDKey.String(val) +} + +// Describes Log attributes +const ( + // LogIostreamKey is the attribute Key conforming to the "log.iostream" + // semantic conventions. It represents the stream associated with the log. + // See below for a list of well-known values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + LogIostreamKey = attribute.Key("log.iostream") +) + +var ( + // Logs from stdout stream + LogIostreamStdout = LogIostreamKey.String("stdout") + // Events from stderr stream + LogIostreamStderr = LogIostreamKey.String("stderr") +) + +// A file to which log was emitted. +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: experimental + // Examples: 'audit.log' + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the + // basename of the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'uuid.log' + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/log/mysql/audit.log' + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full + // path to the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/lib/docker/uuid.log' + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") +) + +// LogFileName returns an attribute KeyValue conforming to the +// "log.file.name" semantic conventions. It represents the basename of the +// file. +func LogFileName(val string) attribute.KeyValue { + return LogFileNameKey.String(val) +} + +// LogFileNameResolved returns an attribute KeyValue conforming to the +// "log.file.name_resolved" semantic conventions. It represents the basename of +// the file, with symlinks resolved. +func LogFileNameResolved(val string) attribute.KeyValue { + return LogFileNameResolvedKey.String(val) +} + +// LogFilePath returns an attribute KeyValue conforming to the +// "log.file.path" semantic conventions. It represents the full path to the +// file. 
+func LogFilePath(val string) attribute.KeyValue { + return LogFilePathKey.String(val) +} + +// LogFilePathResolved returns an attribute KeyValue conforming to the +// "log.file.path_resolved" semantic conventions. It represents the full path +// to the file, with symlinks resolved. +func LogFilePathResolved(val string) attribute.KeyValue { + return LogFilePathResolvedKey.String(val) +} + +// Describes Database attributes +const ( + // PoolNameKey is the attribute Key conforming to the "pool.name" semantic + // conventions. It represents the name of the connection pool; unique + // within the instrumented application. In case the connection pool + // implementation doesn't provide a name, then the + // [db.connection_string](/docs/database/database-spans.md#connection-level-attributes) + // should be used + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'myDataSource' + PoolNameKey = attribute.Key("pool.name") + + // StateKey is the attribute Key conforming to the "state" semantic + // conventions. It represents the state of a connection in the pool + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + // Examples: 'idle' + StateKey = attribute.Key("state") +) + +var ( + // idle + StateIdle = StateKey.String("idle") + // used + StateUsed = StateKey.String("used") +) + +// PoolName returns an attribute KeyValue conforming to the "pool.name" +// semantic conventions. It represents the name of the connection pool; unique +// within the instrumented application. In case the connection pool +// implementation doesn't provide a name, then the +// [db.connection_string](/docs/database/database-spans.md#connection-level-attributes) +// should be used +func PoolName(val string) attribute.KeyValue { + return PoolNameKey.String(val) +} + +// ASP.NET Core attributes +const ( + // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to + // the "aspnetcore.diagnostics.handler.type" semantic conventions. It + // represents the full type name of the + // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) + // implementation that handled the exception. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if and only if the exception + // was handled by this handler.) + // Stability: experimental + // Examples: 'Contoso.MyHandler' + AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") + + // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.policy" semantic conventions. It represents + // the rate limiting policy name. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if the matched endpoint for the + // request had a rate-limiting policy.) + // Stability: experimental + // Examples: 'fixed', 'sliding', 'token' + AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") + + // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.result" semantic conventions. 
It represents + // the rate-limiting result, shows whether the lease was acquired or + // contains a rejection reason + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + // Examples: 'acquired', 'request_canceled' + AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") + + // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the + // "aspnetcore.request.is_unhandled" semantic conventions. It represents + // the flag indicating if request was handled by the application pipeline. + // + // Type: boolean + // RequirementLevel: ConditionallyRequired (if and only if the request was + // not handled.) + // Stability: experimental + // Examples: True + AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") + + // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the + // "aspnetcore.routing.is_fallback" semantic conventions. It represents a + // value that indicates whether the matched route is a fallback route. + // + // Type: boolean + // RequirementLevel: ConditionallyRequired (If and only if a route was + // successfully matched.) + // Stability: experimental + // Examples: True + AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") +) + +var ( + // Lease was acquired + AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") + // Lease request was rejected by the endpoint limiter + AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") + // Lease request was rejected by the global limiter + AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") + // Lease request was canceled + AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") +) + +// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming +// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It +// represents the full type name of the +// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) +// implementation that handled the exception. +func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { + return AspnetcoreDiagnosticsHandlerTypeKey.String(val) +} + +// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to +// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents +// the rate limiting policy name. +func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { + return AspnetcoreRateLimitingPolicyKey.String(val) +} + +// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to +// the "aspnetcore.request.is_unhandled" semantic conventions. It represents +// the flag indicating if request was handled by the application pipeline. +func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { + return AspnetcoreRequestIsUnhandledKey.Bool(val) +} + +// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to +// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a +// value that indicates whether the matched route is a fallback route. +func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { + return AspnetcoreRoutingIsFallbackKey.Bool(val) +} + +// SignalR attributes +const ( + // SignalrConnectionStatusKey is the attribute Key conforming to the + // "signalr.connection.status" semantic conventions. 
It represents the + // signalR HTTP connection closure status. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'app_shutdown', 'timeout' + SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") + + // SignalrTransportKey is the attribute Key conforming to the + // "signalr.transport" semantic conventions. It represents the [SignalR + // transport + // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'web_sockets', 'long_polling' + SignalrTransportKey = attribute.Key("signalr.transport") +) + +var ( + // The connection was closed normally + SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") + // The connection was closed due to a timeout + SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") + // The connection was closed because the app is shutting down + SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") +) + +var ( + // ServerSentEvents protocol + SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") + // LongPolling protocol + SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") + // WebSockets protocol + SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") +) + +// Describes JVM buffer metric attributes. +const ( + // JvmBufferPoolNameKey is the attribute Key conforming to the + // "jvm.buffer.pool.name" semantic conventions. It represents the name of + // the buffer pool. + // + // Type: string + // RequirementLevel: Recommended + // Stability: experimental + // Examples: 'mapped', 'direct' + // Note: Pool names are generally obtained via + // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). + JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") +) + +// JvmBufferPoolName returns an attribute KeyValue conforming to the +// "jvm.buffer.pool.name" semantic conventions. It represents the name of the +// buffer pool. +func JvmBufferPoolName(val string) attribute.KeyValue { + return JvmBufferPoolNameKey.String(val) +} + +// Describes JVM memory metric attributes. +const ( + // JvmMemoryPoolNameKey is the attribute Key conforming to the + // "jvm.memory.pool.name" semantic conventions. It represents the name of + // the memory pool. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' + // Note: Pool names are generally obtained via + // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). + JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") + + // JvmMemoryTypeKey is the attribute Key conforming to the + // "jvm.memory.type" semantic conventions. It represents the type of + // memory. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'heap', 'non_heap' + JvmMemoryTypeKey = attribute.Key("jvm.memory.type") +) + +var ( + // Heap memory + JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap") + // Non-heap memory + JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap") +) + +// JvmMemoryPoolName returns an attribute KeyValue conforming to the +// "jvm.memory.pool.name" semantic conventions. It represents the name of the +// memory pool. +func JvmMemoryPoolName(val string) attribute.KeyValue { + return JvmMemoryPoolNameKey.String(val) +} + +// Describes System metric attributes +const ( + // SystemDeviceKey is the attribute Key conforming to the "system.device" + // semantic conventions. It represents the device identifier + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '(identifier)' + SystemDeviceKey = attribute.Key("system.device") +) + +// SystemDevice returns an attribute KeyValue conforming to the +// "system.device" semantic conventions. It represents the device identifier +func SystemDevice(val string) attribute.KeyValue { + return SystemDeviceKey.String(val) +} + +// Describes System CPU metric attributes +const ( + // SystemCPULogicalNumberKey is the attribute Key conforming to the + // "system.cpu.logical_number" semantic conventions. It represents the + // logical CPU number [0..n-1] + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1 + SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") + + // SystemCPUStateKey is the attribute Key conforming to the + // "system.cpu.state" semantic conventions. It represents the state of the + // CPU + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'idle', 'interrupt' + SystemCPUStateKey = attribute.Key("system.cpu.state") +) + +var ( + // user + SystemCPUStateUser = SystemCPUStateKey.String("user") + // system + SystemCPUStateSystem = SystemCPUStateKey.String("system") + // nice + SystemCPUStateNice = SystemCPUStateKey.String("nice") + // idle + SystemCPUStateIdle = SystemCPUStateKey.String("idle") + // iowait + SystemCPUStateIowait = SystemCPUStateKey.String("iowait") + // interrupt + SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") + // steal + SystemCPUStateSteal = SystemCPUStateKey.String("steal") +) + +// SystemCPULogicalNumber returns an attribute KeyValue conforming to the +// "system.cpu.logical_number" semantic conventions. It represents the logical +// CPU number [0..n-1] +func SystemCPULogicalNumber(val int) attribute.KeyValue { + return SystemCPULogicalNumberKey.Int(val) +} + +// Describes System Memory metric attributes +const ( + // SystemMemoryStateKey is the attribute Key conforming to the + // "system.memory.state" semantic conventions. 
It represents the memory + // state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free', 'cached' + SystemMemoryStateKey = attribute.Key("system.memory.state") +) + +var ( + // used + SystemMemoryStateUsed = SystemMemoryStateKey.String("used") + // free + SystemMemoryStateFree = SystemMemoryStateKey.String("free") + // shared + SystemMemoryStateShared = SystemMemoryStateKey.String("shared") + // buffers + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Describes System Memory Paging metric attributes +const ( + // SystemPagingDirectionKey is the attribute Key conforming to the + // "system.paging.direction" semantic conventions. It represents the paging + // access direction + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'in' + SystemPagingDirectionKey = attribute.Key("system.paging.direction") + + // SystemPagingStateKey is the attribute Key conforming to the + // "system.paging.state" semantic conventions. It represents the memory + // paging state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free' + SystemPagingStateKey = attribute.Key("system.paging.state") + + // SystemPagingTypeKey is the attribute Key conforming to the + // "system.paging.type" semantic conventions. It represents the memory + // paging type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'minor' + SystemPagingTypeKey = attribute.Key("system.paging.type") +) + +var ( + // in + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +var ( + // used + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +var ( + // major + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Describes Filesystem metric attributes +const ( + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the + // filesystem mode + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'rw, ro' + SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") + + // SystemFilesystemMountpointKey is the attribute Key conforming to the + // "system.filesystem.mountpoint" semantic conventions. It represents the + // filesystem mount path + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/mnt/data' + SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") + + // SystemFilesystemStateKey is the attribute Key conforming to the + // "system.filesystem.state" semantic conventions. It represents the + // filesystem state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'used' + SystemFilesystemStateKey = attribute.Key("system.filesystem.state") + + // SystemFilesystemTypeKey is the attribute Key conforming to the + // "system.filesystem.type" semantic conventions. 
It represents the + // filesystem type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ext4' + SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") +) + +var ( + // used + SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") + // free + SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") + // reserved + SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") +) + +var ( + // fat32 + SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") + // exfat + SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") + // ntfs + SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") + // refs + SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") + // hfsplus + SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") + // ext4 + SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") +) + +// SystemFilesystemMode returns an attribute KeyValue conforming to the +// "system.filesystem.mode" semantic conventions. It represents the filesystem +// mode +func SystemFilesystemMode(val string) attribute.KeyValue { + return SystemFilesystemModeKey.String(val) +} + +// SystemFilesystemMountpoint returns an attribute KeyValue conforming to +// the "system.filesystem.mountpoint" semantic conventions. It represents the +// filesystem mount path +func SystemFilesystemMountpoint(val string) attribute.KeyValue { + return SystemFilesystemMountpointKey.String(val) +} + +// Describes Network metric attributes +const ( + // SystemNetworkStateKey is the attribute Key conforming to the + // "system.network.state" semantic conventions. It represents a stateless + // protocol MUST NOT set this attribute + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'close_wait' + SystemNetworkStateKey = attribute.Key("system.network.state") +) + +var ( + // close + SystemNetworkStateClose = SystemNetworkStateKey.String("close") + // close_wait + SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait") + // closing + SystemNetworkStateClosing = SystemNetworkStateKey.String("closing") + // delete + SystemNetworkStateDelete = SystemNetworkStateKey.String("delete") + // established + SystemNetworkStateEstablished = SystemNetworkStateKey.String("established") + // fin_wait_1 + SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1") + // fin_wait_2 + SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2") + // last_ack + SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack") + // listen + SystemNetworkStateListen = SystemNetworkStateKey.String("listen") + // syn_recv + SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv") + // syn_sent + SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent") + // time_wait + SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait") +) + +// Describes System Process metric attributes +const ( + // SystemProcessesStatusKey is the attribute Key conforming to the + // "system.processes.status" semantic conventions. 
It represents the + // process state, e.g., [Linux Process State + // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'running' + SystemProcessesStatusKey = attribute.Key("system.processes.status") +) + +var ( + // running + SystemProcessesStatusRunning = SystemProcessesStatusKey.String("running") + // sleeping + SystemProcessesStatusSleeping = SystemProcessesStatusKey.String("sleeping") + // stopped + SystemProcessesStatusStopped = SystemProcessesStatusKey.String("stopped") + // defunct + SystemProcessesStatusDefunct = SystemProcessesStatusKey.String("defunct") +) + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ClientAddressKey is the attribute Key conforming to the "client.address" + // semantic conventions. It represents the client address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.address` SHOULD represent the client address + // behind any intermediaries, for example proxies, if it's available. + ClientAddressKey = attribute.Key("client.address") + + // ClientPortKey is the attribute Key conforming to the "client.port" + // semantic conventions. It represents the client port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.port` SHOULD represent the client port behind + // any intermediaries, for example proxies, if it's available. + ClientPortKey = attribute.Key("client.port") +) + +// ClientAddress returns an attribute KeyValue conforming to the +// "client.address" semantic conventions. It represents the client address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func ClientAddress(val string) attribute.KeyValue { + return ClientAddressKey.String(val) +} + +// ClientPort returns an attribute KeyValue conforming to the "client.port" +// semantic conventions. It represents the client port number. +func ClientPort(val int) attribute.KeyValue { + return ClientPortKey.Int(val) +}
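// Usage sketch (illustrative, not part of this patch): the client attribute
// helpers above are typically attached to a server-side span. This assumes
// the enclosing package is imported as semconv and a trace.Span named span
// is in scope; the address and port values are taken from the Examples above:
//
//	span.SetAttributes(
//		semconv.ClientAddress("10.1.2.80"),
//		semconv.ClientPort(65123),
//	)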
+ +// The attributes used to describe telemetry in the context of databases. +const ( + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") + + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents whether + // or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + + // DBCassandraTableKey is the attribute Key conforming to the + // "db.cassandra.table" semantic conventions. It represents the name of the + // primary Cassandra table that the operation is acting upon, including the + // keyspace name (if applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra + // rather than sql. It is not recommended to attempt any client-side + // parsing of `db.statement` just to get this property, but it should be + // set if it is provided by the library being instrumented. If the + // operation is acting upon an anonymous table, or more than one table, + // this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + + // DBConnectionStringKey is the attribute Key conforming to the + // "db.connection_string" semantic conventions. It represents the + // connection string used to connect to the database. It is recommended to + // remove embedded credentials.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. It represents the unique + // Cosmos client instance id. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + + // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + + // DBCosmosDBContainerKey is the attribute Key conforming to the + // "db.cosmosdb.container" semantic conventions. It represents the cosmos + // DB container name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'anystring' + DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") + + // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // cosmosDB Operation Type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + + // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions. It represents the RU + // consumed for that operation + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 46.18, 1.0 + DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") + + // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the + // "db.cosmosdb.request_content_length" semantic conventions. It represents + // the request payload size in bytes + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") + + // DBCosmosDBStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos + // DB status code. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 200, 201 + DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") + + // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.sub_status_code" semantic conventions. It represents the + // cosmos DB sub status code. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000, 1002 + DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") + + // DBElasticsearchClusterNameKey is the attribute Key conforming to the + // "db.elasticsearch.cluster.name" semantic conventions. It represents the + // identifier of an Elasticsearch cluster.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' + DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name") + + // DBElasticsearchNodeNameKey is the attribute Key conforming to the + // "db.elasticsearch.node.name" semantic conventions. It represents the + // human-readable identifier of the node/instance to which a + // request was routed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-0000000001' + DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name") + + // DBInstanceIDKey is the attribute Key conforming to the "db.instance.id" + // semantic conventions. It represents an identifier (address, unique name, + // or any other identifier) of the database instance that is executing + // queries or mutations on the current connection. This is useful in cases + // where the database is running in a clustered environment and the + // instrumentation is able to record the node executing the query. The + // client may obtain this value in databases like MySQL using queries like + // `select @@hostname`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mysql-e26b99z.example.com' + DBInstanceIDKey = attribute.Key("db.instance.id") + + // DBJDBCDriverClassnameKey is the attribute Key conforming to the + // "db.jdbc.driver_classname" semantic conventions. It represents the + // fully-qualified class name of the [Java Database Connectivity + // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) + // driver used to connect. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") + + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the MongoDB + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") + + // DBMSSQLInstanceNameKey is the attribute Key conforming to the + // "db.mssql.instance_name" semantic conventions. It represents the + // Microsoft SQL Server [instance + // name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // connecting to. This name is used to determine the port of a named + // instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MSSQLSERVER' + // Note: If setting a `db.mssql.instance_name`, `server.port` is no longer + // required (but still recommended if non-standard). + DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") + + // DBNameKey is the attribute Key conforming to the "db.name" semantic + // conventions. It represents the name + // of the database being accessed. For commands that switch the database, + // this should be set to the target database (even if the command fails).
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema + // name), the database name to be used is the more specific layer (e.g. + // Oracle schema name). + DBNameKey = attribute.Key("db.name") + + // DBOperationKey is the attribute Key conforming to the "db.operation" + // semantic conventions. It represents the name of the operation being + // executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to + // attempt any client-side parsing of `db.statement` just to get this + // property, but it should be set if the operation name is provided by the + // library being instrumented. If the SQL statement has an ambiguous + // operation, or performs more than one operation, this value may be + // omitted. + DBOperationKey = attribute.Key("db.operation") + + // DBRedisDBIndexKey is the attribute Key conforming to the + // "db.redis.database_index" semantic conventions. It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") + + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") + + // DBStatementKey is the attribute Key conforming to the "db.statement" + // semantic conventions. It represents the database statement being + // executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + DBStatementKey = attribute.Key("db.statement") + + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents an identifier for the database management + // system (DBMS) product being used. See below for a list of well-known + // identifiers. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBSystemKey = attribute.Key("db.system") + + // DBUserKey is the attribute Key conforming to the "db.user" semantic + // conventions. It represents the username for accessing the database. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +var ( + // Gateway (HTTP) connections mode + DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") + // Direct connection + DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") +) + +var ( + // invalid + DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") + // create + DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") + // patch + DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") + // read + DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") + // read_feed + DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") + // delete + DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") + // replace + DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") + // execute + DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") + // query + DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") + // head + DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") + // head_feed + DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") + // upsert + DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") + // batch + DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") + // query_plan + DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") + // execute_javascript + DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") +) + +var ( + // Some other SQL database. Fallback only. 
See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") + // Cloud Spanner + DBSystemSpanner = DBSystemKey.String("spanner") + // Trino + DBSystemTrino = DBSystemKey.String("trino") +)
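// Usage sketch (illustrative, not part of this patch): a database client
// span usually combines one of the DBSystem enum values above with the
// helper constructors defined below. This assumes the enclosing package is
// imported as semconv and a trace.Span named span is in scope; the name and
// statement values are taken from the Examples in the const block above:
//
//	span.SetAttributes(
//		semconv.DBSystemPostgreSQL,
//		semconv.DBName("customers"),
//		semconv.DBStatement("SELECT * FROM wuser_table"),
//	)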
+ +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. +func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. +func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// DBCassandraTable returns an attribute KeyValue conforming to the +// "db.cassandra.table" semantic conventions. It represents the name of the +// primary Cassandra table that the operation is acting upon, including the +// keyspace name (if applicable). +func DBCassandraTable(val string) attribute.KeyValue { + return DBCassandraTableKey.String(val) +} + +// DBConnectionString returns an attribute KeyValue conforming to the +// "db.connection_string" semantic conventions. It represents the connection +// string used to connect to the database. It is recommended to remove embedded +// credentials. +func DBConnectionString(val string) attribute.KeyValue { + return DBConnectionStringKey.String(val) +} + +// DBCosmosDBClientID returns an attribute KeyValue conforming to the +// "db.cosmosdb.client_id" semantic conventions. It represents the unique +// Cosmos client instance id. +func DBCosmosDBClientID(val string) attribute.KeyValue { + return DBCosmosDBClientIDKey.String(val) +} + +// DBCosmosDBContainer returns an attribute KeyValue conforming to the +// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB +// container name. +func DBCosmosDBContainer(val string) attribute.KeyValue { + return DBCosmosDBContainerKey.String(val) +} + +// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the +// "db.cosmosdb.request_charge" semantic conventions. It represents the RU +// consumed for that operation +func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { + return DBCosmosDBRequestChargeKey.Float64(val) +} + +// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming +// to the "db.cosmosdb.request_content_length" semantic conventions.
It +// represents the request payload size in bytes +func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { + return DBCosmosDBRequestContentLengthKey.Int(val) +} + +// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB +// status code. +func DBCosmosDBStatusCode(val int) attribute.KeyValue { + return DBCosmosDBStatusCodeKey.Int(val) +} + +// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos +// DB sub status code. +func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { + return DBCosmosDBSubStatusCodeKey.Int(val) +} + +// DBElasticsearchClusterName returns an attribute KeyValue conforming to +// the "db.elasticsearch.cluster.name" semantic conventions. It represents the +// identifier of an Elasticsearch cluster. +func DBElasticsearchClusterName(val string) attribute.KeyValue { + return DBElasticsearchClusterNameKey.String(val) +} + +// DBElasticsearchNodeName returns an attribute KeyValue conforming to the +// "db.elasticsearch.node.name" semantic conventions. It represents the +// human-readable identifier of the node/instance to which a +// request was routed. +func DBElasticsearchNodeName(val string) attribute.KeyValue { + return DBElasticsearchNodeNameKey.String(val) +} + +// DBInstanceID returns an attribute KeyValue conforming to the +// "db.instance.id" semantic conventions. It represents an identifier (address, +// unique name, or any other identifier) of the database instance that is +// executing queries or mutations on the current connection. This is useful in +// cases where the database is running in a clustered environment and the +// instrumentation is able to record the node executing the query. The client +// may obtain this value in databases like MySQL using queries like `select +// @@hostname`. +func DBInstanceID(val string) attribute.KeyValue { + return DBInstanceIDKey.String(val) +} + +// DBJDBCDriverClassname returns an attribute KeyValue conforming to the +// "db.jdbc.driver_classname" semantic conventions. It represents the +// fully-qualified class name of the [Java Database Connectivity +// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver +// used to connect. +func DBJDBCDriverClassname(val string) attribute.KeyValue { + return DBJDBCDriverClassnameKey.String(val) +} + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the MongoDB +// collection being accessed within the database stated in `db.name`. +func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// DBMSSQLInstanceName returns an attribute KeyValue conforming to the +// "db.mssql.instance_name" semantic conventions. It represents the Microsoft +// SQL Server [instance +// name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) +// connecting to. This name is used to determine the port of a named instance. +func DBMSSQLInstanceName(val string) attribute.KeyValue { + return DBMSSQLInstanceNameKey.String(val) +} + +// DBName returns an attribute KeyValue conforming to the "db.name" semantic +// conventions. It represents the name of +// the database being accessed.
For commands that switch the database, this +// should be set to the target database (even if the command fails). +func DBName(val string) attribute.KeyValue { + return DBNameKey.String(val) +} + +// DBOperation returns an attribute KeyValue conforming to the +// "db.operation" semantic conventions. It represents the name of the operation +// being executed, e.g. the [MongoDB command +// name](https://docs.mongodb.com/manual/reference/command/#database-operations) +// such as `findAndModify`, or the SQL keyword. +func DBOperation(val string) attribute.KeyValue { + return DBOperationKey.String(val) +} + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// DBStatement returns an attribute KeyValue conforming to the +// "db.statement" semantic conventions. It represents the database statement +// being executed. +func DBStatement(val string) attribute.KeyValue { + return DBStatementKey.String(val) +} + +// DBUser returns an attribute KeyValue conforming to the "db.user" semantic +// conventions. It represents the username for accessing the database. +func DBUser(val string) attribute.KeyValue { + return DBUserKey.String(val) +} + +// Describes deprecated HTTP attributes. +const ( + // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" + // semantic conventions. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: deprecated + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorKey = attribute.Key("http.flavor") + + // HTTPMethodKey is the attribute Key conforming to the "http.method" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'GET', 'POST', 'HEAD' + // Deprecated: use `http.request.method` instead. + HTTPMethodKey = attribute.Key("http.method") + + // HTTPRequestContentLengthKey is the attribute Key conforming to the + // "http.request_content_length" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 3495 + // Deprecated: use `http.request.header.content-length` instead. + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + + // HTTPResponseContentLengthKey is the attribute Key conforming to the + // "http.response_content_length" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 3495 + // Deprecated: use `http.response.header.content-length` instead. + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") + + // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'http', 'https' + // Deprecated: use `url.scheme` instead. 
+ HTTPSchemeKey = attribute.Key("http.scheme") + + // HTTPStatusCodeKey is the attribute Key conforming to the + // "http.status_code" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 200 + // Deprecated: use `http.response.status_code` instead. + HTTPStatusCodeKey = attribute.Key("http.status_code") + + // HTTPTargetKey is the attribute Key conforming to the "http.target" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '/search?q=OpenTelemetry#SemConv' + // Deprecated: use `url.path` and `url.query` instead. + HTTPTargetKey = attribute.Key("http.target") + + // HTTPURLKey is the attribute Key conforming to the "http.url" semantic + // conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Deprecated: use `url.full` instead. + HTTPURLKey = attribute.Key("http.url") + + // HTTPUserAgentKey is the attribute Key conforming to the + // "http.user_agent" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU + // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1' + // Deprecated: use `user_agent.original` instead. + HTTPUserAgentKey = attribute.Key("http.user_agent") +) + +var ( + // HTTP/1.0 + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") + // HTTP/1.1 + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") + // HTTP/2 + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") + // HTTP/3 + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") + // SPDY protocol + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") + // QUIC protocol + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") +) + +// HTTPMethod returns an attribute KeyValue conforming to the "http.method" +// semantic conventions. +// +// Deprecated: use `http.request.method` instead. +func HTTPMethod(val string) attribute.KeyValue { + return HTTPMethodKey.String(val) +} + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. +// +// Deprecated: use `http.request.header.content-length` instead. +func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. +// +// Deprecated: use `http.response.header.content-length` instead. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + +// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" +// semantic conventions. +// +// Deprecated: use `url.scheme` instead. +func HTTPScheme(val string) attribute.KeyValue { + return HTTPSchemeKey.String(val) +} + +// HTTPStatusCode returns an attribute KeyValue conforming to the +// "http.status_code" semantic conventions. 
+// +// Deprecated: use `http.response.status_code` instead. +func HTTPStatusCode(val int) attribute.KeyValue { + return HTTPStatusCodeKey.Int(val) +} + +// HTTPTarget returns an attribute KeyValue conforming to the "http.target" +// semantic conventions. +// +// Deprecated: use `url.path` and `url.query` instead. +func HTTPTarget(val string) attribute.KeyValue { + return HTTPTargetKey.String(val) +} + +// HTTPURL returns an attribute KeyValue conforming to the "http.url" +// semantic conventions. +// +// Deprecated: use `url.full` instead. +func HTTPURL(val string) attribute.KeyValue { + return HTTPURLKey.String(val) +} + +// HTTPUserAgent returns an attribute KeyValue conforming to the +// "http.user_agent" semantic conventions. +// +// Deprecated: use `user_agent.original` instead. +func HTTPUserAgent(val string) attribute.KeyValue { + return HTTPUserAgentKey.String(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'example.com' + // Deprecated: use `server.address`. + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 8080 + // Deprecated: use `server.port`. + NetHostPortKey = attribute.Key("net.host.port") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'example.com' + // Deprecated: use `server.address` on client spans and `client.address` on + // server spans. + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 8080 + // Deprecated: use `server.port` on client spans and `client.port` on + // server spans. + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetProtocolNameKey is the attribute Key conforming to the + // "net.protocol.name" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'amqp', 'http', 'mqtt' + // Deprecated: use `network.protocol.name`. + NetProtocolNameKey = attribute.Key("net.protocol.name") + + // NetProtocolVersionKey is the attribute Key conforming to the + // "net.protocol.version" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '3.1.1' + // Deprecated: use `network.protocol.version`. + NetProtocolVersionKey = attribute.Key("net.protocol.version") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: deprecated + // Deprecated: use `network.transport` and `network.type`. + NetSockFamilyKey = attribute.Key("net.sock.family") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '/var/my.sock' + // Deprecated: use `network.local.address`. 
+ NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 8080 + // Deprecated: use `network.local.port`. + NetSockHostPortKey = attribute.Key("net.sock.host.port") + + // NetSockPeerAddrKey is the attribute Key conforming to the + // "net.sock.peer.addr" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '192.168.0.1' + // Deprecated: use `network.peer.address`. + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + + // NetSockPeerNameKey is the attribute Key conforming to the + // "net.sock.peer.name" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '/var/my.sock' + // Deprecated: no replacement at this time. + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + + // NetSockPeerPortKey is the attribute Key conforming to the + // "net.sock.peer.port" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 65531 + // Deprecated: use `network.peer.port`. + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetTransportKey is the attribute Key conforming to the "net.transport" + // semantic conventions. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: deprecated + // Deprecated: use `network.transport`. + NetTransportKey = attribute.Key("net.transport") +) + +var ( + // IPv4 address + // + // Deprecated: use `network.transport` and `network.type`. + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + // + // Deprecated: use `network.transport` and `network.type`. + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + // + // Deprecated: use `network.transport` and `network.type`. + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +var ( + // ip_tcp + // + // Deprecated: use `network.transport`. + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + // + // Deprecated: use `network.transport`. + NetTransportUDP = NetTransportKey.String("ip_udp") + // Named or anonymous pipe + // + // Deprecated: use `network.transport`. + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + // + // Deprecated: use `network.transport`. + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + // + // Deprecated: use `network.transport`. + NetTransportOther = NetTransportKey.String("other") +) + +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. +// +// Deprecated: use `server.address`. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) +} + +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. +// +// Deprecated: use `server.port`. +func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) +} + +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. +// +// Deprecated: use `server.address` on client spans and `client.address` on +// server spans. 
+func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) +} + +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. +// +// Deprecated: use `server.port` on client spans and `client.port` on server +// spans. +func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) +} + +// NetProtocolName returns an attribute KeyValue conforming to the +// "net.protocol.name" semantic conventions. +// +// Deprecated: use `network.protocol.name`. +func NetProtocolName(val string) attribute.KeyValue { + return NetProtocolNameKey.String(val) +} + +// NetProtocolVersion returns an attribute KeyValue conforming to the +// "net.protocol.version" semantic conventions. +// +// Deprecated: use `network.protocol.version`. +func NetProtocolVersion(val string) attribute.KeyValue { + return NetProtocolVersionKey.String(val) +} + +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. +// +// Deprecated: use `network.local.address`. +func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) +} + +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. +// +// Deprecated: use `network.local.port`. +func NetSockHostPort(val int) attribute.KeyValue { + return NetSockHostPortKey.Int(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. +// +// Deprecated: use `network.peer.address`. +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. +// +// Deprecated: no replacement at this time. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. +// +// Deprecated: use `network.peer.port`. +func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) +} + +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. It represents the + // destination address - domain name if available without reverse DNS + // lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the source side, and when communicating through + // an intermediary, `destination.address` SHOULD represent the destination + // address behind any intermediaries, for example proxies, if it's + // available. 
+ DestinationAddressKey = attribute.Key("destination.address") + + // DestinationPortKey is the attribute Key conforming to the + // "destination.port" semantic conventions. It represents the destination + // port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + DestinationPortKey = attribute.Key("destination.port") +) + +// DestinationAddress returns an attribute KeyValue conforming to the +// "destination.address" semantic conventions. It represents the destination +// address - domain name if available without reverse DNS lookup; otherwise, IP +// address or Unix domain socket name. +func DestinationAddress(val string) attribute.KeyValue { + return DestinationAddressKey.String(val) +} + +// DestinationPort returns an attribute KeyValue conforming to the +// "destination.port" semantic conventions. It represents the destination port +// number +func DestinationPort(val int) attribute.KeyValue { + return DestinationPortKey.Int(val) +} + +// These attributes may be used for any disk related operation. +const ( + // DiskIoDirectionKey is the attribute Key conforming to the + // "disk.io.direction" semantic conventions. It represents the disk IO + // operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read' + DiskIoDirectionKey = attribute.Key("disk.io.direction") +) + +var ( + // read + DiskIoDirectionRead = DiskIoDirectionKey.String("read") + // write + DiskIoDirectionWrite = DiskIoDirectionKey.String("write") +) + +// The shared attributes used to report an error. +const ( + // ErrorTypeKey is the attribute Key conforming to the "error.type" + // semantic conventions. It describes a class of error the + // operation ended with. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'timeout', 'java.net.UnknownHostException', + // 'server_certificate_invalid', '500' + // Note: The `error.type` SHOULD be predictable and SHOULD have low + // cardinality. + // Instrumentations SHOULD document the list of errors they report. + // + // The cardinality of `error.type` within one instrumentation library + // SHOULD be low. + // Telemetry consumers that aggregate data from multiple instrumentation + // libraries and applications + // should be prepared for `error.type` to have high cardinality at query + // time when no + // additional filters are applied. + // + // If the operation has completed successfully, instrumentations SHOULD NOT + // set `error.type`. + // + // If a specific domain defines its own set of error identifiers (such as + // HTTP or gRPC status codes), + // it's RECOMMENDED to: + // + // * Use a domain-specific attribute + // * Set `error.type` to capture all errors, regardless of whether they are + // defined within the domain-specific set or not. + ErrorTypeKey = attribute.Key("error.type") +) + +var ( + // A fallback error value to be used when the instrumentation doesn't define a custom value + ErrorTypeOther = ErrorTypeKey.String("_OTHER") +) + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionEscapedKey is the attribute Key conforming to the + // "exception.escaped" semantic conventions. It SHOULD be + // set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span.
+ // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Note: An exception is considered to have escaped (or left) the scope of + // a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's `__exit__` method in Python) but will + // usually be caught at the point of recording the exception in most + // languages. + // + // It is usually not possible to determine at the point where an exception + // is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending + // the span, + // as done in the [example for recording span + // exceptions](#recording-an-exception). + // + // It follows that an exception may still escape the scope of the span + // even if the `exception.escaped` attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + ExceptionEscapedKey = attribute.Key("exception.escaped") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") +) + +// ExceptionEscaped returns an attribute KeyValue conforming to the +// "exception.escaped" semantic conventions. It SHOULD be set to +// true if the exception event is recorded at a point where it is known that +// the exception is escaping the scope of the span. +func ExceptionEscaped(val bool) attribute.KeyValue { + return ExceptionEscapedKey.Bool(val) +}
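// Usage sketch (illustrative, not part of this patch): per the OpenTelemetry
// conventions these attributes are recorded on a span event named
// "exception". This assumes the enclosing package is imported as semconv,
// the go.opentelemetry.io/otel/trace package is imported as trace, a
// trace.Span named span is in scope, and err is the error being recorded;
// the type string is a hypothetical example value:
//
//	span.AddEvent("exception", trace.WithAttributes(
//		semconv.ExceptionType("*net.OpError"),
//		semconv.ExceptionMessage(err.Error()),
//		semconv.ExceptionEscaped(true),
//	))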
+ +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// Semantic convention attributes in the HTTP namespace. +const ( + // HTTPRequestBodySizeKey is the attribute Key conforming to the + // "http.request.body.size" semantic conventions. It represents the size of + // the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3495 + HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") + + // HTTPRequestMethodKey is the attribute Key conforming to the + // "http.request.method" semantic conventions. It represents the HTTP + // request method. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + // Note: HTTP request method value SHOULD be "known" to the + // instrumentation. + // By default, this convention defines "known" methods as the ones listed + // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) + // and the PATCH method defined in + // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html). + // + // If the HTTP request method is not known to instrumentation, it MUST set + // the `http.request.method` attribute to `_OTHER`. + // + // If the HTTP instrumentation could end up converting valid HTTP request + // methods to `_OTHER`, then it MUST provide a way to override + // the list of known HTTP methods. If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated + // list of case-sensitive known HTTP methods + // (this list MUST be a full override of the default known method, it is + // not a list of known methods in addition to the defaults). + // + // HTTP method names are case-sensitive and `http.request.method` attribute + // value MUST match a known HTTP method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods + // to be case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set + // `http.request.method_original` to the original value. + HTTPRequestMethodKey = attribute.Key("http.request.method") + + // HTTPRequestMethodOriginalKey is the attribute Key conforming to the + // "http.request.method_original" semantic conventions.
It represents the + // original HTTP method sent by the client in the request line. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'GeT', 'ACL', 'foo' + HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") + + // HTTPRequestResendCountKey is the attribute Key conforming to the + // "http.request.resend_count" semantic conventions. It represents the + // ordinal number of the request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of the cause of the resending + // (e.g. redirection, authorization failure, 503 Service Unavailable, + // network issues, or any other). + HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size + // of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3495 + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") + + // HTTPResponseStatusCodeKey is the attribute Key conforming to the + // "http.response.status_code" semantic conventions. It represents the + // [HTTP response status + // code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 200 + HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. It represents the matched route, that is, the path + // template in the format used by the respective server framework. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: MUST NOT be populated when this is not supported by the HTTP + // server framework as the route attribute should have low cardinality and + // the URI path can NOT substitute for it. + // SHOULD include the [application + // root](/docs/http/http-spans.md#http-server-definitions) if there is one.
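The `_OTHER` rule in the `http.request.method` note above lends itself to a small helper. A hedged sketch, assuming it compiles inside this package with `strings` imported; the `knownMethods` table and `methodAttrs` name are hypothetical, and the enum values such as HTTPRequestMethodOther are declared just below:

    // knownMethods mirrors the default "known" set: RFC 9110 plus PATCH.
    // A real instrumentation would let OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS
    // replace this table entirely.
    var knownMethods = map[string]bool{
        "GET": true, "HEAD": true, "POST": true, "PUT": true, "DELETE": true,
        "CONNECT": true, "OPTIONS": true, "TRACE": true, "PATCH": true,
    }

    func methodAttrs(raw string) []attribute.KeyValue {
        canonical := strings.ToUpper(raw)
        if !knownMethods[canonical] {
            // Unknown methods MUST be reported as _OTHER.
            return []attribute.KeyValue{HTTPRequestMethodOther}
        }
        attrs := []attribute.KeyValue{HTTPRequestMethodKey.String(canonical)}
        if raw != canonical {
            // Case-insensitive frameworks MUST also keep the original spelling.
            attrs = append(attrs, HTTPRequestMethodOriginal(raw))
        }
        return attrs
    }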
+ HTTPRouteKey = attribute.Key("http.route") +) + +var ( + // CONNECT method + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // Any HTTP method that the instrumentation has no prior knowledge of + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// HTTPRequestBodySize returns an attribute KeyValue conforming to the +// "http.request.body.size" semantic conventions. It represents the size of the +// request payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestBodySize(val int) attribute.KeyValue { + return HTTPRequestBodySizeKey.Int(val) +} + +// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the +// "http.request.method_original" semantic conventions. It represents the +// original HTTP method sent by the client in the request line. +func HTTPRequestMethodOriginal(val string) attribute.KeyValue { + return HTTPRequestMethodOriginalKey.String(val) +} + +// HTTPRequestResendCount returns an attribute KeyValue conforming to the +// "http.request.resend_count" semantic conventions. It represents the ordinal +// number of request resending attempt (for any reason, including redirects). +func HTTPRequestResendCount(val int) attribute.KeyValue { + return HTTPRequestResendCountKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of +// the response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// HTTPResponseStatusCode returns an attribute KeyValue conforming to the +// "http.response.status_code" semantic conventions. It represents the [HTTP +// response status code](https://tools.ietf.org/html/rfc7231#section-6). +func HTTPResponseStatusCode(val int) attribute.KeyValue { + return HTTPResponseStatusCodeKey.Int(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route, that is, the path +// template in the format used by the respective server framework. +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Attributes describing telemetry around messaging systems and messaging +// activities. 
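Before the messaging attributes below, a usage sketch tying the HTTP helpers together: a hypothetical retry wrapper that follows the `http.request.resend_count` note, setting the attribute only on actual resends. `tracer` and `doRequest` are assumed to exist; only the attribute calls come from this file:

    func getWithRetries(ctx context.Context, tracer trace.Tracer, maxAttempts int) error {
        var err error
        for attempt := 0; attempt < maxAttempts; attempt++ {
            _, span := tracer.Start(ctx, "GET")
            span.SetAttributes(HTTPRequestMethodGet)
            if attempt > 0 {
                // Set on every resend, regardless of the cause.
                span.SetAttributes(HTTPRequestResendCount(attempt))
            }
            err = doRequest(ctx)
            span.End()
            if err == nil {
                return nil
            }
        }
        return err
    }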
+const ( + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message APIs for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") + + // MessagingClientIDKey is the attribute Key conforming to the + // "messaging.client_id" semantic conventions. It represents a unique + // identifier for the client that consumes or produces a message. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'client-5', 'myhost@8742@s8083jm' + MessagingClientIDKey = attribute.Key("messaging.client_id") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have an auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker doesn't have such a notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low-cardinality representation of the messaging destination name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed.
+ // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationPublishAnonymousKey is the attribute Key conforming + // to the "messaging.destination_publish.anonymous" semantic conventions. + // It represents a boolean that is true if the publish message destination + // is anonymous (could be unnamed or have an auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous") + + // MessagingDestinationPublishNameKey is the attribute Key conforming to + // the "messaging.destination_publish.name" semantic conventions. It + // represents the name of the original destination the message was + // published to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: The name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker doesn't have such a notion, the original destination name + // SHOULD uniquely identify the broker. + MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name") + + // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. + // It represents the ordering key for a given message. If the attribute is + // not present, the message does not have an ordering key. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ordering_key' + MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") + + // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the + // "messaging.kafka.consumer.group" semantic conventions. It represents the + // name of the Kafka Consumer Group that is handling the message. Only + // applies to consumers, not producers. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") + + // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to + // the "messaging.kafka.destination.partition" semantic conventions. It + // represents the partition the message is sent to. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2 + MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") + + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the + // message keys in Kafka, which are used for grouping alike messages to ensure + // they're processed on the same partition. They differ from + // `messaging.message.id` in that they're not unique. If the key is `null`, + // the attribute MUST NOT be set. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myKey' + // Note: If the key type is not string, its string representation has to + // be supplied for the attribute. If the key has no unambiguous, canonical + // string form, don't include its value.
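A small hedged helper for the `messaging.kafka.message.key` note above; the name `kafkaKeyAttr` is hypothetical, and `MessagingKafkaMessageKey` is defined further down in this file:

    // kafkaKeyAttr returns no attribute for null keys, per the note above;
    // non-string keys must already be in a canonical string form.
    func kafkaKeyAttr(key []byte) (attribute.KeyValue, bool) {
        if key == nil {
            return attribute.KeyValue{}, false // MUST NOT be set for null keys
        }
        return MessagingKafkaMessageKey(string(key)), true
    }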
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the + // "messaging.kafka.message.offset" semantic conventions. It represents the + // offset of a record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions. It represents + // a boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") + + // MessagingMessageBodySizeKey is the attribute Key conforming to the + // "messaging.message.body.size" semantic conventions. It represents the + // size of the message body in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1439 + // Note: This can refer to either the compressed or the uncompressed body size. + // If both sizes are known, the uncompressed + // body size should be used. + MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the conversation ID identifying the conversation to which the message + // belongs, represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the + // "messaging.message.envelope.size" semantic conventions. It represents + // the size of the message body and metadata in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2738 + // Note: This can refer to either the compressed or the uncompressed size. If + // both sizes are known, the uncompressed + // size should be used. + MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") + + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingOperationKey is the attribute Key conforming to the + // "messaging.operation" semantic conventions. It represents a string + // identifying the kind of messaging operation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationKey = attribute.Key("messaging.operation") + + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the RabbitMQ message routing key.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") + + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions. It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") + + // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delay_time_level" semantic + // conventions. It represents the delay time level for a delay message, which + // determines the message delay time. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3 + MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delivery_timestamp" + // semantic conventions. It represents the timestamp in milliseconds that + // the delay message is expected to be delivered to the consumer. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1665987217045 + MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + + // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents + // the message group, which is essential for FIFO messages. Messages that belong to the same + // message group are always processed one by one within the same consumer + // group. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myMessageGroup' + MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents + // the key(s) of the message, another way to mark a message besides the message id. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketmqMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of a message besides the topic.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents + // the type of message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketmqNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources; resources in different namespaces are + // independent of each other. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents an identifier for + // the messaging system being used. See below for a list of well-known + // identifiers. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingSystemKey = attribute.Key("messaging.system") +) + +var ( + // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created + MessagingOperationPublish = MessagingOperationKey.String("publish") + // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios + MessagingOperationCreate = MessagingOperationKey.String("create") + // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages + MessagingOperationReceive = MessagingOperationKey.String("receive") + // One or more messages are passed to a consumer.
This operation refers to push-based scenarios, where consumers register callbacks that get called by messaging SDKs + MessagingOperationDeliver = MessagingOperationKey.String("deliver") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +var ( + // Apache ActiveMQ + MessagingSystemActivemq = MessagingSystemKey.String("activemq") + // Amazon Simple Queue Service (SQS) + MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + MessagingSystemAzureEventgrid = MessagingSystemKey.String("azure_eventgrid") + // Azure Event Hubs + MessagingSystemAzureEventhubs = MessagingSystemKey.String("azure_eventhubs") + // Azure Service Bus + MessagingSystemAzureServicebus = MessagingSystemKey.String("azure_servicebus") + // Google Cloud Pub/Sub + MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + MessagingSystemJms = MessagingSystemKey.String("jms") + // Apache Kafka + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") +) + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client_id" semantic conventions. It represents a unique +// identifier for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have an auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name. +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions.
It represents the +// low-cardinality representation of the messaging destination name. +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationPublishAnonymous returns an attribute KeyValue +// conforming to the "messaging.destination_publish.anonymous" semantic +// conventions. It represents a boolean that is true if the publish message +// destination is anonymous (could be unnamed or have an auto-generated name). +func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationPublishAnonymousKey.Bool(val) +} + +// MessagingDestinationPublishName returns an attribute KeyValue conforming +// to the "messaging.destination_publish.name" semantic conventions. It +// represents the name of the original destination the message was published to. +func MessagingDestinationPublishName(val string) attribute.KeyValue { + return MessagingDestinationPublishNameKey.String(val) +} + +// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic +// conventions. It represents the ordering key for a given message. If the +// attribute is not present, the message does not have an ordering key. +func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue { + return MessagingGCPPubsubMessageOrderingKeyKey.String(val) +} + +// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to +// the "messaging.kafka.consumer.group" semantic conventions. It represents the +// name of the Kafka Consumer Group that is handling the message. Only applies +// to consumers, not producers. +func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { + return MessagingKafkaConsumerGroupKey.String(val) +} + +// MessagingKafkaDestinationPartition returns an attribute KeyValue +// conforming to the "messaging.kafka.destination.partition" semantic +// conventions. It represents the partition the message is sent to. +func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { + return MessagingKafkaDestinationPartitionKey.Int(val) +} + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the +// message keys in Kafka, which are used for grouping alike messages to ensure they're +// processed on the same partition. They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. It represents the +// offset of a record in the corresponding Kafka partition.
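The batching note near the top of this section (SHOULD NOT set `messaging.batch.message_count` for single-message operations) reduces to a one-branch helper. A sketch with a hypothetical name, assuming it compiles inside this package:

    func batchAttrs(op attribute.KeyValue, messageCount int) []attribute.KeyValue {
        attrs := []attribute.KeyValue{op}
        if messageCount > 1 {
            // Only batch operations carry messaging.batch.message_count.
            attrs = append(attrs, MessagingBatchMessageCount(messageCount))
        }
        return attrs
    }

For example, a batch publish would use batchAttrs(MessagingOperationPublish, len(batch)).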
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// MessagingMessageBodySize returns an attribute KeyValue conforming to the +// "messaging.message.body.size" semantic conventions. It represents the size +// of the message body in bytes. +func MessagingMessageBodySize(val int) attribute.KeyValue { + return MessagingMessageBodySizeKey.Int(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the conversation ID identifying the conversation to which the +// message belongs, represented as a string. Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to +// the "messaging.message.envelope.size" semantic conventions. It represents +// the size of the message body and metadata in bytes. +func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { + return MessagingMessageEnvelopeSizeKey.Int(val) +} + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the RabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + +// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.client_group" semantic conventions. It represents +// the name of the RocketMQ producer/consumer group that is handling the +// message. The client type is identified by the SpanKind. +func MessagingRocketmqClientGroup(val string) attribute.KeyValue { + return MessagingRocketmqClientGroupKey.String(val) +} + +// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. It represents the delay time level for a delay message, which +// determines the message delay time. +func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. It represents the timestamp in milliseconds that the delay +// message is expected to be delivered to the consumer.
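Putting the messaging helpers together, an illustrative attribute set for a Kafka publish span; `span` is assumed to be an active `trace.Span`, and the values are the documented examples rather than real data:

    span.SetAttributes(
        MessagingSystemKafka,
        MessagingOperationPublish,
        MessagingDestinationName("MyTopic"),
        MessagingKafkaDestinationPartition(2),
        MessagingMessageBodySize(1439),
    )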
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { + return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) +} + +// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.group" semantic conventions. It represents +// the message group, which is essential for FIFO messages. Messages that belong to the same +// message group are always processed one by one within the same consumer +// group. +func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { + return MessagingRocketmqMessageGroupKey.String(val) +} + +// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.keys" semantic conventions. It represents +// the key(s) of the message, another way to mark a message besides the message id. +func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketmqMessageKeysKey.StringSlice(val) +} + +// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of a message besides the topic. +func MessagingRocketmqMessageTag(val string) attribute.KeyValue { + return MessagingRocketmqMessageTagKey.String(val) +} + +// MessagingRocketmqNamespace returns an attribute KeyValue conforming to +// the "messaging.rocketmq.namespace" semantic conventions. It represents the +// namespace of RocketMQ resources; resources in different namespaces are +// independent of each other. +func MessagingRocketmqNamespace(val string) attribute.KeyValue { + return MessagingRocketmqNamespaceKey.String(val) +} + +// These attributes may be used for any network-related operation. +const ( + // NetworkCarrierIccKey is the attribute Key conforming to the + // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 + // alpha-2 2-character country code associated with the mobile carrier + // network. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'DE' + NetworkCarrierIccKey = attribute.Key("network.carrier.icc") + + // NetworkCarrierMccKey is the attribute Key conforming to the + // "network.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '310' + NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") + + // NetworkCarrierMncKey is the attribute Key conforming to the + // "network.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '001' + NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") + + // NetworkCarrierNameKey is the attribute Key conforming to the + // "network.carrier.name" semantic conventions. It represents the name of + // the mobile carrier. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'sprint' + NetworkCarrierNameKey = attribute.Key("network.carrier.name") + + // NetworkConnectionSubtypeKey is the attribute Key conforming to the + // "network.connection.subtype" semantic conventions. It describes more + // details regarding the connection.type. It may be the + // type of cell technology connection, but it could be used for describing + // details about a wifi connection.
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'LTE' + NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") + + // NetworkConnectionTypeKey is the attribute Key conforming to the + // "network.connection.type" semantic conventions. It represents the + // internet connection type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'wifi' + NetworkConnectionTypeKey = attribute.Key("network.connection.type") + + // NetworkIoDirectionKey is the attribute Key conforming to the + // "network.io.direction" semantic conventions. It represents the network + // IO operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'transmit' + NetworkIoDirectionKey = attribute.Key("network.io.direction") + + // NetworkLocalAddressKey is the attribute Key conforming to the + // "network.local.address" semantic conventions. It represents the local + // address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + NetworkLocalAddressKey = attribute.Key("network.local.address") + + // NetworkLocalPortKey is the attribute Key conforming to the + // "network.local.port" semantic conventions. It represents the local port + // number of the network connection. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + NetworkLocalPortKey = attribute.Key("network.local.port") + + // NetworkPeerAddressKey is the attribute Key conforming to the + // "network.peer.address" semantic conventions. It represents the peer + // address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + NetworkPeerAddressKey = attribute.Key("network.peer.address") + + // NetworkPeerPortKey is the attribute Key conforming to the + // "network.peer.port" semantic conventions. It represents the peer port + // number of the network connection. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + NetworkPeerPortKey = attribute.Key("network.peer.port") + + // NetworkProtocolNameKey is the attribute Key conforming to the + // "network.protocol.name" semantic conventions. It represents the [OSI + // application layer](https://osi-model.com/application-layer/) or non-OSI + // equivalent. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + // Note: The value SHOULD be normalized to lowercase. + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the + // version of the protocol specified in `network.protocol.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3.1.1' + // Note: `network.protocol.version` refers to the version of the protocol + // used and might be different from the protocol client's version. If the + // HTTP client has a version of `0.27.2`, but sends HTTP version `1.1`, + // this attribute should be set to `1.1`. 
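Per the two notes above, the protocol name is normalized to lowercase and the version reflects what was actually negotiated on the wire, not the client library version. An illustrative snippet (`span` assumed to be an active trace.Span; the NetworkProtocolName and NetworkProtocolVersion helpers are defined further down in this file):

    span.SetAttributes(
        NetworkProtocolName("http"),   // normalized to lowercase
        NetworkProtocolVersion("1.1"), // negotiated wire version, not e.g. 0.27.2
    )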
+ NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the [OSI + // transport layer](https://osi-model.com/transport-layer/) or + // [inter-process communication + // method](https://wikipedia.org/wiki/Inter-process_communication). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tcp', 'udp' + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example, + // different processes could be listening on TCP port 12345 and UDP port + // 12345. + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" + // semantic conventions. It represents the [OSI network + // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ipv4', 'ipv6' + // Note: The value SHOULD be normalized to lowercase. + NetworkTypeKey = attribute.Key("network.type") +) + +var ( + // GPRS + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev.
B + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +var ( + // wifi + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +var ( + // transmit + NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") + // receive + NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") +) + +var ( + // TCP + NetworkTransportTCP = NetworkTransportKey.String("tcp") + // UDP + NetworkTransportUDP = NetworkTransportKey.String("udp") + // Named or anonymous pipe + NetworkTransportPipe = NetworkTransportKey.String("pipe") + // Unix domain socket + NetworkTransportUnix = NetworkTransportKey.String("unix") +) + +var ( + // IPv4 + NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") + // IPv6 + NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") +) + +// NetworkCarrierIcc returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierIcc(val string) attribute.KeyValue { + return NetworkCarrierIccKey.String(val) +} + +// NetworkCarrierMcc returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMcc(val string) attribute.KeyValue { + return NetworkCarrierMccKey.String(val) +} + +// NetworkCarrierMnc returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMnc(val string) attribute.KeyValue { + return NetworkCarrierMncKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local +// address of the network connection - IP address or Unix domain socket name. 
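Following the `network.transport` note that a bare port number is ambiguous, a port should be paired with its transport. An illustrative snippet (`span` assumed; `NetworkPeerAddress` and `NetworkPeerPort` are defined just below):

    span.SetAttributes(
        NetworkTransportTCP,
        NetworkPeerAddress("10.1.2.80"),
        NetworkPeerPort(12345), // unambiguous only together with the transport
    )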
+func NetworkLocalAddress(val string) attribute.KeyValue { + return NetworkLocalAddressKey.String(val) +} + +// NetworkLocalPort returns an attribute KeyValue conforming to the +// "network.local.port" semantic conventions. It represents the local port +// number of the network connection. +func NetworkLocalPort(val int) attribute.KeyValue { + return NetworkLocalPortKey.Int(val) +} + +// NetworkPeerAddress returns an attribute KeyValue conforming to the +// "network.peer.address" semantic conventions. It represents the peer address +// of the network connection - IP address or Unix domain socket name. +func NetworkPeerAddress(val string) attribute.KeyValue { + return NetworkPeerAddressKey.String(val) +} + +// NetworkPeerPort returns an attribute KeyValue conforming to the +// "network.peer.port" semantic conventions. It represents the peer port number +// of the network connection. +func NetworkPeerPort(val int) attribute.KeyValue { + return NetworkPeerPortKey.Int(val) +} + +// NetworkProtocolName returns an attribute KeyValue conforming to the +// "network.protocol.name" semantic conventions. It represents the [OSI +// application layer](https://osi-model.com/application-layer/) or non-OSI +// equivalent. +func NetworkProtocolName(val string) attribute.KeyValue { + return NetworkProtocolNameKey.String(val) +} + +// NetworkProtocolVersion returns an attribute KeyValue conforming to the +// "network.protocol.version" semantic conventions. It represents the version +// of the protocol specified in `network.protocol.name`. +func NetworkProtocolVersion(val string) attribute.KeyValue { + return NetworkProtocolVersionKey.String(val) +} + +// Attributes for remote procedure calls. +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes](https://connect.build/docs/protocol/#error-codes) of the + // Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") + + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of the response if it is an error response. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of the response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of the request or response.
Since the protocol allows the id to be an int, + // a string, `null`, or missing (for notifications), the value is expected to be + // cast to a string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in the `jsonrpc` property of the request/response. Since JSON-RPC 1.0 + // doesn't specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, which must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers.
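The casting rule for `rpc.jsonrpc.request_id` above maps naturally onto a small helper. A hedged sketch (the name `jsonrpcIDAttr` is hypothetical, `strconv` is assumed to be imported, and `RPCJsonrpcRequestID` is defined further down in this file):

    func jsonrpcIDAttr(rawID any, present bool) (attribute.KeyValue, bool) {
        if !present {
            return attribute.KeyValue{}, false // notification: omit entirely
        }
        switch v := rawID.(type) {
        case nil:
            return RPCJsonrpcRequestID(""), true // null id becomes ""
        case string:
            return RPCJsonrpcRequestID(v), true
        case int64:
            return RPCJsonrpcRequestID(strconv.FormatInt(v, 10)), true
        }
        return attribute.KeyValue{}, false
    }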
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCSystemKey = attribute.Key("rpc.system") +) + +var ( + // cancelled + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + RPCSystemConnectRPC = 
RPCSystemKey.String("connect_rpc") +) + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of the response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of the response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of the request or response. Since the protocol allows the id to be an int, a string, +// `null`, or missing (for notifications), the value is expected to be cast to +// a string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in the `jsonrpc` property of the request/response. Since JSON-RPC 1.0 +// doesn't specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, which must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// These attributes may be used to describe the server in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. It represents the server domain name if available + // without reverse DNS lookup; otherwise, IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.address` SHOULD represent the server address + // behind any intermediaries, for example proxies, if it's available.
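An illustrative attribute set for a gRPC client span, combining the RPC attributes above with the server attributes that follow (`span` assumed to be an active trace.Span; `ServerAddress` and `ServerPort` are defined just below):

    span.SetAttributes(
        RPCSystemGRPC,
        RPCService("myservice.EchoService"),
        RPCMethod("Echo"),
        RPCGRPCStatusCodeOk,
        ServerAddress("example.com"),
        ServerPort(443),
    )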
+ ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" + // semantic conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.port` SHOULD represent the server port behind + // any intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the +// "server.address" semantic conventions. It represents the server domain name +// if available without reverse DNS lookup; otherwise, IP address or Unix +// domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. +func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// These attributes may be used to describe the sender of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the destination side, and when communicating + // through an intermediary, `source.address` SHOULD represent the source + // address behind any intermediaries, for example proxies, if it's + // available. + SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" + // semantic conventions. It represents the source port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceAddress returns an attribute KeyValue conforming to the +// "source.address" semantic conventions. It represents the source address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number +func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// Semantic convention attributes in the TLS namespace. +const ( + // TLSCipherKey is the attribute Key conforming to the "tls.cipher" + // semantic conventions. 
It represents the string indicating the + // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) + // used during the current connection. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', + // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' + // Note: The values allowed for `tls.cipher` MUST be one of the + // `Descriptions` of the [registered TLS Cipher + // Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4). + TLSCipherKey = attribute.Key("tls.cipher") + + // TLSClientCertificateKey is the attribute Key conforming to the + // "tls.client.certificate" semantic conventions. It represents the + // PEM-encoded stand-alone certificate offered by the client. This is + // usually mutually-exclusive of `client.certificate_chain` since this + // value also exists in that list. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...' + TLSClientCertificateKey = attribute.Key("tls.client.certificate") + + // TLSClientCertificateChainKey is the attribute Key conforming to the + // "tls.client.certificate_chain" semantic conventions. It represents the + // array of PEM-encoded certificates that make up the certificate chain + // offered by the client. This is usually mutually-exclusive of + // `client.certificate` since that value should be the first certificate in + // the chain. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") + + // TLSClientHashMd5Key is the attribute Key conforming to the + // "tls.client.hash.md5" semantic conventions. It represents the + // certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") + + // TLSClientHashSha1Key is the attribute Key conforming to the + // "tls.client.hash.sha1" semantic conventions. It represents the + // certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") + + // TLSClientHashSha256Key is the attribute Key conforming to the + // "tls.client.hash.sha256" semantic conventions. It represents the + // certificate fingerprint using the SHA256 digest of DER-encoded version + // of certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") + + // TLSClientIssuerKey is the attribute Key conforming to the + // "tls.client.issuer" semantic conventions.
It represents the + // distinguished name of + // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) + // of the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, + // DC=com' + TLSClientIssuerKey = attribute.Key("tls.client.issuer") + + // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" + // semantic conventions. It represents a hash that identifies clients based + // on how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + TLSClientJa3Key = attribute.Key("tls.client.ja3") + + // TLSClientNotAfterKey is the attribute Key conforming to the + // "tls.client.not_after" semantic conventions. It represents the date/time + // indicating when client certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + TLSClientNotAfterKey = attribute.Key("tls.client.not_after") + + // TLSClientNotBeforeKey is the attribute Key conforming to the + // "tls.client.not_before" semantic conventions. It represents the + // date/time indicating when client certificate is first considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") + + // TLSClientServerNameKey is the attribute Key conforming to the + // "tls.client.server_name" semantic conventions. It represents the server + // name (also called SNI) that the client is attempting to connect to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry.io' + TLSClientServerNameKey = attribute.Key("tls.client.server_name") + + // TLSClientSubjectKey is the attribute Key conforming to the + // "tls.client.subject" semantic conventions. It represents the + // distinguished name of subject of the x.509 certificate presented by the + // client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com' + TLSClientSubjectKey = attribute.Key("tls.client.subject") + + // TLSClientSupportedCiphersKey is the attribute Key conforming to the + // "tls.client.supported_ciphers" semantic conventions. It represents the + // array of ciphers offered by the client during the client hello. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."' + TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") + + // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic + // conventions. It represents the string indicating the curve used for the + // given cipher, when applicable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'secp256r1' + TLSCurveKey = attribute.Key("tls.curve") + + // TLSEstablishedKey is the attribute Key conforming to the + // "tls.established" semantic conventions.
It represents the boolean flag + // indicating if the TLS negotiation was successful and transitioned to an + // encrypted tunnel. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Examples: True + TLSEstablishedKey = attribute.Key("tls.established") + + // TLSNextProtocolKey is the attribute Key conforming to the + // "tls.next_protocol" semantic conventions. It represents the string + // indicating the protocol being tunneled. Per the values in the [IANA + // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), + // this string should be lower case. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'http/1.1' + TLSNextProtocolKey = attribute.Key("tls.next_protocol") + + // TLSProtocolNameKey is the attribute Key conforming to the + // "tls.protocol.name" semantic conventions. It represents the normalized + // lowercase protocol name parsed from original string of the negotiated + // [SSL/TLS protocol + // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + TLSProtocolNameKey = attribute.Key("tls.protocol.name") + + // TLSProtocolVersionKey is the attribute Key conforming to the + // "tls.protocol.version" semantic conventions. It represents the numeric + // part of the version parsed from the original string of the negotiated + // [SSL/TLS protocol + // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2', '3' + TLSProtocolVersionKey = attribute.Key("tls.protocol.version") + + // TLSResumedKey is the attribute Key conforming to the "tls.resumed" + // semantic conventions. It represents the boolean flag indicating if this + // TLS connection was resumed from an existing TLS negotiation. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Examples: True + TLSResumedKey = attribute.Key("tls.resumed") + + // TLSServerCertificateKey is the attribute Key conforming to the + // "tls.server.certificate" semantic conventions. It represents the + // PEM-encoded stand-alone certificate offered by the server. This is + // usually mutually-exclusive of `server.certificate_chain` since this + // value also exists in that list. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...' + TLSServerCertificateKey = attribute.Key("tls.server.certificate") + + // TLSServerCertificateChainKey is the attribute Key conforming to the + // "tls.server.certificate_chain" semantic conventions. It represents the + // array of PEM-encoded certificates that make up the certificate chain + // offered by the server. This is usually mutually-exclusive of + // `server.certificate` since that value should be the first certificate in + // the chain. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") + + // TLSServerHashMd5Key is the attribute Key conforming to the + // "tls.server.hash.md5" semantic conventions. It represents the + // certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the server.
For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") + + // TLSServerHashSha1Key is the attribute Key conforming to the + // "tls.server.hash.sha1" semantic conventions. It represents the + // certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") + + // TLSServerHashSha256Key is the attribute Key conforming to the + // "tls.server.hash.sha256" semantic conventions. It represents the + // certificate fingerprint using the SHA256 digest of DER-encoded version + // of certificate offered by the server. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") + + // TLSServerIssuerKey is the attribute Key conforming to the + // "tls.server.issuer" semantic conventions. It represents the + // distinguished name of + // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) + // of the issuer of the x.509 certificate presented by the server. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, + // DC=com' + TLSServerIssuerKey = attribute.Key("tls.server.issuer") + + // TLSServerJa3sKey is the attribute Key conforming to the + // "tls.server.ja3s" semantic conventions. It represents a hash that + // identifies servers based on how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + TLSServerJa3sKey = attribute.Key("tls.server.ja3s") + + // TLSServerNotAfterKey is the attribute Key conforming to the + // "tls.server.not_after" semantic conventions. It represents the date/time + // indicating when server certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + TLSServerNotAfterKey = attribute.Key("tls.server.not_after") + + // TLSServerNotBeforeKey is the attribute Key conforming to the + // "tls.server.not_before" semantic conventions. It represents the + // date/time indicating when server certificate is first considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") + + // TLSServerSubjectKey is the attribute Key conforming to the + // "tls.server.subject" semantic conventions. It represents the + // distinguished name of subject of the x.509 certificate presented by the + // server.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' + TLSServerSubjectKey = attribute.Key("tls.server.subject") +) + +var ( + // ssl + TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") + // tls + TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") +) + +// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" +// semantic conventions. It represents the string indicating the +// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used +// during the current connection. +func TLSCipher(val string) attribute.KeyValue { + return TLSCipherKey.String(val) +} + +// TLSClientCertificate returns an attribute KeyValue conforming to the +// "tls.client.certificate" semantic conventions. It represents the PEM-encoded +// stand-alone certificate offered by the client. This is usually +// mutually-exclusive of `client.certificate_chain` since this value also +// exists in that list. +func TLSClientCertificate(val string) attribute.KeyValue { + return TLSClientCertificateKey.String(val) +} + +// TLSClientCertificateChain returns an attribute KeyValue conforming to the +// "tls.client.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by +// the client. This is usually mutually-exclusive of `client.certificate` since +// that value should be the first certificate in the chain. +func TLSClientCertificateChain(val ...string) attribute.KeyValue { + return TLSClientCertificateChainKey.StringSlice(val) +} + +// TLSClientHashMd5 returns an attribute KeyValue conforming to the +// "tls.client.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashMd5(val string) attribute.KeyValue { + return TLSClientHashMd5Key.String(val) +} + +// TLSClientHashSha1 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha1(val string) attribute.KeyValue { + return TLSClientHashSha1Key.String(val) +} + +// TLSClientHashSha256 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha256(val string) attribute.KeyValue { + return TLSClientHashSha256Key.String(val) +} + +// TLSClientIssuer returns an attribute KeyValue conforming to the +// "tls.client.issuer" semantic conventions. It represents the distinguished +// name of +// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of +// the issuer of the x.509 certificate presented by the client. +func TLSClientIssuer(val string) attribute.KeyValue { + return TLSClientIssuerKey.String(val) +} + +// TLSClientJa3 returns an attribute KeyValue conforming to the +// "tls.client.ja3" semantic conventions.
It represents a hash that identifies +// clients based on how they perform an SSL/TLS handshake. +func TLSClientJa3(val string) attribute.KeyValue { + return TLSClientJa3Key.String(val) +} + +// TLSClientNotAfter returns an attribute KeyValue conforming to the +// "tls.client.not_after" semantic conventions. It represents the date/time +// indicating when client certificate is no longer considered valid. +func TLSClientNotAfter(val string) attribute.KeyValue { + return TLSClientNotAfterKey.String(val) +} + +// TLSClientNotBefore returns an attribute KeyValue conforming to the +// "tls.client.not_before" semantic conventions. It represents the date/time +// indicating when client certificate is first considered valid. +func TLSClientNotBefore(val string) attribute.KeyValue { + return TLSClientNotBeforeKey.String(val) +} + +// TLSClientServerName returns an attribute KeyValue conforming to the +// "tls.client.server_name" semantic conventions. It represents the server name +// (also called SNI) that the client is attempting to connect to. +func TLSClientServerName(val string) attribute.KeyValue { + return TLSClientServerNameKey.String(val) +} + +// TLSClientSubject returns an attribute KeyValue conforming to the +// "tls.client.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the client. +func TLSClientSubject(val string) attribute.KeyValue { + return TLSClientSubjectKey.String(val) +} + +// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the +// "tls.client.supported_ciphers" semantic conventions. It represents the array +// of ciphers offered by the client during the client hello. +func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { + return TLSClientSupportedCiphersKey.StringSlice(val) +} + +// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" +// semantic conventions. It represents the string indicating the curve used for +// the given cipher, when applicable. +func TLSCurve(val string) attribute.KeyValue { + return TLSCurveKey.String(val) +} + +// TLSEstablished returns an attribute KeyValue conforming to the +// "tls.established" semantic conventions. It represents the boolean flag +// indicating if the TLS negotiation was successful and transitioned to an +// encrypted tunnel. +func TLSEstablished(val bool) attribute.KeyValue { + return TLSEstablishedKey.Bool(val) +} + +// TLSNextProtocol returns an attribute KeyValue conforming to the +// "tls.next_protocol" semantic conventions. It represents the string +// indicating the protocol being tunneled. Per the values in the [IANA +// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), +// this string should be lower case. +func TLSNextProtocol(val string) attribute.KeyValue { + return TLSNextProtocolKey.String(val) +} + +// TLSProtocolVersion returns an attribute KeyValue conforming to the +// "tls.protocol.version" semantic conventions. It represents the numeric part +// of the version parsed from the original string of the negotiated [SSL/TLS +// protocol +// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) +func TLSProtocolVersion(val string) attribute.KeyValue { + return TLSProtocolVersionKey.String(val) +} + +// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" +// semantic conventions.
It represents the boolean flag indicating if this TLS +// connection was resumed from an existing TLS negotiation. +func TLSResumed(val bool) attribute.KeyValue { + return TLSResumedKey.Bool(val) +} + +// TLSServerCertificate returns an attribute KeyValue conforming to the +// "tls.server.certificate" semantic conventions. It represents the PEM-encoded +// stand-alone certificate offered by the server. This is usually +// mutually-exclusive of `server.certificate_chain` since this value also +// exists in that list. +func TLSServerCertificate(val string) attribute.KeyValue { + return TLSServerCertificateKey.String(val) +} + +// TLSServerCertificateChain returns an attribute KeyValue conforming to the +// "tls.server.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by +// the server. This is usually mutually-exclusive of `server.certificate` since +// that value should be the first certificate in the chain. +func TLSServerCertificateChain(val ...string) attribute.KeyValue { + return TLSServerCertificateChainKey.StringSlice(val) +} + +// TLSServerHashMd5 returns an attribute KeyValue conforming to the +// "tls.server.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashMd5(val string) attribute.KeyValue { + return TLSServerHashMd5Key.String(val) +} + +// TLSServerHashSha1 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha1(val string) attribute.KeyValue { + return TLSServerHashSha1Key.String(val) +} + +// TLSServerHashSha256 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha256(val string) attribute.KeyValue { + return TLSServerHashSha256Key.String(val) +} + +// TLSServerIssuer returns an attribute KeyValue conforming to the +// "tls.server.issuer" semantic conventions. It represents the distinguished +// name of +// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of +// the issuer of the x.509 certificate presented by the server. +func TLSServerIssuer(val string) attribute.KeyValue { + return TLSServerIssuerKey.String(val) +} + +// TLSServerJa3s returns an attribute KeyValue conforming to the +// "tls.server.ja3s" semantic conventions. It represents a hash that identifies +// servers based on how they perform an SSL/TLS handshake. +func TLSServerJa3s(val string) attribute.KeyValue { + return TLSServerJa3sKey.String(val) +} + +// TLSServerNotAfter returns an attribute KeyValue conforming to the +// "tls.server.not_after" semantic conventions. It represents the date/time +// indicating when server certificate is no longer considered valid.
+func TLSServerNotAfter(val string) attribute.KeyValue { + return TLSServerNotAfterKey.String(val) +} + +// TLSServerNotBefore returns an attribute KeyValue conforming to the +// "tls.server.not_before" semantic conventions. It represents the date/time +// indicating when server certificate is first considered valid. +func TLSServerNotBefore(val string) attribute.KeyValue { + return TLSServerNotBeforeKey.String(val) +} + +// TLSServerSubject returns an attribute KeyValue conforming to the +// "tls.server.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the server. +func TLSServerSubject(val string) attribute.KeyValue { + return TLSServerSubjectKey.String(val) +} + +// Attributes describing URL. +const ( + // URLFragmentKey is the attribute Key conforming to the "url.fragment" + // semantic conventions. It represents the [URI + // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'SemConv' + URLFragmentKey = attribute.Key("url.fragment") + + // URLFullKey is the attribute Key conforming to the "url.full" semantic + // conventions. It represents the absolute URL describing a network + // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // '//localhost' + // Note: For network calls, URL usually has + // `scheme://host[:port][path][?query][#fragment]` format, where the + // fragment is not transmitted over HTTP, but if it is known, it SHOULD be + // included nevertheless. + // `url.full` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case username and + // password SHOULD be redacted and attribute's value SHOULD be + // `https://REDACTED:REDACTED@www.example.com/`. + // `url.full` SHOULD capture the absolute URL when it is available (or can + // be reconstructed) and SHOULD NOT be validated or modified except for + // sanitizing purposes. + URLFullKey = attribute.Key("url.full") + + // URLPathKey is the attribute Key conforming to the "url.path" semantic + // conventions. It represents the [URI + // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/search' + URLPathKey = attribute.Key("url.path") + + // URLQueryKey is the attribute Key conforming to the "url.query" semantic + // conventions. It represents the [URI + // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'q=OpenTelemetry' + // Note: Sensitive content provided in query string SHOULD be scrubbed when + // instrumentations can identify it. + URLQueryKey = attribute.Key("url.query") + + // URLSchemeKey is the attribute Key conforming to the "url.scheme" + // semantic conventions. It represents the [URI + // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component + // identifying the used protocol. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'https', 'ftp', 'telnet' + URLSchemeKey = attribute.Key("url.scheme") +) + +// URLFragment returns an attribute KeyValue conforming to the +// "url.fragment" semantic conventions.
It represents the [URI +// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component +func URLFragment(val string) attribute.KeyValue { + return URLFragmentKey.String(val) +} + +// URLFull returns an attribute KeyValue conforming to the "url.full" +// semantic conventions. It represents the absolute URL describing a network +// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) +func URLFull(val string) attribute.KeyValue { + return URLFullKey.String(val) +} + +// URLPath returns an attribute KeyValue conforming to the "url.path" +// semantic conventions. It represents the [URI +// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component +func URLPath(val string) attribute.KeyValue { + return URLPathKey.String(val) +} + +// URLQuery returns an attribute KeyValue conforming to the "url.query" +// semantic conventions. It represents the [URI +// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component +func URLQuery(val string) attribute.KeyValue { + return URLQueryKey.String(val) +} + +// URLScheme returns an attribute KeyValue conforming to the "url.scheme" +// semantic conventions. It represents the [URI +// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component +// identifying the used protocol. +func URLScheme(val string) attribute.KeyValue { + return URLSchemeKey.String(val) +} + +// Describes user-agent attributes. +const ( + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of + // the [HTTP + // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) + // header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU + // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1' + UserAgentOriginalKey = attribute.Key("user_agent.original") +) + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client. +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} + +// Session is defined as the period of time encompassing all activities +// performed by the application and the actions executed by the end user. +// Consequently, a Session is represented as a collection of Logs, Events, and +// Spans emitted by the Client Application throughout the Session's duration. +// Each Session is assigned a unique identifier, which is included as an +// attribute in the Logs, Events, and Spans generated during the Session's +// lifecycle. +// When a session reaches end of life, typically due to user inactivity or +// session timeout, a new session identifier will be assigned. The previous +// session identifier may be provided by the instrumentation so that telemetry +// backends can link the two sessions. +const ( + // SessionIDKey is the attribute Key conforming to the "session.id" + // semantic conventions. It represents a unique id to identify a session. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + SessionIDKey = attribute.Key("session.id") + + // SessionPreviousIDKey is the attribute Key conforming to the + // "session.previous_id" semantic conventions. It represents the previous + // `session.id` for this user, when known. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + SessionPreviousIDKey = attribute.Key("session.previous_id") +) + +// SessionID returns an attribute KeyValue conforming to the "session.id" +// semantic conventions. It represents a unique id to identify a session. +func SessionID(val string) attribute.KeyValue { + return SessionIDKey.String(val) +} + +// SessionPreviousID returns an attribute KeyValue conforming to the +// "session.previous_id" semantic conventions. It represents the previous +// `session.id` for this user, when known. +func SessionPreviousID(val string) attribute.KeyValue { + return SessionPreviousIDKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go new file mode 100644 index 00000000..d27e8a8f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.24.0 +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go new file mode 100644 index 00000000..6c019aaf --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go @@ -0,0 +1,200 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +import "go.opentelemetry.io/otel/attribute" + +// This event represents an occurrence of a lifecycle transition on the iOS +// platform. +const ( + // IosStateKey is the attribute Key conforming to the "ios.state" semantic + // conventions. It represents the state the application has transitioned + // into at the occurrence of the event. + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + // Note: The iOS lifecycle states are defined in the [UIApplicationDelegate + // documentation](https://developer.apple.com/documentation/uikit/uiapplicationdelegate#1656902), + // and from which the `OS terminology` column values are derived. + IosStateKey = attribute.Key("ios.state") +) + +var ( + // The app has become `active`. Associated with UIKit notification `applicationDidBecomeActive` + IosStateActive = IosStateKey.String("active") + // The app is now `inactive`. Associated with UIKit notification `applicationWillResignActive` + IosStateInactive = IosStateKey.String("inactive") + // The app is now in the background. This value is associated with UIKit notification `applicationDidEnterBackground` + IosStateBackground = IosStateKey.String("background") + // The app is now in the foreground.
This value is associated with UIKit notification `applicationWillEnterForeground` + IosStateForeground = IosStateKey.String("foreground") + // The app is about to terminate. Associated with UIKit notification `applicationWillTerminate` + IosStateTerminate = IosStateKey.String("terminate") +) + +// This event represents an occurrence of a lifecycle transition on the Android +// platform. +const ( + // AndroidStateKey is the attribute Key conforming to the "android.state" + // semantic conventions. It represents the state the application has + // transitioned into at the occurrence of the event. + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + // Note: The Android lifecycle states are defined in [Activity lifecycle + // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc), + // and from which the `OS identifiers` are derived. + AndroidStateKey = attribute.Key("android.state") +) + +var ( + // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time + AndroidStateCreated = AndroidStateKey.String("created") + // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state + AndroidStateBackground = AndroidStateKey.String("background") + // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states + AndroidStateForeground = AndroidStateKey.String("foreground") +) + +// This semantic convention defines the attributes used to represent a feature +// flag evaluation as an event. +const ( + // FeatureFlagKeyKey is the attribute Key conforming to the + // "feature_flag.key" semantic conventions. It represents the unique + // identifier of the feature flag. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'logo-color' + FeatureFlagKeyKey = attribute.Key("feature_flag.key") + + // FeatureFlagProviderNameKey is the attribute Key conforming to the + // "feature_flag.provider_name" semantic conventions. It represents the + // name of the service provider that performs the flag evaluation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: experimental + // Examples: 'Flag Manager' + FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") + + // FeatureFlagVariantKey is the attribute Key conforming to the + // "feature_flag.variant" semantic conventions. It represents a semantic + // identifier for the value; if one is unavailable, a stringified version + // of the value can be used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: experimental + // Examples: 'red', 'true', 'on' + // Note: A semantic identifier, commonly referred to as a variant, provides + // a means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant `red` may be used for the value `#c05543`. + // + // A stringified version of the value can be used in situations where a + // semantic identifier is unavailable. String representation of the value + // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant") +) + +// FeatureFlagKey returns an attribute KeyValue conforming to the +// "feature_flag.key" semantic conventions. It represents the unique identifier +// of the feature flag. +func FeatureFlagKey(val string) attribute.KeyValue { + return FeatureFlagKeyKey.String(val) +} + +// FeatureFlagProviderName returns an attribute KeyValue conforming to the +// "feature_flag.provider_name" semantic conventions. It represents the name of +// the service provider that performs the flag evaluation. +func FeatureFlagProviderName(val string) attribute.KeyValue { + return FeatureFlagProviderNameKey.String(val) +} + +// FeatureFlagVariant returns an attribute KeyValue conforming to the +// "feature_flag.variant" semantic conventions. It represents a semantic +// identifier for the value; if one is unavailable, a stringified version of +// the value can be used. +func FeatureFlagVariant(val string) attribute.KeyValue { + return FeatureFlagVariantKey.String(val) +} + +// RPC received/sent message. +const ( + // MessageCompressedSizeKey is the attribute Key conforming to the + // "message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + MessageCompressedSizeKey = attribute.Key("message.compressed_size") + + // MessageIDKey is the attribute Key conforming to the "message.id" + // semantic conventions. It represents an identifier that MUST be + // calculated as two different counters starting from `1`, one for sent + // messages and one for received messages. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Note: This way we guarantee that the values will be consistent between + // different implementations. + MessageIDKey = attribute.Key("message.id") + + // MessageTypeKey is the attribute Key conforming to the "message.type" + // semantic conventions. It represents whether this is a received or sent + // message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessageTypeKey = attribute.Key("message.type") + + // MessageUncompressedSizeKey is the attribute Key conforming to the + // "message.uncompressed_size" semantic conventions. It represents the + // uncompressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") +) + +var ( + // sent + MessageTypeSent = MessageTypeKey.String("SENT") + // received + MessageTypeReceived = MessageTypeKey.String("RECEIVED") +) + +// MessageCompressedSize returns an attribute KeyValue conforming to the +// "message.compressed_size" semantic conventions. It represents the compressed +// size of the message in bytes. +func MessageCompressedSize(val int) attribute.KeyValue { + return MessageCompressedSizeKey.Int(val) +} + +// MessageID returns an attribute KeyValue conforming to the "message.id" +// semantic conventions. It represents an identifier that MUST be calculated as +// two different counters starting from `1`, one for sent messages and one for +// received messages. +func MessageID(val int) attribute.KeyValue { + return MessageIDKey.Int(val) +} + +// MessageUncompressedSize returns an attribute KeyValue conforming to the +// "message.uncompressed_size" semantic conventions. It represents the +// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue { + return MessageUncompressedSizeKey.Int(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go new file mode 100644 index 00000000..7235bb51 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go new file mode 100644 index 00000000..a6b953f6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go @@ -0,0 +1,1071 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +const ( + + // DBClientConnectionsUsage is the metric conforming to the + // "db.client.connections.usage" semantic conventions. It represents the number + // of connections that are currently in state described by the `state` + // attribute. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsUsageName = "db.client.connections.usage" + DBClientConnectionsUsageUnit = "{connection}" + DBClientConnectionsUsageDescription = "The number of connections that are currently in state described by the `state` attribute" + + // DBClientConnectionsIdleMax is the metric conforming to the + // "db.client.connections.idle.max" semantic conventions. It represents the + // maximum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsIdleMaxName = "db.client.connections.idle.max" + DBClientConnectionsIdleMaxUnit = "{connection}" + DBClientConnectionsIdleMaxDescription = "The maximum number of idle open connections allowed" + + // DBClientConnectionsIdleMin is the metric conforming to the + // "db.client.connections.idle.min" semantic conventions. It represents the + // minimum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsIdleMinName = "db.client.connections.idle.min" + DBClientConnectionsIdleMinUnit = "{connection}" + DBClientConnectionsIdleMinDescription = "The minimum number of idle open connections allowed" + + // DBClientConnectionsMax is the metric conforming to the + // "db.client.connections.max" semantic conventions. It represents the maximum + // number of open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsMaxName = "db.client.connections.max" + DBClientConnectionsMaxUnit = "{connection}" + DBClientConnectionsMaxDescription = "The maximum number of open connections allowed" + + // DBClientConnectionsPendingRequests is the metric conforming to the + // "db.client.connections.pending_requests" semantic conventions. It represents + // the number of pending requests for an open connection, cumulative for the + // entire pool. 
+ // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests" + DBClientConnectionsPendingRequestsUnit = "{request}" + DBClientConnectionsPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" + + // DBClientConnectionsTimeouts is the metric conforming to the + // "db.client.connections.timeouts" semantic conventions. It represents the + // number of connection timeouts that have occurred trying to obtain a + // connection from the pool. + // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + DBClientConnectionsTimeoutsName = "db.client.connections.timeouts" + DBClientConnectionsTimeoutsUnit = "{timeout}" + DBClientConnectionsTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool" + + // DBClientConnectionsCreateTime is the metric conforming to the + // "db.client.connections.create_time" semantic conventions. It represents the + // time it took to create a new connection. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsCreateTimeName = "db.client.connections.create_time" + DBClientConnectionsCreateTimeUnit = "ms" + DBClientConnectionsCreateTimeDescription = "The time it took to create a new connection" + + // DBClientConnectionsWaitTime is the metric conforming to the + // "db.client.connections.wait_time" semantic conventions. It represents the + // time it took to obtain an open connection from the pool. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsWaitTimeName = "db.client.connections.wait_time" + DBClientConnectionsWaitTimeUnit = "ms" + DBClientConnectionsWaitTimeDescription = "The time it took to obtain an open connection from the pool" + + // DBClientConnectionsUseTime is the metric conforming to the + // "db.client.connections.use_time" semantic conventions. It represents the + // time between borrowing a connection and returning it to the pool. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsUseTimeName = "db.client.connections.use_time" + DBClientConnectionsUseTimeUnit = "ms" + DBClientConnectionsUseTimeDescription = "The time between borrowing a connection and returning it to the pool" + + // AspnetcoreRoutingMatchAttempts is the metric conforming to the + // "aspnetcore.routing.match_attempts" semantic conventions. It represents the + // number of requests that were attempted to be matched to an endpoint. + // Instrument: counter + // Unit: {match_attempt} + // Stability: Experimental + AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" + AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" + AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." + + // AspnetcoreDiagnosticsExceptions is the metric conforming to the + // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the + // number of exceptions caught by exception handling middleware. + // Instrument: counter + // Unit: {exception} + // Stability: Experimental + AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" + AspnetcoreDiagnosticsExceptionsUnit = "{exception}" + AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." 
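+
+	// A note on usage: the Name/Unit/Description constants in this file are
+	// intended to be passed to a metric.Meter when declaring instruments, so
+	// that instrument metadata matches the semantic conventions. A minimal
+	// sketch for the counter above (assuming the go.opentelemetry.io/otel and
+	// go.opentelemetry.io/otel/metric packages and a ctx in scope):
+	//
+	//	meter := otel.Meter("example/aspnetcore")
+	//	exceptions, err := meter.Int64Counter(
+	//		AspnetcoreDiagnosticsExceptionsName,
+	//		metric.WithUnit(AspnetcoreDiagnosticsExceptionsUnit),
+	//		metric.WithDescription(AspnetcoreDiagnosticsExceptionsDescription),
+	//	)
+	//	if err != nil {
+	//		return err
+	//	}
+	//	exceptions.Add(ctx, 1)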
+ + // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the + // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It + // represents the number of requests that are currently active on the server + // that hold a rate limiting lease. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases" + AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}" + AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease." + + // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the + // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It + // represents the duration of rate limiting lease held by requests on the + // server. + // Instrument: histogram + // Unit: s + // Stability: Experimental + AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration" + AspnetcoreRateLimitingRequestLeaseDurationUnit = "s" + AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server." + + // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the + // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It + // represents the time the request spent in a queue waiting to acquire a rate + // limiting lease. + // Instrument: histogram + // Unit: s + // Stability: Experimental + AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue" + AspnetcoreRateLimitingRequestTimeInQueueUnit = "s" + AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the + // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It + // represents the number of requests that are currently queued, waiting to + // acquire a rate limiting lease. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests" + AspnetcoreRateLimitingQueuedRequestsUnit = "{request}" + AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingRequests is the metric conforming to the + // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the + // number of requests that tried to acquire a rate limiting lease. + // Instrument: counter + // Unit: {request} + // Stability: Experimental + AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests" + AspnetcoreRateLimitingRequestsUnit = "{request}" + AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease." + + // DNSLookupDuration is the metric conforming to the "dns.lookup.duration" + // semantic conventions. It represents the time taken to perform a + // DNS lookup. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DNSLookupDurationName = "dns.lookup.duration" + DNSLookupDurationUnit = "s" + DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
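+
+	// The histogram above records seconds; a sketch of timing a lookup with it
+	// (same assumed meter, ctx, and metric packages as the earlier sketch, plus
+	// the standard library net and time packages):
+	//
+	//	lookupTime, err := meter.Float64Histogram(
+	//		DNSLookupDurationName,
+	//		metric.WithUnit(DNSLookupDurationUnit),
+	//		metric.WithDescription(DNSLookupDurationDescription),
+	//	)
+	//	if err != nil {
+	//		return err
+	//	}
+	//	start := time.Now()
+	//	_, lookupErr := net.DefaultResolver.LookupHost(ctx, "example.com")
+	//	lookupTime.Record(ctx, time.Since(start).Seconds())
+	//	_ = lookupErr // handle or record as appropriate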
+ + // HTTPClientOpenConnections is the metric conforming to the + // "http.client.open_connections" semantic conventions. It represents the + // number of outbound HTTP connections that are currently active or idle on the + // client. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + HTTPClientOpenConnectionsName = "http.client.open_connections" + HTTPClientOpenConnectionsUnit = "{connection}" + HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." + + // HTTPClientConnectionDuration is the metric conforming to the + // "http.client.connection.duration" semantic conventions. It represents the + // duration of the successfully established outbound HTTP connections. + // Instrument: histogram + // Unit: s + // Stability: Experimental + HTTPClientConnectionDurationName = "http.client.connection.duration" + HTTPClientConnectionDurationUnit = "s" + HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." + + // HTTPClientActiveRequests is the metric conforming to the + // "http.client.active_requests" semantic conventions. It represents the number + // of active HTTP requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPClientActiveRequestsName = "http.client.active_requests" + HTTPClientActiveRequestsUnit = "{request}" + HTTPClientActiveRequestsDescription = "Number of active HTTP requests." + + // HTTPClientRequestTimeInQueue is the metric conforming to the + // "http.client.request.time_in_queue" semantic conventions. It represents the + // amount of time requests spent on a queue waiting for an available + // connection. + // Instrument: histogram + // Unit: s + // Stability: Experimental + HTTPClientRequestTimeInQueueName = "http.client.request.time_in_queue" + HTTPClientRequestTimeInQueueUnit = "s" + HTTPClientRequestTimeInQueueDescription = "The amount of time requests spent on a queue waiting for an available connection." + + // KestrelActiveConnections is the metric conforming to the + // "kestrel.active_connections" semantic conventions. It represents the number + // of connections that are currently active on the server. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + KestrelActiveConnectionsName = "kestrel.active_connections" + KestrelActiveConnectionsUnit = "{connection}" + KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." + + // KestrelConnectionDuration is the metric conforming to the + // "kestrel.connection.duration" semantic conventions. It represents the + // duration of connections on the server. + // Instrument: histogram + // Unit: s + // Stability: Experimental + KestrelConnectionDurationName = "kestrel.connection.duration" + KestrelConnectionDurationUnit = "s" + KestrelConnectionDurationDescription = "The duration of connections on the server." + + // KestrelRejectedConnections is the metric conforming to the + // "kestrel.rejected_connections" semantic conventions. It represents the + // number of connections rejected by the server. + // Instrument: counter + // Unit: {connection} + // Stability: Experimental + KestrelRejectedConnectionsName = "kestrel.rejected_connections" + KestrelRejectedConnectionsUnit = "{connection}" + KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." 
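+
+	// Updowncounters such as kestrel.active_connections move in both
+	// directions; a sketch of bracketing a connection's lifetime with paired
+	// Add calls (same assumed meter and ctx as the sketches above):
+	//
+	//	active, err := meter.Int64UpDownCounter(
+	//		KestrelActiveConnectionsName,
+	//		metric.WithUnit(KestrelActiveConnectionsUnit),
+	//		metric.WithDescription(KestrelActiveConnectionsDescription),
+	//	)
+	//	if err != nil {
+	//		return err
+	//	}
+	//	active.Add(ctx, 1)        // connection opened
+	//	defer active.Add(ctx, -1) // connection closed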
+
+	// KestrelQueuedConnections is the metric conforming to the
+	// "kestrel.queued_connections" semantic conventions. It represents the number
+	// of connections that are currently queued and are waiting to start.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	KestrelQueuedConnectionsName = "kestrel.queued_connections"
+	KestrelQueuedConnectionsUnit = "{connection}"
+	KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start."
+
+	// KestrelQueuedRequests is the metric conforming to the
+	// "kestrel.queued_requests" semantic conventions. It represents the number of
+	// HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are
+	// currently queued and are waiting to start.
+	// Instrument: updowncounter
+	// Unit: {request}
+	// Stability: Experimental
+	KestrelQueuedRequestsName = "kestrel.queued_requests"
+	KestrelQueuedRequestsUnit = "{request}"
+	KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start."
+
+	// KestrelUpgradedConnections is the metric conforming to the
+	// "kestrel.upgraded_connections" semantic conventions. It represents the
+	// number of connections that are currently upgraded (WebSockets).
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	KestrelUpgradedConnectionsName = "kestrel.upgraded_connections"
+	KestrelUpgradedConnectionsUnit = "{connection}"
+	KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets)."
+
+	// KestrelTLSHandshakeDuration is the metric conforming to the
+	// "kestrel.tls_handshake.duration" semantic conventions. It represents the
+	// duration of TLS handshakes on the server.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration"
+	KestrelTLSHandshakeDurationUnit = "s"
+	KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
+
+	// KestrelActiveTLSHandshakes is the metric conforming to the
+	// "kestrel.active_tls_handshakes" semantic conventions. It represents the
+	// number of TLS handshakes that are currently in progress on the server.
+	// Instrument: updowncounter
+	// Unit: {handshake}
+	// Stability: Experimental
+	KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes"
+	KestrelActiveTLSHandshakesUnit = "{handshake}"
+	KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
+
+	// SignalrServerConnectionDuration is the metric conforming to the
+	// "signalr.server.connection.duration" semantic conventions. It represents the
+	// duration of connections on the server.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	SignalrServerConnectionDurationName = "signalr.server.connection.duration"
+	SignalrServerConnectionDurationUnit = "s"
+	SignalrServerConnectionDurationDescription = "The duration of connections on the server."
+
+	// SignalrServerActiveConnections is the metric conforming to the
+	// "signalr.server.active_connections" semantic conventions. It represents the
+	// number of connections that are currently active on the server.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	SignalrServerActiveConnectionsName = "signalr.server.active_connections"
+	SignalrServerActiveConnectionsUnit = "{connection}"
+	SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
+
+	// FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration"
+	// semantic conventions. It represents the duration of the function's logic
+	// execution.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	FaaSInvokeDurationName = "faas.invoke_duration"
+	FaaSInvokeDurationUnit = "s"
+	FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution"
+
+	// FaaSInitDuration is the metric conforming to the "faas.init_duration"
+	// semantic conventions. It represents the duration of the function's
+	// initialization, such as a cold start.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	FaaSInitDurationName = "faas.init_duration"
+	FaaSInitDurationUnit = "s"
+	FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start"
+
+	// FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic
+	// conventions. It represents the number of invocation cold starts.
+	// Instrument: counter
+	// Unit: {coldstart}
+	// Stability: Experimental
+	FaaSColdstartsName = "faas.coldstarts"
+	FaaSColdstartsUnit = "{coldstart}"
+	FaaSColdstartsDescription = "Number of invocation cold starts"
+
+	// FaaSErrors is the metric conforming to the "faas.errors" semantic
+	// conventions. It represents the number of invocation errors.
+	// Instrument: counter
+	// Unit: {error}
+	// Stability: Experimental
+	FaaSErrorsName = "faas.errors"
+	FaaSErrorsUnit = "{error}"
+	FaaSErrorsDescription = "Number of invocation errors"
+
+	// FaaSInvocations is the metric conforming to the "faas.invocations" semantic
+	// conventions. It represents the number of successful invocations.
+	// Instrument: counter
+	// Unit: {invocation}
+	// Stability: Experimental
+	FaaSInvocationsName = "faas.invocations"
+	FaaSInvocationsUnit = "{invocation}"
+	FaaSInvocationsDescription = "Number of successful invocations"
+
+	// FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic
+	// conventions. It represents the number of invocation timeouts.
+	// Instrument: counter
+	// Unit: {timeout}
+	// Stability: Experimental
+	FaaSTimeoutsName = "faas.timeouts"
+	FaaSTimeoutsUnit = "{timeout}"
+	FaaSTimeoutsDescription = "Number of invocation timeouts"
+
+	// FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic
+	// conventions. It represents the distribution of max memory usage per
+	// invocation.
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	FaaSMemUsageName = "faas.mem_usage"
+	FaaSMemUsageUnit = "By"
+	FaaSMemUsageDescription = "Distribution of max memory usage per invocation"
+
+	// FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic
+	// conventions. It represents the distribution of CPU usage per invocation.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	FaaSCPUUsageName = "faas.cpu_usage"
+	FaaSCPUUsageUnit = "s"
+	FaaSCPUUsageDescription = "Distribution of CPU usage per invocation"
+
+	// FaaSNetIo is the metric conforming to the "faas.net_io" semantic
+	// conventions. It represents the distribution of net I/O usage per invocation.
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	FaaSNetIoName = "faas.net_io"
+	FaaSNetIoUnit = "By"
+	FaaSNetIoDescription = "Distribution of net I/O usage per invocation"
+
+	// HTTPServerRequestDuration is the metric conforming to the
+	// "http.server.request.duration" semantic conventions. It represents the
+	// duration of HTTP server requests.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Stable
+	HTTPServerRequestDurationName = "http.server.request.duration"
+	HTTPServerRequestDurationUnit = "s"
+	HTTPServerRequestDurationDescription = "Duration of HTTP server requests."
+
+	// HTTPServerActiveRequests is the metric conforming to the
+	// "http.server.active_requests" semantic conventions. It represents the number
+	// of active HTTP server requests.
+	// Instrument: updowncounter
+	// Unit: {request}
+	// Stability: Experimental
+	HTTPServerActiveRequestsName = "http.server.active_requests"
+	HTTPServerActiveRequestsUnit = "{request}"
+	HTTPServerActiveRequestsDescription = "Number of active HTTP server requests."
+
+	// HTTPServerRequestBodySize is the metric conforming to the
+	// "http.server.request.body.size" semantic conventions. It represents the size
+	// of HTTP server request bodies.
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	HTTPServerRequestBodySizeName = "http.server.request.body.size"
+	HTTPServerRequestBodySizeUnit = "By"
+	HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies."
+
+	// HTTPServerResponseBodySize is the metric conforming to the
+	// "http.server.response.body.size" semantic conventions. It represents the
+	// size of HTTP server response bodies.
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	HTTPServerResponseBodySizeName = "http.server.response.body.size"
+	HTTPServerResponseBodySizeUnit = "By"
+	HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies."
+
+	// HTTPClientRequestDuration is the metric conforming to the
+	// "http.client.request.duration" semantic conventions. It represents the
+	// duration of HTTP client requests.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Stable
+	HTTPClientRequestDurationName = "http.client.request.duration"
+	HTTPClientRequestDurationUnit = "s"
+	HTTPClientRequestDurationDescription = "Duration of HTTP client requests."
+
+	// HTTPClientRequestBodySize is the metric conforming to the
+	// "http.client.request.body.size" semantic conventions. It represents the size
+	// of HTTP client request bodies.
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	HTTPClientRequestBodySizeName = "http.client.request.body.size"
+	HTTPClientRequestBodySizeUnit = "By"
+	HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies."
+
+	// HTTPClientResponseBodySize is the metric conforming to the
+	// "http.client.response.body.size" semantic conventions. It represents the
+	// size of HTTP client response bodies.
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	HTTPClientResponseBodySizeName = "http.client.response.body.size"
+	HTTPClientResponseBodySizeUnit = "By"
+	HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies."
+
+	// JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic
+	// conventions. It represents the measure of initial memory requested.
+	// Instrument: updowncounter
+	// Unit: By
+	// Stability: Experimental
+	JvmMemoryInitName = "jvm.memory.init"
+	JvmMemoryInitUnit = "By"
+	JvmMemoryInitDescription = "Measure of initial memory requested."
+
+	// JvmSystemCPUUtilization is the metric conforming to the
+	// "jvm.system.cpu.utilization" semantic conventions. It represents the recent
+	// CPU utilization for the whole system as reported by the JVM.
+	// Instrument: gauge
+	// Unit: 1
+	// Stability: Experimental
+	JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization"
+	JvmSystemCPUUtilizationUnit = "1"
+	JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM."
+
+	// JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m"
+	// semantic conventions. It represents the average CPU load of the whole system
+	// for the last minute as reported by the JVM.
+	// Instrument: gauge
+	// Unit: {run_queue_item}
+	// Stability: Experimental
+	JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m"
+	JvmSystemCPULoad1mUnit = "{run_queue_item}"
+	JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM."
+
+	// JvmBufferMemoryUsage is the metric conforming to the
+	// "jvm.buffer.memory.usage" semantic conventions. It represents the measure of
+	// memory used by buffers.
+	// Instrument: updowncounter
+	// Unit: By
+	// Stability: Experimental
+	JvmBufferMemoryUsageName = "jvm.buffer.memory.usage"
+	JvmBufferMemoryUsageUnit = "By"
+	JvmBufferMemoryUsageDescription = "Measure of memory used by buffers."
+
+	// JvmBufferMemoryLimit is the metric conforming to the
+	// "jvm.buffer.memory.limit" semantic conventions. It represents the measure of
+	// total memory capacity of buffers.
+	// Instrument: updowncounter
+	// Unit: By
+	// Stability: Experimental
+	JvmBufferMemoryLimitName = "jvm.buffer.memory.limit"
+	JvmBufferMemoryLimitUnit = "By"
+	JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers."
+
+	// JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic
+	// conventions. It represents the number of buffers in the pool.
+	// Instrument: updowncounter
+	// Unit: {buffer}
+	// Stability: Experimental
+	JvmBufferCountName = "jvm.buffer.count"
+	JvmBufferCountUnit = "{buffer}"
+	JvmBufferCountDescription = "Number of buffers in the pool."
+
+	// JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic
+	// conventions. It represents the measure of memory used.
+	// Instrument: updowncounter
+	// Unit: By
+	// Stability: Stable
+	JvmMemoryUsedName = "jvm.memory.used"
+	JvmMemoryUsedUnit = "By"
+	JvmMemoryUsedDescription = "Measure of memory used."
+
+	// JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed"
+	// semantic conventions. It represents the measure of memory committed.
+	// Instrument: updowncounter
+	// Unit: By
+	// Stability: Stable
+	JvmMemoryCommittedName = "jvm.memory.committed"
+	JvmMemoryCommittedUnit = "By"
+	JvmMemoryCommittedDescription = "Measure of memory committed."
+
+	// JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic
+	// conventions. It represents the measure of max obtainable memory.
+	// Instrument: updowncounter
+	// Unit: By
+	// Stability: Stable
+	JvmMemoryLimitName = "jvm.memory.limit"
+	JvmMemoryLimitUnit = "By"
+	JvmMemoryLimitDescription = "Measure of max obtainable memory."
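Updowncounters such as jvm.memory.used are usually observed asynchronously rather than recorded inline. A sketch under the same imports as the first example; readMemoryUsed is a hypothetical helper returning current usage in bytes, not anything this change provides.

func observeMemoryUsed(meter metric.Meter) error {
	// Register an asynchronous instrument; the callback runs on each
	// collection cycle of the configured MeterProvider.
	_, err := meter.Int64ObservableUpDownCounter(
		semconv.JvmMemoryUsedName,
		metric.WithUnit(semconv.JvmMemoryUsedUnit),
		metric.WithDescription(semconv.JvmMemoryUsedDescription),
		metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error {
			o.Observe(readMemoryUsed()) // hypothetical helper, returns bytes
			return nil
		}),
	)
	return err
}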
+
+	// JvmMemoryUsedAfterLastGc is the metric conforming to the
+	// "jvm.memory.used_after_last_gc" semantic conventions. It represents the
+	// measure of memory used, as measured after the most recent garbage collection
+	// event on this pool.
+	// Instrument: updowncounter
+	// Unit: By
+	// Stability: Stable
+	JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc"
+	JvmMemoryUsedAfterLastGcUnit = "By"
+	JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool."
+
+	// JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic
+	// conventions. It represents the duration of JVM garbage collection actions.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Stable
+	JvmGcDurationName = "jvm.gc.duration"
+	JvmGcDurationUnit = "s"
+	JvmGcDurationDescription = "Duration of JVM garbage collection actions."
+
+	// JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic
+	// conventions. It represents the number of executing platform threads.
+	// Instrument: updowncounter
+	// Unit: {thread}
+	// Stability: Stable
+	JvmThreadCountName = "jvm.thread.count"
+	JvmThreadCountUnit = "{thread}"
+	JvmThreadCountDescription = "Number of executing platform threads."
+
+	// JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic
+	// conventions. It represents the number of classes loaded since JVM start.
+	// Instrument: counter
+	// Unit: {class}
+	// Stability: Stable
+	JvmClassLoadedName = "jvm.class.loaded"
+	JvmClassLoadedUnit = "{class}"
+	JvmClassLoadedDescription = "Number of classes loaded since JVM start."
+
+	// JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded"
+	// semantic conventions. It represents the number of classes unloaded since JVM
+	// start.
+	// Instrument: counter
+	// Unit: {class}
+	// Stability: Stable
+	JvmClassUnloadedName = "jvm.class.unloaded"
+	JvmClassUnloadedUnit = "{class}"
+	JvmClassUnloadedDescription = "Number of classes unloaded since JVM start."
+
+	// JvmClassCount is the metric conforming to the "jvm.class.count" semantic
+	// conventions. It represents the number of classes currently loaded.
+	// Instrument: updowncounter
+	// Unit: {class}
+	// Stability: Stable
+	JvmClassCountName = "jvm.class.count"
+	JvmClassCountUnit = "{class}"
+	JvmClassCountDescription = "Number of classes currently loaded."
+
+	// JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic
+	// conventions. It represents the number of processors available to the Java
+	// virtual machine.
+	// Instrument: updowncounter
+	// Unit: {cpu}
+	// Stability: Stable
+	JvmCPUCountName = "jvm.cpu.count"
+	JvmCPUCountUnit = "{cpu}"
+	JvmCPUCountDescription = "Number of processors available to the Java virtual machine."
+
+	// JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic
+	// conventions. It represents the CPU time used by the process as reported by
+	// the JVM.
+	// Instrument: counter
+	// Unit: s
+	// Stability: Stable
+	JvmCPUTimeName = "jvm.cpu.time"
+	JvmCPUTimeUnit = "s"
+	JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM."
+
+	// JvmCPURecentUtilization is the metric conforming to the
+	// "jvm.cpu.recent_utilization" semantic conventions. It represents the recent
+	// CPU utilization for the process as reported by the JVM.
+	// Instrument: gauge
+	// Unit: 1
+	// Stability: Stable
+	JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization"
+	JvmCPURecentUtilizationUnit = "1"
+	JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM."
+
+	// MessagingPublishDuration is the metric conforming to the
+	// "messaging.publish.duration" semantic conventions. It represents the
+	// duration of the publish operation.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	MessagingPublishDurationName = "messaging.publish.duration"
+	MessagingPublishDurationUnit = "s"
+	MessagingPublishDurationDescription = "Measures the duration of publish operation."
+
+	// MessagingReceiveDuration is the metric conforming to the
+	// "messaging.receive.duration" semantic conventions. It represents the
+	// duration of the receive operation.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	MessagingReceiveDurationName = "messaging.receive.duration"
+	MessagingReceiveDurationUnit = "s"
+	MessagingReceiveDurationDescription = "Measures the duration of receive operation."
+
+	// MessagingDeliverDuration is the metric conforming to the
+	// "messaging.deliver.duration" semantic conventions. It represents the
+	// duration of the deliver operation.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	MessagingDeliverDurationName = "messaging.deliver.duration"
+	MessagingDeliverDurationUnit = "s"
+	MessagingDeliverDurationDescription = "Measures the duration of deliver operation."
+
+	// MessagingPublishMessages is the metric conforming to the
+	// "messaging.publish.messages" semantic conventions. It represents the
+	// number of published messages.
+	// Instrument: counter
+	// Unit: {message}
+	// Stability: Experimental
+	MessagingPublishMessagesName = "messaging.publish.messages"
+	MessagingPublishMessagesUnit = "{message}"
+	MessagingPublishMessagesDescription = "Measures the number of published messages."
+
+	// MessagingReceiveMessages is the metric conforming to the
+	// "messaging.receive.messages" semantic conventions. It represents the
+	// number of received messages.
+	// Instrument: counter
+	// Unit: {message}
+	// Stability: Experimental
+	MessagingReceiveMessagesName = "messaging.receive.messages"
+	MessagingReceiveMessagesUnit = "{message}"
+	MessagingReceiveMessagesDescription = "Measures the number of received messages."
+
+	// MessagingDeliverMessages is the metric conforming to the
+	// "messaging.deliver.messages" semantic conventions. It represents the
+	// number of delivered messages.
+	// Instrument: counter
+	// Unit: {message}
+	// Stability: Experimental
+	MessagingDeliverMessagesName = "messaging.deliver.messages"
+	MessagingDeliverMessagesUnit = "{message}"
+	MessagingDeliverMessagesDescription = "Measures the number of delivered messages."
+
+	// RPCServerDuration is the metric conforming to the "rpc.server.duration"
+	// semantic conventions. It represents the duration of inbound RPC.
+	// Instrument: histogram
+	// Unit: ms
+	// Stability: Experimental
+	RPCServerDurationName = "rpc.server.duration"
+	RPCServerDurationUnit = "ms"
+	RPCServerDurationDescription = "Measures the duration of inbound RPC."
+
+	// RPCServerRequestSize is the metric conforming to the
+	// "rpc.server.request.size" semantic conventions. It represents the size of
+	// RPC request messages (uncompressed).
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	RPCServerRequestSizeName = "rpc.server.request.size"
+	RPCServerRequestSizeUnit = "By"
+	RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+	// RPCServerResponseSize is the metric conforming to the
+	// "rpc.server.response.size" semantic conventions. It represents the size of
+	// RPC response messages (uncompressed).
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	RPCServerResponseSizeName = "rpc.server.response.size"
+	RPCServerResponseSizeUnit = "By"
+	RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+	// RPCServerRequestsPerRPC is the metric conforming to the
+	// "rpc.server.requests_per_rpc" semantic conventions. It represents the
+	// number of messages received per RPC.
+	// Instrument: histogram
+	// Unit: {count}
+	// Stability: Experimental
+	RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc"
+	RPCServerRequestsPerRPCUnit = "{count}"
+	RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+	// RPCServerResponsesPerRPC is the metric conforming to the
+	// "rpc.server.responses_per_rpc" semantic conventions. It represents the
+	// number of messages sent per RPC.
+	// Instrument: histogram
+	// Unit: {count}
+	// Stability: Experimental
+	RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc"
+	RPCServerResponsesPerRPCUnit = "{count}"
+	RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
+	// RPCClientDuration is the metric conforming to the "rpc.client.duration"
+	// semantic conventions. It represents the duration of outbound RPC.
+	// Instrument: histogram
+	// Unit: ms
+	// Stability: Experimental
+	RPCClientDurationName = "rpc.client.duration"
+	RPCClientDurationUnit = "ms"
+	RPCClientDurationDescription = "Measures the duration of outbound RPC."
+
+	// RPCClientRequestSize is the metric conforming to the
+	// "rpc.client.request.size" semantic conventions. It represents the size of
+	// RPC request messages (uncompressed).
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	RPCClientRequestSizeName = "rpc.client.request.size"
+	RPCClientRequestSizeUnit = "By"
+	RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+	// RPCClientResponseSize is the metric conforming to the
+	// "rpc.client.response.size" semantic conventions. It represents the size of
+	// RPC response messages (uncompressed).
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	RPCClientResponseSizeName = "rpc.client.response.size"
+	RPCClientResponseSizeUnit = "By"
+	RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+	// RPCClientRequestsPerRPC is the metric conforming to the
+	// "rpc.client.requests_per_rpc" semantic conventions. It represents the
+	// number of messages received per RPC.
+	// Instrument: histogram
+	// Unit: {count}
+	// Stability: Experimental
+	RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc"
+	RPCClientRequestsPerRPCUnit = "{count}"
+	RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+	// RPCClientResponsesPerRPC is the metric conforming to the
+	// "rpc.client.responses_per_rpc" semantic conventions. It represents the
+	// number of messages sent per RPC.
+	// Instrument: histogram
+	// Unit: {count}
+	// Stability: Experimental
+	RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc"
+	RPCClientResponsesPerRPCUnit = "{count}"
+	RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
+	// SystemCPUTime is the metric conforming to the "system.cpu.time" semantic
+	// conventions. It represents the seconds each logical CPU spent on each mode.
+	// Instrument: counter
+	// Unit: s
+	// Stability: Experimental
+	SystemCPUTimeName = "system.cpu.time"
+	SystemCPUTimeUnit = "s"
+	SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode"
+
+	// SystemCPUUtilization is the metric conforming to the
+	// "system.cpu.utilization" semantic conventions. It represents the difference
+	// in system.cpu.time since the last measurement, divided by the elapsed time
+	// and number of logical CPUs.
+	// Instrument: gauge
+	// Unit: 1
+	// Stability: Experimental
+	SystemCPUUtilizationName = "system.cpu.utilization"
+	SystemCPUUtilizationUnit = "1"
+	SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs"
+
+	// SystemCPUFrequency is the metric conforming to the "system.cpu.frequency"
+	// semantic conventions. It represents the current frequency of the CPU in Hz.
+	// Instrument: gauge
+	// Unit: {Hz}
+	// Stability: Experimental
+	SystemCPUFrequencyName = "system.cpu.frequency"
+	SystemCPUFrequencyUnit = "{Hz}"
+	SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz"
+
+	// SystemCPUPhysicalCount is the metric conforming to the
+	// "system.cpu.physical.count" semantic conventions. It represents the number
+	// of actual physical processor cores on the hardware.
+	// Instrument: updowncounter
+	// Unit: {cpu}
+	// Stability: Experimental
+	SystemCPUPhysicalCountName = "system.cpu.physical.count"
+	SystemCPUPhysicalCountUnit = "{cpu}"
+	SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware"
+
+	// SystemCPULogicalCount is the metric conforming to the
+	// "system.cpu.logical.count" semantic conventions. It represents the number
+	// of logical (virtual) processor cores created by the operating system to
+	// manage multitasking.
+	// Instrument: updowncounter
+	// Unit: {cpu}
+	// Stability: Experimental
+	SystemCPULogicalCountName = "system.cpu.logical.count"
+	SystemCPULogicalCountUnit = "{cpu}"
+	SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking"
+
+	// SystemMemoryUsage is the metric conforming to the "system.memory.usage"
+	// semantic conventions. It represents memory in use by state.
+	// Instrument: updowncounter
+	// Unit: By
+	// Stability: Experimental
+	SystemMemoryUsageName = "system.memory.usage"
+	SystemMemoryUsageUnit = "By"
+	SystemMemoryUsageDescription = "Reports memory in use by state."
+
+	// SystemMemoryLimit is the metric conforming to the "system.memory.limit"
+	// semantic conventions. It represents the total memory available in the
+	// system.
+	// Instrument: updowncounter
+	// Unit: By
+	// Stability: Experimental
+	SystemMemoryLimitName = "system.memory.limit"
+	SystemMemoryLimitUnit = "By"
+	SystemMemoryLimitDescription = "Total memory available in the system."
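Gauges such as system.cpu.utilization follow the same asynchronous pattern as the updowncounter sketch above. Same imports assumed; sampleCPUUtilization is a hypothetical helper returning a value in [0, 1].

func observeCPUUtilization(meter metric.Meter) error {
	_, err := meter.Float64ObservableGauge(
		semconv.SystemCPUUtilizationName,
		metric.WithUnit(semconv.SystemCPUUtilizationUnit),
		metric.WithDescription(semconv.SystemCPUUtilizationDescription),
		metric.WithFloat64Callback(func(_ context.Context, o metric.Float64Observer) error {
			o.Observe(sampleCPUUtilization()) // hypothetical helper, unit "1"
			return nil
		}),
	)
	return err
}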
+
+	// SystemMemoryUtilization is the metric conforming to the
+	// "system.memory.utilization" semantic conventions.
+	// Instrument: gauge
+	// Unit: 1
+	// Stability: Experimental
+	// NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+	SystemMemoryUtilizationName = "system.memory.utilization"
+	SystemMemoryUtilizationUnit = "1"
+
+	// SystemPagingUsage is the metric conforming to the "system.paging.usage"
+	// semantic conventions. It represents the unix swap or windows pagefile usage.
+	// Instrument: updowncounter
+	// Unit: By
+	// Stability: Experimental
+	SystemPagingUsageName = "system.paging.usage"
+	SystemPagingUsageUnit = "By"
+	SystemPagingUsageDescription = "Unix swap or windows pagefile usage"
+
+	// SystemPagingUtilization is the metric conforming to the
+	// "system.paging.utilization" semantic conventions.
+	// Instrument: gauge
+	// Unit: 1
+	// Stability: Experimental
+	// NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+	SystemPagingUtilizationName = "system.paging.utilization"
+	SystemPagingUtilizationUnit = "1"
+
+	// SystemPagingFaults is the metric conforming to the "system.paging.faults"
+	// semantic conventions.
+	// Instrument: counter
+	// Unit: {fault}
+	// Stability: Experimental
+	// NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+	SystemPagingFaultsName = "system.paging.faults"
+	SystemPagingFaultsUnit = "{fault}"
+
+	// SystemPagingOperations is the metric conforming to the
+	// "system.paging.operations" semantic conventions.
+	// Instrument: counter
+	// Unit: {operation}
+	// Stability: Experimental
+	// NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+	SystemPagingOperationsName = "system.paging.operations"
+	SystemPagingOperationsUnit = "{operation}"
+
+	// SystemDiskIo is the metric conforming to the "system.disk.io" semantic
+	// conventions.
+	// Instrument: counter
+	// Unit: By
+	// Stability: Experimental
+	// NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+	SystemDiskIoName = "system.disk.io"
+	SystemDiskIoUnit = "By"
+
+	// SystemDiskOperations is the metric conforming to the
+	// "system.disk.operations" semantic conventions.
+	// Instrument: counter
+	// Unit: {operation}
+	// Stability: Experimental
+	// NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+	SystemDiskOperationsName = "system.disk.operations"
+	SystemDiskOperationsUnit = "{operation}"
+
+	// SystemDiskIoTime is the metric conforming to the "system.disk.io_time"
+	// semantic conventions. It represents the time disk spent activated.
+	// Instrument: counter
+	// Unit: s
+	// Stability: Experimental
+	SystemDiskIoTimeName = "system.disk.io_time"
+	SystemDiskIoTimeUnit = "s"
+	SystemDiskIoTimeDescription = "Time disk spent activated"
+
+	// SystemDiskOperationTime is the metric conforming to the
+	// "system.disk.operation_time" semantic conventions. It represents the sum of
+	// the time each operation took to complete.
+	// Instrument: counter
+	// Unit: s
+	// Stability: Experimental
+	SystemDiskOperationTimeName = "system.disk.operation_time"
+	SystemDiskOperationTimeUnit = "s"
+	SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete"
+
+	// SystemDiskMerged is the metric conforming to the "system.disk.merged"
+	// semantic conventions.
+	// Instrument: counter
+	// Unit: {operation}
+	// Stability: Experimental
+	// NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+	SystemDiskMergedName = "system.disk.merged"
+	SystemDiskMergedUnit = "{operation}"
+
+	// SystemFilesystemUsage is the metric conforming to the
+	// "system.filesystem.usage" semantic conventions.
+	// Instrument: updowncounter
+	// Unit: By
+	// Stability: Experimental
+	// NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+	SystemFilesystemUsageName = "system.filesystem.usage"
+	SystemFilesystemUsageUnit = "By"
+
+	// SystemFilesystemUtilization is the metric conforming to the
+	// "system.filesystem.utilization" semantic conventions.
+	// Instrument: gauge
+	// Unit: 1
+	// Stability: Experimental
+	// NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+	SystemFilesystemUtilizationName = "system.filesystem.utilization"
+	SystemFilesystemUtilizationUnit = "1"
+
+	// SystemNetworkDropped is the metric conforming to the
+	// "system.network.dropped" semantic conventions. It represents the count of
+	// packets that are dropped or discarded even though there was no error.
+	// Instrument: counter
+	// Unit: {packet}
+	// Stability: Experimental
+	SystemNetworkDroppedName = "system.network.dropped"
+	SystemNetworkDroppedUnit = "{packet}"
+	SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error"
+
+	// SystemNetworkPackets is the metric conforming to the
+	// "system.network.packets" semantic conventions.
+	// Instrument: counter
+	// Unit: {packet}
+	// Stability: Experimental
+	// NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+	SystemNetworkPacketsName = "system.network.packets"
+	SystemNetworkPacketsUnit = "{packet}"
+
+	// SystemNetworkErrors is the metric conforming to the "system.network.errors"
+	// semantic conventions. It represents the count of network errors detected.
+	// Instrument: counter
+	// Unit: {error}
+	// Stability: Experimental
+	SystemNetworkErrorsName = "system.network.errors"
+	SystemNetworkErrorsUnit = "{error}"
+	SystemNetworkErrorsDescription = "Count of network errors detected"
+
+	// SystemNetworkIo is the metric conforming to the "system.network.io" semantic
+	// conventions.
+	// Instrument: counter
+	// Unit: By
+	// Stability: Experimental
+	// NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+	SystemNetworkIoName = "system.network.io"
+	SystemNetworkIoUnit = "By"
+
+	// SystemNetworkConnections is the metric conforming to the
+	// "system.network.connections" semantic conventions.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	// NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+	SystemNetworkConnectionsName = "system.network.connections"
+	SystemNetworkConnectionsUnit = "{connection}"
+
+	// SystemProcessesCount is the metric conforming to the
+	// "system.processes.count" semantic conventions. It represents the total
+	// number of processes in each state.
+	// Instrument: updowncounter
+	// Unit: {process}
+	// Stability: Experimental
+	SystemProcessesCountName = "system.processes.count"
+	SystemProcessesCountUnit = "{process}"
+	SystemProcessesCountDescription = "Total number of processes in each state"
+
+	// SystemProcessesCreated is the metric conforming to the
+	// "system.processes.created" semantic conventions. It represents the total
+	// number of processes created over uptime of the host.
+	// Instrument: counter
+	// Unit: {process}
+	// Stability: Experimental
+	SystemProcessesCreatedName = "system.processes.created"
+	SystemProcessesCreatedUnit = "{process}"
+	SystemProcessesCreatedDescription = "Total number of processes created over uptime of the host"
+
+	// SystemLinuxMemoryAvailable is the metric conforming to the
+	// "system.linux.memory.available" semantic conventions. It represents an
+	// estimate of how much memory is available for starting new applications,
+	// without causing swapping.
+	// Instrument: updowncounter
+	// Unit: By
+	// Stability: Experimental
+	SystemLinuxMemoryAvailableName = "system.linux.memory.available"
+	SystemLinuxMemoryAvailableUnit = "By"
+	SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go
new file mode 100644
index 00000000..d66bbe9c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go
@@ -0,0 +1,2545 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// A cloud environment (e.g. GCP, Azure, AWS).
+const (
+	// CloudAccountIDKey is the attribute Key conforming to the
+	// "cloud.account.id" semantic conventions. It represents the cloud account
+	// ID the resource is assigned to.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '111111111111', 'opentelemetry'
+	CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+	// CloudAvailabilityZoneKey is the attribute Key conforming to the
+	// "cloud.availability_zone" semantic conventions. Cloud regions often have
+	// multiple, isolated locations known as zones to increase availability;
+	// this attribute represents the zone where the resource is running.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'us-east-1c'
+	// Note: Availability zones are called "zones" on Alibaba Cloud and Google
+	// Cloud.
+	CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+	// CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+	// semantic conventions. It represents the cloud platform in use.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The prefix of the service SHOULD match the one specified in
+	// `cloud.provider`.
+	CloudPlatformKey = attribute.Key("cloud.platform")
+
+	// CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+	// semantic conventions. It represents the name of the cloud provider.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	CloudProviderKey = attribute.Key("cloud.provider")
+
+	// CloudRegionKey is the attribute Key conforming to the "cloud.region"
+	// semantic conventions. It represents the geographical region the resource
+	// is running.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'us-central1', 'us-east-1'
+	// Note: Refer to your provider's docs to see the available regions, for
+	// example [Alibaba Cloud
+	// regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+	// regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+	// [Azure
+	// regions](https://azure.microsoft.com/global-infrastructure/geographies/),
+	// [Google Cloud regions](https://cloud.google.com/about/locations), or
+	// [Tencent Cloud
+	// regions](https://www.tencentcloud.com/document/product/213/6091).
+	CloudRegionKey = attribute.Key("cloud.region")
+
+	// CloudResourceIDKey is the attribute Key conforming to the
+	// "cloud.resource_id" semantic conventions. It represents the cloud
+	// provider-specific native identifier of the monitored cloud resource
+	// (e.g. an
+	// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+	// on AWS, a [fully qualified resource
+	// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id)
+	// on Azure, a [full resource
+	// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+	// on GCP)
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+	// '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+	// '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/'
+	// Note: On some cloud providers, it may not be possible to determine the
+	// full ID at startup,
+	// so it may be necessary to set `cloud.resource_id` as a span attribute
+	// instead.
+	//
+	// The exact value to use for `cloud.resource_id` depends on the cloud
+	// provider.
+	// The following well-known definitions MUST be used if you set this
+	// attribute and they apply:
+	//
+	// * **AWS Lambda:** The function
+	// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+	// Take care not to use the "invoked ARN" directly but replace any
+	// [alias
+	// suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+	// with the resolved function version, as the same runtime instance may
+	// be invokable with
+	// multiple different aliases.
+	// * **GCP:** The [URI of the
+	// resource](https://cloud.google.com/iam/docs/full-resource-names)
+	// * **Azure:** The [Fully Qualified Resource
+	// ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id)
+	// of the invoked function,
+	// *not* the function app, having the form
+	// `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`.
+	// This means that a span attribute MUST be used, as an Azure function
+	// app can host multiple functions that would usually share
+	// a TracerProvider.
+	CloudResourceIDKey = attribute.Key("cloud.resource_id")
+)
+
+var (
+	// Alibaba Cloud Elastic Compute Service
+	CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+	// Alibaba Cloud Function Compute
+	CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+	// Red Hat OpenShift on Alibaba Cloud
+	CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+	// AWS Elastic Compute Cloud
+	CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+	// AWS Elastic Container Service
+	CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+	// AWS Elastic Kubernetes Service
+	CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+	// AWS Lambda
+	CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+	// AWS Elastic Beanstalk
+	CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+	// AWS App Runner
+	CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+	// Red Hat OpenShift on AWS (ROSA)
+	CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+	// Azure Virtual Machines
+	CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+	// Azure Container Instances
+	CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+	// Azure Kubernetes Service
+	CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+	// Azure Functions
+	CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+	// Azure App Service
+	CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+	// Azure Red Hat OpenShift
+	CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+	// Google Bare Metal Solution (BMS)
+	CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
+	// Google Cloud Compute Engine (GCE)
+	CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+	// Google Cloud Run
+	CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+	// Google Cloud Kubernetes Engine (GKE)
+	CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+	// Google Cloud Functions (GCF)
+	CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+	// Google Cloud App Engine (GAE)
+	CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+	// Red Hat OpenShift on Google Cloud
+	CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
+	// Red Hat OpenShift on IBM Cloud
+	CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+	// Tencent Cloud Cloud Virtual Machine (CVM)
+	CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+	// Tencent Cloud Elastic Kubernetes Service (EKS)
+	CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+	// Tencent Cloud Serverless Cloud Function (SCF)
+	CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+var (
+	// Alibaba Cloud
+	CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+	// Amazon Web Services
+	CloudProviderAWS = CloudProviderKey.String("aws")
+	// Microsoft Azure
+	CloudProviderAzure = CloudProviderKey.String("azure")
+	// Google Cloud Platform
+	CloudProviderGCP = CloudProviderKey.String("gcp")
+	// Heroku Platform as a Service
+	CloudProviderHeroku = CloudProviderKey.String("heroku")
+	// IBM Cloud
+	CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+	// Tencent Cloud
+	CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+	return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. Cloud regions often have
+// multiple, isolated locations known as zones to increase availability; this
+// attribute represents the zone where the resource is running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+	return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+	return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
+// Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+	return CloudResourceIDKey.String(val)
+}
+
+// A container instance.
+const (
+	// ContainerCommandKey is the attribute Key conforming to the
+	// "container.command" semantic conventions. It represents the command used
+	// to run the container (i.e. the command name).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol'
+	// Note: If using embedded credentials or sensitive data, it is recommended
+	// to remove them to prevent potential leakage.
+	ContainerCommandKey = attribute.Key("container.command")
+
+	// ContainerCommandArgsKey is the attribute Key conforming to the
+	// "container.command_args" semantic conventions. It represents all the
+	// command arguments (including the command/executable itself) run by the
+	// container. [2]
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol, --config, config.yaml'
+	ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+	// ContainerCommandLineKey is the attribute Key conforming to the
+	// "container.command_line" semantic conventions. It represents the full
+	// command run by the container as a single string representing the full
+	// command. [2]
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol --config config.yaml'
+	ContainerCommandLineKey = attribute.Key("container.command_line")
+
+	// ContainerIDKey is the attribute Key conforming to the "container.id"
+	// semantic conventions. It represents the container ID. Usually a UUID, as
+	// for example used to [identify Docker
+	// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+	// The UUID might be abbreviated.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'a3bf90e006b2'
+	ContainerIDKey = attribute.Key("container.id")
+
+	// ContainerImageIDKey is the attribute Key conforming to the
+	// "container.image.id" semantic conventions. It represents the runtime
+	// specific image identifier. Usually a hash algorithm followed by a UUID.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+	// Note: Docker defines a sha256 of the image id; `container.image.id`
+	// corresponds to the `Image` field from the Docker container inspect
+	// [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
+	// endpoint.
+	// K8S defines a link to the container registry repository with digest
+	// `"imageID": "registry.azurecr.io
+	// /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+	// The ID is assigned by the container runtime and can vary in different
+	// environments. Consider using `oci.manifest.digest` if it is important to
+	// identify the same image in different environments/runtimes.
+	ContainerImageIDKey = attribute.Key("container.image.id")
+
+	// ContainerImageNameKey is the attribute Key conforming to the
+	// "container.image.name" semantic conventions. It represents the name of
+	// the image the container was built on.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'gcr.io/opentelemetry/operator'
+	ContainerImageNameKey = attribute.Key("container.image.name")
+
+	// ContainerImageRepoDigestsKey is the attribute Key conforming to the
+	// "container.image.repo_digests" semantic conventions. It represents the
+	// repo digests of the container image as provided by the container
+	// runtime.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
+	// 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
+	// Note:
+	// [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
+	// and
+	// [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
+	// report those under the `RepoDigests` field.
+	ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+	// ContainerImageTagsKey is the attribute Key conforming to the
+	// "container.image.tags" semantic conventions. It represents the container
+	// image tags. An example can be found in [Docker Image
+	// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+	// Should be only the `<tag>` section of the full name for example from
+	// `registry.example.com/my-org/my-image:<tag>`.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'v1.27.1', '3.5.7-0'
+	ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+	// ContainerNameKey is the attribute Key conforming to the "container.name"
+	// semantic conventions. It represents the container name used by container
+	// runtime.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-autoconf'
+	ContainerNameKey = attribute.Key("container.name")
+
+	// ContainerRuntimeKey is the attribute Key conforming to the
+	// "container.runtime" semantic conventions. It represents the container
+	// runtime managing this container.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'docker', 'containerd', 'rkt'
+	ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+	return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) run by the
+// container. [2]
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+	return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full
+// command run by the container as a single string representing the full
+// command. [2]
+func ContainerCommandLine(val string) attribute.KeyValue {
+	return ContainerCommandLineKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+	return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime
+// specific image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+	return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+	return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+	return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container
+// image tags. An example can be found in [Docker Image
+// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+// Should be only the `<tag>` section of the full name for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+func ContainerImageTags(val ...string) attribute.KeyValue { + return ContainerImageTagsKey.StringSlice(val) +} + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// Describes device attributes. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine-readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human-readable version of + // the device model rather than a machine-readable alternative. 
+ DeviceModelNameKey = attribute.Key("device.model.name") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// A host is defined as a computing instance. For example, physical servers, +// virtual machines, switches or disk array. +const ( + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + HostArchKey = attribute.Key("host.arch") + + // HostCPUCacheL2SizeKey is the attribute Key conforming to the + // "host.cpu.cache.l2.size" semantic conventions. It represents the amount + // of level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 12288000 + HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") + + // HostCPUFamilyKey is the attribute Key conforming to the + // "host.cpu.family" semantic conventions. It represents the family or + // generation of the CPU. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '6', 'PA-RISC 1.1e' + HostCPUFamilyKey = attribute.Key("host.cpu.family") + + // HostCPUModelIDKey is the attribute Key conforming to the + // "host.cpu.model.id" semantic conventions. It represents the model + // identifier. It provides more granular information about the CPU, + // distinguishing it from other CPUs within the same family. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '6', '9000/778/B180L' + HostCPUModelIDKey = attribute.Key("host.cpu.model.id") + + // HostCPUModelNameKey is the attribute Key conforming to the + // "host.cpu.model.name" semantic conventions. It represents the model + // designation of the processor. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' + HostCPUModelNameKey = attribute.Key("host.cpu.model.name") + + // HostCPUSteppingKey is the attribute Key conforming to the + // "host.cpu.stepping" semantic conventions. It represents the stepping or + // core revisions. 
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1
+	HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
+
+	// HostCPUVendorIDKey is the attribute Key conforming to the
+	// "host.cpu.vendor.id" semantic conventions. It represents the processor
+	// manufacturer identifier. A maximum 12-character string.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'GenuineIntel'
+	// Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
+	// ID string in EBX, EDX and ECX registers. Writing these to memory in this
+	// order results in a 12-character string.
+	HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
+
+	// HostIDKey is the attribute Key conforming to the "host.id" semantic
+	// conventions. It represents the unique host ID. For Cloud, this must be
+	// the instance_id assigned by the cloud provider. For non-containerized
+	// systems, this should be the `machine-id`. See the table below for the
+	// sources to use to determine the `machine-id` based on operating system.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+	HostIDKey = attribute.Key("host.id")
+
+	// HostImageIDKey is the attribute Key conforming to the "host.image.id"
+	// semantic conventions. It represents the VM image ID or host OS image ID.
+	// For Cloud, this value is from the provider.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ami-07b06b442921831e5'
+	HostImageIDKey = attribute.Key("host.image.id")
+
+	// HostImageNameKey is the attribute Key conforming to the
+	// "host.image.name" semantic conventions. It represents the name of the VM
+	// image or OS install the host was instantiated from.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+	HostImageNameKey = attribute.Key("host.image.name")
+
+	// HostImageVersionKey is the attribute Key conforming to the
+	// "host.image.version" semantic conventions. It represents the version
+	// string of the VM image or host OS as defined in [Version
+	// Attributes](/docs/resource/README.md#version-attributes).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '0.1'
+	HostImageVersionKey = attribute.Key("host.image.version")
+
+	// HostIPKey is the attribute Key conforming to the "host.ip" semantic
+	// conventions. It represents the available IP addresses of the host,
+	// excluding loopback interfaces.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e'
+	// Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
+	// addresses MUST be specified in the [RFC
+	// 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format.
+	HostIPKey = attribute.Key("host.ip")
+
+	// HostMacKey is the attribute Key conforming to the "host.mac" semantic
+	// conventions. It represents the available MAC addresses of the host,
+	// excluding loopback interfaces.
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal + // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): + // as hyphen-separated octets in uppercase hexadecimal form from most to + // least significant. + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or +// generation of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model +// identifier. It provides more granular information about the CPU, +// distinguishing it from other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. +func HostCPUModelName(val string) attribute.KeyValue { + return HostCPUModelNameKey.String(val) +} + +// HostCPUStepping returns an attribute KeyValue conforming to the +// "host.cpu.stepping" semantic conventions. It represents the stepping or core +// revisions. +func HostCPUStepping(val int) attribute.KeyValue { + return HostCPUSteppingKey.Int(val) +} + +// HostCPUVendorID returns an attribute KeyValue conforming to the +// "host.cpu.vendor.id" semantic conventions. It represents the processor +// manufacturer identifier. A maximum 12-character string. 
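+//
+// Illustrative usage (editor's sketch, not part of the generated
+// conventions): host attributes are normally filled in by a resource
+// detector; a hand-rolled equivalent, assuming the package is imported as
+// semconv and go.opentelemetry.io/otel/sdk/resource is available, might be:
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.HostName("opentelemetry-test"),
+//		semconv.HostArchAMD64,
+//	)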
+func HostCPUVendorID(val string) attribute.KeyValue {
+	return HostCPUVendorIDKey.String(val)
+}
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+	return HostIDKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the VM image ID or host
+// OS image ID. For Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+	return HostImageIDKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+	return HostImageNameKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image or host OS as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+	return HostImageVersionKey.String(val)
+}
+
+// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
+// conventions. It represents the available IP addresses of the host, excluding
+// loopback interfaces.
+func HostIP(val ...string) attribute.KeyValue {
+	return HostIPKey.StringSlice(val)
+}
+
+// HostMac returns an attribute KeyValue conforming to the "host.mac"
+// semantic conventions. It represents the available MAC addresses of the host,
+// excluding loopback interfaces.
+func HostMac(val ...string) attribute.KeyValue {
+	return HostMacKey.StringSlice(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+	return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+	return HostTypeKey.String(val)
+}
+
+// Kubernetes resource attributes.
+const (
+	// K8SClusterNameKey is the attribute Key conforming to the
+	// "k8s.cluster.name" semantic conventions. It represents the name of the
+	// cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-cluster'
+	K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+	// K8SClusterUIDKey is the attribute Key conforming to the
+	// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
+	// the cluster, set to the UID of the `kube-system` namespace.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
+	// Note: K8S doesn't have support for obtaining a cluster ID.
If this is + // ever + // added, we will recommend collecting the `k8s.cluster.uid` through the + // official APIs. In the meantime, we are able to use the `uid` of the + // `kube-system` namespace as a proxy for cluster ID. Read on for the + // rationale. + // + // Every object created in a K8S cluster is assigned a distinct UID. The + // `kube-system` namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the `uid` of the `kube-system` + // namespace is a reasonable proxy for the K8S ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // [ISO/IEC 9834-8 and ITU-T + // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). + // Which states: + // + // > If generated according to one of the mechanisms defined in Rec. + // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // different from all other UUIDs generated before 3603 A.D., or is + // extremely likely to be different (depending on the mechanism chosen). + // + // Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") + + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") + + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") + + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// K8SClusterUID returns an attribute KeyValue conforming to the +// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the +// cluster, set to the UID of the `kube-system` namespace. +func K8SClusterUID(val string) attribute.KeyValue { + return K8SClusterUIDKey.String(val) +} + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. 
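+//
+// Illustrative usage (editor's sketch, not part of the generated
+// conventions): a workload could describe itself with these constructors,
+// assuming the package is imported as semconv and
+// go.opentelemetry.io/otel/sdk/resource is available:
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.K8SNamespaceName("default"),
+//		semconv.K8SPodName("opentelemetry-pod-autoconf"),
+//		semconv.K8SContainerName("redis"),
+//	)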
+func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// An OCI image manifest. +const ( + // OciManifestDigestKey is the attribute Key conforming to the + // "oci.manifest.digest" semantic conventions. 
It represents the digest of
+	// the OCI image manifest. For container images specifically, it is the
+	// digest by which the container image is known.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
+	// Note: Follows [OCI Image Manifest
+	// Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
+	// and specifically the [Digest
+	// property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
+	// An example can be found in [Example Image
+	// Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
+	OciManifestDigestKey = attribute.Key("oci.manifest.digest")
+)
+
+// OciManifestDigest returns an attribute KeyValue conforming to the
+// "oci.manifest.digest" semantic conventions. It represents the digest of the
+// OCI image manifest. For container images specifically, it is the digest by
+// which the container image is known.
+func OciManifestDigest(val string) attribute.KeyValue {
+	return OciManifestDigestKey.String(val)
+}
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+	// OSBuildIDKey is the attribute Key conforming to the "os.build_id"
+	// semantic conventions. It represents the unique identifier for a
+	// particular build or compilation of the operating system.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
+	OSBuildIDKey = attribute.Key("os.build_id")
+
+	// OSDescriptionKey is the attribute Key conforming to the "os.description"
+	// semantic conventions. It represents the human-readable (not intended to
+	// be parsed) OS version information, e.g. as reported by `ver` or
+	// `lsb_release -a` commands.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+	// LTS'
+	OSDescriptionKey = attribute.Key("os.description")
+
+	// OSNameKey is the attribute Key conforming to the "os.name" semantic
+	// conventions. It represents the human-readable operating system name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'iOS', 'Android', 'Ubuntu'
+	OSNameKey = attribute.Key("os.name")
+
+	// OSTypeKey is the attribute Key conforming to the "os.type" semantic
+	// conventions. It represents the operating system type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	OSTypeKey = attribute.Key("os.type")
+
+	// OSVersionKey is the attribute Key conforming to the "os.version"
+	// semantic conventions. It represents the version string of the operating
+	// system as defined in [Version
+	// Attributes](/docs/resource/README.md#version-attributes).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '14.2.1', '18.04.1'
+	OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+	// Microsoft Windows
+	OSTypeWindows = OSTypeKey.String("windows")
+	// Linux
+	OSTypeLinux = OSTypeKey.String("linux")
+	// Apple Darwin
+	OSTypeDarwin = OSTypeKey.String("darwin")
+	// FreeBSD
+	OSTypeFreeBSD = OSTypeKey.String("freebsd")
+	// NetBSD
+	OSTypeNetBSD = OSTypeKey.String("netbsd")
+	// OpenBSD
+	OSTypeOpenBSD = OSTypeKey.String("openbsd")
+	// DragonFly BSD
+	OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+	// HP-UX (Hewlett Packard Unix)
+	OSTypeHPUX = OSTypeKey.String("hpux")
+	// AIX (Advanced Interactive eXecutive)
+	OSTypeAIX = OSTypeKey.String("aix")
+	// SunOS, Oracle Solaris
+	OSTypeSolaris = OSTypeKey.String("solaris")
+	// IBM z/OS
+	OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the operating system.
+func OSBuildID(val string) attribute.KeyValue {
+	return OSBuildIDKey.String(val)
+}
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human-readable (not
+// intended to be parsed) OS version information, e.g. as reported by `ver`
+// or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+	return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human-readable operating system name.
+func OSName(val string) attribute.KeyValue {
+	return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+	return OSVersionKey.String(val)
+}
+
+// An operating system process.
+const (
+	// ProcessCommandKey is the attribute Key conforming to the
+	// "process.command" semantic conventions. It represents the command used
+	// to launch the process (i.e. the command name). On Linux based systems,
+	// can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+	// be set to the first parameter extracted from `GetCommandLineW`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'cmd/otelcol'
+	ProcessCommandKey = attribute.Key("process.command")
+
+	// ProcessCommandArgsKey is the attribute Key conforming to the
+	// "process.command_args" semantic conventions. It represents all the
+	// command arguments (including the command/executable itself) as received
+	// by the process. On Linux-based systems (and some other Unixoid systems
+	// supporting procfs), can be set according to the list of null-delimited
+	// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+	// this would be the full argv vector passed to `main`.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'cmd/otecol', '--config=config.yaml'
+	ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+	// ProcessCommandLineKey is the attribute Key conforming to the
+	// "process.command_line" semantic conventions.
It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PPID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. 
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'OpenJDK Runtime Environment'
+	ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+	// ProcessRuntimeVersionKey is the attribute Key conforming to the
+	// "process.runtime.version" semantic conventions. It represents the
+	// version of the runtime of this process, as returned by the runtime
+	// without modification.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '14.0.2'
+	ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+)
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+	return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+	return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+	return ProcessCommandLineKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+	return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+	return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+	return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PPID).
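+//
+// Illustrative usage (editor's sketch, not part of the generated
+// conventions): process identity could be recorded as, e.g. (assuming the
+// package is imported as semconv, alongside the standard library os package):
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.ProcessPID(os.Getpid()),
+//		semconv.ProcessExecutableName("otelcol"),
+//	)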
+func ProcessParentPID(val int) attribute.KeyValue {
+	return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+	return ProcessPIDKey.Int(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+	return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+	return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+	return ProcessRuntimeVersionKey.String(val)
+}
+
+// The Android platform on which the Android application is running.
+const (
+	// AndroidOSAPILevelKey is the attribute Key conforming to the
+	// "android.os.api_level" semantic conventions. It uniquely
+	// identifies the framework API revision offered by a version
+	// (`os.version`) of the Android operating system. More information can be
+	// found
+	// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '33', '32'
+	AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
+)
+
+// AndroidOSAPILevel returns an attribute KeyValue conforming to the
+// "android.os.api_level" semantic conventions. It uniquely identifies the
+// framework API revision offered by a version (`os.version`) of the Android
+// operating system. More information can be found
+// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+func AndroidOSAPILevel(val string) attribute.KeyValue {
+	return AndroidOSAPILevelKey.String(val)
+}
+
+// The web browser in which the application represented by the resource is
+// running. The `browser.*` attributes MUST be used only for resources that
+// represent applications running in a web browser (regardless of whether
+// running on a mobile or desktop device).
+const (
+	// BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+	// semantic conventions. It represents the array of brand name and version
+	// separated by a space
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+	// Note: This value is intended to be taken from the [UA client hints
+	// API](https://wicg.github.io/ua-client-hints/#interface)
+	// (`navigator.userAgentData.brands`).
+	BrowserBrandsKey = attribute.Key("browser.brands")
+
+	// BrowserLanguageKey is the attribute Key conforming to the
+	// "browser.language" semantic conventions.
It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an + // [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the task + // definition family this task definition is a member of. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for this task definition. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). 
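+//
+// Illustrative usage (editor's sketch, not part of the generated
+// conventions): an ECS task might be described as, e.g. (assuming the
+// package is imported as semconv and go.opentelemetry.io/otel/sdk/resource
+// is available):
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.AWSECSLaunchtypeFargate,
+//		semconv.AWSECSTaskFamily("opentelemetry-family"),
+//		semconv.AWSECSTaskRevision("8"),
+//	)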
+func AWSECSTaskARN(val string) attribute.KeyValue {
+	return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the task
+// definition family this task definition is a member of.
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+	return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// this task definition.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+	return AWSECSTaskRevisionKey.String(val)
+}
+
+// Resources used by AWS Elastic Kubernetes Service (EKS).
+const (
+	// AWSEKSClusterARNKey is the attribute Key conforming to the
+	// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+	// EKS cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+	AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+	return AWSEKSClusterARNKey.String(val)
+}
+
+// Resources specific to Amazon Web Services.
+const (
+	// AWSLogGroupARNsKey is the attribute Key conforming to the
+	// "aws.log.group.arns" semantic conventions. It represents the Amazon
+	// Resource Name(s) (ARN) of the AWS log group(s).
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+	// Note: See the [log group ARN format
+	// documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+	AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+	// AWSLogGroupNamesKey is the attribute Key conforming to the
+	// "aws.log.group.names" semantic conventions. It represents the name(s) of
+	// the AWS log group(s) an application is writing to.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+	// Note: Multiple log groups must be supported for cases like
+	// multi-container applications, where a single application has sidecar
+	// containers, and each writes to its own log group.
+	AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+	// AWSLogStreamARNsKey is the attribute Key conforming to the
+	// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+	// the AWS log stream(s).
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+	// Note: See the [log stream ARN format
+	// documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+	// One log group can contain several log streams, so these ARNs necessarily
+	// identify both a log group and a log stream.
+	AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+
+	// AWSLogStreamNamesKey is the attribute Key conforming to the
+	// "aws.log.stream.names" semantic conventions.
It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") +) + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// Resource used by Google Cloud Run. +const ( + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the + // name of the Cloud Run + // [execution](https://cloud.google.com/run/docs/managing/job-executions) + // being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the + // index for a task within an execution as provided by the + // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1 + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") +) + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name +// of the Cloud Run +// [execution](https://cloud.google.com/run/docs/managing/job-executions) being +// run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index +// for a task within an execution as provided by the +// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// Resources used by Google Compute Engine (GCE). +const ( + // GCPGceInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the + // hostname of a GCE instance. This is the full value of the default or + // [custom + // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', + // 'sample-vm.us-west1-b.c.my-project.internal' + GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") + + // GCPGceInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. It represents the instance + // name of a GCE instance. This is the value provided by `host.name`, the + // visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the [default internal + // DNS + // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") +) + +// GCPGceInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. This is the full value of the default or [custom +// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). +func GCPGceInstanceHostname(val string) attribute.KeyValue { + return GCPGceInstanceHostnameKey.String(val) +} + +// GCPGceInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance +// name of a GCE instance. This is the value provided by `host.name`, the +// visible name of the instance in the Cloud Console UI, and the prefix for the +// default hostname of the instance as defined by the [default internal DNS +// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). +func GCPGceInstanceName(val string) attribute.KeyValue { + return GCPGceInstanceNameKey.String(val) +} + +// Heroku dyno metadata +const ( + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + HerokuAppIDKey = attribute.Key("heroku.app.id") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit + // hash for the current release + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. 
It represents + // the time and date the release was created + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2022-10-23T18:00:42Z' + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") +) + +// HerokuAppID returns an attribute KeyValue conforming to the +// "heroku.app.id" semantic conventions. It represents the unique identifier +// for the application +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming +// to the "heroku.release.creation_timestamp" semantic conventions. It +// represents the time and date the release was created +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// The software deployment. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'staging', 'production' + // Note: `deployment.environment` does not affect the uniqueness + // constraints defined through + // the `service.namespace`, `service.name` and `service.instance.id` + // resource attributes. + // This implies that resources carrying the following attribute + // combinations MUST be + // considered to be identifying the same service: + // + // * `service.name=frontend`, `deployment.environment=production` + // * `service.name=frontend`, `deployment.environment=staging`. + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment environment](https://wikipedia.org/wiki/Deployment_environment) +// (aka deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// A serverless instance. +const ( + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function converted to Bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. 
On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must + // be multiplied by 1,048,576). + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") + + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run (Services):** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") +) + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes.
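+// +// Illustrative usage (editor's sketch, not part of the generated file): these +// helpers only construct attribute.KeyValue pairs, which a caller typically +// collects into a resource or span; the values below are the examples listed +// above. +// +//  attrs := []attribute.KeyValue{ +//      FaaSName("my-function"), +//      FaaSVersion("26"), +//      FaaSMaxMemory(134217728), +//  }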
+func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// A service instance. +const ( + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. The format is not defined by these + // conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2.0.0', 'a01dbef8a' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// A service instance. +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-k8s-pod-deployment-1', + // '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance, however it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. 
It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") +) + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'opentelemetry' + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute + // to `opentelemetry`. + // If another SDK, like a fork or a vendor-provided implementation, is + // used, this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module + // name of this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this + // case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of + // the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set + // the `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the + // version string of the auto instrumentation agent or distribution, if + // used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2.3' + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") +) + +// TelemetryDistroName returns an attribute KeyValue conforming to the +// "telemetry.distro.name" semantic conventions. It represents the name of the +// auto instrumentation agent or distribution, if used. +func TelemetryDistroName(val string) attribute.KeyValue { + return TelemetryDistroNameKey.String(val) +} + +// TelemetryDistroVersion returns an attribute KeyValue conforming to the +// "telemetry.distro.version" semantic conventions. It represents the version +// string of the auto instrumentation agent or distribution, if used. 
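+// +// A minimal sketch (editor's addition, not part of the generated file): a +// distribution would typically report its name and version together; the +// values here are hypothetical. +// +//  attrs := []attribute.KeyValue{ +//      TelemetryDistroName("opentelemetry-java-instrumentation"), +//      TelemetryDistroVersion("1.2.3"), +//  }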
+func TelemetryDistroVersion(val string) attribute.KeyValue { + return TelemetryDistroVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OTelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.0.0' + OTelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. 
It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry +// Scope's concepts. +const ( + // OTelLibraryNameKey is the attribute Key conforming to the + // "otel.library.name" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'io.opentelemetry.contrib.mongodb' + // Deprecated: use the `otel.scope.name` attribute. + OTelLibraryNameKey = attribute.Key("otel.library.name") + + // OTelLibraryVersionKey is the attribute Key conforming to the + // "otel.library.version" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '1.0.0' + // Deprecated: use the `otel.scope.version` attribute. + OTelLibraryVersionKey = attribute.Key("otel.library.version") +) + +// OTelLibraryName returns an attribute KeyValue conforming to the +// "otel.library.name" semantic conventions. +// +// Deprecated: use the `otel.scope.name` attribute. +func OTelLibraryName(val string) attribute.KeyValue { + return OTelLibraryNameKey.String(val) +} + +// OTelLibraryVersion returns an attribute KeyValue conforming to the +// "otel.library.version" semantic conventions. +// +// Deprecated: use the `otel.scope.version` attribute. +func OTelLibraryVersion(val string) attribute.KeyValue { + return OTelLibraryVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go new file mode 100644 index 00000000..fe80b173 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.24.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go new file mode 100644 index 00000000..c1718234 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go @@ -0,0 +1,1323 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +import "go.opentelemetry.io/otel/attribute" + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](/docs/resource/README.md#service) of the remote + // service. SHOULD be equal to the actual `service.name` resource attribute + // of the remote service if any. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. 
It represents the +// [`service.name`](/docs/resource/README.md#service) of the remote service. +// SHOULD be equal to the actual `service.name` resource attribute of the +// remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under, extracted from the token or application + // security context. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses, extracted from the token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. +func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under, extracted from the token or +// application security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses, extracted from the token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). +func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// These attributes allow reporting this unit of code and therefore provide +// more context about the span. +const ( + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions.
It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeStacktraceKey is the attribute Key conforming to the + // "code.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n + // at com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n + // at com.example.GenerateTrace.main(GenerateTrace.java:5)' + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions.
It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). +func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeStacktrace returns an attribute KeyValue conforming to the +// "code.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func CodeStacktrace(val string) attribute.KeyValue { + return CodeStacktraceKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` endpoint, where applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `cloud.resource_id` if an alias is + // involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions.
It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` endpoint, where applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for CloudEvents. CloudEvents is a specification on how to define +// event data in a standard way. These attributes can be attached to spans when +// performing operations with CloudEvents, regardless of the protocol being +// used. +const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id), + // which uniquely identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1), + // which identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by + // source). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type), + // which contains a value describing the type of event related to the + // originating occurrence. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") +) + +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id), +// which uniquely identifies the event.
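+// +// Illustrative usage (editor's sketch, assuming a trace.Span named span from +// go.opentelemetry.io/otel/trace): recording the identifying CloudEvents +// attributes on a span, using the example values above. +// +//  span.SetAttributes( +//      CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"), +//      CloudeventsEventSource("https://github.com/cloudevents"), +//  )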
+func CloudeventsEventID(val string) attribute.KeyValue { + return CloudeventsEventIDKey.String(val) +} + +// CloudeventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the +// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1), +// which identifies the context in which an event happened. +func CloudeventsEventSource(val string) attribute.KeyValue { + return CloudeventsEventSourceKey.String(val) +} + +// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to +// the "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents +// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) +// which the event uses. +func CloudeventsEventSpecVersion(val string) attribute.KeyValue { + return CloudeventsEventSpecVersionKey.String(val) +} + +// CloudeventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the +// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) +// of the event in the context of the event producer (identified by source). +func CloudeventsEventSubject(val string) attribute.KeyValue { + return CloudeventsEventSubjectKey.String(val) +} + +// CloudeventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the +// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type), +// which contains a value describing the type of event related to the +// originating occurrence. +func CloudeventsEventType(val string) attribute.KeyValue { + return CloudeventsEventTypeKey.String(val) +} + +// Semantic conventions for the OpenTracing Shim +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The causal relationship between a child Span and a parent Span. + OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span doesn't depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // OTelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'resource not found' + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// This semantic convention describes an instance of a function that runs +// without provisioning or managing of servers (also known as serverless +// functions or Function as a Service (FaaS)) with spans. +const ( + // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation + // ID of the current function invocation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSInvocationIDKey = attribute.Key("faas.invocation_id") +) + +// FaaSInvocationID returns an attribute KeyValue conforming to the +// "faas.invocation_id" semantic conventions. It represents the invocation ID +// of the current function invocation. +func FaaSInvocationID(val string) attribute.KeyValue { + return FaaSInvocationIDKey.String(val) +} + +// Semantic Convention for FaaS triggered as a response to some data source +// operation such as a database or filesystem read/write. +const ( + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 this corresponds to the bucket name, and + // in Cosmos DB to the database name. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or + // S3 this is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It describes the type of + // the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 this corresponds to the bucket name, and in Cosmos DB to +// the database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// this is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") + + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") +) + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
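+// +// A minimal sketch (editor's addition): the standard library's time package +// can produce the expected UTC timestamp, since RFC 3339 matches the ISO 8601 +// example above. +// +//  FaaSTime(time.Now().UTC().Format(time.RFC3339))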
+func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// Contains additional attributes for incoming FaaS spans. +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// The `aws` conventions apply to operations using the AWS SDK. They map +// request or response parameters in AWS SDK API calls to attributes on a Span. +// The conventions have been collected over time based on feedback from AWS +// users of tracing and will continue to evolve as new interesting conventions +// are found. +// Some descriptions are also provided for populating general OpenTelemetry +// semantic conventions based on these APIs. +const ( + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
+ // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") +) + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. 
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+// the `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// DynamoDB.CreateTable
+const (
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `GlobalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `LocalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "IndexARN": "string", "IndexName": "string",
+ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field.
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the
+ // number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// DynamoDB.Query
+const (
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the
+ // value of the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// DynamoDB.Scan
+const (
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of
+ // the `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the
+ // value of the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of
+ // the `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the
+ // value of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+)
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
+// DynamoDB.UpdateTable
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+ // conventions. It represents the JSON-serialized value of each item in the
+ // `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Attributes that exist for S3 request types.
+const (
+ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+ // semantic conventions. It represents the S3 bucket name the request
+ // refers to. Corresponds to the `--bucket` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'some-bucket-name'
+ // Note: The `bucket` attribute is applicable to all S3 operations that
+ // reference a bucket, i.e. that require the bucket name as a mandatory
+ // parameter.
+ // This applies to almost all S3 operations except `list-buckets`.
+ AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+ // AWSS3CopySourceKey is the attribute Key conforming to the
+ // "aws.s3.copy_source" semantic conventions. It represents the source
+ // object (in the form `bucket`/`key`) for the copy operation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'someFile.yml'
+ // Note: The `copy_source` attribute applies to S3 copy operations and
+ // corresponds to the `--copy-source` parameter
+ // of the [copy-object operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+ // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+ // semantic conventions. It represents the delete request container that
+ // specifies the objects to be deleted.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
+ // Note: The `delete` attribute is only applicable to the
+ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+ // operation.
+ // The `delete` attribute corresponds to the `--delete` parameter of the
+ // [delete-objects operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
+ AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e. that require the object key as a mandatory parameter. + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. +func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + GraphqlDocumentKey = attribute.Key("graphql.document") + + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go index caf7249d..6836c654 100644 --- a/vendor/go.opentelemetry.io/otel/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/trace/README.md b/vendor/go.opentelemetry.io/otel/trace/README.md new file mode 100644 index 00000000..58ccaba6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/README.md @@ -0,0 +1,3 @@ +# Trace API + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/trace) diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index 3aadc66c..273d58e0 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go index 76f9a083..5650a174 100644 --- a/vendor/go.opentelemetry.io/otel/trace/context.go +++ b/vendor/go.opentelemetry.io/otel/trace/context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" @@ -47,12 +36,12 @@ func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) conte // performs no operations is returned. func SpanFromContext(ctx context.Context) Span { if ctx == nil { - return noopSpan{} + return noopSpanInstance } if span, ok := ctx.Value(currentSpanKey).(Span); ok { return span } - return noopSpan{} + return noopSpanInstance } // SpanContextFromContext returns the current Span's SpanContext. diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go index 440f3d75..d661c5d1 100644 --- a/vendor/go.opentelemetry.io/otel/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package trace provides an implementation of the tracing part of the diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/README.md b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md new file mode 100644 index 00000000..7754a239 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md @@ -0,0 +1,3 @@ +# Trace Embedded + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/embedded) diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go index 898db5a7..3e359a00 100644 --- a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go +++ b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package embedded provides interfaces embedded within the [OpenTelemetry // trace API]. diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go index 88fcb816..c00221e7 100644 --- a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go +++ b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go index c125491c..ca20e999 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" @@ -52,7 +41,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption span := SpanFromContext(ctx) if _, ok := span.(nonRecordingSpan); !ok { // span is likely already a noopSpan, but let's be sure - span = noopSpan{} + span = noopSpanInstance } return ContextWithSpan(ctx, span), span } @@ -60,7 +49,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption // noopSpan is an implementation of Span that performs no operations. type noopSpan struct{ embedded.Span } -var _ Span = noopSpan{} +var noopSpanInstance Span = noopSpan{} // SpanContext returns an empty span context. func (noopSpan) SpanContext() SpanContext { return SpanContext{} } @@ -86,6 +75,9 @@ func (noopSpan) RecordError(error, ...EventOption) {} // AddEvent does nothing. func (noopSpan) AddEvent(string, ...EventOption) {} +// AddLink does nothing. +func (noopSpan) AddLink(Link) {} + // SetName does nothing. 
func (noopSpan) SetName(string) {} diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index 26a4b226..28877d4a 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" @@ -361,6 +350,12 @@ type Span interface { // AddEvent adds an event with the provided name and options. AddEvent(name string, options ...EventOption) + // AddLink adds a link. + // Adding links at span creation using WithLinks is preferred to calling AddLink + // later, for contexts that are available during span creation, because head + // sampling decisions can only consider information present during span creation. + AddLink(link Link) + // IsRecording returns the recording state of the Span. It will return // true if the Span is active and events can be recorded. IsRecording() bool diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index db936ba5..20b5cf24 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh index dbb61a42..e57bf57f 100644 --- a/vendor/go.opentelemetry.io/otel/verify_examples.sh +++ b/vendor/go.opentelemetry.io/otel/verify_examples.sh @@ -1,18 +1,7 @@ #!/bin/bash # Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
+# SPDX-License-Identifier: Apache-2.0 set -euo pipefail diff --git a/vendor/go.opentelemetry.io/otel/verify_readmes.sh b/vendor/go.opentelemetry.io/otel/verify_readmes.sh new file mode 100644 index 00000000..1e87855e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/verify_readmes.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +dirs=$(find . -type d -not -path "*/internal*" -not -path "*/test*" -not -path "*/example*" -not -path "*/.*" | sort) + +missingReadme=false +for dir in $dirs; do + if [ ! -f "$dir/README.md" ]; then + echo "couldn't find README.md for $dir" + missingReadme=true + fi +done + +if [ "$missingReadme" = true ] ; then + echo "Error: some READMEs couldn't be found." + exit 1 +fi diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 7b2993a1..ab289605 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -1,20 +1,9 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.24.0" + return "1.28.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 1b556e67..241cfc82 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -1,20 +1,9 @@ # Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
+# SPDX-License-Identifier: Apache-2.0 module-sets: stable-v1: - version: v1.24.0 + version: v1.28.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -40,17 +29,21 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.46.0 + version: v0.50.0 modules: - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.0.1-alpha + version: v0.4.0 modules: - go.opentelemetry.io/otel/log + - go.opentelemetry.io/otel/sdk/log + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp + - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.7 + version: v0.0.8 modules: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc diff --git a/vendor/go.temporal.io/api/workflowservice/v1/BUILD.bazel b/vendor/go.temporal.io/api/workflowservice/v1/BUILD.bazel index 726c9b4a..6b65403c 100644 --- a/vendor/go.temporal.io/api/workflowservice/v1/BUILD.bazel +++ b/vendor/go.temporal.io/api/workflowservice/v1/BUILD.bazel @@ -26,7 +26,7 @@ go_library( "//vendor/go.temporal.io/api/taskqueue/v1:taskqueue", "//vendor/go.temporal.io/api/version/v1:version", "//vendor/go.temporal.io/api/workflow/v1:workflow", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//status", ], diff --git a/vendor/go.temporal.io/api/workflowservicemock/v1/BUILD.bazel b/vendor/go.temporal.io/api/workflowservicemock/v1/BUILD.bazel index bfd56905..1c5af8f5 100644 --- a/vendor/go.temporal.io/api/workflowservicemock/v1/BUILD.bazel +++ b/vendor/go.temporal.io/api/workflowservicemock/v1/BUILD.bazel @@ -9,6 +9,6 @@ go_library( deps = [ "//vendor/github.com/golang/mock/gomock", "//vendor/go.temporal.io/api/workflowservice/v1:workflowservice", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", ], ) diff --git a/vendor/go.temporal.io/sdk/internal/BUILD.bazel b/vendor/go.temporal.io/sdk/internal/BUILD.bazel index 7bf72bf4..4e54a474 100644 --- a/vendor/go.temporal.io/sdk/internal/BUILD.bazel +++ b/vendor/go.temporal.io/sdk/internal/BUILD.bazel @@ -72,7 +72,7 @@ go_library( "//vendor/go.temporal.io/sdk/log", "//vendor/go.uber.org/atomic", "//vendor/golang.org/x/time/rate", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//backoff", "@org_golang_google_grpc//credentials", "@org_golang_google_grpc//health/grpc_health_v1", diff --git a/vendor/go.temporal.io/sdk/internal/common/metrics/BUILD.bazel b/vendor/go.temporal.io/sdk/internal/common/metrics/BUILD.bazel index 27300574..f1c44e0d 100644 --- a/vendor/go.temporal.io/sdk/internal/common/metrics/BUILD.bazel +++ b/vendor/go.temporal.io/sdk/internal/common/metrics/BUILD.bazel @@ -12,5 +12,5 @@ go_library( importmap = "peridot.resf.org/vendor/go.temporal.io/sdk/internal/common/metrics", importpath = "go.temporal.io/sdk/internal/common/metrics", visibility = ["//vendor/go.temporal.io/sdk:__subpackages__"], - deps = ["@org_golang_google_grpc//:go_default_library"], + deps = ["@org_golang_google_grpc//:grpc"], ) diff --git a/vendor/go.temporal.io/sdk/internal/common/retry/BUILD.bazel b/vendor/go.temporal.io/sdk/internal/common/retry/BUILD.bazel index dd225aaa..3a922191 100644 --- 
a/vendor/go.temporal.io/sdk/internal/common/retry/BUILD.bazel
+++ b/vendor/go.temporal.io/sdk/internal/common/retry/BUILD.bazel
@@ -10,7 +10,7 @@ go_library(
 "//vendor/github.com/gogo/status",
 "//vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry",
 "//vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils",
- "@org_golang_google_grpc//:go_default_library",
+ "@org_golang_google_grpc//:grpc",
 "@org_golang_google_grpc//codes",
 ],
)
diff --git a/vendor/go.uber.org/atomic/BUILD.bazel b/vendor/go.uber.org/atomic/BUILD.bazel
index bc430b2f..dce1d93d 100644
--- a/vendor/go.uber.org/atomic/BUILD.bazel
+++ b/vendor/go.uber.org/atomic/BUILD.bazel
@@ -10,12 +10,17 @@ go_library(
 "duration_ext.go",
 "error.go",
 "error_ext.go",
+ "float32.go",
+ "float32_ext.go",
 "float64.go",
 "float64_ext.go",
 "gen.go",
 "int32.go",
 "int64.go",
 "nocmp.go",
+ "pointer_go118.go",
+ "pointer_go118_pre119.go",
+ "pointer_go119.go",
 "string.go",
 "string_ext.go",
 "time.go",
diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md
index 38f564e2..6f87f33f 100644
--- a/vendor/go.uber.org/atomic/CHANGELOG.md
+++ b/vendor/go.uber.org/atomic/CHANGELOG.md
@@ -4,6 +4,33 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

+## [1.11.0] - 2023-05-02
+### Fixed
+- Fix initialization of `Value` wrappers.
+
+### Added
+- Add `String` method to `atomic.Pointer[T]` type allowing users to safely print
+underlying values of pointers.
+
+[1.11.0]: https://github.com/uber-go/atomic/compare/v1.10.0...v1.11.0
+
+## [1.10.0] - 2022-08-11
+### Added
+- Add `atomic.Float32` type for atomic operations on `float32`.
+- Add `CompareAndSwap` and `Swap` methods to `atomic.String`, `atomic.Error`,
+  and `atomic.Value`.
+- Add generic `atomic.Pointer[T]` type for atomic operations on pointers of any
+  type. This is present only for Go 1.18 or higher, and is a drop-in
+  replacement for the standard library's `sync/atomic.Pointer` type.
+
+### Changed
+- Deprecate `CAS` methods on all types in favor of corresponding
+  `CompareAndSwap` methods.
+
+Thanks to @eNV25 and @icpd for their contributions to this release.
+
+[1.10.0]: https://github.com/uber-go/atomic/compare/v1.9.0...v1.10.0
+
## [1.9.0] - 2021-07-15
### Added
- Add `Float64.Swap` to match int atomic operations.
diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go
index 209df7bb..f0a2ddd1 100644
--- a/vendor/go.uber.org/atomic/bool.go
+++ b/vendor/go.uber.org/atomic/bool.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.

-// Copyright (c) 2020-2021 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -55,8 +55,15 @@ func (x *Bool) Store(val bool) {
}

// CAS is an atomic compare-and-swap for bool values.
+//
+// Deprecated: Use CompareAndSwap.
func (x *Bool) CAS(old, new bool) (swapped bool) {
- return x.v.CAS(boolToInt(old), boolToInt(new))
+ return x.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap for bool values.
+func (x *Bool) CompareAndSwap(old, new bool) (swapped bool) { + return x.v.CompareAndSwap(boolToInt(old), boolToInt(new)) } // Swap atomically stores the given bool and returns the old diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go index 207594f5..7c23868f 100644 --- a/vendor/go.uber.org/atomic/duration.go +++ b/vendor/go.uber.org/atomic/duration.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -56,8 +56,15 @@ func (x *Duration) Store(val time.Duration) { } // CAS is an atomic compare-and-swap for time.Duration values. +// +// Deprecated: Use CompareAndSwap. func (x *Duration) CAS(old, new time.Duration) (swapped bool) { - return x.v.CAS(int64(old), int64(new)) + return x.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap for time.Duration values. +func (x *Duration) CompareAndSwap(old, new time.Duration) (swapped bool) { + return x.v.CompareAndSwap(int64(old), int64(new)) } // Swap atomically stores the given time.Duration and returns the old diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go index 3be19c35..b7e3f129 100644 --- a/vendor/go.uber.org/atomic/error.go +++ b/vendor/go.uber.org/atomic/error.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -49,3 +49,24 @@ func (x *Error) Load() error { func (x *Error) Store(val error) { x.v.Store(packError(val)) } + +// CompareAndSwap is an atomic compare-and-swap for error values. +func (x *Error) CompareAndSwap(old, new error) (swapped bool) { + if x.v.CompareAndSwap(packError(old), packError(new)) { + return true + } + + if old == _zeroError { + // If the old value is the empty value, then it's possible the + // underlying Value hasn't been set and is nil, so retry with nil. + return x.v.CompareAndSwap(nil, packError(new)) + } + + return false +} + +// Swap atomically stores the given error and returns the old +// value. +func (x *Error) Swap(val error) (old error) { + return unpackError(x.v.Swap(packError(val))) +} diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/atomic/error_ext.go index ffe0be21..d31fb633 100644 --- a/vendor/go.uber.org/atomic/error_ext.go +++ b/vendor/go.uber.org/atomic/error_ext.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -23,7 +23,7 @@ package atomic // atomic.Value panics on nil inputs, or if the underlying type changes. // Stabilize by always storing a custom struct that we control. 
-//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -file=error.go +//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -compareandswap -swap -file=error.go type packedError struct{ Value error } diff --git a/vendor/go.uber.org/atomic/float32.go b/vendor/go.uber.org/atomic/float32.go new file mode 100644 index 00000000..62c36334 --- /dev/null +++ b/vendor/go.uber.org/atomic/float32.go @@ -0,0 +1,77 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "math" +) + +// Float32 is an atomic type-safe wrapper for float32 values. +type Float32 struct { + _ nocmp // disallow non-atomic comparison + + v Uint32 +} + +var _zeroFloat32 float32 + +// NewFloat32 creates a new Float32. +func NewFloat32(val float32) *Float32 { + x := &Float32{} + if val != _zeroFloat32 { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped float32. +func (x *Float32) Load() float32 { + return math.Float32frombits(x.v.Load()) +} + +// Store atomically stores the passed float32. +func (x *Float32) Store(val float32) { + x.v.Store(math.Float32bits(val)) +} + +// Swap atomically stores the given float32 and returns the old +// value. +func (x *Float32) Swap(val float32) (old float32) { + return math.Float32frombits(x.v.Swap(math.Float32bits(val))) +} + +// MarshalJSON encodes the wrapped float32 into JSON. +func (x *Float32) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a float32 from JSON. +func (x *Float32) UnmarshalJSON(b []byte) error { + var v float32 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/float32_ext.go b/vendor/go.uber.org/atomic/float32_ext.go new file mode 100644 index 00000000..b0cd8d9c --- /dev/null +++ b/vendor/go.uber.org/atomic/float32_ext.go @@ -0,0 +1,76 @@ +// Copyright (c) 2020-2022 Uber Technologies, Inc. 
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "math"
+ "strconv"
+)
+
+//go:generate bin/gen-atomicwrapper -name=Float32 -type=float32 -wrapped=Uint32 -pack=math.Float32bits -unpack=math.Float32frombits -swap -json -imports math -file=float32.go
+
+// Add atomically adds to the wrapped float32 and returns the new value.
+func (f *Float32) Add(delta float32) float32 {
+ for {
+ old := f.Load()
+ new := old + delta
+ if f.CAS(old, new) {
+ return new
+ }
+ }
+}
+
+// Sub atomically subtracts from the wrapped float32 and returns the new value.
+func (f *Float32) Sub(delta float32) float32 {
+ return f.Add(-delta)
+}
+
+// CAS is an atomic compare-and-swap for float32 values.
+//
+// Deprecated: Use CompareAndSwap
+func (f *Float32) CAS(old, new float32) (swapped bool) {
+ return f.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap for float32 values.
+//
+// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators
+// but CompareAndSwap allows a stored NaN to compare equal to a passed-in NaN.
+// This avoids typical CompareAndSwap loops from blocking forever, e.g.,
+//
+// for {
+// old := atom.Load()
+// new = f(old)
+// if atom.CompareAndSwap(old, new) {
+// break
+// }
+// }
+//
+// If CompareAndSwap did not treat a stored NaN as matching a passed-in NaN,
+// then the above would loop forever.
+func (f *Float32) CompareAndSwap(old, new float32) (swapped bool) {
+ return f.v.CompareAndSwap(math.Float32bits(old), math.Float32bits(new))
+}
+
+// String encodes the wrapped value as a string.
+func (f *Float32) String() string {
+ // 'g' is the behavior for floats with %v.
+ return strconv.FormatFloat(float64(f.Load()), 'g', -1, 32)
+}
diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go
index 8a136718..5bc11caa 100644
--- a/vendor/go.uber.org/atomic/float64.go
+++ b/vendor/go.uber.org/atomic/float64.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.

-// Copyright (c) 2020-2021 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go
index df36b010..48c52b0a 100644
--- a/vendor/go.uber.org/atomic/float64_ext.go
+++ b/vendor/go.uber.org/atomic/float64_ext.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -45,21 +45,28 @@ func (f *Float64) Sub(delta float64) float64 {

// CAS is an atomic compare-and-swap for float64 values.
//
-// Note: CAS handles NaN incorrectly. NaN != NaN using Go's inbuilt operators
-// but CAS allows a stored NaN to compare equal to a passed in NaN.
-// This avoids typical CAS loops from blocking forever, e.g.,
-//
-// for {
-// old := atom.Load()
-// new = f(old)
-// if atom.CAS(old, new) {
-// break
-// }
-// }
-//
-// If CAS did not match NaN to match, then the above would loop forever.
+// Deprecated: Use CompareAndSwap
func (f *Float64) CAS(old, new float64) (swapped bool) {
- return f.v.CAS(math.Float64bits(old), math.Float64bits(new))
+ return f.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap for float64 values.
+//
+// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators
+// but CompareAndSwap allows a stored NaN to compare equal to a passed-in NaN.
+// This avoids typical CompareAndSwap loops from blocking forever, e.g.,
+//
+// for {
+// old := atom.Load()
+// new = f(old)
+// if atom.CompareAndSwap(old, new) {
+// break
+// }
+// }
+//
+// If CompareAndSwap did not treat a stored NaN as matching a passed-in NaN,
+// then the above would loop forever.
+func (f *Float64) CompareAndSwap(old, new float64) (swapped bool) {
+ return f.v.CompareAndSwap(math.Float64bits(old), math.Float64bits(new))
}

// String encodes the wrapped value as a string.
diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go
index 640ea36a..5320eac1 100644
--- a/vendor/go.uber.org/atomic/int32.go
+++ b/vendor/go.uber.org/atomic/int32.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicint.

-// Copyright (c) 2020-2021 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -66,7 +66,14 @@ func (i *Int32) Dec() int32 {
}

// CAS is an atomic compare-and-swap.
+//
+// Deprecated: Use CompareAndSwap.
func (i *Int32) CAS(old, new int32) (swapped bool) {
+ return i.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (i *Int32) CompareAndSwap(old, new int32) (swapped bool) {
 return atomic.CompareAndSwapInt32(&i.v, old, new)
}

diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go
index 9ab66b98..460821d0 100644
--- a/vendor/go.uber.org/atomic/int64.go
+++ b/vendor/go.uber.org/atomic/int64.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicint.

-// Copyright (c) 2020-2021 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -66,7 +66,14 @@ func (i *Int64) Dec() int64 { } // CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap. func (i *Int64) CAS(old, new int64) (swapped bool) { + return i.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *Int64) CompareAndSwap(old, new int64) (swapped bool) { return atomic.CompareAndSwapInt64(&i.v, old, new) } diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go index a8201cb4..54b74174 100644 --- a/vendor/go.uber.org/atomic/nocmp.go +++ b/vendor/go.uber.org/atomic/nocmp.go @@ -23,13 +23,13 @@ package atomic // nocmp is an uncomparable struct. Embed this inside another struct to make // it uncomparable. // -// type Foo struct { -// nocmp -// // ... -// } +// type Foo struct { +// nocmp +// // ... +// } // // This DOES NOT: // -// - Disallow shallow copies of structs -// - Disallow comparison of pointers to uncomparable structs +// - Disallow shallow copies of structs +// - Disallow comparison of pointers to uncomparable structs type nocmp [0]func() diff --git a/vendor/go.uber.org/atomic/pointer_go118.go b/vendor/go.uber.org/atomic/pointer_go118.go new file mode 100644 index 00000000..1fb6c03b --- /dev/null +++ b/vendor/go.uber.org/atomic/pointer_go118.go @@ -0,0 +1,31 @@ +// Copyright (c) 2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build go1.18 +// +build go1.18 + +package atomic + +import "fmt" + +// String returns a human readable representation of a Pointer's underlying value. +func (p *Pointer[T]) String() string { + return fmt.Sprint(p.Load()) +} diff --git a/vendor/go.uber.org/atomic/pointer_go118_pre119.go b/vendor/go.uber.org/atomic/pointer_go118_pre119.go new file mode 100644 index 00000000..e0f47dba --- /dev/null +++ b/vendor/go.uber.org/atomic/pointer_go118_pre119.go @@ -0,0 +1,60 @@ +// Copyright (c) 2022 Uber Technologies, Inc. 
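// [Editor's note] Illustrative sketch, not part of the vendored patch: the
// pointer_go118*.go files added below introduce a generic Pointer[T], giving
// typed atomic pointer operations with no unsafe.Pointer casts at the call
// site. A minimal usage sketch, assuming Go 1.18+ and go.uber.org/atomic
// (the config type is this example's own invention):
//
//	type config struct{ limit int }
//
//	p := atomic.NewPointer(&config{limit: 10})
//	old := p.Swap(&config{limit: 20}) // returns a typed *config
//	_ = old.limit
//	if p.CompareAndSwap(p.Load(), &config{limit: 30}) {
//		// swapped in the new config
//	}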
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build go1.18 && !go1.19 +// +build go1.18,!go1.19 + +package atomic + +import "unsafe" + +type Pointer[T any] struct { + _ nocmp // disallow non-atomic comparison + p UnsafePointer +} + +// NewPointer creates a new Pointer. +func NewPointer[T any](v *T) *Pointer[T] { + var p Pointer[T] + if v != nil { + p.p.Store(unsafe.Pointer(v)) + } + return &p +} + +// Load atomically loads the wrapped value. +func (p *Pointer[T]) Load() *T { + return (*T)(p.p.Load()) +} + +// Store atomically stores the passed value. +func (p *Pointer[T]) Store(val *T) { + p.p.Store(unsafe.Pointer(val)) +} + +// Swap atomically swaps the wrapped pointer and returns the old value. +func (p *Pointer[T]) Swap(val *T) (old *T) { + return (*T)(p.p.Swap(unsafe.Pointer(val))) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) { + return p.p.CompareAndSwap(unsafe.Pointer(old), unsafe.Pointer(new)) +} diff --git a/vendor/go.uber.org/atomic/pointer_go119.go b/vendor/go.uber.org/atomic/pointer_go119.go new file mode 100644 index 00000000..6726f17a --- /dev/null +++ b/vendor/go.uber.org/atomic/pointer_go119.go @@ -0,0 +1,61 @@ +// Copyright (c) 2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build go1.19 +// +build go1.19 + +package atomic + +import "sync/atomic" + +// Pointer is an atomic pointer of type *T. 
+type Pointer[T any] struct { + _ nocmp // disallow non-atomic comparison + p atomic.Pointer[T] +} + +// NewPointer creates a new Pointer. +func NewPointer[T any](v *T) *Pointer[T] { + var p Pointer[T] + if v != nil { + p.p.Store(v) + } + return &p +} + +// Load atomically loads the wrapped value. +func (p *Pointer[T]) Load() *T { + return p.p.Load() +} + +// Store atomically stores the passed value. +func (p *Pointer[T]) Store(val *T) { + p.p.Store(val) +} + +// Swap atomically swaps the wrapped pointer and returns the old value. +func (p *Pointer[T]) Swap(val *T) (old *T) { + return p.p.Swap(val) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) { + return p.p.CompareAndSwap(old, new) +} diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go index 80df93d0..061466c5 100644 --- a/vendor/go.uber.org/atomic/string.go +++ b/vendor/go.uber.org/atomic/string.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -42,13 +42,31 @@ func NewString(val string) *String { // Load atomically loads the wrapped string. func (x *String) Load() string { - if v := x.v.Load(); v != nil { - return v.(string) - } - return _zeroString + return unpackString(x.v.Load()) } // Store atomically stores the passed string. func (x *String) Store(val string) { - x.v.Store(val) + x.v.Store(packString(val)) +} + +// CompareAndSwap is an atomic compare-and-swap for string values. +func (x *String) CompareAndSwap(old, new string) (swapped bool) { + if x.v.CompareAndSwap(packString(old), packString(new)) { + return true + } + + if old == _zeroString { + // If the old value is the empty value, then it's possible the + // underlying Value hasn't been set and is nil, so retry with nil. + return x.v.CompareAndSwap(nil, packString(new)) + } + + return false +} + +// Swap atomically stores the given string and returns the old +// value. +func (x *String) Swap(val string) (old string) { + return unpackString(x.v.Swap(packString(val))) } diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go index 83d92eda..019109c8 100644 --- a/vendor/go.uber.org/atomic/string_ext.go +++ b/vendor/go.uber.org/atomic/string_ext.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -20,9 +20,18 @@ package atomic -//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go -// Note: No Swap as String wraps Value, which wraps the stdlib sync/atomic.Value which -// only supports Swap as of go1.17: https://github.com/golang/go/issues/39351 +//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped Value -pack packString -unpack unpackString -compareandswap -swap -file=string.go + +func packString(s string) interface{} { + return s +} + +func unpackString(v interface{}) string { + if s, ok := v.(string); ok { + return s + } + return "" +} // String returns the wrapped value. 
func (s *String) String() string { diff --git a/vendor/go.uber.org/atomic/time.go b/vendor/go.uber.org/atomic/time.go index 33460fc3..cc2a230c 100644 --- a/vendor/go.uber.org/atomic/time.go +++ b/vendor/go.uber.org/atomic/time.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go index 7859a9cc..4adc294a 100644 --- a/vendor/go.uber.org/atomic/uint32.go +++ b/vendor/go.uber.org/atomic/uint32.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -66,7 +66,14 @@ func (i *Uint32) Dec() uint32 { } // CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap. func (i *Uint32) CAS(old, new uint32) (swapped bool) { + return i.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *Uint32) CompareAndSwap(old, new uint32) (swapped bool) { return atomic.CompareAndSwapUint32(&i.v, old, new) } diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go index 2f2a7db6..0e2eddb3 100644 --- a/vendor/go.uber.org/atomic/uint64.go +++ b/vendor/go.uber.org/atomic/uint64.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -66,7 +66,14 @@ func (i *Uint64) Dec() uint64 { } // CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap. func (i *Uint64) CAS(old, new uint64) (swapped bool) { + return i.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *Uint64) CompareAndSwap(old, new uint64) (swapped bool) { return atomic.CompareAndSwapUint64(&i.v, old, new) } diff --git a/vendor/go.uber.org/atomic/uintptr.go b/vendor/go.uber.org/atomic/uintptr.go index ecf7a772..7d5b000d 100644 --- a/vendor/go.uber.org/atomic/uintptr.go +++ b/vendor/go.uber.org/atomic/uintptr.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -66,7 +66,14 @@ func (i *Uintptr) Dec() uintptr { } // CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap. func (i *Uintptr) CAS(old, new uintptr) (swapped bool) { + return i.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. 
+func (i *Uintptr) CompareAndSwap(old, new uintptr) (swapped bool) { return atomic.CompareAndSwapUintptr(&i.v, old, new) } diff --git a/vendor/go.uber.org/atomic/unsafe_pointer.go b/vendor/go.uber.org/atomic/unsafe_pointer.go index 169f793d..34868baf 100644 --- a/vendor/go.uber.org/atomic/unsafe_pointer.go +++ b/vendor/go.uber.org/atomic/unsafe_pointer.go @@ -1,4 +1,4 @@ -// Copyright (c) 2021 Uber Technologies, Inc. +// Copyright (c) 2021-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -53,6 +53,13 @@ func (p *UnsafePointer) Swap(val unsafe.Pointer) (old unsafe.Pointer) { } // CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap func (p *UnsafePointer) CAS(old, new unsafe.Pointer) (swapped bool) { + return p.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (p *UnsafePointer) CompareAndSwap(old, new unsafe.Pointer) (swapped bool) { return atomic.CompareAndSwapPointer(&p.v, old, new) } diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go index 671f3a38..52caedb9 100644 --- a/vendor/go.uber.org/atomic/value.go +++ b/vendor/go.uber.org/atomic/value.go @@ -25,7 +25,7 @@ import "sync/atomic" // Value shadows the type of the same name from sync/atomic // https://godoc.org/sync/atomic#Value type Value struct { - atomic.Value - _ nocmp // disallow non-atomic comparison + + atomic.Value } diff --git a/vendor/go.uber.org/multierr/.codecov.yml b/vendor/go.uber.org/multierr/.codecov.yml new file mode 100644 index 00000000..6d4d1be7 --- /dev/null +++ b/vendor/go.uber.org/multierr/.codecov.yml @@ -0,0 +1,15 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 100 # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure + diff --git a/vendor/go.uber.org/multierr/.gitignore b/vendor/go.uber.org/multierr/.gitignore new file mode 100644 index 00000000..b9a05e3d --- /dev/null +++ b/vendor/go.uber.org/multierr/.gitignore @@ -0,0 +1,4 @@ +/vendor +cover.html +cover.out +/bin diff --git a/vendor/go.uber.org/multierr/BUILD.bazel b/vendor/go.uber.org/multierr/BUILD.bazel new file mode 100644 index 00000000..dacacc5b --- /dev/null +++ b/vendor/go.uber.org/multierr/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "multierr", + srcs = [ + "error.go", + "error_post_go120.go", + "error_pre_go120.go", + ], + importmap = "peridot.resf.org/vendor/go.uber.org/multierr", + importpath = "go.uber.org/multierr", + visibility = ["//visibility:public"], +) diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md new file mode 100644 index 00000000..f8177b97 --- /dev/null +++ b/vendor/go.uber.org/multierr/CHANGELOG.md @@ -0,0 +1,95 @@ +Releases +======== + +v1.11.0 (2023-03-28) +==================== +- `Errors` now supports any error that implements multiple-error + interface. 
+- Add `Every` function to allow checking if all errors in the chain
+  satisfy `errors.Is` against the target error.
+
+v1.10.0 (2023-03-08)
+====================
+
+- Comply with Go 1.20's multiple-error interface.
+- Drop Go 1.18 support.
+  Per the support policy, only Go 1.19 and 1.20 are supported now.
+- Drop all non-test external dependencies.
+
+v1.9.0 (2022-12-12)
+===================
+
+- Add `AppendFunc`, which allows passing functions, similar to
+  `AppendInvoke`.
+
+- Bump up yaml.v3 dependency to 3.0.1.
+
+v1.8.0 (2022-02-28)
+===================
+
+- `Combine`: perform zero allocations when there are no errors.
+
+
+v1.7.0 (2021-05-06)
+===================
+
+- Add `AppendInvoke` to append into errors from `defer` blocks.
+
+
+v1.6.0 (2020-09-14)
+===================
+
+- Actually drop library dependency on development-time tooling.
+
+
+v1.5.0 (2020-02-24)
+===================
+
+- Drop library dependency on development-time tooling.
+
+
+v1.4.0 (2019-11-04)
+===================
+
+- Add `AppendInto` function to more ergonomically build errors inside a
+  loop.
+
+
+v1.3.0 (2019-10-29)
+===================
+
+- Switch to Go modules.
+
+
+v1.2.0 (2019-09-26)
+===================
+
+- Support extracting and matching against wrapped errors with `errors.As`
+  and `errors.Is`.
+
+
+v1.1.0 (2017-06-30)
+===================
+
+- Added an `Errors(error) []error` function to extract the underlying list of
+  errors for a multierr error.
+
+
+v1.0.0 (2017-05-31)
+===================
+
+No changes since v0.2.0. This release is committing to making no breaking
+changes to the current API in the 1.X series.
+
+
+v0.2.0 (2017-04-11)
+===================
+
+- Repeatedly appending to the same error is now faster due to fewer
+  allocations.
+
+
+v0.1.0 (2017-03-31)
+===================
+
+- Initial release.
diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/go.uber.org/multierr/LICENSE.txt
similarity index 94%
rename from vendor/github.com/mitchellh/go-homedir/LICENSE
rename to vendor/go.uber.org/multierr/LICENSE.txt
index f9c841a5..413e30f7 100644
--- a/vendor/github.com/mitchellh/go-homedir/LICENSE
+++ b/vendor/go.uber.org/multierr/LICENSE.txt
@@ -1,6 +1,4 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 Mitchell Hashimoto
+Copyright (c) 2017-2021 Uber Technologies, Inc.
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile
new file mode 100644
index 00000000..dcb6fe72
--- /dev/null
+++ b/vendor/go.uber.org/multierr/Makefile
@@ -0,0 +1,38 @@
+# Directory to put `go install`ed binaries in.
+export GOBIN ?= $(shell pwd)/bin
+
+GO_FILES := $(shell \
+	find . '(' -path '*/.*' -o -path './vendor' ')' -prune \
+	-o -name '*.go' -print | cut -b3-)
+
+.PHONY: build
+build:
+	go build ./...
+
+.PHONY: test
+test:
+	go test -race ./...
+
+.PHONY: gofmt
+gofmt:
+	$(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX))
+	@gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true
+	@[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" | cat - $(FMT_LOG) && false)
+
+.PHONY: golint
+golint:
+	@cd tools && go install golang.org/x/lint/golint
+	@$(GOBIN)/golint ./...
+
+.PHONY: staticcheck
+staticcheck:
+	@cd tools && go install honnef.co/go/tools/cmd/staticcheck
+	@$(GOBIN)/staticcheck ./...
+
+.PHONY: lint
+lint: gofmt golint staticcheck
+
+.PHONY: cover
+cover:
+	go test -race -coverprofile=cover.out -coverpkg=./...
-v ./... + go tool cover -html=cover.out -o cover.html diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md new file mode 100644 index 00000000..5ab6ac40 --- /dev/null +++ b/vendor/go.uber.org/multierr/README.md @@ -0,0 +1,43 @@ +# multierr [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +`multierr` allows combining one or more Go `error`s together. + +## Features + +- **Idiomatic**: + multierr follows best practices in Go, and keeps your code idiomatic. + - It keeps the underlying error type hidden, + allowing you to deal in `error` values exclusively. + - It provides APIs to safely append into an error from a `defer` statement. +- **Performant**: + multierr is optimized for performance: + - It avoids allocations where possible. + - It utilizes slice resizing semantics to optimize common cases + like appending into the same error object from a loop. +- **Interoperable**: + multierr interoperates with the Go standard library's error APIs seamlessly: + - The `errors.Is` and `errors.As` functions *just work*. +- **Lightweight**: + multierr comes with virtually no dependencies. + +## Installation + +```bash +go get -u go.uber.org/multierr@latest +``` + +## Status + +Stable: No breaking changes will be made before 2.0. + +------------------------------------------------------------------------------- + +Released under the [MIT License]. + +[MIT License]: LICENSE.txt +[doc-img]: https://pkg.go.dev/badge/go.uber.org/multierr +[doc]: https://pkg.go.dev/go.uber.org/multierr +[ci-img]: https://github.com/uber-go/multierr/actions/workflows/go.yml/badge.svg +[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg +[ci]: https://github.com/uber-go/multierr/actions/workflows/go.yml +[cov]: https://codecov.io/gh/uber-go/multierr diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go new file mode 100644 index 00000000..3a828b2d --- /dev/null +++ b/vendor/go.uber.org/multierr/error.go @@ -0,0 +1,646 @@ +// Copyright (c) 2017-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package multierr allows combining one or more errors together. +// +// # Overview +// +// Errors can be combined with the use of the Combine function. +// +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// conn.Close(), +// ) +// +// If only two errors are being combined, the Append function may be used +// instead. 
+// +// err = multierr.Append(reader.Close(), writer.Close()) +// +// The underlying list of errors for a returned error object may be retrieved +// with the Errors function. +// +// errors := multierr.Errors(err) +// if len(errors) > 0 { +// fmt.Println("The following errors occurred:", errors) +// } +// +// # Appending from a loop +// +// You sometimes need to append into an error from a loop. +// +// var err error +// for _, item := range items { +// err = multierr.Append(err, process(item)) +// } +// +// Cases like this may require knowledge of whether an individual instance +// failed. This usually requires introduction of a new variable. +// +// var err error +// for _, item := range items { +// if perr := process(item); perr != nil { +// log.Warn("skipping item", item) +// err = multierr.Append(err, perr) +// } +// } +// +// multierr includes AppendInto to simplify cases like this. +// +// var err error +// for _, item := range items { +// if multierr.AppendInto(&err, process(item)) { +// log.Warn("skipping item", item) +// } +// } +// +// This will append the error into the err variable, and return true if that +// individual error was non-nil. +// +// See [AppendInto] for more information. +// +// # Deferred Functions +// +// Go makes it possible to modify the return value of a function in a defer +// block if the function was using named returns. This makes it possible to +// record resource cleanup failures from deferred blocks. +// +// func sendRequest(req Request) (err error) { +// conn, err := openConnection() +// if err != nil { +// return err +// } +// defer func() { +// err = multierr.Append(err, conn.Close()) +// }() +// // ... +// } +// +// multierr provides the Invoker type and AppendInvoke function to make cases +// like the above simpler and obviate the need for a closure. The following is +// roughly equivalent to the example above. +// +// func sendRequest(req Request) (err error) { +// conn, err := openConnection() +// if err != nil { +// return err +// } +// defer multierr.AppendInvoke(&err, multierr.Close(conn)) +// // ... +// } +// +// See [AppendInvoke] and [Invoker] for more information. +// +// NOTE: If you're modifying an error from inside a defer, you MUST use a named +// return value for that function. +// +// # Advanced Usage +// +// Errors returned by Combine and Append MAY implement the following +// interface. +// +// type errorGroup interface { +// // Returns a slice containing the underlying list of errors. +// // +// // This slice MUST NOT be modified by the caller. +// Errors() []error +// } +// +// Note that if you need access to list of errors behind a multierr error, you +// should prefer using the Errors function. That said, if you need cheap +// read-only access to the underlying errors slice, you can attempt to cast +// the error to this interface. You MUST handle the failure case gracefully +// because errors returned by Combine and Append are not guaranteed to +// implement this interface. +// +// var errors []error +// group, ok := err.(errorGroup) +// if ok { +// errors = group.Errors() +// } else { +// errors = []error{err} +// } +package multierr // import "go.uber.org/multierr" + +import ( + "bytes" + "errors" + "fmt" + "io" + "strings" + "sync" + "sync/atomic" +) + +var ( + // Separator for single-line error messages. 
+ _singlelineSeparator = []byte("; ") + + // Prefix for multi-line messages + _multilinePrefix = []byte("the following errors occurred:") + + // Prefix for the first and following lines of an item in a list of + // multi-line error messages. + // + // For example, if a single item is: + // + // foo + // bar + // + // It will become, + // + // - foo + // bar + _multilineSeparator = []byte("\n - ") + _multilineIndent = []byte(" ") +) + +// _bufferPool is a pool of bytes.Buffers. +var _bufferPool = sync.Pool{ + New: func() interface{} { + return &bytes.Buffer{} + }, +} + +type errorGroup interface { + Errors() []error +} + +// Errors returns a slice containing zero or more errors that the supplied +// error is composed of. If the error is nil, a nil slice is returned. +// +// err := multierr.Append(r.Close(), w.Close()) +// errors := multierr.Errors(err) +// +// If the error is not composed of other errors, the returned slice contains +// just the error that was passed in. +// +// Callers of this function are free to modify the returned slice. +func Errors(err error) []error { + return extractErrors(err) +} + +// multiError is an error that holds one or more errors. +// +// An instance of this is guaranteed to be non-empty and flattened. That is, +// none of the errors inside multiError are other multiErrors. +// +// multiError formats to a semi-colon delimited list of error messages with +// %v and with a more readable multi-line format with %+v. +type multiError struct { + copyNeeded atomic.Bool + errors []error +} + +// Errors returns the list of underlying errors. +// +// This slice MUST NOT be modified. +func (merr *multiError) Errors() []error { + if merr == nil { + return nil + } + return merr.errors +} + +func (merr *multiError) Error() string { + if merr == nil { + return "" + } + + buff := _bufferPool.Get().(*bytes.Buffer) + buff.Reset() + + merr.writeSingleline(buff) + + result := buff.String() + _bufferPool.Put(buff) + return result +} + +// Every compares every error in the given err against the given target error +// using [errors.Is], and returns true only if every comparison returned true. +func Every(err error, target error) bool { + for _, e := range extractErrors(err) { + if !errors.Is(e, target) { + return false + } + } + return true +} + +func (merr *multiError) Format(f fmt.State, c rune) { + if c == 'v' && f.Flag('+') { + merr.writeMultiline(f) + } else { + merr.writeSingleline(f) + } +} + +func (merr *multiError) writeSingleline(w io.Writer) { + first := true + for _, item := range merr.errors { + if first { + first = false + } else { + w.Write(_singlelineSeparator) + } + io.WriteString(w, item.Error()) + } +} + +func (merr *multiError) writeMultiline(w io.Writer) { + w.Write(_multilinePrefix) + for _, item := range merr.errors { + w.Write(_multilineSeparator) + writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item)) + } +} + +// Writes s to the writer with the given prefix added before each line after +// the first. +func writePrefixLine(w io.Writer, prefix []byte, s string) { + first := true + for len(s) > 0 { + if first { + first = false + } else { + w.Write(prefix) + } + + idx := strings.IndexByte(s, '\n') + if idx < 0 { + idx = len(s) - 1 + } + + io.WriteString(w, s[:idx+1]) + s = s[idx+1:] + } +} + +type inspectResult struct { + // Number of top-level non-nil errors + Count int + + // Total number of errors including multiErrors + Capacity int + + // Index of the first non-nil error in the list. Value is meaningless if + // Count is zero. 
+ FirstErrorIdx int + + // Whether the list contains at least one multiError + ContainsMultiError bool +} + +// Inspects the given slice of errors so that we can efficiently allocate +// space for it. +func inspect(errors []error) (res inspectResult) { + first := true + for i, err := range errors { + if err == nil { + continue + } + + res.Count++ + if first { + first = false + res.FirstErrorIdx = i + } + + if merr, ok := err.(*multiError); ok { + res.Capacity += len(merr.errors) + res.ContainsMultiError = true + } else { + res.Capacity++ + } + } + return +} + +// fromSlice converts the given list of errors into a single error. +func fromSlice(errors []error) error { + // Don't pay to inspect small slices. + switch len(errors) { + case 0: + return nil + case 1: + return errors[0] + } + + res := inspect(errors) + switch res.Count { + case 0: + return nil + case 1: + // only one non-nil entry + return errors[res.FirstErrorIdx] + case len(errors): + if !res.ContainsMultiError { + // Error list is flat. Make a copy of it + // Otherwise "errors" escapes to the heap + // unconditionally for all other cases. + // This lets us optimize for the "no errors" case. + out := append(([]error)(nil), errors...) + return &multiError{errors: out} + } + } + + nonNilErrs := make([]error, 0, res.Capacity) + for _, err := range errors[res.FirstErrorIdx:] { + if err == nil { + continue + } + + if nested, ok := err.(*multiError); ok { + nonNilErrs = append(nonNilErrs, nested.errors...) + } else { + nonNilErrs = append(nonNilErrs, err) + } + } + + return &multiError{errors: nonNilErrs} +} + +// Combine combines the passed errors into a single error. +// +// If zero arguments were passed or if all items are nil, a nil error is +// returned. +// +// Combine(nil, nil) // == nil +// +// If only a single error was passed, it is returned as-is. +// +// Combine(err) // == err +// +// Combine skips over nil arguments so this function may be used to combine +// together errors from operations that fail independently of each other. +// +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// pipe.Close(), +// ) +// +// If any of the passed errors is a multierr error, it will be flattened along +// with the other errors. +// +// multierr.Combine(multierr.Combine(err1, err2), err3) +// // is the same as +// multierr.Combine(err1, err2, err3) +// +// The returned error formats into a readable multi-line error message if +// formatted with %+v. +// +// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) +func Combine(errors ...error) error { + return fromSlice(errors) +} + +// Append appends the given errors together. Either value may be nil. +// +// This function is a specialization of Combine for the common case where +// there are only two errors. +// +// err = multierr.Append(reader.Close(), writer.Close()) +// +// The following pattern may also be used to record failure of deferred +// operations without losing information about the original error. +// +// func doSomething(..) (err error) { +// f := acquireResource() +// defer func() { +// err = multierr.Append(err, f.Close()) +// }() +// +// Note that the variable MUST be a named return to append an error to it from +// the defer statement. See also [AppendInvoke]. 
+func Append(left error, right error) error { + switch { + case left == nil: + return right + case right == nil: + return left + } + + if _, ok := right.(*multiError); !ok { + if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) { + // Common case where the error on the left is constantly being + // appended to. + errs := append(l.errors, right) + return &multiError{errors: errs} + } else if !ok { + // Both errors are single errors. + return &multiError{errors: []error{left, right}} + } + } + + // Either right or both, left and right, are multiErrors. Rely on usual + // expensive logic. + errors := [2]error{left, right} + return fromSlice(errors[0:]) +} + +// AppendInto appends an error into the destination of an error pointer and +// returns whether the error being appended was non-nil. +// +// var err error +// multierr.AppendInto(&err, r.Close()) +// multierr.AppendInto(&err, w.Close()) +// +// The above is equivalent to, +// +// err := multierr.Append(r.Close(), w.Close()) +// +// As AppendInto reports whether the provided error was non-nil, it may be +// used to build a multierr error in a loop more ergonomically. For example: +// +// var err error +// for line := range lines { +// var item Item +// if multierr.AppendInto(&err, parse(line, &item)) { +// continue +// } +// items = append(items, item) +// } +// +// Compare this with a version that relies solely on Append: +// +// var err error +// for line := range lines { +// var item Item +// if parseErr := parse(line, &item); parseErr != nil { +// err = multierr.Append(err, parseErr) +// continue +// } +// items = append(items, item) +// } +func AppendInto(into *error, err error) (errored bool) { + if into == nil { + // We panic if 'into' is nil. This is not documented above + // because suggesting that the pointer must be non-nil may + // confuse users into thinking that the error that it points + // to must be non-nil. + panic("misuse of multierr.AppendInto: into pointer must not be nil") + } + + if err == nil { + return false + } + *into = Append(*into, err) + return true +} + +// Invoker is an operation that may fail with an error. Use it with +// AppendInvoke to append the result of calling the function into an error. +// This allows you to conveniently defer capture of failing operations. +// +// See also, [Close] and [Invoke]. +type Invoker interface { + Invoke() error +} + +// Invoke wraps a function which may fail with an error to match the Invoker +// interface. Use it to supply functions matching this signature to +// AppendInvoke. +// +// For example, +// +// func processReader(r io.Reader) (err error) { +// scanner := bufio.NewScanner(r) +// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) +// for scanner.Scan() { +// // ... +// } +// // ... +// } +// +// In this example, the following line will construct the Invoker right away, +// but defer the invocation of scanner.Err() until the function returns. +// +// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) +// +// Note that the error you're appending to from the defer statement MUST be a +// named return. +type Invoke func() error + +// Invoke calls the supplied function and returns its result. +func (i Invoke) Invoke() error { return i() } + +// Close builds an Invoker that closes the provided io.Closer. Use it with +// AppendInvoke to close io.Closers and append their results into an error. 
+//
+// For example,
+//
+//	func processFile(path string) (err error) {
+//		f, err := os.Open(path)
+//		if err != nil {
+//			return err
+//		}
+//		defer multierr.AppendInvoke(&err, multierr.Close(f))
+//		return processReader(f)
+//	}
+//
+// In this example, multierr.Close will construct the Invoker right away, but
+// defer the invocation of f.Close until the function returns.
+//
+//	defer multierr.AppendInvoke(&err, multierr.Close(f))
+//
+// Note that the error you're appending to from the defer statement MUST be a
+// named return.
+func Close(closer io.Closer) Invoker {
+	return Invoke(closer.Close)
+}
+
+// AppendInvoke appends the result of calling the given Invoker into the
+// provided error pointer. Use it with named returns to safely defer
+// invocation of fallible operations until a function returns, and capture the
+// resulting errors.
+//
+//	func doSomething(...) (err error) {
+//		// ...
+//		f, err := openFile(..)
+//		if err != nil {
+//			return err
+//		}
+//
+//		// multierr will call f.Close() when this function returns and
+//		// if the operation fails, it appends its error into the
+//		// returned error.
+//		defer multierr.AppendInvoke(&err, multierr.Close(f))
+//
+//		scanner := bufio.NewScanner(f)
+//		// Similarly, this schedules scanner.Err to be called and
+//		// inspected when the function returns, appending its error
+//		// into the returned error.
+//		defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+//
+//		// ...
+//	}
+//
+// NOTE: If used with a defer, the error variable MUST be a named return.
+//
+// Without defer, AppendInvoke behaves exactly like AppendInto.
+//
+//	err := // ...
+//	multierr.AppendInvoke(&err, multierr.Invoke(foo))
+//
+//	// ...is roughly equivalent to...
+//
+//	err := // ...
+//	multierr.AppendInto(&err, foo())
+//
+// The advantage of the indirection introduced by Invoker is that it makes it
+// easy to defer the invocation of a function. Without this indirection, the
+// invoked function will be evaluated at the time of the defer block rather
+// than when the function returns.
+//
+//	// BAD: This is likely not what the caller intended. This will evaluate
+//	// foo() right away and append its result into the error when the
+//	// function returns.
+//	defer multierr.AppendInto(&err, foo())
+//
+//	// GOOD: This will defer invocation of foo until the function returns.
+//	defer multierr.AppendInvoke(&err, multierr.Invoke(foo))
+//
+// multierr provides a few Invoker implementations out of the box for
+// convenience. See [Invoker] for more information.
+func AppendInvoke(into *error, invoker Invoker) {
+	AppendInto(into, invoker.Invoke())
+}
+
+// AppendFunc is a shorthand for [AppendInvoke].
+// It allows using a function or method value directly
+// without having to wrap it into an [Invoker] interface.
+//
+//	func doSomething(...) (err error) {
+//		w, err := startWorker(...)
+//		if err != nil {
+//			return err
+//		}
+//
+//		// multierr will call w.Stop() when this function returns and
+//		// if the operation fails, it appends its error into the
+//		// returned error.
+//		defer multierr.AppendFunc(&err, w.Stop)
+//	}
+func AppendFunc(into *error, fn func() error) {
+	AppendInvoke(into, Invoke(fn))
+}
diff --git a/vendor/go.uber.org/multierr/error_post_go120.go b/vendor/go.uber.org/multierr/error_post_go120.go
new file mode 100644
index 00000000..a173f9c2
--- /dev/null
+++ b/vendor/go.uber.org/multierr/error_post_go120.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
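// [Editor's note] Illustrative sketch, not part of the vendored patch: the
// AppendInvoke/Close pattern documented above, in one runnable piece. The
// named return err is what lets the deferred append take effect; readConfig
// is this example's own hypothetical name:
//
//	package main
//
//	import (
//		"fmt"
//		"os"
//
//		"go.uber.org/multierr"
//	)
//
//	func readConfig(path string) (err error) {
//		f, err := os.Open(path)
//		if err != nil {
//			return err
//		}
//		// Close runs when readConfig returns; a Close failure is
//		// appended to err instead of being silently dropped.
//		defer multierr.AppendInvoke(&err, multierr.Close(f))
//
//		// ... read from f ...
//		return nil
//	}
//
//	func main() {
//		fmt.Println(readConfig("/nonexistent"))
//	}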
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build go1.20 +// +build go1.20 + +package multierr + +// Unwrap returns a list of errors wrapped by this multierr. +func (merr *multiError) Unwrap() []error { + return merr.Errors() +} + +type multipleErrors interface { + Unwrap() []error +} + +func extractErrors(err error) []error { + if err == nil { + return nil + } + + // check if the given err is an Unwrapable error that + // implements multipleErrors interface. + eg, ok := err.(multipleErrors) + if !ok { + return []error{err} + } + + return append(([]error)(nil), eg.Unwrap()...) +} diff --git a/vendor/go.uber.org/multierr/error_pre_go120.go b/vendor/go.uber.org/multierr/error_pre_go120.go new file mode 100644 index 00000000..93872a3f --- /dev/null +++ b/vendor/go.uber.org/multierr/error_pre_go120.go @@ -0,0 +1,79 @@ +// Copyright (c) 2017-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build !go1.20 +// +build !go1.20 + +package multierr + +import "errors" + +// Versions of Go before 1.20 did not support the Unwrap() []error method. +// This provides a similar behavior by implementing the Is(..) and As(..) +// methods. +// See the errors.Join proposal for details: +// https://github.com/golang/go/issues/53435 + +// As attempts to find the first error in the error list that matches the type +// of the value that target points to. 
+// +// This function allows errors.As to traverse the values stored on the +// multierr error. +func (merr *multiError) As(target interface{}) bool { + for _, err := range merr.Errors() { + if errors.As(err, target) { + return true + } + } + return false +} + +// Is attempts to match the provided error against errors in the error list. +// +// This function allows errors.Is to traverse the values stored on the +// multierr error. +func (merr *multiError) Is(target error) bool { + for _, err := range merr.Errors() { + if errors.Is(err, target) { + return true + } + } + return false +} + +func extractErrors(err error) []error { + if err == nil { + return nil + } + + // Note that we're casting to multiError, not errorGroup. Our contract is + // that returned errors MAY implement errorGroup. Errors, however, only + // has special behavior for multierr-specific error objects. + // + // This behavior can be expanded in the future but I think it's prudent to + // start with as little as possible in terms of contract and possibility + // of misuse. + eg, ok := err.(*multiError) + if !ok { + return []error{err} + } + + return append(([]error)(nil), eg.Errors()...) +} diff --git a/vendor/golang.org/x/crypto/argon2/BUILD.bazel b/vendor/golang.org/x/crypto/argon2/BUILD.bazel new file mode 100644 index 00000000..9c8e57cc --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/BUILD.bazel @@ -0,0 +1,24 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "argon2", + srcs = [ + "argon2.go", + "blake2b.go", + "blamka_amd64.go", + "blamka_amd64.s", + "blamka_generic.go", + "blamka_ref.go", + ], + importmap = "peridot.resf.org/vendor/golang.org/x/crypto/argon2", + importpath = "golang.org/x/crypto/argon2", + visibility = ["//visibility:public"], + deps = [ + "//vendor/golang.org/x/crypto/blake2b", + ] + select({ + "@io_bazel_rules_go//go/platform:amd64": [ + "//vendor/golang.org/x/sys/cpu", + ], + "//conditions:default": [], + }), +) diff --git a/vendor/golang.org/x/crypto/argon2/argon2.go b/vendor/golang.org/x/crypto/argon2/argon2.go new file mode 100644 index 00000000..29f0a2de --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/argon2.go @@ -0,0 +1,283 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package argon2 implements the key derivation function Argon2. +// Argon2 was selected as the winner of the Password Hashing Competition and can +// be used to derive cryptographic keys from passwords. +// +// For a detailed specification of Argon2 see [1]. +// +// If you aren't sure which function you need, use Argon2id (IDKey) and +// the parameter recommendations for your scenario. +// +// # Argon2i +// +// Argon2i (implemented by Key) is the side-channel resistant version of Argon2. +// It uses data-independent memory access, which is preferred for password +// hashing and password-based key derivation. Argon2i requires more passes over +// memory than Argon2id to protect from trade-off attacks. The recommended +// parameters (taken from [2]) for non-interactive operations are time=3 and to +// use the maximum available memory. +// +// # Argon2id +// +// Argon2id (implemented by IDKey) is a hybrid version of Argon2 combining +// Argon2i and Argon2d. It uses data-independent memory access for the first +// half of the first iteration over the memory and data-dependent memory access +// for the rest. 
Argon2id is side-channel resistant and provides better brute-
+// force cost savings due to time-memory tradeoffs than Argon2i. The recommended
+// parameters for non-interactive operations (taken from [2]) are time=1 and to
+// use the maximum available memory.
+//
+// [1] https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf
+// [2] https://tools.ietf.org/html/draft-irtf-cfrg-argon2-03#section-9.3
+package argon2
+
+import (
+	"encoding/binary"
+	"sync"
+
+	"golang.org/x/crypto/blake2b"
+)
+
+// The Argon2 version implemented by this package.
+const Version = 0x13
+
+const (
+	argon2d = iota
+	argon2i
+	argon2id
+)
+
+// Key derives a key from the password, salt, and cost parameters using Argon2i,
+// returning a byte slice of length keyLen that can be used as a cryptographic
+// key. The CPU cost and parallelism degree must be greater than zero.
+//
+// For example, you can get a derived key for e.g. AES-256 (which needs a
+// 32-byte key) by doing:
+//
+//	key := argon2.Key([]byte("some password"), salt, 3, 32*1024, 4, 32)
+//
+// The draft RFC recommends[2] time=3, and memory=32*1024 is a sensible number.
+// If using that amount of memory (32 MB) is not possible in some contexts then
+// the time parameter can be increased to compensate.
+//
+// The time parameter specifies the number of passes over the memory and the
+// memory parameter specifies the size of the memory in KiB. For example
+// memory=32*1024 sets the memory cost to ~32 MB. The number of threads can be
+// adjusted to the number of available CPUs. The cost parameters should be
+// increased as memory latency and CPU parallelism increase. Remember to get a
+// good random salt.
+func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte {
+	return deriveKey(argon2i, password, salt, nil, nil, time, memory, threads, keyLen)
+}
+
+// IDKey derives a key from the password, salt, and cost parameters using
+// Argon2id, returning a byte slice of length keyLen that can be used as a
+// cryptographic key. The CPU cost and parallelism degree must be greater than
+// zero.
+//
+// For example, you can get a derived key for e.g. AES-256 (which needs a
+// 32-byte key) by doing:
+//
+//	key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32)
+//
+// The draft RFC recommends[2] time=1, and memory=64*1024 is a sensible number.
+// If using that amount of memory (64 MB) is not possible in some contexts then
+// the time parameter can be increased to compensate.
+//
+// The time parameter specifies the number of passes over the memory and the
+// memory parameter specifies the size of the memory in KiB. For example
+// memory=64*1024 sets the memory cost to ~64 MB. The number of threads can be
+// adjusted to the number of available CPUs. The cost parameters should be
+// increased as memory latency and CPU parallelism increase. Remember to get a
+// good random salt.
+func IDKey(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + return deriveKey(argon2id, password, salt, nil, nil, time, memory, threads, keyLen) +} + +func deriveKey(mode int, password, salt, secret, data []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + if time < 1 { + panic("argon2: number of rounds too small") + } + if threads < 1 { + panic("argon2: parallelism degree too low") + } + h0 := initHash(password, salt, secret, data, time, memory, uint32(threads), keyLen, mode) + + memory = memory / (syncPoints * uint32(threads)) * (syncPoints * uint32(threads)) + if memory < 2*syncPoints*uint32(threads) { + memory = 2 * syncPoints * uint32(threads) + } + B := initBlocks(&h0, memory, uint32(threads)) + processBlocks(B, time, memory, uint32(threads), mode) + return extractKey(B, memory, uint32(threads), keyLen) +} + +const ( + blockLength = 128 + syncPoints = 4 +) + +type block [blockLength]uint64 + +func initHash(password, salt, key, data []byte, time, memory, threads, keyLen uint32, mode int) [blake2b.Size + 8]byte { + var ( + h0 [blake2b.Size + 8]byte + params [24]byte + tmp [4]byte + ) + + b2, _ := blake2b.New512(nil) + binary.LittleEndian.PutUint32(params[0:4], threads) + binary.LittleEndian.PutUint32(params[4:8], keyLen) + binary.LittleEndian.PutUint32(params[8:12], memory) + binary.LittleEndian.PutUint32(params[12:16], time) + binary.LittleEndian.PutUint32(params[16:20], uint32(Version)) + binary.LittleEndian.PutUint32(params[20:24], uint32(mode)) + b2.Write(params[:]) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(password))) + b2.Write(tmp[:]) + b2.Write(password) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(salt))) + b2.Write(tmp[:]) + b2.Write(salt) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(key))) + b2.Write(tmp[:]) + b2.Write(key) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(data))) + b2.Write(tmp[:]) + b2.Write(data) + b2.Sum(h0[:0]) + return h0 +} + +func initBlocks(h0 *[blake2b.Size + 8]byte, memory, threads uint32) []block { + var block0 [1024]byte + B := make([]block, memory) + for lane := uint32(0); lane < threads; lane++ { + j := lane * (memory / threads) + binary.LittleEndian.PutUint32(h0[blake2b.Size+4:], lane) + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 0) + blake2bHash(block0[:], h0[:]) + for i := range B[j+0] { + B[j+0][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 1) + blake2bHash(block0[:], h0[:]) + for i := range B[j+1] { + B[j+1][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + } + return B +} + +func processBlocks(B []block, time, memory, threads uint32, mode int) { + lanes := memory / threads + segments := lanes / syncPoints + + processSegment := func(n, slice, lane uint32, wg *sync.WaitGroup) { + var addresses, in, zero block + if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) { + in[0] = uint64(n) + in[1] = uint64(lane) + in[2] = uint64(slice) + in[3] = uint64(memory) + in[4] = uint64(time) + in[5] = uint64(mode) + } + + index := uint32(0) + if n == 0 && slice == 0 { + index = 2 // we have already generated the first two blocks + if mode == argon2i || mode == argon2id { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + } + + offset := lane*lanes + slice*segments + index + var random uint64 + for index < segments { + prev := offset - 1 + if index == 0 && slice == 0 { + prev += lanes // last block in lane + } + if mode == argon2i || (mode == 
argon2id && n == 0 && slice < syncPoints/2) {
+			if index%blockLength == 0 {
+				in[6]++
+				processBlock(&addresses, &in, &zero)
+				processBlock(&addresses, &addresses, &zero)
+			}
+			random = addresses[index%blockLength]
+		} else {
+			random = B[prev][0]
+		}
+		newOffset := indexAlpha(random, lanes, segments, threads, n, slice, lane, index)
+		processBlockXOR(&B[offset], &B[prev], &B[newOffset])
+		index, offset = index+1, offset+1
+	}
+	wg.Done()
+}
+
+	for n := uint32(0); n < time; n++ {
+		for slice := uint32(0); slice < syncPoints; slice++ {
+			var wg sync.WaitGroup
+			for lane := uint32(0); lane < threads; lane++ {
+				wg.Add(1)
+				go processSegment(n, slice, lane, &wg)
+			}
+			wg.Wait()
+		}
+	}
+
+}
+
+func extractKey(B []block, memory, threads, keyLen uint32) []byte {
+	lanes := memory / threads
+	for lane := uint32(0); lane < threads-1; lane++ {
+		for i, v := range B[(lane*lanes)+lanes-1] {
+			B[memory-1][i] ^= v
+		}
+	}
+
+	var block [1024]byte
+	for i, v := range B[memory-1] {
+		binary.LittleEndian.PutUint64(block[i*8:], v)
+	}
+	key := make([]byte, keyLen)
+	blake2bHash(key, block[:])
+	return key
+}
+
+func indexAlpha(rand uint64, lanes, segments, threads, n, slice, lane, index uint32) uint32 {
+	refLane := uint32(rand>>32) % threads
+	if n == 0 && slice == 0 {
+		refLane = lane
+	}
+	m, s := 3*segments, ((slice+1)%syncPoints)*segments
+	if lane == refLane {
+		m += index
+	}
+	if n == 0 {
+		m, s = slice*segments, 0
+		if slice == 0 || lane == refLane {
+			m += index
+		}
+	}
+	if index == 0 || lane == refLane {
+		m--
+	}
+	return phi(rand, uint64(m), uint64(s), refLane, lanes)
+}
+
+func phi(rand, m, s uint64, lane, lanes uint32) uint32 {
+	p := rand & 0xFFFFFFFF
+	p = (p * p) >> 32
+	p = (p * m) >> 32
+	return lane*lanes + uint32((s+m-(p+1))%uint64(lanes))
+}
diff --git a/vendor/golang.org/x/crypto/argon2/blake2b.go b/vendor/golang.org/x/crypto/argon2/blake2b.go
new file mode 100644
index 00000000..10f46948
--- /dev/null
+++ b/vendor/golang.org/x/crypto/argon2/blake2b.go
@@ -0,0 +1,53 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package argon2
+
+import (
+	"encoding/binary"
+	"hash"
+
+	"golang.org/x/crypto/blake2b"
+)
+
+// blake2bHash computes an arbitrarily long hash value of in
+// and writes the hash to out.
+func blake2bHash(out []byte, in []byte) {
+	var b2 hash.Hash
+	if n := len(out); n < blake2b.Size {
+		b2, _ = blake2b.New(n, nil)
+	} else {
+		b2, _ = blake2b.New512(nil)
+	}
+
+	var buffer [blake2b.Size]byte
+	binary.LittleEndian.PutUint32(buffer[:4], uint32(len(out)))
+	b2.Write(buffer[:4])
+	b2.Write(in)
+
+	if len(out) <= blake2b.Size {
+		b2.Sum(out[:0])
+		return
+	}
+
+	outLen := len(out)
+	b2.Sum(buffer[:0])
+	b2.Reset()
+	copy(out, buffer[:32])
+	out = out[32:]
+	for len(out) > blake2b.Size {
+		b2.Write(buffer[:])
+		b2.Sum(buffer[:0])
+		copy(out, buffer[:32])
+		out = out[32:]
+		b2.Reset()
+	}
+
+	if outLen%blake2b.Size > 0 { // outLen > 64
+		r := ((outLen + 31) / 32) - 2 // ⌈τ /32⌉-2
+		b2, _ = blake2b.New(outLen-32*r, nil)
+	}
+	b2.Write(buffer[:])
+	b2.Sum(out[:0])
+}
diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go
new file mode 100644
index 00000000..063e7784
--- /dev/null
+++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go
@@ -0,0 +1,60 @@
+// Copyright 2017 The Go Authors. All rights reserved.
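// [Editor's note] Illustrative sketch, not part of the vendored patch: how a
// caller would use the argon2 package vendored above, with the parameters its
// IDKey documentation recommends (time=1, memory=64*1024 KiB, keyLen=32 for
// an AES-256-sized key). The salt handling is this example's own choice:
//
//	package main
//
//	import (
//		"crypto/rand"
//		"fmt"
//
//		"golang.org/x/crypto/argon2"
//	)
//
//	func main() {
//		salt := make([]byte, 16)
//		if _, err := rand.Read(salt); err != nil {
//			panic(err)
//		}
//		key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32)
//		fmt.Printf("%x\n", key)
//	}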
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && gc && !purego + +package argon2 + +import "golang.org/x/sys/cpu" + +func init() { + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func mixBlocksSSE2(out, a, b, c *block) + +//go:noescape +func xorBlocksSSE2(out, a, b, c *block) + +//go:noescape +func blamkaSSE4(b *block) + +func processBlockSSE(out, in1, in2 *block, xor bool) { + var t block + mixBlocksSSE2(&t, in1, in2, &t) + if useSSE4 { + blamkaSSE4(&t) + } else { + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + } + if xor { + xorBlocksSSE2(out, in1, in2, &t) + } else { + mixBlocksSSE2(out, in1, in2, &t) + } +} + +func processBlock(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s new file mode 100644 index 00000000..6713acca --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s @@ -0,0 +1,243 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && gc && !purego + +#include "textflag.h" + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \ + MOVO v0, t0; \ + PMULULQ v2, t0; \ + PADDQ v2, v0; \ + PADDQ t0, v0; \ + PADDQ t0, v0; \ + PXOR v0, v6; \ + PSHUFD $0xB1, v6, v6; \ + MOVO v4, t0; \ + PMULULQ v6, t0; \ + PADDQ v6, v4; \ + PADDQ t0, v4; \ + PADDQ t0, v4; \ + PXOR v4, v2; \ + PSHUFB c40, v2; \ + MOVO v0, t0; \ + PMULULQ v2, t0; \ + PADDQ v2, v0; \ + PADDQ t0, v0; \ + PADDQ t0, v0; \ + PXOR v0, v6; \ + PSHUFB c48, v6; \ + MOVO v4, t0; \ + PMULULQ v6, t0; \ + PADDQ v6, v4; \ + PADDQ t0, v4; \ + PADDQ t0, v4; \ + PXOR v4, v2; \ + MOVO v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVO v1, t0; \ + PMULULQ v3, t0; \ + PADDQ v3, v1; \ + PADDQ t0, v1; \ + PADDQ t0, v1; \ + PXOR v1, v7; \ + PSHUFD $0xB1, v7, v7; \ + MOVO v5, t0; \ + 
PMULULQ v7, t0; \ + PADDQ v7, v5; \ + PADDQ t0, v5; \ + PADDQ t0, v5; \ + PXOR v5, v3; \ + PSHUFB c40, v3; \ + MOVO v1, t0; \ + PMULULQ v3, t0; \ + PADDQ v3, v1; \ + PADDQ t0, v1; \ + PADDQ t0, v1; \ + PXOR v1, v7; \ + PSHUFB c48, v7; \ + MOVO v5, t0; \ + PMULULQ v7, t0; \ + PADDQ v7, v5; \ + PADDQ t0, v5; \ + PADDQ t0, v5; \ + PXOR v5, v3; \ + MOVO v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG_0(block, off) \ + MOVOU 8*(off+0)(block), X0; \ + MOVOU 8*(off+2)(block), X1; \ + MOVOU 8*(off+4)(block), X2; \ + MOVOU 8*(off+6)(block), X3; \ + MOVOU 8*(off+8)(block), X4; \ + MOVOU 8*(off+10)(block), X5; \ + MOVOU 8*(off+12)(block), X6; \ + MOVOU 8*(off+14)(block), X7 + +#define STORE_MSG_0(block, off) \ + MOVOU X0, 8*(off+0)(block); \ + MOVOU X1, 8*(off+2)(block); \ + MOVOU X2, 8*(off+4)(block); \ + MOVOU X3, 8*(off+6)(block); \ + MOVOU X4, 8*(off+8)(block); \ + MOVOU X5, 8*(off+10)(block); \ + MOVOU X6, 8*(off+12)(block); \ + MOVOU X7, 8*(off+14)(block) + +#define LOAD_MSG_1(block, off) \ + MOVOU 8*off+0*8(block), X0; \ + MOVOU 8*off+16*8(block), X1; \ + MOVOU 8*off+32*8(block), X2; \ + MOVOU 8*off+48*8(block), X3; \ + MOVOU 8*off+64*8(block), X4; \ + MOVOU 8*off+80*8(block), X5; \ + MOVOU 8*off+96*8(block), X6; \ + MOVOU 8*off+112*8(block), X7 + +#define STORE_MSG_1(block, off) \ + MOVOU X0, 8*off+0*8(block); \ + MOVOU X1, 8*off+16*8(block); \ + MOVOU X2, 8*off+32*8(block); \ + MOVOU X3, 8*off+48*8(block); \ + MOVOU X4, 8*off+64*8(block); \ + MOVOU X5, 8*off+80*8(block); \ + MOVOU X6, 8*off+96*8(block); \ + MOVOU X7, 8*off+112*8(block) + +#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \ + LOAD_MSG_0(block, off); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ + STORE_MSG_0(block, off) + +#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \ + LOAD_MSG_1(block, off); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ + STORE_MSG_1(block, off) + +// func blamkaSSE4(b *block) +TEXT ·blamkaSSE4(SB), 4, $0-8 + MOVQ b+0(FP), AX + + MOVOU ·c40<>(SB), X10 + MOVOU ·c48<>(SB), X11 + + BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11) + + BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11) + RET + +// func mixBlocksSSE2(out, a, b, c *block) +TEXT ·mixBlocksSSE2(SB), 4, $0-32 + MOVQ out+0(FP), DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ c+24(FP), CX + MOVQ $128, DI + +loop: + MOVOU 0(AX), X0 + MOVOU 0(BX), X1 + MOVOU 0(CX), X2 + PXOR X1, X0 + PXOR X2, X0 + MOVOU X0, 0(DX) + ADDQ $16, AX + ADDQ $16, BX + ADDQ $16, CX + ADDQ $16, DX + SUBQ $2, DI + JA loop + RET + +// func xorBlocksSSE2(out, a, b, c *block) +TEXT ·xorBlocksSSE2(SB), 4, $0-32 + MOVQ out+0(FP), 
DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ c+24(FP), CX + MOVQ $128, DI + +loop: + MOVOU 0(AX), X0 + MOVOU 0(BX), X1 + MOVOU 0(CX), X2 + MOVOU 0(DX), X3 + PXOR X1, X0 + PXOR X2, X0 + PXOR X3, X0 + MOVOU X0, 0(DX) + ADDQ $16, AX + ADDQ $16, BX + ADDQ $16, CX + ADDQ $16, DX + SUBQ $2, DI + JA loop + RET diff --git a/vendor/golang.org/x/crypto/argon2/blamka_generic.go b/vendor/golang.org/x/crypto/argon2/blamka_generic.go new file mode 100644 index 00000000..a481b224 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_generic.go @@ -0,0 +1,163 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package argon2 + +var useSSE4 bool + +func processBlockGeneric(out, in1, in2 *block, xor bool) { + var t block + for i := range t { + t[i] = in1[i] ^ in2[i] + } + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + if xor { + for i := range t { + out[i] ^= in1[i] ^ in2[i] ^ t[i] + } + } else { + for i := range t { + out[i] = in1[i] ^ in2[i] ^ t[i] + } + } +} + +func blamkaGeneric(t00, t01, t02, t03, t04, t05, t06, t07, t08, t09, t10, t11, t12, t13, t14, t15 *uint64) { + v00, v01, v02, v03 := *t00, *t01, *t02, *t03 + v04, v05, v06, v07 := *t04, *t05, *t06, *t07 + v08, v09, v10, v11 := *t08, *t09, *t10, *t11 + v12, v13, v14, v15 := *t12, *t13, *t14, *t15 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>32 | v12<<32 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>24 | v04<<40 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>16 | v12<<48 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>63 | v04<<1 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>32 | v13<<32 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>24 | v05<<40 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>16 | v13<<48 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>63 | v05<<1 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>32 | v14<<32 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>24 | v06<<40 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>16 | v14<<48 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>63 | v06<<1 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>32 | v15<<32 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>24 | v07<<40 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>16 | v15<<48 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>63 | v07<<1 + + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>32 | v15<<32 + v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>24 | v05<<40 + + 
v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>16 | v15<<48 + v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>63 | v05<<1 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>32 | v12<<32 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>24 | v06<<40 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>16 | v12<<48 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>63 | v06<<1 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>32 | v13<<32 + v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>24 | v07<<40 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>16 | v13<<48 + v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>63 | v07<<1 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>32 | v14<<32 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>24 | v04<<40 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>16 | v14<<48 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>63 | v04<<1 + + *t00, *t01, *t02, *t03 = v00, v01, v02, v03 + *t04, *t05, *t06, *t07 = v04, v05, v06, v07 + *t08, *t09, *t10, *t11 = v08, v09, v10, v11 + *t12, *t13, *t14, *t15 = v12, v13, v14, v15 +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_ref.go b/vendor/golang.org/x/crypto/argon2/blamka_ref.go new file mode 100644 index 00000000..16d58c65 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_ref.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc + +package argon2 + +func processBlock(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/blake2b/BUILD.bazel b/vendor/golang.org/x/crypto/blake2b/BUILD.bazel new file mode 100644 index 00000000..561e2387 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/BUILD.bazel @@ -0,0 +1,24 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "blake2b", + srcs = [ + "blake2b.go", + "blake2bAVX2_amd64.go", + "blake2bAVX2_amd64.s", + "blake2b_amd64.s", + "blake2b_generic.go", + "blake2b_ref.go", + "blake2x.go", + "register.go", + ], + importmap = "peridot.resf.org/vendor/golang.org/x/crypto/blake2b", + importpath = "golang.org/x/crypto/blake2b", + visibility = ["//visibility:public"], + deps = select({ + "@io_bazel_rules_go//go/platform:amd64": [ + "//vendor/golang.org/x/sys/cpu", + ], + "//conditions:default": [], + }), +) diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b.go b/vendor/golang.org/x/crypto/blake2b/blake2b.go new file mode 100644 index 00000000..d2e98d42 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b.go @@ -0,0 +1,291 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blake2b implements the BLAKE2b hash algorithm defined by RFC 7693 +// and the extendable output function (XOF) BLAKE2Xb. 
+// +// BLAKE2b is optimized for 64-bit platforms—including NEON-enabled ARMs—and +// produces digests of any size between 1 and 64 bytes. +// For a detailed specification of BLAKE2b see https://blake2.net/blake2.pdf +// and for BLAKE2Xb see https://blake2.net/blake2x.pdf +// +// If you aren't sure which function you need, use BLAKE2b (Sum512 or New512). +// If you need a secret-key MAC (message authentication code), use the New512 +// function with a non-nil key. +// +// BLAKE2X is a construction to compute hash values larger than 64 bytes. It +// can produce hash values between 0 and 4 GiB. +package blake2b + +import ( + "encoding/binary" + "errors" + "hash" +) + +const ( + // The blocksize of BLAKE2b in bytes. + BlockSize = 128 + // The hash size of BLAKE2b-512 in bytes. + Size = 64 + // The hash size of BLAKE2b-384 in bytes. + Size384 = 48 + // The hash size of BLAKE2b-256 in bytes. + Size256 = 32 +) + +var ( + useAVX2 bool + useAVX bool + useSSE4 bool +) + +var ( + errKeySize = errors.New("blake2b: invalid key size") + errHashSize = errors.New("blake2b: invalid hash size") +) + +var iv = [8]uint64{ + 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, + 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, +} + +// Sum512 returns the BLAKE2b-512 checksum of the data. +func Sum512(data []byte) [Size]byte { + var sum [Size]byte + checkSum(&sum, Size, data) + return sum +} + +// Sum384 returns the BLAKE2b-384 checksum of the data. +func Sum384(data []byte) [Size384]byte { + var sum [Size]byte + var sum384 [Size384]byte + checkSum(&sum, Size384, data) + copy(sum384[:], sum[:Size384]) + return sum384 +} + +// Sum256 returns the BLAKE2b-256 checksum of the data. +func Sum256(data []byte) [Size256]byte { + var sum [Size]byte + var sum256 [Size256]byte + checkSum(&sum, Size256, data) + copy(sum256[:], sum[:Size256]) + return sum256 +} + +// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) } + +// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) } + +// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) } + +// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length. +// A non-nil key turns the hash into a MAC. The key must be between zero and 64 bytes long. +// The hash size can be a value between 1 and 64 but it is highly recommended to use +// values equal or greater than: +// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long). +// - 16 if BLAKE2b is used as a MAC function (The key is at least 16 bytes long). +// When the key is nil, the returned hash.Hash implements BinaryMarshaler +// and BinaryUnmarshaler for state (de)serialization as documented by hash.Hash. 
+func New(size int, key []byte) (hash.Hash, error) { return newDigest(size, key) } + +func newDigest(hashSize int, key []byte) (*digest, error) { + if hashSize < 1 || hashSize > Size { + return nil, errHashSize + } + if len(key) > Size { + return nil, errKeySize + } + d := &digest{ + size: hashSize, + keyLen: len(key), + } + copy(d.key[:], key) + d.Reset() + return d, nil +} + +func checkSum(sum *[Size]byte, hashSize int, data []byte) { + h := iv + h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24) + var c [2]uint64 + + if length := len(data); length > BlockSize { + n := length &^ (BlockSize - 1) + if length == n { + n -= BlockSize + } + hashBlocks(&h, &c, 0, data[:n]) + data = data[n:] + } + + var block [BlockSize]byte + offset := copy(block[:], data) + remaining := uint64(BlockSize - offset) + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h[:(hashSize+7)/8] { + binary.LittleEndian.PutUint64(sum[8*i:], v) + } +} + +type digest struct { + h [8]uint64 + c [2]uint64 + size int + block [BlockSize]byte + offset int + + key [BlockSize]byte + keyLen int +} + +const ( + magic = "b2b" + marshaledSize = len(magic) + 8*8 + 2*8 + 1 + BlockSize + 1 +) + +func (d *digest) MarshalBinary() ([]byte, error) { + if d.keyLen != 0 { + return nil, errors.New("crypto/blake2b: cannot marshal MACs") + } + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + for i := 0; i < 8; i++ { + b = appendUint64(b, d.h[i]) + } + b = appendUint64(b, d.c[0]) + b = appendUint64(b, d.c[1]) + // Maximum value for size is 64 + b = append(b, byte(d.size)) + b = append(b, d.block[:]...) + b = append(b, byte(d.offset)) + return b, nil +} + +func (d *digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("crypto/blake2b: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("crypto/blake2b: invalid hash state size") + } + b = b[len(magic):] + for i := 0; i < 8; i++ { + b, d.h[i] = consumeUint64(b) + } + b, d.c[0] = consumeUint64(b) + b, d.c[1] = consumeUint64(b) + d.size = int(b[0]) + b = b[1:] + copy(d.block[:], b[:BlockSize]) + b = b[BlockSize:] + d.offset = int(b[0]) + return nil +} + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Size() int { return d.size } + +func (d *digest) Reset() { + d.h = iv + d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24) + d.offset, d.c[0], d.c[1] = 0, 0, 0 + if d.keyLen > 0 { + d.block = d.key + d.offset = BlockSize + } +} + +func (d *digest) Write(p []byte) (n int, err error) { + n = len(p) + + if d.offset > 0 { + remaining := BlockSize - d.offset + if n <= remaining { + d.offset += copy(d.block[d.offset:], p) + return + } + copy(d.block[d.offset:], p[:remaining]) + hashBlocks(&d.h, &d.c, 0, d.block[:]) + d.offset = 0 + p = p[remaining:] + } + + if length := len(p); length > BlockSize { + nn := length &^ (BlockSize - 1) + if length == nn { + nn -= BlockSize + } + hashBlocks(&d.h, &d.c, 0, p[:nn]) + p = p[nn:] + } + + if len(p) > 0 { + d.offset += copy(d.block[:], p) + } + + return +} + +func (d *digest) Sum(sum []byte) []byte { + var hash [Size]byte + d.finalize(&hash) + return append(sum, hash[:d.size]...) 
+} + +func (d *digest) finalize(hash *[Size]byte) { + var block [BlockSize]byte + copy(block[:], d.block[:d.offset]) + remaining := uint64(BlockSize - d.offset) + + c := d.c + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + h := d.h + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h { + binary.LittleEndian.PutUint64(hash[8*i:], v) + } +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.BigEndian.PutUint64(a[:], x) + return append(b, a[:]...) +} + +func appendUint32(b []byte, x uint32) []byte { + var a [4]byte + binary.BigEndian.PutUint32(a[:], x) + return append(b, a[:]...) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := binary.BigEndian.Uint64(b) + return b[8:], x +} + +func consumeUint32(b []byte) ([]byte, uint32) { + x := binary.BigEndian.Uint32(b) + return b[4:], x +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go new file mode 100644 index 00000000..199c21d2 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -0,0 +1,37 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && gc && !purego + +package blake2b + +import "golang.org/x/sys/cpu" + +func init() { + useAVX2 = cpu.X86.HasAVX2 + useAVX = cpu.X86.HasAVX + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + switch { + case useAVX2: + hashBlocksAVX2(h, c, flag, blocks) + case useAVX: + hashBlocksAVX(h, c, flag, blocks) + case useSSE4: + hashBlocksSSE4(h, c, flag, blocks) + default: + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s new file mode 100644 index 00000000..9ae8206c --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -0,0 +1,744 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
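blake2b.go together with the CPU-feature dispatch in blake2bAVX2_amd64.go is everything a caller needs; the remaining files are the hand-written kernels behind hashBlocks. A minimal usage sketch against just the exported API shown above (the key and message values are illustrative):

package main

import (
	"fmt"

	"golang.org/x/crypto/blake2b"
)

func main() {
	// One-shot 256-bit digest.
	sum := blake2b.Sum256([]byte("hello, peridot"))
	fmt.Printf("digest: %x\n", sum)

	// A non-nil key (up to 64 bytes) turns the hash into a MAC.
	mac, err := blake2b.New512([]byte("0123456789abcdef0123456789abcdef"))
	if err != nil {
		panic(err)
	}
	mac.Write([]byte("message"))
	fmt.Printf("mac: %x\n", mac.Sum(nil))
}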
+ +//go:build amd64 && gc && !purego + +#include "textflag.h" + +DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16 + +#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 +#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 +#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e +#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 +#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 + +#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ + VPADDQ m0, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m1, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y1_Y1; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y3_Y3; \ + VPADDQ m2, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m3, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y3_Y3; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y1_Y1 + +#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E +#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26 +#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E +#define 
VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36 +#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E + +#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n +#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n +#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n +#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n +#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n + +#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01 +#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01 +#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01 +#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01 +#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01 + +#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01 + +#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8 +#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01 + +// load msg: Y12 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y12, Y12 + +// load msg: Y13 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \ + VMOVQ_SI_X13(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X13(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y13, Y13 + +// load msg: Y14 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \ + VMOVQ_SI_X14(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X14(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y14, Y14 + +// load msg: Y15 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \ + VMOVQ_SI_X15(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X15(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X11(6*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \ + LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \ + LOAD_MSG_AVX2_Y15(9, 11, 13, 15) + +#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \ + LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \ + LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \ + VMOVQ_SI_X11(11*8); \ + VPSHUFD $0x4E, 0*8(SI), X14; \ + VPINSRQ_1_SI_X11(5*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(12, 2, 7, 3) + +#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \ + VMOVQ_SI_X11(5*8); \ + VMOVDQU 11*8(SI), X12; \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y12, Y12; 
\ + VMOVQ_SI_X13(8*8); \ + VMOVQ_SI_X11(2*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X11(13*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \ + LOAD_MSG_AVX2_Y15(14, 6, 1, 4) + +#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \ + LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \ + LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \ + LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \ + VMOVQ_SI_X15(6*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X15(10*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \ + LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X13(7*8); \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \ + LOAD_MSG_AVX2_Y15(1, 12, 8, 13) + +#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \ + LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \ + LOAD_MSG_AVX2_Y15(13, 5, 14, 9) + +#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \ + LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \ + LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \ + VMOVQ_SI_X14_0; \ + VPSHUFD $0x4E, 8*8(SI), X11; \ + VPINSRQ_1_SI_X14(6*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(7, 3, 2, 11) + +#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \ + LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \ + LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \ + LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \ + VMOVQ_SI_X15_0; \ + VMOVQ_SI_X11(6*8); \ + VPINSRQ_1_SI_X15(4*8); \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \ + VMOVQ_SI_X12(6*8); \ + VMOVQ_SI_X11(11*8); \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \ + VMOVQ_SI_X11(1*8); \ + VMOVDQU 12*8(SI), X14; \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + VMOVQ_SI_X15(2*8); \ + VMOVDQU 4*8(SI), X11; \ + VPINSRQ_1_SI_X15(7*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \ + LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \ + VMOVQ_SI_X13(2*8); \ + VPSHUFD $0x4E, 5*8(SI), X11; \ + VPINSRQ_1_SI_X13(4*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \ + VMOVQ_SI_X15(11*8); \ + VMOVQ_SI_X11(12*8); \ + VPINSRQ_1_SI_X15(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y15, Y15 + +// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, DX + ADDQ $31, DX + ANDQ $~31, DX + + MOVQ CX, 16(DX) + XORQ CX, CX + MOVQ CX, 24(DX) + + VMOVDQU ·AVX2_c40<>(SB), Y4 + VMOVDQU ·AVX2_c48<>(SB), Y5 + + VMOVDQU 0(AX), Y8 + VMOVDQU 32(AX), Y9 + VMOVDQU ·AVX2_iv0<>(SB), Y6 + VMOVDQU ·AVX2_iv1<>(SB), Y7 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + MOVQ R9, 8(DX) + +loop: + ADDQ $128, R8 + MOVQ R8, 0(DX) + CMPQ R8, $128 + JGE noinc + INCQ R9 + MOVQ R9, 8(DX) + +noinc: + VMOVDQA Y8, Y0 + VMOVDQA Y9, Y1 + VMOVDQA Y6, Y2 + VPXOR 0(DX), Y7, Y3 + + LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() + VMOVDQA Y12, 32(DX) + VMOVDQA Y13, 64(DX) + VMOVDQA Y14, 96(DX) + VMOVDQA Y15, 128(DX) + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + 
LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() + VMOVDQA Y12, 160(DX) + VMOVDQA Y13, 192(DX) + VMOVDQA Y14, 224(DX) + VMOVDQA Y15, 256(DX) + + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + + ROUND_AVX2(32(DX), 64(DX), 96(DX), 128(DX), Y10, Y4, Y5) + ROUND_AVX2(160(DX), 192(DX), 224(DX), 256(DX), Y10, Y4, Y5) + + VPXOR Y0, Y8, Y8 + VPXOR Y1, Y9, Y9 + VPXOR Y2, Y8, Y8 + VPXOR Y3, Y9, Y9 + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + VMOVDQU Y8, 0(AX) + VMOVDQU Y9, 32(AX) + VZEROUPPER + + RET + +#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA +#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB +#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF +#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD +#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE + +#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF +#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF + +#define SHUFFLE_AVX() \ + VMOVDQA X6, X13; \ + VMOVDQA X2, X14; \ + VMOVDQA X4, X6; \ + VPUNPCKLQDQ_X13_X13_X15; \ + VMOVDQA X5, X4; \ + VMOVDQA X6, X5; \ + VPUNPCKHQDQ_X15_X7_X6; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X13_X7; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VPUNPCKHQDQ_X15_X2_X2; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X3_X3; \ + +#define SHUFFLE_AVX_INV() \ + VMOVDQA X2, X13; \ + VMOVDQA X4, X14; \ + VPUNPCKLQDQ_X2_X2_X15; \ + VMOVDQA X5, X4; \ + VPUNPCKHQDQ_X15_X3_X2; \ + VMOVDQA X14, X5; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VMOVDQA X6, X14; \ + VPUNPCKHQDQ_X15_X13_X3; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X6_X6; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X7_X7; \ + +#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + VPADDQ m0, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m1, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFD $-79, v6, v6; \ + VPSHUFD $-79, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + 
VPXOR v5, v3, v3; \ + VPSHUFB c40, v2, v2; \ + VPSHUFB c40, v3, v3; \ + VPADDQ m2, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m3, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFB c48, v6, v6; \ + VPSHUFB c48, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPADDQ v2, v2, t0; \ + VPSRLQ $63, v2, v2; \ + VPXOR t0, v2, v2; \ + VPADDQ v3, v3, t0; \ + VPSRLQ $63, v3, v3; \ + VPXOR t0, v3, v3 + +// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7) +// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0 +#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X13(i2*8); \ + VMOVQ_SI_X14(i4*8); \ + VMOVQ_SI_X15(i6*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X13(i3*8); \ + VPINSRQ_1_SI_X14(i5*8); \ + VPINSRQ_1_SI_X15(i7*8) + +// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7) +#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(1*8); \ + VMOVQ_SI_X15(5*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X13(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(7*8) + +// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3) +#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \ + VPSHUFD $0x4E, 0*8(SI), X12; \ + VMOVQ_SI_X13(11*8); \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(7*8); \ + VPINSRQ_1_SI_X13(5*8); \ + VPINSRQ_1_SI_X14(2*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13) +#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \ + VMOVDQU 11*8(SI), X12; \ + VMOVQ_SI_X13(5*8); \ + VMOVQ_SI_X14(8*8); \ + VMOVQ_SI_X15(2*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14_0; \ + VPINSRQ_1_SI_X15(13*8) + +// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8) +#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(6*8); \ + VMOVQ_SI_X15_0; \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15) +#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \ + VMOVQ_SI_X12(9*8); \ + VMOVQ_SI_X13(2*8); \ + VMOVQ_SI_X14_0; \ + VMOVQ_SI_X15(4*8); \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VPINSRQ_1_SI_X15(15*8) + +// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3) +#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(11*8); \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X13(8*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11) +#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \ + MOVQ 0*8(SI), X12; \ + VPSHUFD $0x4E, 8*8(SI), X13; \ + MOVQ 7*8(SI), X14; \ + MOVQ 2*8(SI), X15; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(11*8) + +// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8) +#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \ + MOVQ 6*8(SI), X12; \ + MOVQ 11*8(SI), X13; \ + MOVQ 15*8(SI), X14; \ + MOVQ 3*8(SI), X15; \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X14(9*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10) +#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \ + MOVQ 5*8(SI), X12; \ + MOVQ 8*8(SI), X13; \ + MOVQ 0*8(SI), X14; \ + MOVQ 6*8(SI), X15; \ + 
VPINSRQ_1_SI_X12(15*8); \ + VPINSRQ_1_SI_X13(2*8); \ + VPINSRQ_1_SI_X14(4*8); \ + VPINSRQ_1_SI_X15(10*8) + +// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5) +#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \ + VMOVDQU 12*8(SI), X12; \ + MOVQ 1*8(SI), X13; \ + MOVQ 2*8(SI), X14; \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VMOVDQU 4*8(SI), X15 + +// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0) +#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \ + MOVQ 15*8(SI), X12; \ + MOVQ 3*8(SI), X13; \ + MOVQ 11*8(SI), X14; \ + MOVQ 12*8(SI), X15; \ + VPINSRQ_1_SI_X12(9*8); \ + VPINSRQ_1_SI_X13(13*8); \ + VPINSRQ_1_SI_X14(14*8); \ + VPINSRQ_1_SI_X15_0 + +// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, R10 + ADDQ $15, R10 + ANDQ $~15, R10 + + VMOVDQU ·AVX_c40<>(SB), X0 + VMOVDQU ·AVX_c48<>(SB), X1 + VMOVDQA X0, X8 + VMOVDQA X1, X9 + + VMOVDQU ·AVX_iv3<>(SB), X0 + VMOVDQA X0, 0(R10) + XORQ CX, 0(R10) // 0(R10) = ·AVX_iv3 ^ (CX || 0) + + VMOVDQU 0(AX), X10 + VMOVDQU 16(AX), X11 + VMOVDQU 32(AX), X2 + VMOVDQU 48(AX), X3 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + VMOVQ_R8_X15 + VPINSRQ_1_R9_X15 + + VMOVDQA X10, X0 + VMOVDQA X11, X1 + VMOVDQU ·AVX_iv0<>(SB), X4 + VMOVDQU ·AVX_iv1<>(SB), X5 + VMOVDQU ·AVX_iv2<>(SB), X6 + + VPXOR X15, X6, X6 + VMOVDQA 0(R10), X7 + + LOAD_MSG_AVX_0_2_4_6_1_3_5_7() + VMOVDQA X12, 16(R10) + VMOVDQA X13, 32(R10) + VMOVDQA X14, 48(R10) + VMOVDQA X15, 64(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15) + VMOVDQA X12, 80(R10) + VMOVDQA X13, 96(R10) + VMOVDQA X14, 112(R10) + VMOVDQA X15, 128(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6) + VMOVDQA X12, 144(R10) + VMOVDQA X13, 160(R10) + VMOVDQA X14, 176(R10) + VMOVDQA X15, 192(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_1_0_11_5_12_2_7_3() + VMOVDQA X12, 208(R10) + VMOVDQA X13, 224(R10) + VMOVDQA X14, 240(R10) + VMOVDQA X15, 256(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_11_12_5_15_8_0_2_13() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_2_5_4_15_6_10_0_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_9_5_2_10_0_7_4_15() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_2_6_0_8_12_10_11_3() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(4, 7, 
15, 1, 13, 5, 14, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_0_6_9_8_7_3_2_11() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_5_15_8_2_0_4_6_10() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_6_14_11_0_15_9_3_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_12_13_1_10_2_7_4_5() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_15_9_3_13_11_14_12_0() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X15, X8, X9) + SHUFFLE_AVX_INV() + + VMOVDQU 32(AX), X14 + VMOVDQU 48(AX), X15 + VPXOR X0, X10, X10 + VPXOR X1, X11, X11 + VPXOR X2, X14, X14 + VPXOR X3, X15, X15 + VPXOR X4, X10, X10 + VPXOR X5, X11, X11 + VPXOR X6, X14, X2 + VPXOR X7, X15, X3 + VMOVDQU X2, 32(AX) + VMOVDQU X3, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + VMOVDQU X10, 0(AX) + VMOVDQU X11, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + VZEROUPPER + + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s new file mode 100644 index 00000000..adfac00c --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -0,0 +1,278 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
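The AVX2 and AVX kernels above, the SSE4 kernel beginning here, and the portable Go fallback further down all implement the same hashBlocks contract, so a single known-answer test exercises whichever path the host CPU's feature detection selects. A sketch of such a check; the expected digest is the BLAKE2b-512("abc") vector from RFC 7693, Appendix A:

package blake2b_test

import (
	"encoding/hex"
	"testing"

	"golang.org/x/crypto/blake2b"
)

// BLAKE2b-512("abc") from RFC 7693, Appendix A.
const abcVector = "ba80a53f981c4d0d6a2797b69f12f6e9" +
	"4c212f14685ac4b74b12bb6fdbffa2d1" +
	"7d87c5392aab792dc252d5de4533cc95" +
	"18d38aa8dbf1925ab92386edd4009923"

func TestSum512RFCVector(t *testing.T) {
	sum := blake2b.Sum512([]byte("abc"))
	if got := hex.EncodeToString(sum[:]); got != abcVector {
		t.Fatalf("Sum512(\"abc\") = %s, want %s", got, abcVector)
	}
}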
+ +//go:build amd64 && gc && !purego + +#include "textflag.h" + +DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + PADDQ m0, v0; \ + PADDQ m1, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFD $0xB1, v6, v6; \ + PSHUFD $0xB1, v7, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + PSHUFB c40, v2; \ + PSHUFB c40, v3; \ + PADDQ m2, v0; \ + PADDQ m3, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFB c48, v6; \ + PSHUFB c48, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + MOVOU v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVOU v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \ + MOVQ i0*8(src), m0; \ + PINSRQ $1, i1*8(src), m0; \ + MOVQ i2*8(src), m1; \ + PINSRQ $1, i3*8(src), m1; \ + MOVQ i4*8(src), m2; \ + PINSRQ $1, i5*8(src), m2; \ + MOVQ i6*8(src), m3; \ + PINSRQ $1, i7*8(src), m3 + +// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, R10 + ADDQ $15, R10 + ANDQ $~15, R10 + + MOVOU ·iv3<>(SB), X0 + MOVO X0, 0(R10) + XORQ CX, 0(R10) // 0(R10) = ·iv3 ^ (CX || 0) + + MOVOU ·c40<>(SB), X13 + MOVOU ·c48<>(SB), X14 + + MOVOU 0(AX), X12 + MOVOU 16(AX), X15 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + MOVQ R8, X8 + PINSRQ $1, R9, X8 + + MOVO X12, X0 + MOVO X15, X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU ·iv0<>(SB), X4 + MOVOU ·iv1<>(SB), X5 + MOVOU ·iv2<>(SB), X6 + + PXOR X8, X6 + MOVO 0(R10), X7 + + LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) + MOVO X8, 16(R10) + MOVO X9, 32(R10) + MOVO X10, 48(R10) + MOVO X11, 64(R10) + HALF_ROUND(X0, X1, 
X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) + MOVO X8, 80(R10) + MOVO X9, 96(R10) + MOVO X10, 112(R10) + MOVO X11, 128(R10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) + MOVO X8, 144(R10) + MOVO X9, 160(R10) + MOVO X10, 176(R10) + MOVO X11, 192(R10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) + MOVO X8, 208(R10) + MOVO X9, 224(R10) + MOVO X10, 240(R10) + MOVO X11, 256(R10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, 
X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + MOVOU 32(AX), X10 + MOVOU 48(AX), X11 + PXOR X0, X12 + PXOR X1, X15 + PXOR X2, X10 + PXOR X3, X11 + PXOR X4, X12 + PXOR X5, X15 + PXOR X6, X10 + PXOR X7, X11 + MOVOU X10, 32(AX) + MOVOU X11, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVOU X12, 0(AX) + MOVOU X15, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go new file mode 100644 index 00000000..3168a8aa --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go @@ -0,0 +1,182 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "encoding/binary" + "math/bits" +) + +// the precomputed values for BLAKE2b +// there are 12 16-byte arrays - one for each round +// the entries are calculated from the sigma constants. 
+var precomputed = [12][16]byte{ + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, + {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4}, + {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8}, + {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13}, + {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9}, + {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11}, + {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10}, + {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5}, + {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0}, + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second +} + +func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + var m [16]uint64 + c0, c1 := c[0], c[1] + + for i := 0; i < len(blocks); { + c0 += BlockSize + if c0 < BlockSize { + c1++ + } + + v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] + v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7] + v12 ^= c0 + v13 ^= c1 + v14 ^= flag + + for j := range m { + m[j] = binary.LittleEndian.Uint64(blocks[i:]) + i += 8 + } + + for j := range precomputed { + s := &(precomputed[j]) + + v0 += m[s[0]] + v0 += v4 + v12 ^= v0 + v12 = bits.RotateLeft64(v12, -32) + v8 += v12 + v4 ^= v8 + v4 = bits.RotateLeft64(v4, -24) + v1 += m[s[1]] + v1 += v5 + v13 ^= v1 + v13 = bits.RotateLeft64(v13, -32) + v9 += v13 + v5 ^= v9 + v5 = bits.RotateLeft64(v5, -24) + v2 += m[s[2]] + v2 += v6 + v14 ^= v2 + v14 = bits.RotateLeft64(v14, -32) + v10 += v14 + v6 ^= v10 + v6 = bits.RotateLeft64(v6, -24) + v3 += m[s[3]] + v3 += v7 + v15 ^= v3 + v15 = bits.RotateLeft64(v15, -32) + v11 += v15 + v7 ^= v11 + v7 = bits.RotateLeft64(v7, -24) + + v0 += m[s[4]] + v0 += v4 + v12 ^= v0 + v12 = bits.RotateLeft64(v12, -16) + v8 += v12 + v4 ^= v8 + v4 = bits.RotateLeft64(v4, -63) + v1 += m[s[5]] + v1 += v5 + v13 ^= v1 + v13 = bits.RotateLeft64(v13, -16) + v9 += v13 + v5 ^= v9 + v5 = bits.RotateLeft64(v5, -63) + v2 += m[s[6]] + v2 += v6 + v14 ^= v2 + v14 = bits.RotateLeft64(v14, -16) + v10 += v14 + v6 ^= v10 + v6 = bits.RotateLeft64(v6, -63) + v3 += m[s[7]] + v3 += v7 + v15 ^= v3 + v15 = bits.RotateLeft64(v15, -16) + v11 += v15 + v7 ^= v11 + v7 = bits.RotateLeft64(v7, -63) + + v0 += m[s[8]] + v0 += v5 + v15 ^= v0 + v15 = bits.RotateLeft64(v15, -32) + v10 += v15 + v5 ^= v10 + v5 = bits.RotateLeft64(v5, -24) + v1 += m[s[9]] + v1 += v6 + v12 ^= v1 + v12 = bits.RotateLeft64(v12, -32) + v11 += v12 + v6 ^= v11 + v6 = bits.RotateLeft64(v6, -24) + v2 += m[s[10]] + v2 += v7 + v13 ^= v2 + v13 = bits.RotateLeft64(v13, -32) + v8 += v13 + v7 ^= v8 + v7 = bits.RotateLeft64(v7, -24) + v3 += m[s[11]] + v3 += v4 + v14 ^= v3 + v14 = bits.RotateLeft64(v14, -32) + v9 += v14 + v4 ^= v9 + v4 = bits.RotateLeft64(v4, -24) + + v0 += m[s[12]] + v0 += v5 + v15 ^= v0 + v15 = bits.RotateLeft64(v15, -16) + v10 += v15 + v5 ^= v10 + v5 = bits.RotateLeft64(v5, -63) + v1 += m[s[13]] + v1 += v6 + v12 ^= v1 + v12 = bits.RotateLeft64(v12, -16) + v11 += v12 + v6 ^= v11 + v6 = bits.RotateLeft64(v6, -63) + v2 += m[s[14]] + v2 += v7 + v13 ^= v2 + v13 = bits.RotateLeft64(v13, -16) + v8 += v13 + v7 ^= v8 + v7 = bits.RotateLeft64(v7, -63) + v3 += m[s[15]] + v3 += v4 + v14 ^= v3 + v14 = bits.RotateLeft64(v14, -16) + v9 += v14 + v4 ^= v9 + v4 = bits.RotateLeft64(v4, -63) + + } + + h[0] ^= v0 ^ v8 + h[1] ^= v1 ^ v9 + h[2] ^= v2 ^ 
v10 + h[3] ^= v3 ^ v11 + h[4] ^= v4 ^ v12 + h[5] ^= v5 ^ v13 + h[6] ^= v6 ^ v14 + h[7] ^= v7 ^ v15 + } + c[0], c[1] = c0, c1 +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go new file mode 100644 index 00000000..6e28668c --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc + +package blake2b + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + hashBlocksGeneric(h, c, flag, blocks) +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2x.go b/vendor/golang.org/x/crypto/blake2b/blake2x.go new file mode 100644 index 00000000..52c414db --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2x.go @@ -0,0 +1,177 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "encoding/binary" + "errors" + "io" +) + +// XOF defines the interface to hash functions that +// support arbitrary-length output. +type XOF interface { + // Write absorbs more data into the hash's state. It panics if called + // after Read. + io.Writer + + // Read reads more output from the hash. It returns io.EOF if the limit + // has been reached. + io.Reader + + // Clone returns a copy of the XOF in its current state. + Clone() XOF + + // Reset resets the XOF to its initial state. + Reset() +} + +// OutputLengthUnknown can be used as the size argument to NewXOF to indicate +// the length of the output is not known in advance. +const OutputLengthUnknown = 0 + +// magicUnknownOutputLength is a magic value for the output size that indicates +// an unknown number of output bytes. +const magicUnknownOutputLength = (1 << 32) - 1 + +// maxOutputLength is the absolute maximum number of bytes to produce when the +// number of output bytes is unknown. +const maxOutputLength = (1 << 32) * 64 + +// NewXOF creates a new variable-output-length hash. The hash either produces a +// known number of bytes (1 <= size < 2**32-1), or an unknown number of bytes +// (size == OutputLengthUnknown). In the latter case, an absolute limit of +// 256GiB applies. +// +// A non-nil key turns the hash into a MAC. The key must be between +// zero and 32 bytes long. +func NewXOF(size uint32, key []byte) (XOF, error) { + if len(key) > Size { + return nil, errKeySize + } + if size == magicUnknownOutputLength { + // 2^32-1 indicates an unknown number of bytes and thus isn't a + // valid length.
+ return nil, errors.New("blake2b: XOF length too large") + } + if size == OutputLengthUnknown { + size = magicUnknownOutputLength + } + x := &xof{ + d: digest{ + size: Size, + keyLen: len(key), + }, + length: size, + } + copy(x.d.key[:], key) + x.Reset() + return x, nil +} + +type xof struct { + d digest + length uint32 + remaining uint64 + cfg, root, block [Size]byte + offset int + nodeOffset uint32 + readMode bool +} + +func (x *xof) Write(p []byte) (n int, err error) { + if x.readMode { + panic("blake2b: write to XOF after read") + } + return x.d.Write(p) +} + +func (x *xof) Clone() XOF { + clone := *x + return &clone +} + +func (x *xof) Reset() { + x.cfg[0] = byte(Size) + binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length + binary.LittleEndian.PutUint32(x.cfg[12:], x.length) // XOF length + x.cfg[17] = byte(Size) // inner hash size + + x.d.Reset() + x.d.h[1] ^= uint64(x.length) << 32 + + x.remaining = uint64(x.length) + if x.remaining == magicUnknownOutputLength { + x.remaining = maxOutputLength + } + x.offset, x.nodeOffset = 0, 0 + x.readMode = false +} + +func (x *xof) Read(p []byte) (n int, err error) { + if !x.readMode { + x.d.finalize(&x.root) + x.readMode = true + } + + if x.remaining == 0 { + return 0, io.EOF + } + + n = len(p) + if uint64(n) > x.remaining { + n = int(x.remaining) + p = p[:n] + } + + if x.offset > 0 { + blockRemaining := Size - x.offset + if n < blockRemaining { + x.offset += copy(p, x.block[x.offset:]) + x.remaining -= uint64(n) + return + } + copy(p, x.block[x.offset:]) + p = p[blockRemaining:] + x.offset = 0 + x.remaining -= uint64(blockRemaining) + } + + for len(p) >= Size { + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + copy(p, x.block[:]) + p = p[Size:] + x.remaining -= uint64(Size) + } + + if todo := len(p); todo > 0 { + if x.remaining < uint64(Size) { + x.cfg[0] = byte(x.remaining) + } + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + x.offset = copy(p, x.block[:todo]) + x.remaining -= uint64(todo) + } + return +} + +func (d *digest) initConfig(cfg *[Size]byte) { + d.offset, d.c[0], d.c[1] = 0, 0, 0 + for i := range d.h { + d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(cfg[i*8:]) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go new file mode 100644 index 00000000..54e446e1 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/register.go @@ -0,0 +1,30 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
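+ +// This file wires the BLAKE2b constructors into the standard library's +// crypto package: once this package is imported, crypto.BLAKE2b_256, +// crypto.BLAKE2b_384 and crypto.BLAKE2b_512 resolve to the New256, New384 and +// New512 functions registered in init below.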
+ +package blake2b + +import ( + "crypto" + "hash" +) + +func init() { + newHash256 := func() hash.Hash { + h, _ := New256(nil) + return h + } + newHash384 := func() hash.Hash { + h, _ := New384(nil) + return h + } + + newHash512 := func() hash.Hash { + h, _ := New512(nil) + return h + } + + crypto.RegisterHash(crypto.BLAKE2b_256, newHash256) + crypto.RegisterHash(crypto.BLAKE2b_384, newHash384) + crypto.RegisterHash(crypto.BLAKE2b_512, newHash512) +} diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go index 213bf204..08989568 100644 --- a/vendor/golang.org/x/crypto/blowfish/cipher.go +++ b/vendor/golang.org/x/crypto/blowfish/cipher.go @@ -11,7 +11,7 @@ // Deprecated: any new system should use AES (from crypto/aes, if necessary in // an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from // golang.org/x/crypto/chacha20poly1305). -package blowfish // import "golang.org/x/crypto/blowfish" +package blowfish // The code is a port of Bruce Schneier's C implementation. // See https://www.schneier.com/blowfish.html. diff --git a/vendor/golang.org/x/crypto/cast5/cast5.go b/vendor/golang.org/x/crypto/cast5/cast5.go index 425e8eec..016e9021 100644 --- a/vendor/golang.org/x/crypto/cast5/cast5.go +++ b/vendor/golang.org/x/crypto/cast5/cast5.go @@ -11,7 +11,7 @@ // Deprecated: any new system should use AES (from crypto/aes, if necessary in // an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from // golang.org/x/crypto/chacha20poly1305). -package cast5 // import "golang.org/x/crypto/cast5" +package cast5 import ( "errors" diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s index 66aebae2..c672ccf6 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s @@ -33,6 +33,9 @@ #define CONSTBASE R16 #define BLOCKS R17 +// for VPERMXOR +#define MASK R18 + DATA consts<>+0x00(SB)/8, $0x3320646e61707865 DATA consts<>+0x08(SB)/8, $0x6b20657479622d32 DATA consts<>+0x10(SB)/8, $0x0000000000000001 @@ -53,7 +56,11 @@ DATA consts<>+0x80(SB)/8, $0x6b2065746b206574 DATA consts<>+0x88(SB)/8, $0x6b2065746b206574 DATA consts<>+0x90(SB)/8, $0x0000000100000000 DATA consts<>+0x98(SB)/8, $0x0000000300000002 -GLOBL consts<>(SB), RODATA, $0xa0 +DATA consts<>+0xa0(SB)/8, $0x5566774411223300 +DATA consts<>+0xa8(SB)/8, $0xddeeffcc99aabb88 +DATA consts<>+0xb0(SB)/8, $0x6677445522330011 +DATA consts<>+0xb8(SB)/8, $0xeeffccddaabb8899 +GLOBL consts<>(SB), RODATA, $0xc0 //func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 @@ -70,6 +77,9 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 MOVD $48, R10 MOVD $64, R11 SRD $6, LEN, BLOCKS + // for VPERMXOR + MOVD $consts<>+0xa0(SB), MASK + MOVD $16, R20 // V16 LXVW4X (CONSTBASE)(R0), VS48 ADD $80,CONSTBASE @@ -87,6 +97,10 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 // V28 LXVW4X (CONSTBASE)(R11), VS60 + // Load mask constants for VPERMXOR + LXVW4X (MASK)(R0), V20 + LXVW4X (MASK)(R20), V21 + // splat slot from V19 -> V26 VSPLTW $0, V19, V26 @@ -97,7 +111,7 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 MOVD $10, R14 MOVD R14, CTR - + PCALIGN $16 loop_outer_vsx: // V0, V1, V2, V3 LXVW4X (R0)(CONSTBASE), VS32 @@ -128,22 +142,17 @@ loop_outer_vsx: VSPLTISW $12, V28 VSPLTISW $8, V29 VSPLTISW $7, V30 - + PCALIGN $16 loop_vsx: VADDUWM V0, V4, V0 VADDUWM V1, V5, V1 VADDUWM V2, V6, V2 VADDUWM V3, V7, V3 - VXOR V12, 
V0, V12 - VXOR V13, V1, V13 - VXOR V14, V2, V14 - VXOR V15, V3, V15 - - VRLW V12, V27, V12 - VRLW V13, V27, V13 - VRLW V14, V27, V14 - VRLW V15, V27, V15 + VPERMXOR V12, V0, V21, V12 + VPERMXOR V13, V1, V21, V13 + VPERMXOR V14, V2, V21, V14 + VPERMXOR V15, V3, V21, V15 VADDUWM V8, V12, V8 VADDUWM V9, V13, V9 @@ -165,15 +174,10 @@ loop_vsx: VADDUWM V2, V6, V2 VADDUWM V3, V7, V3 - VXOR V12, V0, V12 - VXOR V13, V1, V13 - VXOR V14, V2, V14 - VXOR V15, V3, V15 - - VRLW V12, V29, V12 - VRLW V13, V29, V13 - VRLW V14, V29, V14 - VRLW V15, V29, V15 + VPERMXOR V12, V0, V20, V12 + VPERMXOR V13, V1, V20, V13 + VPERMXOR V14, V2, V20, V14 + VPERMXOR V15, V3, V20, V15 VADDUWM V8, V12, V8 VADDUWM V9, V13, V9 @@ -195,15 +199,10 @@ loop_vsx: VADDUWM V2, V7, V2 VADDUWM V3, V4, V3 - VXOR V15, V0, V15 - VXOR V12, V1, V12 - VXOR V13, V2, V13 - VXOR V14, V3, V14 - - VRLW V15, V27, V15 - VRLW V12, V27, V12 - VRLW V13, V27, V13 - VRLW V14, V27, V14 + VPERMXOR V15, V0, V21, V15 + VPERMXOR V12, V1, V21, V12 + VPERMXOR V13, V2, V21, V13 + VPERMXOR V14, V3, V21, V14 VADDUWM V10, V15, V10 VADDUWM V11, V12, V11 @@ -225,15 +224,10 @@ loop_vsx: VADDUWM V2, V7, V2 VADDUWM V3, V4, V3 - VXOR V15, V0, V15 - VXOR V12, V1, V12 - VXOR V13, V2, V13 - VXOR V14, V3, V14 - - VRLW V15, V29, V15 - VRLW V12, V29, V12 - VRLW V13, V29, V13 - VRLW V14, V29, V14 + VPERMXOR V15, V0, V20, V15 + VPERMXOR V12, V1, V20, V12 + VPERMXOR V13, V2, V20, V13 + VPERMXOR V14, V3, V20, V14 VADDUWM V10, V15, V10 VADDUWM V11, V12, V11 @@ -249,48 +243,48 @@ loop_vsx: VRLW V6, V30, V6 VRLW V7, V30, V7 VRLW V4, V30, V4 - BC 16, LT, loop_vsx + BDNZ loop_vsx VADDUWM V12, V26, V12 - WORD $0x13600F8C // VMRGEW V0, V1, V27 - WORD $0x13821F8C // VMRGEW V2, V3, V28 + VMRGEW V0, V1, V27 + VMRGEW V2, V3, V28 - WORD $0x10000E8C // VMRGOW V0, V1, V0 - WORD $0x10421E8C // VMRGOW V2, V3, V2 + VMRGOW V0, V1, V0 + VMRGOW V2, V3, V2 - WORD $0x13A42F8C // VMRGEW V4, V5, V29 - WORD $0x13C63F8C // VMRGEW V6, V7, V30 + VMRGEW V4, V5, V29 + VMRGEW V6, V7, V30 XXPERMDI VS32, VS34, $0, VS33 XXPERMDI VS32, VS34, $3, VS35 XXPERMDI VS59, VS60, $0, VS32 XXPERMDI VS59, VS60, $3, VS34 - WORD $0x10842E8C // VMRGOW V4, V5, V4 - WORD $0x10C63E8C // VMRGOW V6, V7, V6 + VMRGOW V4, V5, V4 + VMRGOW V6, V7, V6 - WORD $0x13684F8C // VMRGEW V8, V9, V27 - WORD $0x138A5F8C // VMRGEW V10, V11, V28 + VMRGEW V8, V9, V27 + VMRGEW V10, V11, V28 XXPERMDI VS36, VS38, $0, VS37 XXPERMDI VS36, VS38, $3, VS39 XXPERMDI VS61, VS62, $0, VS36 XXPERMDI VS61, VS62, $3, VS38 - WORD $0x11084E8C // VMRGOW V8, V9, V8 - WORD $0x114A5E8C // VMRGOW V10, V11, V10 + VMRGOW V8, V9, V8 + VMRGOW V10, V11, V10 - WORD $0x13AC6F8C // VMRGEW V12, V13, V29 - WORD $0x13CE7F8C // VMRGEW V14, V15, V30 + VMRGEW V12, V13, V29 + VMRGEW V14, V15, V30 XXPERMDI VS40, VS42, $0, VS41 XXPERMDI VS40, VS42, $3, VS43 XXPERMDI VS59, VS60, $0, VS40 XXPERMDI VS59, VS60, $3, VS42 - WORD $0x118C6E8C // VMRGOW V12, V13, V12 - WORD $0x11CE7E8C // VMRGOW V14, V15, V14 + VMRGOW V12, V13, V12 + VMRGOW V14, V15, V14 VSPLTISW $4, V27 VADDUWM V26, V27, V26 @@ -431,7 +425,7 @@ tail_vsx: ADD $-1, R11, R12 ADD $-1, INP ADD $-1, OUT - + PCALIGN $16 looptail_vsx: // Copying the result to OUT // in bytes. 
@@ -439,7 +433,7 @@ looptail_vsx: MOVBZU 1(INP), TMP XOR KEY, TMP, KEY MOVBU KEY, 1(OUT) - BC 16, LT, looptail_vsx + BDNZ looptail_vsx // Clear the stack values STXVW4X VS48, (R11)(R0) diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go index 93da7322..8cf5d811 100644 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go @@ -5,7 +5,7 @@ // Package chacha20poly1305 implements the ChaCha20-Poly1305 AEAD and its // extended nonce variant XChaCha20-Poly1305, as specified in RFC 8439 and // draft-irtf-cfrg-xchacha-01. -package chacha20poly1305 // import "golang.org/x/crypto/chacha20poly1305" +package chacha20poly1305 import ( "crypto/cipher" diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go index cda8e3ed..90ef6a24 100644 --- a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go @@ -4,7 +4,7 @@ // Package asn1 contains supporting types for parsing and building ASN.1 // messages with the cryptobyte package. -package asn1 // import "golang.org/x/crypto/cryptobyte/asn1" +package asn1 // Tag represents an ASN.1 identifier octet, consisting of a tag number // (indicating a type) and class (such as context-specific or constructed). diff --git a/vendor/golang.org/x/crypto/cryptobyte/string.go b/vendor/golang.org/x/crypto/cryptobyte/string.go index 10692a8a..4b0f8097 100644 --- a/vendor/golang.org/x/crypto/cryptobyte/string.go +++ b/vendor/golang.org/x/crypto/cryptobyte/string.go @@ -15,7 +15,7 @@ // // See the documentation and examples for the Builder and String types to get // started. -package cryptobyte // import "golang.org/x/crypto/cryptobyte" +package cryptobyte // String represents a string of bytes. It provides methods for parsing // fixed-length and length-prefixed values from it. diff --git a/vendor/golang.org/x/crypto/curve25519/BUILD.bazel b/vendor/golang.org/x/crypto/curve25519/BUILD.bazel index 3d663a1d..fe1e8d45 100644 --- a/vendor/golang.org/x/crypto/curve25519/BUILD.bazel +++ b/vendor/golang.org/x/crypto/curve25519/BUILD.bazel @@ -2,13 +2,8 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "curve25519", - srcs = [ - "curve25519.go", - "curve25519_compat.go", - "curve25519_go120.go", - ], + srcs = ["curve25519.go"], importmap = "peridot.resf.org/vendor/golang.org/x/crypto/curve25519", importpath = "golang.org/x/crypto/curve25519", visibility = ["//visibility:public"], - deps = ["//vendor/golang.org/x/crypto/curve25519/internal/field"], ) diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go index 00f963ea..21ca3b2e 100644 --- a/vendor/golang.org/x/crypto/curve25519/curve25519.go +++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go @@ -6,9 +6,11 @@ // performs scalar multiplication on the elliptic curve known as Curve25519. // See RFC 7748. // -// Starting in Go 1.20, this package is a wrapper for the X25519 implementation +// This package is a wrapper for the X25519 implementation // in the crypto/ecdh package. -package curve25519 // import "golang.org/x/crypto/curve25519" +package curve25519 + +import "crypto/ecdh" // ScalarMult sets dst to the product scalar * point. // @@ -16,7 +18,13 @@ package curve25519 // import "golang.org/x/crypto/curve25519" // zeroes, irrespective of the scalar. 
Instead, use the X25519 function, which // will return an error. func ScalarMult(dst, scalar, point *[32]byte) { - scalarMult(dst, scalar, point) + if _, err := x25519(dst, scalar[:], point[:]); err != nil { + // The only error condition for x25519 when the inputs are 32 bytes long + // is if the output would have been the all-zero value. + for i := range dst { + dst[i] = 0 + } + } } // ScalarBaseMult sets dst to the product scalar * base where base is the @@ -25,7 +33,12 @@ func ScalarMult(dst, scalar, point *[32]byte) { // It is recommended to use the X25519 function with Basepoint instead, as // copying into fixed size arrays can lead to unexpected bugs. func ScalarBaseMult(dst, scalar *[32]byte) { - scalarBaseMult(dst, scalar) + curve := ecdh.X25519() + priv, err := curve.NewPrivateKey(scalar[:]) + if err != nil { + panic("curve25519: internal error: scalarBaseMult was not 32 bytes") + } + copy(dst[:], priv.PublicKey().Bytes()) } const ( @@ -57,3 +70,21 @@ func X25519(scalar, point []byte) ([]byte, error) { var dst [32]byte return x25519(&dst, scalar, point) } + +func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { + curve := ecdh.X25519() + pub, err := curve.NewPublicKey(point) + if err != nil { + return nil, err + } + priv, err := curve.NewPrivateKey(scalar) + if err != nil { + return nil, err + } + out, err := priv.ECDH(pub) + if err != nil { + return nil, err + } + copy(dst[:], out) + return dst[:], nil +} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_compat.go b/vendor/golang.org/x/crypto/curve25519/curve25519_compat.go deleted file mode 100644 index ba647e8d..00000000 --- a/vendor/golang.org/x/crypto/curve25519/curve25519_compat.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.20 - -package curve25519 - -import ( - "crypto/subtle" - "errors" - "strconv" - - "golang.org/x/crypto/curve25519/internal/field" -) - -func scalarMult(dst, scalar, point *[32]byte) { - var e [32]byte - - copy(e[:], scalar[:]) - e[0] &= 248 - e[31] &= 127 - e[31] |= 64 - - var x1, x2, z2, x3, z3, tmp0, tmp1 field.Element - x1.SetBytes(point[:]) - x2.One() - x3.Set(&x1) - z3.One() - - swap := 0 - for pos := 254; pos >= 0; pos-- { - b := e[pos/8] >> uint(pos&7) - b &= 1 - swap ^= int(b) - x2.Swap(&x3, swap) - z2.Swap(&z3, swap) - swap = int(b) - - tmp0.Subtract(&x3, &z3) - tmp1.Subtract(&x2, &z2) - x2.Add(&x2, &z2) - z2.Add(&x3, &z3) - z3.Multiply(&tmp0, &x2) - z2.Multiply(&z2, &tmp1) - tmp0.Square(&tmp1) - tmp1.Square(&x2) - x3.Add(&z3, &z2) - z2.Subtract(&z3, &z2) - x2.Multiply(&tmp1, &tmp0) - tmp1.Subtract(&tmp1, &tmp0) - z2.Square(&z2) - - z3.Mult32(&tmp1, 121666) - x3.Square(&x3) - tmp0.Add(&tmp0, &z3) - z3.Multiply(&x1, &z2) - z2.Multiply(&tmp1, &tmp0) - } - - x2.Swap(&x3, swap) - z2.Swap(&z3, swap) - - z2.Invert(&z2) - x2.Multiply(&x2, &z2) - copy(dst[:], x2.Bytes()) -} - -func scalarBaseMult(dst, scalar *[32]byte) { - checkBasepoint() - scalarMult(dst, scalar, &basePoint) -} - -func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { - var in [32]byte - if l := len(scalar); l != 32 { - return nil, errors.New("bad scalar length: " + strconv.Itoa(l) + ", expected 32") - } - if l := len(point); l != 32 { - return nil, errors.New("bad point length: " + strconv.Itoa(l) + ", expected 32") - } - copy(in[:], scalar) - if &point[0] == &Basepoint[0] { - scalarBaseMult(dst, &in) - } else { - var base, zero [32]byte - copy(base[:], point) - scalarMult(dst, &in, &base) - if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 { - return nil, errors.New("bad input point: low order point") - } - } - return dst[:], nil -} - -func checkBasepoint() { - if subtle.ConstantTimeCompare(Basepoint, []byte{ - 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }) != 1 { - panic("curve25519: global Basepoint value was modified") - } -} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_go120.go b/vendor/golang.org/x/crypto/curve25519/curve25519_go120.go deleted file mode 100644 index 627df497..00000000 --- a/vendor/golang.org/x/crypto/curve25519/curve25519_go120.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.20 - -package curve25519 - -import "crypto/ecdh" - -func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { - curve := ecdh.X25519() - pub, err := curve.NewPublicKey(point) - if err != nil { - return nil, err - } - priv, err := curve.NewPrivateKey(scalar) - if err != nil { - return nil, err - } - out, err := priv.ECDH(pub) - if err != nil { - return nil, err - } - copy(dst[:], out) - return dst[:], nil -} - -func scalarMult(dst, scalar, point *[32]byte) { - if _, err := x25519(dst, scalar[:], point[:]); err != nil { - // The only error condition for x25519 when the inputs are 32 bytes long - // is if the output would have been the all-zero value. 
- for i := range dst { - dst[i] = 0 - } - } -} - -func scalarBaseMult(dst, scalar *[32]byte) { - curve := ecdh.X25519() - priv, err := curve.NewPrivateKey(scalar[:]) - if err != nil { - panic("curve25519: internal error: scalarBaseMult was not 32 bytes") - } - copy(dst[:], priv.PublicKey().Bytes()) -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/BUILD.bazel b/vendor/golang.org/x/crypto/curve25519/internal/field/BUILD.bazel deleted file mode 100644 index bf8dc243..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "field", - srcs = [ - "fe.go", - "fe_amd64.go", - "fe_amd64.s", - "fe_amd64_noasm.go", - "fe_arm64.go", - "fe_arm64.s", - "fe_arm64_noasm.go", - "fe_generic.go", - ], - importmap = "peridot.resf.org/vendor/golang.org/x/crypto/curve25519/internal/field", - importpath = "golang.org/x/crypto/curve25519/internal/field", - visibility = ["//vendor/golang.org/x/crypto/curve25519:__subpackages__"], -) diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/README b/vendor/golang.org/x/crypto/curve25519/internal/field/README deleted file mode 100644 index e25bca7d..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/README +++ /dev/null @@ -1,7 +0,0 @@ -This package is kept in sync with crypto/ed25519/internal/edwards25519/field in -the standard library. - -If there are any changes in the standard library that need to be synced to this -package, run sync.sh. It will not overwrite any local changes made since the -previous sync, so it's ok to land changes in this package first, and then sync -to the standard library later. diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go deleted file mode 100644 index ca841ad9..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go +++ /dev/null @@ -1,416 +0,0 @@ -// Copyright (c) 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package field implements fast arithmetic modulo 2^255-19. -package field - -import ( - "crypto/subtle" - "encoding/binary" - "math/bits" -) - -// Element represents an element of the field GF(2^255-19). Note that this -// is not a cryptographically secure group, and should only be used to interact -// with edwards25519.Point coordinates. -// -// This type works similarly to math/big.Int, and all arguments and receivers -// are allowed to alias. -// -// The zero value is a valid zero element. -type Element struct { - // An element t represents the integer - // t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204 - // - // Between operations, all limbs are expected to be lower than 2^52. - l0 uint64 - l1 uint64 - l2 uint64 - l3 uint64 - l4 uint64 -} - -const maskLow51Bits uint64 = (1 << 51) - 1 - -var feZero = &Element{0, 0, 0, 0, 0} - -// Zero sets v = 0, and returns v. -func (v *Element) Zero() *Element { - *v = *feZero - return v -} - -var feOne = &Element{1, 0, 0, 0, 0} - -// One sets v = 1, and returns v. -func (v *Element) One() *Element { - *v = *feOne - return v -} - -// reduce reduces v modulo 2^255 - 19 and returns it. -func (v *Element) reduce() *Element { - v.carryPropagate() - - // After the light reduction we now have a field element representation - // v < 2^255 + 2^13 * 19, but need v < 2^255 - 19. 
- - // If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1, - // generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise. - c := (v.l0 + 19) >> 51 - c = (v.l1 + c) >> 51 - c = (v.l2 + c) >> 51 - c = (v.l3 + c) >> 51 - c = (v.l4 + c) >> 51 - - // If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's - // effectively applying the reduction identity to the carry. - v.l0 += 19 * c - - v.l1 += v.l0 >> 51 - v.l0 = v.l0 & maskLow51Bits - v.l2 += v.l1 >> 51 - v.l1 = v.l1 & maskLow51Bits - v.l3 += v.l2 >> 51 - v.l2 = v.l2 & maskLow51Bits - v.l4 += v.l3 >> 51 - v.l3 = v.l3 & maskLow51Bits - // no additional carry - v.l4 = v.l4 & maskLow51Bits - - return v -} - -// Add sets v = a + b, and returns v. -func (v *Element) Add(a, b *Element) *Element { - v.l0 = a.l0 + b.l0 - v.l1 = a.l1 + b.l1 - v.l2 = a.l2 + b.l2 - v.l3 = a.l3 + b.l3 - v.l4 = a.l4 + b.l4 - // Using the generic implementation here is actually faster than the - // assembly. Probably because the body of this function is so simple that - // the compiler can figure out better optimizations by inlining the carry - // propagation. TODO - return v.carryPropagateGeneric() -} - -// Subtract sets v = a - b, and returns v. -func (v *Element) Subtract(a, b *Element) *Element { - // We first add 2 * p, to guarantee the subtraction won't underflow, and - // then subtract b (which can be up to 2^255 + 2^13 * 19). - v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0 - v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1 - v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2 - v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3 - v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4 - return v.carryPropagate() -} - -// Negate sets v = -a, and returns v. -func (v *Element) Negate(a *Element) *Element { - return v.Subtract(feZero, a) -} - -// Invert sets v = 1/z mod p, and returns v. -// -// If z == 0, Invert returns v = 0. -func (v *Element) Invert(z *Element) *Element { - // Inversion is implemented as exponentiation with exponent p − 2. It uses the - // same sequence of 255 squarings and 11 multiplications as [Curve25519]. 
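The chain below computes z^(2^255 - 21), i.e. z^(p-2). A standalone math/big sketch of the same identity (illustrative only, not part of the vendored file):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^255 - 19, the curve25519 field prime.
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	z := big.NewInt(123456789)

	// Fermat inversion: z^(p-2) mod p ...
	byExp := new(big.Int).Exp(z, new(big.Int).Sub(p, big.NewInt(2)), p)

	// ... agrees with the extended-Euclid modular inverse.
	byGCD := new(big.Int).ModInverse(z, p)

	fmt.Println(byExp.Cmp(byGCD) == 0) // true
}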
- var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element - - z2.Square(z) // 2 - t.Square(&z2) // 4 - t.Square(&t) // 8 - z9.Multiply(&t, z) // 9 - z11.Multiply(&z9, &z2) // 11 - t.Square(&z11) // 22 - z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0 - - t.Square(&z2_5_0) // 2^6 - 2^1 - for i := 0; i < 4; i++ { - t.Square(&t) // 2^10 - 2^5 - } - z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0 - - t.Square(&z2_10_0) // 2^11 - 2^1 - for i := 0; i < 9; i++ { - t.Square(&t) // 2^20 - 2^10 - } - z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0 - - t.Square(&z2_20_0) // 2^21 - 2^1 - for i := 0; i < 19; i++ { - t.Square(&t) // 2^40 - 2^20 - } - t.Multiply(&t, &z2_20_0) // 2^40 - 2^0 - - t.Square(&t) // 2^41 - 2^1 - for i := 0; i < 9; i++ { - t.Square(&t) // 2^50 - 2^10 - } - z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0 - - t.Square(&z2_50_0) // 2^51 - 2^1 - for i := 0; i < 49; i++ { - t.Square(&t) // 2^100 - 2^50 - } - z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0 - - t.Square(&z2_100_0) // 2^101 - 2^1 - for i := 0; i < 99; i++ { - t.Square(&t) // 2^200 - 2^100 - } - t.Multiply(&t, &z2_100_0) // 2^200 - 2^0 - - t.Square(&t) // 2^201 - 2^1 - for i := 0; i < 49; i++ { - t.Square(&t) // 2^250 - 2^50 - } - t.Multiply(&t, &z2_50_0) // 2^250 - 2^0 - - t.Square(&t) // 2^251 - 2^1 - t.Square(&t) // 2^252 - 2^2 - t.Square(&t) // 2^253 - 2^3 - t.Square(&t) // 2^254 - 2^4 - t.Square(&t) // 2^255 - 2^5 - - return v.Multiply(&t, &z11) // 2^255 - 21 -} - -// Set sets v = a, and returns v. -func (v *Element) Set(a *Element) *Element { - *v = *a - return v -} - -// SetBytes sets v to x, which must be a 32-byte little-endian encoding. -// -// Consistent with RFC 7748, the most significant bit (the high bit of the -// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1) -// are accepted. Note that this is laxer than specified by RFC 8032. -func (v *Element) SetBytes(x []byte) *Element { - if len(x) != 32 { - panic("edwards25519: invalid field element input size") - } - - // Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51). - v.l0 = binary.LittleEndian.Uint64(x[0:8]) - v.l0 &= maskLow51Bits - // Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51). - v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3 - v.l1 &= maskLow51Bits - // Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51). - v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6 - v.l2 &= maskLow51Bits - // Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51). - v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1 - v.l3 &= maskLow51Bits - // Bits 204:251 (bytes 24:32, bits 192:256, shift 12, mask 51). - // Note: not bytes 25:33, shift 4, to avoid overread. - v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12 - v.l4 &= maskLow51Bits - - return v -} - -// Bytes returns the canonical 32-byte little-endian encoding of v. -func (v *Element) Bytes() []byte { - // This function is outlined to make the allocations inline in the caller - // rather than happen on the heap. - var out [32]byte - return v.bytes(&out) -} - -func (v *Element) bytes(out *[32]byte) []byte { - t := *v - t.reduce() - - var buf [8]byte - for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} { - bitsOffset := i * 51 - binary.LittleEndian.PutUint64(buf[:], l<<uint(bitsOffset%8)) - for i, bb := range buf { - off := bitsOffset/8 + i - if off >= len(out) { - break - } - out[off] |= bb - } - } - - return out[:] -} - -// Equal returns 1 if v and u are equal, and 0 otherwise.
-func (v *Element) Equal(u *Element) int { - sa, sv := u.Bytes(), v.Bytes() - return subtle.ConstantTimeCompare(sa, sv) -} - -// mask64Bits returns 0xffffffff if cond is 1, and 0 otherwise. -func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) } - -// Select sets v to a if cond == 1, and to b if cond == 0. -func (v *Element) Select(a, b *Element, cond int) *Element { - m := mask64Bits(cond) - v.l0 = (m & a.l0) | (^m & b.l0) - v.l1 = (m & a.l1) | (^m & b.l1) - v.l2 = (m & a.l2) | (^m & b.l2) - v.l3 = (m & a.l3) | (^m & b.l3) - v.l4 = (m & a.l4) | (^m & b.l4) - return v -} - -// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0, and returns v. -func (v *Element) Swap(u *Element, cond int) { - m := mask64Bits(cond) - t := m & (v.l0 ^ u.l0) - v.l0 ^= t - u.l0 ^= t - t = m & (v.l1 ^ u.l1) - v.l1 ^= t - u.l1 ^= t - t = m & (v.l2 ^ u.l2) - v.l2 ^= t - u.l2 ^= t - t = m & (v.l3 ^ u.l3) - v.l3 ^= t - u.l3 ^= t - t = m & (v.l4 ^ u.l4) - v.l4 ^= t - u.l4 ^= t -} - -// IsNegative returns 1 if v is negative, and 0 otherwise. -func (v *Element) IsNegative() int { - return int(v.Bytes()[0] & 1) -} - -// Absolute sets v to |u|, and returns v. -func (v *Element) Absolute(u *Element) *Element { - return v.Select(new(Element).Negate(u), u, u.IsNegative()) -} - -// Multiply sets v = x * y, and returns v. -func (v *Element) Multiply(x, y *Element) *Element { - feMul(v, x, y) - return v -} - -// Square sets v = x * x, and returns v. -func (v *Element) Square(x *Element) *Element { - feSquare(v, x) - return v -} - -// Mult32 sets v = x * y, and returns v. -func (v *Element) Mult32(x *Element, y uint32) *Element { - x0lo, x0hi := mul51(x.l0, y) - x1lo, x1hi := mul51(x.l1, y) - x2lo, x2hi := mul51(x.l2, y) - x3lo, x3hi := mul51(x.l3, y) - x4lo, x4hi := mul51(x.l4, y) - v.l0 = x0lo + 19*x4hi // carried over per the reduction identity - v.l1 = x1lo + x0hi - v.l2 = x2lo + x1hi - v.l3 = x3lo + x2hi - v.l4 = x4lo + x3hi - // The hi portions are going to be only 32 bits, plus any previous excess, - // so we can skip the carry propagation. - return v -} - -// mul51 returns lo + hi * 2⁵¹ = a * b. -func mul51(a uint64, b uint32) (lo uint64, hi uint64) { - mh, ml := bits.Mul64(a, uint64(b)) - lo = ml & maskLow51Bits - hi = (mh << 13) | (ml >> 51) - return -} - -// Pow22523 set v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3. 
-func (v *Element) Pow22523(x *Element) *Element { - var t0, t1, t2 Element - - t0.Square(x) // x^2 - t1.Square(&t0) // x^4 - t1.Square(&t1) // x^8 - t1.Multiply(x, &t1) // x^9 - t0.Multiply(&t0, &t1) // x^11 - t0.Square(&t0) // x^22 - t0.Multiply(&t1, &t0) // x^31 - t1.Square(&t0) // x^62 - for i := 1; i < 5; i++ { // x^992 - t1.Square(&t1) - } - t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1 - t1.Square(&t0) // 2^11 - 2 - for i := 1; i < 10; i++ { // 2^20 - 2^10 - t1.Square(&t1) - } - t1.Multiply(&t1, &t0) // 2^20 - 1 - t2.Square(&t1) // 2^21 - 2 - for i := 1; i < 20; i++ { // 2^40 - 2^20 - t2.Square(&t2) - } - t1.Multiply(&t2, &t1) // 2^40 - 1 - t1.Square(&t1) // 2^41 - 2 - for i := 1; i < 10; i++ { // 2^50 - 2^10 - t1.Square(&t1) - } - t0.Multiply(&t1, &t0) // 2^50 - 1 - t1.Square(&t0) // 2^51 - 2 - for i := 1; i < 50; i++ { // 2^100 - 2^50 - t1.Square(&t1) - } - t1.Multiply(&t1, &t0) // 2^100 - 1 - t2.Square(&t1) // 2^101 - 2 - for i := 1; i < 100; i++ { // 2^200 - 2^100 - t2.Square(&t2) - } - t1.Multiply(&t2, &t1) // 2^200 - 1 - t1.Square(&t1) // 2^201 - 2 - for i := 1; i < 50; i++ { // 2^250 - 2^50 - t1.Square(&t1) - } - t0.Multiply(&t1, &t0) // 2^250 - 1 - t0.Square(&t0) // 2^251 - 2 - t0.Square(&t0) // 2^252 - 4 - return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3) -} - -// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion. -var sqrtM1 = &Element{1718705420411056, 234908883556509, - 2233514472574048, 2117202627021982, 765476049583133} - -// SqrtRatio sets r to the non-negative square root of the ratio of u and v. -// -// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio -// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00, -// and returns r and 0. -func (r *Element) SqrtRatio(u, v *Element) (rr *Element, wasSquare int) { - var a, b Element - - // r = (u * v3) * (u * v7)^((p-5)/8) - v2 := a.Square(v) - uv3 := b.Multiply(u, b.Multiply(v2, v)) - uv7 := a.Multiply(uv3, a.Square(v2)) - r.Multiply(uv3, r.Pow22523(uv7)) - - check := a.Multiply(v, a.Square(r)) // check = v * r^2 - - uNeg := b.Negate(u) - correctSignSqrt := check.Equal(u) - flippedSignSqrt := check.Equal(uNeg) - flippedSignSqrtI := check.Equal(uNeg.Multiply(uNeg, sqrtM1)) - - rPrime := b.Multiply(r, sqrtM1) // r_prime = SQRT_M1 * r - // r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r) - r.Select(rPrime, r, flippedSignSqrt|flippedSignSqrtI) - - r.Absolute(r) // Choose the nonnegative square root. - return r, correctSignSqrt | flippedSignSqrt -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go deleted file mode 100644 index 70c54169..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go +++ /dev/null @@ -1,15 +0,0 @@ -// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. - -//go:build amd64 && gc && !purego - -package field - -// feMul sets out = a * b. It works like feMulGeneric. -// -//go:noescape -func feMul(out *Element, a *Element, b *Element) - -// feSquare sets out = a * a. It works like feSquareGeneric. 
-// -//go:noescape -func feSquare(out *Element, a *Element) diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s deleted file mode 100644 index 60817acc..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s +++ /dev/null @@ -1,378 +0,0 @@ -// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. - -//go:build amd64 && gc && !purego - -#include "textflag.h" - -// func feMul(out *Element, a *Element, b *Element) -TEXT ·feMul(SB), NOSPLIT, $0-24 - MOVQ a+8(FP), CX - MOVQ b+16(FP), BX - - // r0 = a0×b0 - MOVQ (CX), AX - MULQ (BX) - MOVQ AX, DI - MOVQ DX, SI - - // r0 += 19×a1×b4 - MOVQ 8(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r0 += 19×a2×b3 - MOVQ 16(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r0 += 19×a3×b2 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 16(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r0 += 19×a4×b1 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 8(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r1 = a0×b1 - MOVQ (CX), AX - MULQ 8(BX) - MOVQ AX, R9 - MOVQ DX, R8 - - // r1 += a1×b0 - MOVQ 8(CX), AX - MULQ (BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r1 += 19×a2×b4 - MOVQ 16(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r1 += 19×a3×b3 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r1 += 19×a4×b2 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 16(BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r2 = a0×b2 - MOVQ (CX), AX - MULQ 16(BX) - MOVQ AX, R11 - MOVQ DX, R10 - - // r2 += a1×b1 - MOVQ 8(CX), AX - MULQ 8(BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r2 += a2×b0 - MOVQ 16(CX), AX - MULQ (BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r2 += 19×a3×b4 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r2 += 19×a4×b3 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r3 = a0×b3 - MOVQ (CX), AX - MULQ 24(BX) - MOVQ AX, R13 - MOVQ DX, R12 - - // r3 += a1×b2 - MOVQ 8(CX), AX - MULQ 16(BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r3 += a2×b1 - MOVQ 16(CX), AX - MULQ 8(BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r3 += a3×b0 - MOVQ 24(CX), AX - MULQ (BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r3 += 19×a4×b4 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r4 = a0×b4 - MOVQ (CX), AX - MULQ 32(BX) - MOVQ AX, R15 - MOVQ DX, R14 - - // r4 += a1×b3 - MOVQ 8(CX), AX - MULQ 24(BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // r4 += a2×b2 - MOVQ 16(CX), AX - MULQ 16(BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // r4 += a3×b1 - MOVQ 24(CX), AX - MULQ 8(BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // r4 += a4×b0 - MOVQ 32(CX), AX - MULQ (BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // First reduction chain - MOVQ $0x0007ffffffffffff, AX - SHLQ $0x0d, DI, SI - SHLQ $0x0d, R9, R8 - SHLQ $0x0d, R11, R10 - SHLQ $0x0d, R13, R12 - SHLQ $0x0d, R15, R14 - ANDQ AX, DI - IMUL3Q $0x13, R14, R14 - ADDQ R14, DI - ANDQ AX, R9 - ADDQ SI, R9 - ANDQ AX, R11 - ADDQ R8, R11 - ANDQ AX, R13 - ADDQ R10, R13 - ANDQ AX, R15 - ADDQ R12, R15 - - // Second reduction chain (carryPropagate) - MOVQ DI, SI - SHRQ $0x33, SI - MOVQ R9, R8 - SHRQ $0x33, R8 - MOVQ R11, R10 - SHRQ $0x33, R10 - MOVQ R13, R12 - SHRQ $0x33, R12 - MOVQ R15, R14 - SHRQ $0x33, R14 - ANDQ AX, DI - IMUL3Q $0x13, R14, R14 - ADDQ R14, DI - ANDQ AX, R9 - ADDQ SI, R9 - ANDQ AX, R11 - ADDQ 
R8, R11 - ANDQ AX, R13 - ADDQ R10, R13 - ANDQ AX, R15 - ADDQ R12, R15 - - // Store output - MOVQ out+0(FP), AX - MOVQ DI, (AX) - MOVQ R9, 8(AX) - MOVQ R11, 16(AX) - MOVQ R13, 24(AX) - MOVQ R15, 32(AX) - RET - -// func feSquare(out *Element, a *Element) -TEXT ·feSquare(SB), NOSPLIT, $0-16 - MOVQ a+8(FP), CX - - // r0 = l0×l0 - MOVQ (CX), AX - MULQ (CX) - MOVQ AX, SI - MOVQ DX, BX - - // r0 += 38×l1×l4 - MOVQ 8(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 32(CX) - ADDQ AX, SI - ADCQ DX, BX - - // r0 += 38×l2×l3 - MOVQ 16(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 24(CX) - ADDQ AX, SI - ADCQ DX, BX - - // r1 = 2×l0×l1 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 8(CX) - MOVQ AX, R8 - MOVQ DX, DI - - // r1 += 38×l2×l4 - MOVQ 16(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 32(CX) - ADDQ AX, R8 - ADCQ DX, DI - - // r1 += 19×l3×l3 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(CX) - ADDQ AX, R8 - ADCQ DX, DI - - // r2 = 2×l0×l2 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 16(CX) - MOVQ AX, R10 - MOVQ DX, R9 - - // r2 += l1×l1 - MOVQ 8(CX), AX - MULQ 8(CX) - ADDQ AX, R10 - ADCQ DX, R9 - - // r2 += 38×l3×l4 - MOVQ 24(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 32(CX) - ADDQ AX, R10 - ADCQ DX, R9 - - // r3 = 2×l0×l3 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 24(CX) - MOVQ AX, R12 - MOVQ DX, R11 - - // r3 += 2×l1×l2 - MOVQ 8(CX), AX - IMUL3Q $0x02, AX, AX - MULQ 16(CX) - ADDQ AX, R12 - ADCQ DX, R11 - - // r3 += 19×l4×l4 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(CX) - ADDQ AX, R12 - ADCQ DX, R11 - - // r4 = 2×l0×l4 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 32(CX) - MOVQ AX, R14 - MOVQ DX, R13 - - // r4 += 2×l1×l3 - MOVQ 8(CX), AX - IMUL3Q $0x02, AX, AX - MULQ 24(CX) - ADDQ AX, R14 - ADCQ DX, R13 - - // r4 += l2×l2 - MOVQ 16(CX), AX - MULQ 16(CX) - ADDQ AX, R14 - ADCQ DX, R13 - - // First reduction chain - MOVQ $0x0007ffffffffffff, AX - SHLQ $0x0d, SI, BX - SHLQ $0x0d, R8, DI - SHLQ $0x0d, R10, R9 - SHLQ $0x0d, R12, R11 - SHLQ $0x0d, R14, R13 - ANDQ AX, SI - IMUL3Q $0x13, R13, R13 - ADDQ R13, SI - ANDQ AX, R8 - ADDQ BX, R8 - ANDQ AX, R10 - ADDQ DI, R10 - ANDQ AX, R12 - ADDQ R9, R12 - ANDQ AX, R14 - ADDQ R11, R14 - - // Second reduction chain (carryPropagate) - MOVQ SI, BX - SHRQ $0x33, BX - MOVQ R8, DI - SHRQ $0x33, DI - MOVQ R10, R9 - SHRQ $0x33, R9 - MOVQ R12, R11 - SHRQ $0x33, R11 - MOVQ R14, R13 - SHRQ $0x33, R13 - ANDQ AX, SI - IMUL3Q $0x13, R13, R13 - ADDQ R13, SI - ANDQ AX, R8 - ADDQ BX, R8 - ANDQ AX, R10 - ADDQ DI, R10 - ANDQ AX, R12 - ADDQ R9, R12 - ANDQ AX, R14 - ADDQ R11, R14 - - // Store output - MOVQ out+0(FP), AX - MOVQ SI, (AX) - MOVQ R8, 8(AX) - MOVQ R10, 16(AX) - MOVQ R12, 24(AX) - MOVQ R14, 32(AX) - RET diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go deleted file mode 100644 index 9da280d1..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (c) 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !amd64 || !gc || purego - -package field - -func feMul(v, x, y *Element) { feMulGeneric(v, x, y) } - -func feSquare(v, x *Element) { feSquareGeneric(v, x) } diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go deleted file mode 100644 index 075fe9b9..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build arm64 && gc && !purego - -package field - -//go:noescape -func carryPropagate(v *Element) - -func (v *Element) carryPropagate() *Element { - carryPropagate(v) - return v -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s deleted file mode 100644 index 3126a434..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (c) 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build arm64 && gc && !purego - -#include "textflag.h" - -// carryPropagate works exactly like carryPropagateGeneric and uses the -// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but -// avoids loading R0-R4 twice and uses LDP and STP. -// -// See https://golang.org/issues/43145 for the main compiler issue. -// -// func carryPropagate(v *Element) -TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8 - MOVD v+0(FP), R20 - - LDP 0(R20), (R0, R1) - LDP 16(R20), (R2, R3) - MOVD 32(R20), R4 - - AND $0x7ffffffffffff, R0, R10 - AND $0x7ffffffffffff, R1, R11 - AND $0x7ffffffffffff, R2, R12 - AND $0x7ffffffffffff, R3, R13 - AND $0x7ffffffffffff, R4, R14 - - ADD R0>>51, R11, R11 - ADD R1>>51, R12, R12 - ADD R2>>51, R13, R13 - ADD R3>>51, R14, R14 - // R4>>51 * 19 + R10 -> R10 - LSR $51, R4, R21 - MOVD $19, R22 - MADD R22, R10, R21, R10 - - STP (R10, R11), 0(R20) - STP (R12, R13), 16(R20) - MOVD R14, 32(R20) - - RET diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go deleted file mode 100644 index fc029ac1..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (c) 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !arm64 || !gc || purego - -package field - -func (v *Element) carryPropagate() *Element { - return v.carryPropagateGeneric() -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go deleted file mode 100644 index 2671217d..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright (c) 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package field - -import "math/bits" - -// uint128 holds a 128-bit number as two 64-bit limbs, for use with the -// bits.Mul64 and bits.Add64 intrinsics. -type uint128 struct { - lo, hi uint64 -} - -// mul64 returns a * b. 
-func mul64(a, b uint64) uint128 { - hi, lo := bits.Mul64(a, b) - return uint128{lo, hi} -} - -// addMul64 returns v + a * b. -func addMul64(v uint128, a, b uint64) uint128 { - hi, lo := bits.Mul64(a, b) - lo, c := bits.Add64(lo, v.lo, 0) - hi, _ = bits.Add64(hi, v.hi, c) - return uint128{lo, hi} -} - -// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits. -func shiftRightBy51(a uint128) uint64 { - return (a.hi << (64 - 51)) | (a.lo >> 51) -} - -func feMulGeneric(v, a, b *Element) { - a0 := a.l0 - a1 := a.l1 - a2 := a.l2 - a3 := a.l3 - a4 := a.l4 - - b0 := b.l0 - b1 := b.l1 - b2 := b.l2 - b3 := b.l3 - b4 := b.l4 - - // Limb multiplication works like pen-and-paper columnar multiplication, but - // with 51-bit limbs instead of digits. - // - // a4 a3 a2 a1 a0 x - // b4 b3 b2 b1 b0 = - // ------------------------ - // a4b0 a3b0 a2b0 a1b0 a0b0 + - // a4b1 a3b1 a2b1 a1b1 a0b1 + - // a4b2 a3b2 a2b2 a1b2 a0b2 + - // a4b3 a3b3 a2b3 a1b3 a0b3 + - // a4b4 a3b4 a2b4 a1b4 a0b4 = - // ---------------------------------------------- - // r8 r7 r6 r5 r4 r3 r2 r1 r0 - // - // We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to - // reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5, - // r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc. - // - // Reduction can be carried out simultaneously to multiplication. For - // example, we do not compute r5: whenever the result of a multiplication - // belongs to r5, like a1b4, we multiply it by 19 and add the result to r0. - // - // a4b0 a3b0 a2b0 a1b0 a0b0 + - // a3b1 a2b1 a1b1 a0b1 19×a4b1 + - // a2b2 a1b2 a0b2 19×a4b2 19×a3b2 + - // a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 + - // a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 = - // -------------------------------------- - // r4 r3 r2 r1 r0 - // - // Finally we add up the columns into wide, overlapping limbs. - - a1_19 := a1 * 19 - a2_19 := a2 * 19 - a3_19 := a3 * 19 - a4_19 := a4 * 19 - - // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1) - r0 := mul64(a0, b0) - r0 = addMul64(r0, a1_19, b4) - r0 = addMul64(r0, a2_19, b3) - r0 = addMul64(r0, a3_19, b2) - r0 = addMul64(r0, a4_19, b1) - - // r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2) - r1 := mul64(a0, b1) - r1 = addMul64(r1, a1, b0) - r1 = addMul64(r1, a2_19, b4) - r1 = addMul64(r1, a3_19, b3) - r1 = addMul64(r1, a4_19, b2) - - // r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3) - r2 := mul64(a0, b2) - r2 = addMul64(r2, a1, b1) - r2 = addMul64(r2, a2, b0) - r2 = addMul64(r2, a3_19, b4) - r2 = addMul64(r2, a4_19, b3) - - // r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4 - r3 := mul64(a0, b3) - r3 = addMul64(r3, a1, b2) - r3 = addMul64(r3, a2, b1) - r3 = addMul64(r3, a3, b0) - r3 = addMul64(r3, a4_19, b4) - - // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 - r4 := mul64(a0, b4) - r4 = addMul64(r4, a1, b3) - r4 = addMul64(r4, a2, b2) - r4 = addMul64(r4, a3, b1) - r4 = addMul64(r4, a4, b0) - - // After the multiplication, we need to reduce (carry) the five coefficients - // to obtain a result with limbs that are at most slightly larger than 2⁵¹, - // to respect the Element invariant. - // - // Overall, the reduction works the same as carryPropagate, except with - // wider inputs: we take the carry for each coefficient by shifting it right - // by 51, and add it to the limb above it. The top carry is multiplied by 19 - // according to the reduction identity and added to the lowest limb. 
- // - // The largest coefficient (r0) will be at most 111 bits, which guarantees - // that all carries are at most 111 - 51 = 60 bits, which fits in a uint64. - // - // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1) - // r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²) - // r0 < (1 + 19 × 4) × 2⁵² × 2⁵² - // r0 < 2⁷ × 2⁵² × 2⁵² - // r0 < 2¹¹¹ - // - // Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most - // 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and - // allows us to easily apply the reduction identity. - // - // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 - // r4 < 5 × 2⁵² × 2⁵² - // r4 < 2¹⁰⁷ - // - - c0 := shiftRightBy51(r0) - c1 := shiftRightBy51(r1) - c2 := shiftRightBy51(r2) - c3 := shiftRightBy51(r3) - c4 := shiftRightBy51(r4) - - rr0 := r0.lo&maskLow51Bits + c4*19 - rr1 := r1.lo&maskLow51Bits + c0 - rr2 := r2.lo&maskLow51Bits + c1 - rr3 := r3.lo&maskLow51Bits + c2 - rr4 := r4.lo&maskLow51Bits + c3 - - // Now all coefficients fit into 64-bit registers but are still too large to - // be passed around as a Element. We therefore do one last carry chain, - // where the carries will be small enough to fit in the wiggle room above 2⁵¹. - *v = Element{rr0, rr1, rr2, rr3, rr4} - v.carryPropagate() -} - -func feSquareGeneric(v, a *Element) { - l0 := a.l0 - l1 := a.l1 - l2 := a.l2 - l3 := a.l3 - l4 := a.l4 - - // Squaring works precisely like multiplication above, but thanks to its - // symmetry we get to group a few terms together. - // - // l4 l3 l2 l1 l0 x - // l4 l3 l2 l1 l0 = - // ------------------------ - // l4l0 l3l0 l2l0 l1l0 l0l0 + - // l4l1 l3l1 l2l1 l1l1 l0l1 + - // l4l2 l3l2 l2l2 l1l2 l0l2 + - // l4l3 l3l3 l2l3 l1l3 l0l3 + - // l4l4 l3l4 l2l4 l1l4 l0l4 = - // ---------------------------------------------- - // r8 r7 r6 r5 r4 r3 r2 r1 r0 - // - // l4l0 l3l0 l2l0 l1l0 l0l0 + - // l3l1 l2l1 l1l1 l0l1 19×l4l1 + - // l2l2 l1l2 l0l2 19×l4l2 19×l3l2 + - // l1l3 l0l3 19×l4l3 19×l3l3 19×l2l3 + - // l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4 = - // -------------------------------------- - // r4 r3 r2 r1 r0 - // - // With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with - // only three Mul64 and four Add64, instead of five and eight. 
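Both feMulGeneric above and feSquareGeneric here lean on the reduction identity (a * 2^255 + b = a * 19 + b mod 2^255 - 19). A minimal standalone check with math/big (an illustrative sketch, not part of the vendored file):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	a, b := big.NewInt(987654321), big.NewInt(42)

	// a*2^255 + b mod p
	lhs := new(big.Int).Lsh(a, 255)
	lhs.Add(lhs, b).Mod(lhs, p)

	// 19*a + b mod p, the identity used to fold high limbs back down.
	rhs := new(big.Int).Mul(a, big.NewInt(19))
	rhs.Add(rhs, b).Mod(rhs, p)

	fmt.Println(lhs.Cmp(rhs) == 0) // true
}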
- - l0_2 := l0 * 2 - l1_2 := l1 * 2 - - l1_38 := l1 * 38 - l2_38 := l2 * 38 - l3_38 := l3 * 38 - - l3_19 := l3 * 19 - l4_19 := l4 * 19 - - // r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3) - r0 := mul64(l0, l0) - r0 = addMul64(r0, l1_38, l4) - r0 = addMul64(r0, l2_38, l3) - - // r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3 - r1 := mul64(l0_2, l1) - r1 = addMul64(r1, l2_38, l4) - r1 = addMul64(r1, l3_19, l3) - - // r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4 - r2 := mul64(l0_2, l2) - r2 = addMul64(r2, l1, l1) - r2 = addMul64(r2, l3_38, l4) - - // r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4 - r3 := mul64(l0_2, l3) - r3 = addMul64(r3, l1_2, l2) - r3 = addMul64(r3, l4_19, l4) - - // r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2 - r4 := mul64(l0_2, l4) - r4 = addMul64(r4, l1_2, l3) - r4 = addMul64(r4, l2, l2) - - c0 := shiftRightBy51(r0) - c1 := shiftRightBy51(r1) - c2 := shiftRightBy51(r2) - c3 := shiftRightBy51(r3) - c4 := shiftRightBy51(r4) - - rr0 := r0.lo&maskLow51Bits + c4*19 - rr1 := r1.lo&maskLow51Bits + c0 - rr2 := r2.lo&maskLow51Bits + c1 - rr3 := r3.lo&maskLow51Bits + c2 - rr4 := r4.lo&maskLow51Bits + c3 - - *v = Element{rr0, rr1, rr2, rr3, rr4} - v.carryPropagate() -} - -// carryPropagateGeneric brings the limbs below 52 bits by applying the reduction -// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry. TODO inline -func (v *Element) carryPropagateGeneric() *Element { - c0 := v.l0 >> 51 - c1 := v.l1 >> 51 - c2 := v.l2 >> 51 - c3 := v.l3 >> 51 - c4 := v.l4 >> 51 - - v.l0 = v.l0&maskLow51Bits + c4*19 - v.l1 = v.l1&maskLow51Bits + c0 - v.l2 = v.l2&maskLow51Bits + c1 - v.l3 = v.l3&maskLow51Bits + c2 - v.l4 = v.l4&maskLow51Bits + c3 - - return v -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint b/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint deleted file mode 100644 index e3685f95..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint +++ /dev/null @@ -1 +0,0 @@ -b0c49ae9f59d233526f8934262c5bbbe14d4358d diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh b/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh deleted file mode 100644 index 1ba22a8b..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh +++ /dev/null @@ -1,19 +0,0 @@ -#! /bin/bash -set -euo pipefail - -cd "$(git rev-parse --show-toplevel)" - -STD_PATH=src/crypto/ed25519/internal/edwards25519/field -LOCAL_PATH=curve25519/internal/field -LAST_SYNC_REF=$(cat $LOCAL_PATH/sync.checkpoint) - -git fetch https://go.googlesource.com/go master - -if git diff --quiet $LAST_SYNC_REF:$STD_PATH FETCH_HEAD:$STD_PATH; then - echo "No changes." -else - NEW_REF=$(git rev-parse FETCH_HEAD | tee $LOCAL_PATH/sync.checkpoint) - echo "Applying changes from $LAST_SYNC_REF to $NEW_REF..." - git diff $LAST_SYNC_REF:$STD_PATH FETCH_HEAD:$STD_PATH | \ - git apply -3 --directory=$LOCAL_PATH -fi diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go index a7828345..59b3a95a 100644 --- a/vendor/golang.org/x/crypto/ed25519/ed25519.go +++ b/vendor/golang.org/x/crypto/ed25519/ed25519.go @@ -11,9 +11,7 @@ // operations with the same key more efficient. This package refers to the RFC // 8032 private key as the “seed”. 
// -// Beginning with Go 1.13, the functionality of this package was moved to the -// standard library as crypto/ed25519. This package only acts as a compatibility -// wrapper. +// This package is a wrapper around the standard library crypto/ed25519 package. package ed25519 import ( diff --git a/vendor/golang.org/x/crypto/hkdf/hkdf.go b/vendor/golang.org/x/crypto/hkdf/hkdf.go index f4ded5fe..3bee6629 100644 --- a/vendor/golang.org/x/crypto/hkdf/hkdf.go +++ b/vendor/golang.org/x/crypto/hkdf/hkdf.go @@ -8,7 +8,7 @@ // HKDF is a cryptographic key derivation function (KDF) with the goal of // expanding limited input keying material into one or more cryptographically // strong secret keys. -package hkdf // import "golang.org/x/crypto/hkdf" +package hkdf import ( "crypto/hmac" diff --git a/vendor/golang.org/x/crypto/internal/alias/BUILD.bazel b/vendor/golang.org/x/crypto/internal/alias/BUILD.bazel index 62d65d6d..4dbfe32a 100644 --- a/vendor/golang.org/x/crypto/internal/alias/BUILD.bazel +++ b/vendor/golang.org/x/crypto/internal/alias/BUILD.bazel @@ -2,7 +2,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "alias", - srcs = ["alias.go"], + srcs = [ + "alias.go", + "alias_purego.go", + ], importmap = "peridot.resf.org/vendor/golang.org/x/crypto/internal/alias", importpath = "golang.org/x/crypto/internal/alias", visibility = ["//vendor/golang.org/x/crypto:__subpackages__"], diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s index d2ca5dee..b3c1699b 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s @@ -19,15 +19,14 @@ #define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ MULLD r0, h0, t0; \ - MULLD r0, h1, t4; \ MULHDU r0, h0, t1; \ + MULLD r0, h1, t4; \ MULHDU r0, h1, t5; \ ADDC t4, t1, t1; \ MULLD r0, h2, t2; \ - ADDZE t5; \ MULHDU r1, h0, t4; \ MULLD r1, h0, h0; \ - ADD t5, t2, t2; \ + ADDE t5, t2, t2; \ ADDC h0, t1, t1; \ MULLD h2, r1, t3; \ ADDZE t4, h0; \ @@ -37,13 +36,11 @@ ADDE t5, t3, t3; \ ADDC h0, t2, t2; \ MOVD $-4, t4; \ - MOVD t0, h0; \ - MOVD t1, h1; \ ADDZE t3; \ - ANDCC $3, t2, h2; \ - AND t2, t4, t0; \ + RLDICL $0, t2, $62, h2; \ + AND t2, t4, h0; \ ADDC t0, h0, h0; \ - ADDE t3, h1, h1; \ + ADDE t3, t1, h1; \ SLD $62, t3, t4; \ SRD $2, t2; \ ADDZE h2; \ @@ -75,6 +72,7 @@ TEXT ·update(SB), $0-32 loop: POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) + PCALIGN $16 multiply: POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) ADD $-16, R5 diff --git a/vendor/golang.org/x/crypto/openpgp/armor/armor.go b/vendor/golang.org/x/crypto/openpgp/armor/armor.go index 8907183e..e664d127 100644 --- a/vendor/golang.org/x/crypto/openpgp/armor/armor.go +++ b/vendor/golang.org/x/crypto/openpgp/armor/armor.go @@ -10,14 +10,15 @@ // for their specific task. If you are required to interoperate with OpenPGP // systems and need a maintained package, consider a community fork. // See https://golang.org/issue/44226. -package armor // import "golang.org/x/crypto/openpgp/armor" +package armor import ( "bufio" "bytes" "encoding/base64" - "golang.org/x/crypto/openpgp/errors" "io" + + "golang.org/x/crypto/openpgp/errors" ) // A Block represents an OpenPGP armored structure. 
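For context on the armor package touched above, a minimal encode/decode round-trip looks roughly like this (an illustrative sketch; the package's deprecation notice still applies):

package main

import (
	"bytes"
	"fmt"
	"io"

	"golang.org/x/crypto/openpgp/armor"
)

func main() {
	var buf bytes.Buffer

	// Wrap raw bytes in an ASCII-armored block of the given type.
	w, err := armor.Encode(&buf, "PGP MESSAGE", nil)
	if err != nil {
		panic(err)
	}
	w.Write([]byte("hello"))
	w.Close()

	// Decode recovers the block type, headers, and body stream.
	block, err := armor.Decode(&buf)
	if err != nil {
		panic(err)
	}
	body, _ := io.ReadAll(block.Body)
	fmt.Println(block.Type, string(body)) // PGP MESSAGE hello
}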
diff --git a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go index 743b35a1..f922bdbc 100644 --- a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go +++ b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go @@ -16,7 +16,7 @@ // https://golang.org/issue/44226), and ElGamal in the OpenPGP ecosystem has // compatibility and security issues (see https://eprint.iacr.org/2021/923). // Moreover, this package doesn't protect against side-channel attacks. -package elgamal // import "golang.org/x/crypto/openpgp/elgamal" +package elgamal import ( "crypto/rand" diff --git a/vendor/golang.org/x/crypto/openpgp/errors/errors.go b/vendor/golang.org/x/crypto/openpgp/errors/errors.go index 1d7a0ea0..a3287494 100644 --- a/vendor/golang.org/x/crypto/openpgp/errors/errors.go +++ b/vendor/golang.org/x/crypto/openpgp/errors/errors.go @@ -9,7 +9,7 @@ // for their specific task. If you are required to interoperate with OpenPGP // systems and need a maintained package, consider a community fork. // See https://golang.org/issue/44226. -package errors // import "golang.org/x/crypto/openpgp/errors" +package errors import ( "strconv" diff --git a/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/vendor/golang.org/x/crypto/openpgp/packet/packet.go index 0a19794a..a84a1a21 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/packet.go +++ b/vendor/golang.org/x/crypto/openpgp/packet/packet.go @@ -10,7 +10,7 @@ // for their specific task. If you are required to interoperate with OpenPGP // systems and need a maintained package, consider a community fork. // See https://golang.org/issue/44226. -package packet // import "golang.org/x/crypto/openpgp/packet" +package packet import ( "bufio" diff --git a/vendor/golang.org/x/crypto/openpgp/read.go b/vendor/golang.org/x/crypto/openpgp/read.go index 48a89314..cff3db91 100644 --- a/vendor/golang.org/x/crypto/openpgp/read.go +++ b/vendor/golang.org/x/crypto/openpgp/read.go @@ -9,7 +9,7 @@ // for their specific task. If you are required to interoperate with OpenPGP // systems and need a maintained package, consider a community fork. // See https://golang.org/issue/44226. -package openpgp // import "golang.org/x/crypto/openpgp" +package openpgp import ( "crypto" diff --git a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go index f53244a1..fa1a9190 100644 --- a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go +++ b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go @@ -10,7 +10,7 @@ // for their specific task. If you are required to interoperate with OpenPGP // systems and need a maintained package, consider a community fork. // See https://golang.org/issue/44226. -package s2k // import "golang.org/x/crypto/openpgp/s2k" +package s2k import ( "crypto" diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go index 904b57e0..28cd99c7 100644 --- a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -16,7 +16,7 @@ Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To choose, you can pass the `New` functions from the different SHA packages to pbkdf2.Key. 
*/ -package pbkdf2 // import "golang.org/x/crypto/pbkdf2" +package pbkdf2 import ( "crypto/hmac" diff --git a/vendor/golang.org/x/crypto/sha3/BUILD.bazel b/vendor/golang.org/x/crypto/sha3/BUILD.bazel new file mode 100644 index 00000000..b3937920 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/BUILD.bazel @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "sha3", + srcs = [ + "doc.go", + "hashes.go", + "hashes_noasm.go", + "keccakf.go", + "keccakf_amd64.go", + "keccakf_amd64.s", + "sha3.go", + "sha3_s390x.go", + "sha3_s390x.s", + "shake.go", + "shake_noasm.go", + "xor.go", + ], + importmap = "peridot.resf.org/vendor/golang.org/x/crypto/sha3", + importpath = "golang.org/x/crypto/sha3", + visibility = ["//visibility:public"], + deps = ["//vendor/golang.org/x/sys/cpu"], +) diff --git a/vendor/golang.org/x/crypto/sha3/doc.go b/vendor/golang.org/x/crypto/sha3/doc.go new file mode 100644 index 00000000..7e023090 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/doc.go @@ -0,0 +1,62 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sha3 implements the SHA-3 fixed-output-length hash functions and +// the SHAKE variable-output-length hash functions defined by FIPS-202. +// +// Both types of hash function use the "sponge" construction and the Keccak +// permutation. For a detailed specification see http://keccak.noekeon.org/ +// +// # Guidance +// +// If you aren't sure what function you need, use SHAKE256 with at least 64 +// bytes of output. The SHAKE instances are faster than the SHA3 instances; +// the latter have to allocate memory to conform to the hash.Hash interface. +// +// If you need a secret-key MAC (message authentication code), prepend the +// secret key to the input, hash with SHAKE256 and read at least 32 bytes of +// output. +// +// # Security strengths +// +// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security +// strength against preimage attacks of x bits. Since they only produce "x" +// bits of output, their collision-resistance is only "x/2" bits. +// +// The SHAKE-256 and -128 functions have a generic security strength of 256 and +// 128 bits against all attacks, provided that at least 2x bits of their output +// is used. Requesting more than 64 or 32 bytes of output, respectively, does +// not increase the collision-resistance of the SHAKE functions. +// +// # The sponge construction +// +// A sponge builds a pseudo-random function from a public pseudo-random +// permutation, by applying the permutation to a state of "rate + capacity" +// bytes, but hiding "capacity" of the bytes. +// +// A sponge starts out with a zero state. To hash an input using a sponge, up +// to "rate" bytes of the input are XORed into the sponge's state. The sponge +// is then "full" and the permutation is applied to "empty" it. This process is +// repeated until all the input has been "absorbed". The input is then padded. +// The digest is "squeezed" from the sponge in the same way, except that output +// is copied out instead of input being XORed in. +// +// A sponge is parameterized by its generic security strength, which is equal +// to half its capacity; capacity + rate is equal to the permutation's width. +// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means +// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2. 
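The sponge paragraph above pins down the arithmetic: capacity = 1600 − 8·rate bits, and generic security strength = capacity / 2. A quick self-check of those numbers against the byte rates used by the constructors later in this diff (hashes.go and shake.go); this sketch is not part of the vendored code:

package main

import "fmt"

func main() {
	// Byte rates used by the constructors added in this diff.
	rates := []struct {
		name string
		rate int // bytes absorbed per permutation
	}{
		{"SHA3-512", 72},
		{"SHA3-256", 136},
		{"SHAKE128", 168},
	}
	const width = 1600 // Keccak-f[1600] state size in bits
	for _, r := range rates {
		capacity := width - 8*r.rate // bits never exposed in the output
		fmt.Printf("%s: capacity=%d bits, generic strength=%d bits\n",
			r.name, capacity, capacity/2)
	}
	// SHA3-512: 1024/512, SHA3-256: 512/256, SHAKE128: 256/128 —
	// matching the strengths claimed in the package documentation above.
}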
+// +// # Recommendations +// +// The SHAKE functions are recommended for most new uses. They can produce +// output of arbitrary length. SHAKE256, with an output length of at least +// 64 bytes, provides 256-bit security against all attacks. The Keccak team +// recommends it for most applications upgrading from SHA2-512. (NIST chose a +// much stronger, but much slower, sponge instance for SHA3-512.) +// +// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions. +// They produce output of the same length, with the same security strengths +// against all attacks. This means, in particular, that SHA3-256 only has +// 128-bit collision resistance, because its output length is 32 bytes. +package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go new file mode 100644 index 00000000..c544b29e --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/hashes.go @@ -0,0 +1,109 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// This file provides functions for creating instances of the SHA-3 +// and SHAKE hash functions, as well as utility functions for hashing +// bytes. + +import ( + "crypto" + "hash" +) + +// New224 creates a new SHA3-224 hash. +// Its generic security strength is 224 bits against preimage attacks, +// and 112 bits against collision attacks. +func New224() hash.Hash { + return new224() +} + +// New256 creates a new SHA3-256 hash. +// Its generic security strength is 256 bits against preimage attacks, +// and 128 bits against collision attacks. +func New256() hash.Hash { + return new256() +} + +// New384 creates a new SHA3-384 hash. +// Its generic security strength is 384 bits against preimage attacks, +// and 192 bits against collision attacks. +func New384() hash.Hash { + return new384() +} + +// New512 creates a new SHA3-512 hash. +// Its generic security strength is 512 bits against preimage attacks, +// and 256 bits against collision attacks. +func New512() hash.Hash { + return new512() +} + +func init() { + crypto.RegisterHash(crypto.SHA3_224, New224) + crypto.RegisterHash(crypto.SHA3_256, New256) + crypto.RegisterHash(crypto.SHA3_384, New384) + crypto.RegisterHash(crypto.SHA3_512, New512) +} + +func new224Generic() *state { + return &state{rate: 144, outputLen: 28, dsbyte: 0x06} +} + +func new256Generic() *state { + return &state{rate: 136, outputLen: 32, dsbyte: 0x06} +} + +func new384Generic() *state { + return &state{rate: 104, outputLen: 48, dsbyte: 0x06} +} + +func new512Generic() *state { + return &state{rate: 72, outputLen: 64, dsbyte: 0x06} +} + +// NewLegacyKeccak256 creates a new Keccak-256 hash. +// +// Only use this function if you require compatibility with an existing cryptosystem +// that uses non-standard padding. All other users should use New256 instead. +func NewLegacyKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} } + +// NewLegacyKeccak512 creates a new Keccak-512 hash. +// +// Only use this function if you require compatibility with an existing cryptosystem +// that uses non-standard padding. All other users should use New512 instead. +func NewLegacyKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} } + +// Sum224 returns the SHA3-224 digest of the data. 
+func Sum224(data []byte) (digest [28]byte) { + h := New224() + h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum256 returns the SHA3-256 digest of the data. +func Sum256(data []byte) (digest [32]byte) { + h := New256() + h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum384 returns the SHA3-384 digest of the data. +func Sum384(data []byte) (digest [48]byte) { + h := New384() + h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum512 returns the SHA3-512 digest of the data. +func Sum512(data []byte) (digest [64]byte) { + h := New512() + h.Write(data) + h.Sum(digest[:0]) + return +} diff --git a/vendor/golang.org/x/crypto/sha3/hashes_noasm.go b/vendor/golang.org/x/crypto/sha3/hashes_noasm.go new file mode 100644 index 00000000..9d85fb62 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/hashes_noasm.go @@ -0,0 +1,23 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !gc || purego || !s390x + +package sha3 + +func new224() *state { + return new224Generic() +} + +func new256() *state { + return new256Generic() +} + +func new384() *state { + return new384Generic() +} + +func new512() *state { + return new512Generic() +} diff --git a/vendor/golang.org/x/crypto/sha3/keccakf.go b/vendor/golang.org/x/crypto/sha3/keccakf.go new file mode 100644 index 00000000..ce48b1dd --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/keccakf.go @@ -0,0 +1,414 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc + +package sha3 + +import "math/bits" + +// rc stores the round constants for use in the ι step. +var rc = [24]uint64{ + 0x0000000000000001, + 0x0000000000008082, + 0x800000000000808A, + 0x8000000080008000, + 0x000000000000808B, + 0x0000000080000001, + 0x8000000080008081, + 0x8000000000008009, + 0x000000000000008A, + 0x0000000000000088, + 0x0000000080008009, + 0x000000008000000A, + 0x000000008000808B, + 0x800000000000008B, + 0x8000000000008089, + 0x8000000000008003, + 0x8000000000008002, + 0x8000000000000080, + 0x000000000000800A, + 0x800000008000000A, + 0x8000000080008081, + 0x8000000000008080, + 0x0000000080000001, + 0x8000000080008008, +} + +// keccakF1600 applies the Keccak permutation to a 1600b-wide +// state represented as a slice of 25 uint64s. +func keccakF1600(a *[25]uint64) { + // Implementation translated from Keccak-inplace.c + // in the keccak reference code. + var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64 + + for i := 0; i < 24; i += 4 { + // Combines the 5 steps in each round into 2 steps. + // Unrolls 4 rounds per loop and spreads some steps across rounds. 
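+ // The fused "2 steps" combine the five Keccak round functions: theta + // (bc0..bc4 hold the five column parities, d0..d4 the adjustments xored + // into every lane), rho and pi (each RotateLeft64 rotates a lane by its + // fixed offset and stores it at its permuted index), chi (the + // bcX ^ (bcZ &^ bcY) expressions mix each row nonlinearly), and iota + // (the round constant rc[i+n] xored into lane a[0]).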
+ + // Round 1 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[6] ^ d1 + bc1 = bits.RotateLeft64(t, 44) + t = a[12] ^ d2 + bc2 = bits.RotateLeft64(t, 43) + t = a[18] ^ d3 + bc3 = bits.RotateLeft64(t, 21) + t = a[24] ^ d4 + bc4 = bits.RotateLeft64(t, 14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i] + a[6] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc2 = bits.RotateLeft64(t, 3) + t = a[16] ^ d1 + bc3 = bits.RotateLeft64(t, 45) + t = a[22] ^ d2 + bc4 = bits.RotateLeft64(t, 61) + t = a[3] ^ d3 + bc0 = bits.RotateLeft64(t, 28) + t = a[9] ^ d4 + bc1 = bits.RotateLeft64(t, 20) + a[10] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc4 = bits.RotateLeft64(t, 18) + t = a[1] ^ d1 + bc0 = bits.RotateLeft64(t, 1) + t = a[7] ^ d2 + bc1 = bits.RotateLeft64(t, 6) + t = a[13] ^ d3 + bc2 = bits.RotateLeft64(t, 25) + t = a[19] ^ d4 + bc3 = bits.RotateLeft64(t, 8) + a[20] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc1 = bits.RotateLeft64(t, 36) + t = a[11] ^ d1 + bc2 = bits.RotateLeft64(t, 10) + t = a[17] ^ d2 + bc3 = bits.RotateLeft64(t, 15) + t = a[23] ^ d3 + bc4 = bits.RotateLeft64(t, 56) + t = a[4] ^ d4 + bc0 = bits.RotateLeft64(t, 27) + a[5] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc3 = bits.RotateLeft64(t, 41) + t = a[21] ^ d1 + bc4 = bits.RotateLeft64(t, 2) + t = a[2] ^ d2 + bc0 = bits.RotateLeft64(t, 62) + t = a[8] ^ d3 + bc1 = bits.RotateLeft64(t, 55) + t = a[14] ^ d4 + bc2 = bits.RotateLeft64(t, 39) + a[15] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + // Round 2 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[16] ^ d1 + bc1 = bits.RotateLeft64(t, 44) + t = a[7] ^ d2 + bc2 = bits.RotateLeft64(t, 43) + t = a[23] ^ d3 + bc3 = bits.RotateLeft64(t, 21) + t = a[14] ^ d4 + bc4 = bits.RotateLeft64(t, 14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1] + a[16] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc2 = bits.RotateLeft64(t, 3) + t = a[11] ^ d1 + bc3 = bits.RotateLeft64(t, 45) + t = a[2] ^ d2 + bc4 = bits.RotateLeft64(t, 61) + t = a[18] ^ d3 + bc0 = bits.RotateLeft64(t, 28) + t = a[9] ^ d4 + bc1 = bits.RotateLeft64(t, 20) + a[20] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc4 = bits.RotateLeft64(t, 18) + t = a[6] 
^ d1 + bc0 = bits.RotateLeft64(t, 1) + t = a[22] ^ d2 + bc1 = bits.RotateLeft64(t, 6) + t = a[13] ^ d3 + bc2 = bits.RotateLeft64(t, 25) + t = a[4] ^ d4 + bc3 = bits.RotateLeft64(t, 8) + a[15] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc1 = bits.RotateLeft64(t, 36) + t = a[1] ^ d1 + bc2 = bits.RotateLeft64(t, 10) + t = a[17] ^ d2 + bc3 = bits.RotateLeft64(t, 15) + t = a[8] ^ d3 + bc4 = bits.RotateLeft64(t, 56) + t = a[24] ^ d4 + bc0 = bits.RotateLeft64(t, 27) + a[10] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc3 = bits.RotateLeft64(t, 41) + t = a[21] ^ d1 + bc4 = bits.RotateLeft64(t, 2) + t = a[12] ^ d2 + bc0 = bits.RotateLeft64(t, 62) + t = a[3] ^ d3 + bc1 = bits.RotateLeft64(t, 55) + t = a[19] ^ d4 + bc2 = bits.RotateLeft64(t, 39) + a[5] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + // Round 3 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[11] ^ d1 + bc1 = bits.RotateLeft64(t, 44) + t = a[22] ^ d2 + bc2 = bits.RotateLeft64(t, 43) + t = a[8] ^ d3 + bc3 = bits.RotateLeft64(t, 21) + t = a[19] ^ d4 + bc4 = bits.RotateLeft64(t, 14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2] + a[11] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc2 = bits.RotateLeft64(t, 3) + t = a[1] ^ d1 + bc3 = bits.RotateLeft64(t, 45) + t = a[12] ^ d2 + bc4 = bits.RotateLeft64(t, 61) + t = a[23] ^ d3 + bc0 = bits.RotateLeft64(t, 28) + t = a[9] ^ d4 + bc1 = bits.RotateLeft64(t, 20) + a[15] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc4 = bits.RotateLeft64(t, 18) + t = a[16] ^ d1 + bc0 = bits.RotateLeft64(t, 1) + t = a[2] ^ d2 + bc1 = bits.RotateLeft64(t, 6) + t = a[13] ^ d3 + bc2 = bits.RotateLeft64(t, 25) + t = a[24] ^ d4 + bc3 = bits.RotateLeft64(t, 8) + a[5] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc1 = bits.RotateLeft64(t, 36) + t = a[6] ^ d1 + bc2 = bits.RotateLeft64(t, 10) + t = a[17] ^ d2 + bc3 = bits.RotateLeft64(t, 15) + t = a[3] ^ d3 + bc4 = bits.RotateLeft64(t, 56) + t = a[14] ^ d4 + bc0 = bits.RotateLeft64(t, 27) + a[20] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc3 = bits.RotateLeft64(t, 41) + t = a[21] ^ d1 + bc4 = bits.RotateLeft64(t, 2) + t = a[7] ^ d2 + bc0 = bits.RotateLeft64(t, 62) + t = a[18] ^ d3 + bc1 = bits.RotateLeft64(t, 55) + t = a[4] ^ d4 + bc2 = bits.RotateLeft64(t, 39) + a[10] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + // Round 4 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 
= a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[1] ^ d1 + bc1 = bits.RotateLeft64(t, 44) + t = a[2] ^ d2 + bc2 = bits.RotateLeft64(t, 43) + t = a[3] ^ d3 + bc3 = bits.RotateLeft64(t, 21) + t = a[4] ^ d4 + bc4 = bits.RotateLeft64(t, 14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3] + a[1] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc2 = bits.RotateLeft64(t, 3) + t = a[6] ^ d1 + bc3 = bits.RotateLeft64(t, 45) + t = a[7] ^ d2 + bc4 = bits.RotateLeft64(t, 61) + t = a[8] ^ d3 + bc0 = bits.RotateLeft64(t, 28) + t = a[9] ^ d4 + bc1 = bits.RotateLeft64(t, 20) + a[5] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc4 = bits.RotateLeft64(t, 18) + t = a[11] ^ d1 + bc0 = bits.RotateLeft64(t, 1) + t = a[12] ^ d2 + bc1 = bits.RotateLeft64(t, 6) + t = a[13] ^ d3 + bc2 = bits.RotateLeft64(t, 25) + t = a[14] ^ d4 + bc3 = bits.RotateLeft64(t, 8) + a[10] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc1 = bits.RotateLeft64(t, 36) + t = a[16] ^ d1 + bc2 = bits.RotateLeft64(t, 10) + t = a[17] ^ d2 + bc3 = bits.RotateLeft64(t, 15) + t = a[18] ^ d3 + bc4 = bits.RotateLeft64(t, 56) + t = a[19] ^ d4 + bc0 = bits.RotateLeft64(t, 27) + a[15] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc3 = bits.RotateLeft64(t, 41) + t = a[21] ^ d1 + bc4 = bits.RotateLeft64(t, 2) + t = a[22] ^ d2 + bc0 = bits.RotateLeft64(t, 62) + t = a[23] ^ d3 + bc1 = bits.RotateLeft64(t, 55) + t = a[24] ^ d4 + bc2 = bits.RotateLeft64(t, 39) + a[20] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + } +} diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go new file mode 100644 index 00000000..b908696b --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && !purego && gc + +package sha3 + +// This function is implemented in keccakf_amd64.s. + +//go:noescape + +func keccakF1600(a *[25]uint64) diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s new file mode 100644 index 00000000..1f539388 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s @@ -0,0 +1,390 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build amd64 && !purego && gc + +// This code was translated into a form compatible with 6a from the public +// domain sources at https://github.com/gvanas/KeccakCodePackage + +// Offsets in state +#define _ba (0*8) +#define _be (1*8) +#define _bi (2*8) +#define _bo (3*8) +#define _bu (4*8) +#define _ga (5*8) +#define _ge (6*8) +#define _gi (7*8) +#define _go (8*8) +#define _gu (9*8) +#define _ka (10*8) +#define _ke (11*8) +#define _ki (12*8) +#define _ko (13*8) +#define _ku (14*8) +#define _ma (15*8) +#define _me (16*8) +#define _mi (17*8) +#define _mo (18*8) +#define _mu (19*8) +#define _sa (20*8) +#define _se (21*8) +#define _si (22*8) +#define _so (23*8) +#define _su (24*8) + +// Temporary registers +#define rT1 AX + +// Round vars +#define rpState DI +#define rpStack SP + +#define rDa BX +#define rDe CX +#define rDi DX +#define rDo R8 +#define rDu R9 + +#define rBa R10 +#define rBe R11 +#define rBi R12 +#define rBo R13 +#define rBu R14 + +#define rCa SI +#define rCe BP +#define rCi rBi +#define rCo rBo +#define rCu R15 + +#define MOVQ_RBI_RCE MOVQ rBi, rCe +#define XORQ_RT1_RCA XORQ rT1, rCa +#define XORQ_RT1_RCE XORQ rT1, rCe +#define XORQ_RBA_RCU XORQ rBa, rCu +#define XORQ_RBE_RCU XORQ rBe, rCu +#define XORQ_RDU_RCU XORQ rDu, rCu +#define XORQ_RDA_RCA XORQ rDa, rCa +#define XORQ_RDE_RCE XORQ rDe, rCe + +#define mKeccakRound(iState, oState, rc, B_RBI_RCE, G_RT1_RCA, G_RT1_RCE, G_RBA_RCU, K_RT1_RCA, K_RT1_RCE, K_RBA_RCU, M_RT1_RCA, M_RT1_RCE, M_RBE_RCU, S_RDU_RCU, S_RDA_RCA, S_RDE_RCE) \ + /* Prepare round */ \ + MOVQ rCe, rDa; \ + ROLQ $1, rDa; \ + \ + MOVQ _bi(iState), rCi; \ + XORQ _gi(iState), rDi; \ + XORQ rCu, rDa; \ + XORQ _ki(iState), rCi; \ + XORQ _mi(iState), rDi; \ + XORQ rDi, rCi; \ + \ + MOVQ rCi, rDe; \ + ROLQ $1, rDe; \ + \ + MOVQ _bo(iState), rCo; \ + XORQ _go(iState), rDo; \ + XORQ rCa, rDe; \ + XORQ _ko(iState), rCo; \ + XORQ _mo(iState), rDo; \ + XORQ rDo, rCo; \ + \ + MOVQ rCo, rDi; \ + ROLQ $1, rDi; \ + \ + MOVQ rCu, rDo; \ + XORQ rCe, rDi; \ + ROLQ $1, rDo; \ + \ + MOVQ rCa, rDu; \ + XORQ rCi, rDo; \ + ROLQ $1, rDu; \ + \ + /* Result b */ \ + MOVQ _ba(iState), rBa; \ + MOVQ _ge(iState), rBe; \ + XORQ rCo, rDu; \ + MOVQ _ki(iState), rBi; \ + MOVQ _mo(iState), rBo; \ + MOVQ _su(iState), rBu; \ + XORQ rDe, rBe; \ + ROLQ $44, rBe; \ + XORQ rDi, rBi; \ + XORQ rDa, rBa; \ + ROLQ $43, rBi; \ + \ + MOVQ rBe, rCa; \ + MOVQ rc, rT1; \ + ORQ rBi, rCa; \ + XORQ rBa, rT1; \ + XORQ rT1, rCa; \ + MOVQ rCa, _ba(oState); \ + \ + XORQ rDu, rBu; \ + ROLQ $14, rBu; \ + MOVQ rBa, rCu; \ + ANDQ rBe, rCu; \ + XORQ rBu, rCu; \ + MOVQ rCu, _bu(oState); \ + \ + XORQ rDo, rBo; \ + ROLQ $21, rBo; \ + MOVQ rBo, rT1; \ + ANDQ rBu, rT1; \ + XORQ rBi, rT1; \ + MOVQ rT1, _bi(oState); \ + \ + NOTQ rBi; \ + ORQ rBa, rBu; \ + ORQ rBo, rBi; \ + XORQ rBo, rBu; \ + XORQ rBe, rBi; \ + MOVQ rBu, _bo(oState); \ + MOVQ rBi, _be(oState); \ + B_RBI_RCE; \ + \ + /* Result g */ \ + MOVQ _gu(iState), rBe; \ + XORQ rDu, rBe; \ + MOVQ _ka(iState), rBi; \ + ROLQ $20, rBe; \ + XORQ rDa, rBi; \ + ROLQ $3, rBi; \ + MOVQ _bo(iState), rBa; \ + MOVQ rBe, rT1; \ + ORQ rBi, rT1; \ + XORQ rDo, rBa; \ + MOVQ _me(iState), rBo; \ + MOVQ _si(iState), rBu; \ + ROLQ $28, rBa; \ + XORQ rBa, rT1; \ + MOVQ rT1, _ga(oState); \ + G_RT1_RCA; \ + \ + XORQ rDe, rBo; \ + ROLQ $45, rBo; \ + MOVQ rBi, rT1; \ + ANDQ rBo, rT1; \ + XORQ rBe, rT1; \ + MOVQ rT1, _ge(oState); \ + G_RT1_RCE; \ + \ + XORQ rDi, rBu; \ + ROLQ $61, rBu; \ + MOVQ rBu, rT1; \ + ORQ rBa, rT1; \ + XORQ rBo, rT1; \ + MOVQ rT1, _go(oState); \ + \ + ANDQ rBe, rBa; 
\ + XORQ rBu, rBa; \ + MOVQ rBa, _gu(oState); \ + NOTQ rBu; \ + G_RBA_RCU; \ + \ + ORQ rBu, rBo; \ + XORQ rBi, rBo; \ + MOVQ rBo, _gi(oState); \ + \ + /* Result k */ \ + MOVQ _be(iState), rBa; \ + MOVQ _gi(iState), rBe; \ + MOVQ _ko(iState), rBi; \ + MOVQ _mu(iState), rBo; \ + MOVQ _sa(iState), rBu; \ + XORQ rDi, rBe; \ + ROLQ $6, rBe; \ + XORQ rDo, rBi; \ + ROLQ $25, rBi; \ + MOVQ rBe, rT1; \ + ORQ rBi, rT1; \ + XORQ rDe, rBa; \ + ROLQ $1, rBa; \ + XORQ rBa, rT1; \ + MOVQ rT1, _ka(oState); \ + K_RT1_RCA; \ + \ + XORQ rDu, rBo; \ + ROLQ $8, rBo; \ + MOVQ rBi, rT1; \ + ANDQ rBo, rT1; \ + XORQ rBe, rT1; \ + MOVQ rT1, _ke(oState); \ + K_RT1_RCE; \ + \ + XORQ rDa, rBu; \ + ROLQ $18, rBu; \ + NOTQ rBo; \ + MOVQ rBo, rT1; \ + ANDQ rBu, rT1; \ + XORQ rBi, rT1; \ + MOVQ rT1, _ki(oState); \ + \ + MOVQ rBu, rT1; \ + ORQ rBa, rT1; \ + XORQ rBo, rT1; \ + MOVQ rT1, _ko(oState); \ + \ + ANDQ rBe, rBa; \ + XORQ rBu, rBa; \ + MOVQ rBa, _ku(oState); \ + K_RBA_RCU; \ + \ + /* Result m */ \ + MOVQ _ga(iState), rBe; \ + XORQ rDa, rBe; \ + MOVQ _ke(iState), rBi; \ + ROLQ $36, rBe; \ + XORQ rDe, rBi; \ + MOVQ _bu(iState), rBa; \ + ROLQ $10, rBi; \ + MOVQ rBe, rT1; \ + MOVQ _mi(iState), rBo; \ + ANDQ rBi, rT1; \ + XORQ rDu, rBa; \ + MOVQ _so(iState), rBu; \ + ROLQ $27, rBa; \ + XORQ rBa, rT1; \ + MOVQ rT1, _ma(oState); \ + M_RT1_RCA; \ + \ + XORQ rDi, rBo; \ + ROLQ $15, rBo; \ + MOVQ rBi, rT1; \ + ORQ rBo, rT1; \ + XORQ rBe, rT1; \ + MOVQ rT1, _me(oState); \ + M_RT1_RCE; \ + \ + XORQ rDo, rBu; \ + ROLQ $56, rBu; \ + NOTQ rBo; \ + MOVQ rBo, rT1; \ + ORQ rBu, rT1; \ + XORQ rBi, rT1; \ + MOVQ rT1, _mi(oState); \ + \ + ORQ rBa, rBe; \ + XORQ rBu, rBe; \ + MOVQ rBe, _mu(oState); \ + \ + ANDQ rBa, rBu; \ + XORQ rBo, rBu; \ + MOVQ rBu, _mo(oState); \ + M_RBE_RCU; \ + \ + /* Result s */ \ + MOVQ _bi(iState), rBa; \ + MOVQ _go(iState), rBe; \ + MOVQ _ku(iState), rBi; \ + XORQ rDi, rBa; \ + MOVQ _ma(iState), rBo; \ + ROLQ $62, rBa; \ + XORQ rDo, rBe; \ + MOVQ _se(iState), rBu; \ + ROLQ $55, rBe; \ + \ + XORQ rDu, rBi; \ + MOVQ rBa, rDu; \ + XORQ rDe, rBu; \ + ROLQ $2, rBu; \ + ANDQ rBe, rDu; \ + XORQ rBu, rDu; \ + MOVQ rDu, _su(oState); \ + \ + ROLQ $39, rBi; \ + S_RDU_RCU; \ + NOTQ rBe; \ + XORQ rDa, rBo; \ + MOVQ rBe, rDa; \ + ANDQ rBi, rDa; \ + XORQ rBa, rDa; \ + MOVQ rDa, _sa(oState); \ + S_RDA_RCA; \ + \ + ROLQ $41, rBo; \ + MOVQ rBi, rDe; \ + ORQ rBo, rDe; \ + XORQ rBe, rDe; \ + MOVQ rDe, _se(oState); \ + S_RDE_RCE; \ + \ + MOVQ rBo, rDi; \ + MOVQ rBu, rDo; \ + ANDQ rBu, rDi; \ + ORQ rBa, rDo; \ + XORQ rBi, rDi; \ + XORQ rBo, rDo; \ + MOVQ rDi, _si(oState); \ + MOVQ rDo, _so(oState) \ + +// func keccakF1600(a *[25]uint64) +TEXT ·keccakF1600(SB), 0, $200-8 + MOVQ a+0(FP), rpState + + // Convert the user state into an internal state + NOTQ _be(rpState) + NOTQ _bi(rpState) + NOTQ _go(rpState) + NOTQ _ki(rpState) + NOTQ _mi(rpState) + NOTQ _sa(rpState) + + // Execute the KeccakF permutation + MOVQ _ba(rpState), rCa + MOVQ _be(rpState), rCe + MOVQ _bu(rpState), rCu + + XORQ _ga(rpState), rCa + XORQ _ge(rpState), rCe + XORQ _gu(rpState), rCu + + XORQ _ka(rpState), rCa + XORQ _ke(rpState), rCe + XORQ _ku(rpState), rCu + + XORQ _ma(rpState), rCa + XORQ _me(rpState), rCe + XORQ _mu(rpState), rCu + + XORQ _sa(rpState), rCa + XORQ _se(rpState), rCe + MOVQ _si(rpState), rDi + MOVQ _so(rpState), rDo + XORQ _su(rpState), rCu + + mKeccakRound(rpState, rpStack, $0x0000000000000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, 
XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x0000000000008082, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x800000000000808a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000080008000, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000000000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000000000008a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x0000000000000088, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x0000000080008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x000000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000008000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x800000000000008b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000000008089, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000008003, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, 
XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000000008002, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000000080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000000000800a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x800000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000008080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000080008008, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP) + + // Revert the internal state to the user state + NOTQ _be(rpState) + NOTQ _bi(rpState) + NOTQ _go(rpState) + NOTQ _ki(rpState) + NOTQ _mi(rpState) + NOTQ _sa(rpState) + + RET diff --git a/vendor/golang.org/x/crypto/sha3/sha3.go b/vendor/golang.org/x/crypto/sha3/sha3.go new file mode 100644 index 00000000..afedde5a --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/sha3.go @@ -0,0 +1,185 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// spongeDirection indicates the direction bytes are flowing through the sponge. +type spongeDirection int + +const ( + // spongeAbsorbing indicates that the sponge is absorbing input. + spongeAbsorbing spongeDirection = iota + // spongeSqueezing indicates that the sponge is being squeezed. + spongeSqueezing +) + +const ( + // maxRate is the maximum size of the internal buffer. SHAKE-256 + // currently needs the largest buffer. + maxRate = 168 +) + +type state struct { + // Generic sponge components. + a [25]uint64 // main state of the hash + rate int // the number of bytes of state to use + + // dsbyte contains the "domain separation" bits and the first bit of + // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the + // SHA-3 and SHAKE functions by appending bitstrings to the message. + // Using a little-endian bit-ordering convention, these are "01" for SHA-3 + // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the + // padding rule from section 5.1 is applied to pad the message to a multiple + // of the rate, which involves adding a "1" bit, zero or more "0" bits, and + // a final "1" bit. 
We merge the first "1" bit from the padding into dsbyte, + // giving 00000110b (0x06) and 00011111b (0x1f). + // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf + // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and + // Extendable-Output Functions (May 2014)" + dsbyte byte + + i, n int // storage[i:n] is the buffer, i is only used while squeezing + storage [maxRate]byte + + // Specific to SHA-3 and SHAKE. + outputLen int // the default output size in bytes + state spongeDirection // whether the sponge is absorbing or squeezing +} + +// BlockSize returns the rate of sponge underlying this hash function. +func (d *state) BlockSize() int { return d.rate } + +// Size returns the output size of the hash function in bytes. +func (d *state) Size() int { return d.outputLen } + +// Reset clears the internal state by zeroing the sponge state and +// the buffer indexes, and setting Sponge.state to absorbing. +func (d *state) Reset() { + // Zero the permutation's state. + for i := range d.a { + d.a[i] = 0 + } + d.state = spongeAbsorbing + d.i, d.n = 0, 0 +} + +func (d *state) clone() *state { + ret := *d + return &ret +} + +// permute applies the KeccakF-1600 permutation. It handles +// any input-output buffering. +func (d *state) permute() { + switch d.state { + case spongeAbsorbing: + // If we're absorbing, we need to xor the input into the state + // before applying the permutation. + xorIn(d, d.storage[:d.rate]) + d.n = 0 + keccakF1600(&d.a) + case spongeSqueezing: + // If we're squeezing, we need to apply the permutation before + // copying more output. + keccakF1600(&d.a) + d.i = 0 + copyOut(d, d.storage[:d.rate]) + } +} + +// pads appends the domain separation bits in dsbyte, applies +// the multi-bitrate 10..1 padding rule, and permutes the state. +func (d *state) padAndPermute() { + // Pad with this instance's domain-separator bits. We know that there's + // at least one byte of space in d.buf because, if it were full, + // permute would have been called to empty it. dsbyte also contains the + // first one bit for the padding. See the comment in the state struct. + d.storage[d.n] = d.dsbyte + d.n++ + for d.n < d.rate { + d.storage[d.n] = 0 + d.n++ + } + // This adds the final one bit for the padding. Because of the way that + // bits are numbered from the LSB upwards, the final bit is the MSB of + // the last byte. + d.storage[d.rate-1] ^= 0x80 + // Apply the permutation + d.permute() + d.state = spongeSqueezing + d.n = d.rate + copyOut(d, d.storage[:d.rate]) +} + +// Write absorbs more data into the hash's state. It panics if any +// output has already been read. +func (d *state) Write(p []byte) (written int, err error) { + if d.state != spongeAbsorbing { + panic("sha3: Write after Read") + } + written = len(p) + + for len(p) > 0 { + if d.n == 0 && len(p) >= d.rate { + // The fast path; absorb a full "rate" bytes of input and apply the permutation. + xorIn(d, p[:d.rate]) + p = p[d.rate:] + keccakF1600(&d.a) + } else { + // The slow path; buffer the input until we can fill the sponge, and then xor it in. + todo := d.rate - d.n + if todo > len(p) { + todo = len(p) + } + d.n += copy(d.storage[d.n:], p[:todo]) + p = p[todo:] + + // If the sponge is full, apply the permutation. + if d.n == d.rate { + d.permute() + } + } + } + + return +} + +// Read squeezes an arbitrary number of bytes from the sponge. +func (d *state) Read(out []byte) (n int, err error) { + // If we're still absorbing, pad and apply the permutation. 
+ if d.state == spongeAbsorbing { + d.padAndPermute() + } + + n = len(out) + + // Now, do the squeezing. + for len(out) > 0 { + n := copy(out, d.storage[d.i:d.n]) + d.i += n + out = out[n:] + + // Apply the permutation if we've squeezed the sponge dry. + if d.i == d.rate { + d.permute() + } + } + + return +} + +// Sum applies padding to the hash state and then squeezes out the desired +// number of output bytes. It panics if any output has already been read. +func (d *state) Sum(in []byte) []byte { + if d.state != spongeAbsorbing { + panic("sha3: Sum after Read") + } + + // Make a copy of the original hash so that caller can keep writing + // and summing. + dup := d.clone() + hash := make([]byte, dup.outputLen, 64) // explicit cap to allow stack allocation + dup.Read(hash) + return append(in, hash...) +} diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go new file mode 100644 index 00000000..00d8034a --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go @@ -0,0 +1,303 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +package sha3 + +// This file contains code for using the 'compute intermediate +// message digest' (KIMD) and 'compute last message digest' (KLMD) +// instructions to compute SHA-3 and SHAKE hashes on IBM Z. + +import ( + "hash" + + "golang.org/x/sys/cpu" +) + +// codes represent 7-bit KIMD/KLMD function codes as defined in +// the Principles of Operation. +type code uint64 + +const ( + // function codes for KIMD/KLMD + sha3_224 code = 32 + sha3_256 = 33 + sha3_384 = 34 + sha3_512 = 35 + shake_128 = 36 + shake_256 = 37 + nopad = 0x100 +) + +// kimd is a wrapper for the 'compute intermediate message digest' instruction. +// src must be a multiple of the rate for the given function code. +// +//go:noescape +func kimd(function code, chain *[200]byte, src []byte) + +// klmd is a wrapper for the 'compute last message digest' instruction. +// src padding is handled by the instruction. +// +//go:noescape +func klmd(function code, chain *[200]byte, dst, src []byte) + +type asmState struct { + a [200]byte // 1600 bit state + buf []byte // care must be taken to ensure cap(buf) is a multiple of rate + rate int // equivalent to block size + storage [3072]byte // underlying storage for buf + outputLen int // output length for full security + function code // KIMD/KLMD function code + state spongeDirection // whether the sponge is absorbing or squeezing +} + +func newAsmState(function code) *asmState { + var s asmState + s.function = function + switch function { + case sha3_224: + s.rate = 144 + s.outputLen = 28 + case sha3_256: + s.rate = 136 + s.outputLen = 32 + case sha3_384: + s.rate = 104 + s.outputLen = 48 + case sha3_512: + s.rate = 72 + s.outputLen = 64 + case shake_128: + s.rate = 168 + s.outputLen = 32 + case shake_256: + s.rate = 136 + s.outputLen = 64 + default: + panic("sha3: unrecognized function code") + } + + // limit s.buf size to a multiple of s.rate + s.resetBuf() + return &s +} + +func (s *asmState) clone() *asmState { + c := *s + c.buf = c.storage[:len(s.buf):cap(s.buf)] + return &c +} + +// copyIntoBuf copies b into buf. It will panic if there is not enough space to +// store all of b. 
+func (s *asmState) copyIntoBuf(b []byte) { + bufLen := len(s.buf) + s.buf = s.buf[:len(s.buf)+len(b)] + copy(s.buf[bufLen:], b) +} + +// resetBuf points buf at storage, sets the length to 0 and sets cap to be a +// multiple of the rate. +func (s *asmState) resetBuf() { + max := (cap(s.storage) / s.rate) * s.rate + s.buf = s.storage[:0:max] +} + +// Write (via the embedded io.Writer interface) adds more data to the running hash. +// It never returns an error. +func (s *asmState) Write(b []byte) (int, error) { + if s.state != spongeAbsorbing { + panic("sha3: Write after Read") + } + length := len(b) + for len(b) > 0 { + if len(s.buf) == 0 && len(b) >= cap(s.buf) { + // Hash the data directly and push any remaining bytes + // into the buffer. + remainder := len(b) % s.rate + kimd(s.function, &s.a, b[:len(b)-remainder]) + if remainder != 0 { + s.copyIntoBuf(b[len(b)-remainder:]) + } + return length, nil + } + + if len(s.buf) == cap(s.buf) { + // flush the buffer + kimd(s.function, &s.a, s.buf) + s.buf = s.buf[:0] + } + + // copy as much as we can into the buffer + n := len(b) + if len(b) > cap(s.buf)-len(s.buf) { + n = cap(s.buf) - len(s.buf) + } + s.copyIntoBuf(b[:n]) + b = b[n:] + } + return length, nil +} + +// Read squeezes an arbitrary number of bytes from the sponge. +func (s *asmState) Read(out []byte) (n int, err error) { + // The 'compute last message digest' instruction only stores the digest + // at the first operand (dst) for SHAKE functions. + if s.function != shake_128 && s.function != shake_256 { + panic("sha3: can only call Read for SHAKE functions") + } + + n = len(out) + + // need to pad if we were absorbing + if s.state == spongeAbsorbing { + s.state = spongeSqueezing + + // write hash directly into out if possible + if len(out)%s.rate == 0 { + klmd(s.function, &s.a, out, s.buf) // len(out) may be 0 + s.buf = s.buf[:0] + return + } + + // write hash into buffer + max := cap(s.buf) + if max > len(out) { + max = (len(out)/s.rate)*s.rate + s.rate + } + klmd(s.function, &s.a, s.buf[:max], s.buf) + s.buf = s.buf[:max] + } + + for len(out) > 0 { + // flush the buffer + if len(s.buf) != 0 { + c := copy(out, s.buf) + out = out[c:] + s.buf = s.buf[c:] + continue + } + + // write hash directly into out if possible + if len(out)%s.rate == 0 { + klmd(s.function|nopad, &s.a, out, nil) + return + } + + // write hash into buffer + s.resetBuf() + if cap(s.buf) > len(out) { + s.buf = s.buf[:(len(out)/s.rate)*s.rate+s.rate] + } + klmd(s.function|nopad, &s.a, s.buf, nil) + } + return +} + +// Sum appends the current hash to b and returns the resulting slice. +// It does not change the underlying hash state. +func (s *asmState) Sum(b []byte) []byte { + if s.state != spongeAbsorbing { + panic("sha3: Sum after Read") + } + + // Copy the state to preserve the original. + a := s.a + + // Hash the buffer. Note that we don't clear it because we + // aren't updating the state. + switch s.function { + case sha3_224, sha3_256, sha3_384, sha3_512: + klmd(s.function, &a, nil, s.buf) + return append(b, a[:s.outputLen]...) + case shake_128, shake_256: + d := make([]byte, s.outputLen, 64) + klmd(s.function, &a, d, s.buf) + return append(b, d[:s.outputLen]...) + default: + panic("sha3: unknown function") + } +} + +// Reset resets the Hash to its initial state. +func (s *asmState) Reset() { + for i := range s.a { + s.a[i] = 0 + } + s.resetBuf() + s.state = spongeAbsorbing +} + +// Size returns the number of bytes Sum will return. 
+func (s *asmState) Size() int { + return s.outputLen +} + +// BlockSize returns the hash's underlying block size. +// The Write method must be able to accept any amount +// of data, but it may operate more efficiently if all writes +// are a multiple of the block size. +func (s *asmState) BlockSize() int { + return s.rate +} + +// Clone returns a copy of the ShakeHash in its current state. +func (s *asmState) Clone() ShakeHash { + return s.clone() +} + +// new224 returns an assembly implementation of SHA3-224 if available, +// otherwise it returns a generic implementation. +func new224() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_224) + } + return new224Generic() +} + +// new256 returns an assembly implementation of SHA3-256 if available, +// otherwise it returns a generic implementation. +func new256() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_256) + } + return new256Generic() +} + +// new384 returns an assembly implementation of SHA3-384 if available, +// otherwise it returns a generic implementation. +func new384() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_384) + } + return new384Generic() +} + +// new512 returns an assembly implementation of SHA3-512 if available, +// otherwise it returns a generic implementation. +func new512() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_512) + } + return new512Generic() +} + +// newShake128 returns an assembly implementation of SHAKE-128 if available, +// otherwise it returns a generic implementation. +func newShake128() ShakeHash { + if cpu.S390X.HasSHA3 { + return newAsmState(shake_128) + } + return newShake128Generic() +} + +// newShake256 returns an assembly implementation of SHAKE-256 if available, +// otherwise it returns a generic implementation. +func newShake256() ShakeHash { + if cpu.S390X.HasSHA3 { + return newAsmState(shake_256) + } + return newShake256Generic() +} diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.s b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s new file mode 100644 index 00000000..826b862c --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +#include "textflag.h" + +// func kimd(function code, chain *[200]byte, src []byte) +TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40 + MOVD function+0(FP), R0 + MOVD chain+8(FP), R1 + LMG src+16(FP), R2, R3 // R2=base, R3=len + +continue: + WORD $0xB93E0002 // KIMD --, R2 + BVS continue // continue if interrupted + MOVD $0, R0 // reset R0 for pre-go1.8 compilers + RET + +// func klmd(function code, chain *[200]byte, dst, src []byte) +TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64 + // TODO: SHAKE support + MOVD function+0(FP), R0 + MOVD chain+8(FP), R1 + LMG dst+16(FP), R2, R3 // R2=base, R3=len + LMG src+40(FP), R4, R5 // R4=base, R5=len + +continue: + WORD $0xB93F0024 // KLMD R2, R4 + BVS continue // continue if interrupted + MOVD $0, R0 // reset R0 for pre-go1.8 compilers + RET diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go new file mode 100644 index 00000000..1ea9275b --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/shake.go @@ -0,0 +1,174 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
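Between the IBM Z backend above and shake.go below, a brief usage sketch of the surface this new package exports — Sum256 and New256 from hashes.go, ShakeSum256 from shake.go; the input bytes are illustrative:

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	msg := []byte("peridot") // illustrative input

	// Fixed-output SHA-3: one-shot helper or streaming hash.Hash.
	sum := sha3.Sum256(msg)
	h := sha3.New256()
	h.Write(msg)
	fmt.Printf("%x\n", sum)
	fmt.Printf("%x\n", h.Sum(nil)) // same digest as the one-shot form

	// Variable-output SHAKE: squeeze as many bytes as needed.
	out := make([]byte, 64)
	sha3.ShakeSum256(out, msg)
	fmt.Printf("%x\n", out)
}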
+ +package sha3 + +// This file defines the ShakeHash interface, and provides +// functions for creating SHAKE and cSHAKE instances, as well as utility +// functions for hashing bytes to arbitrary-length output. +// +// +// SHAKE implementation is based on FIPS PUB 202 [1] +// cSHAKE implementation is based on NIST SP 800-185 [2] +// +// [1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf +// [2] https://doi.org/10.6028/NIST.SP.800-185 + +import ( + "encoding/binary" + "hash" + "io" +) + +// ShakeHash defines the interface to hash functions that support +// arbitrary-length output. When used as a plain [hash.Hash], it +// produces minimum-length outputs that provide full-strength generic +// security. +type ShakeHash interface { + hash.Hash + + // Read reads more output from the hash; reading affects the hash's + // state. (ShakeHash.Read is thus very different from Hash.Sum) + // It never returns an error, but subsequent calls to Write or Sum + // will panic. + io.Reader + + // Clone returns a copy of the ShakeHash in its current state. + Clone() ShakeHash +} + +// cSHAKE specific context +type cshakeState struct { + *state // SHA-3 state context and Read/Write operations + + // initBlock is the cSHAKE specific initialization set of bytes. It is initialized + // by the newCShake function and stores the concatenation of N followed by S, encoded + // by the method specified in 3.3 of [1]. + // It is stored here in order for Reset() to be able to put the context into + // its initial state. + initBlock []byte +} + +// Consts for configuring initial SHA-3 state +const ( + dsbyteShake = 0x1f + dsbyteCShake = 0x04 + rate128 = 168 + rate256 = 136 +) + +func bytepad(input []byte, w int) []byte { + // leftEncode always returns max 9 bytes + buf := make([]byte, 0, 9+len(input)+w) + buf = append(buf, leftEncode(uint64(w))...) + buf = append(buf, input...) + padlen := w - (len(buf) % w) + return append(buf, make([]byte, padlen)...) +} + +func leftEncode(value uint64) []byte { + var b [9]byte + binary.BigEndian.PutUint64(b[1:], value) + // Trim all but last leading zero bytes + i := byte(1) + for i < 8 && b[i] == 0 { + i++ + } + // Prepend number of encoded bytes + b[i-1] = 9 - i + return b[i-1:] +} + +func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) ShakeHash { + c := cshakeState{state: &state{rate: rate, outputLen: outputLen, dsbyte: dsbyte}} + + // leftEncode returns max 9 bytes + c.initBlock = make([]byte, 0, 9*2+len(N)+len(S)) + c.initBlock = append(c.initBlock, leftEncode(uint64(len(N)*8))...) + c.initBlock = append(c.initBlock, N...) + c.initBlock = append(c.initBlock, leftEncode(uint64(len(S)*8))...) + c.initBlock = append(c.initBlock, S...) + c.Write(bytepad(c.initBlock, c.rate)) + return &c +} + +// Reset resets the hash to initial state. +func (c *cshakeState) Reset() { + c.state.Reset() + c.Write(bytepad(c.initBlock, c.rate)) +} + +// Clone returns a copy of a cSHAKE context within its current state. +func (c *cshakeState) Clone() ShakeHash { + b := make([]byte, len(c.initBlock)) + copy(b, c.initBlock) + return &cshakeState{state: c.clone(), initBlock: b} +} + +// Clone returns a copy of a SHAKE context within its current state. +func (c *state) Clone() ShakeHash { + return c.clone() +} + +// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash. +// Its generic security strength is 128 bits against all attacks if at +// least 32 bytes of its output are used.
+func NewShake128() ShakeHash { + return newShake128() +} + +// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash. +// Its generic security strength is 256 bits against all attacks if +// at least 64 bytes of its output are used. +func NewShake256() ShakeHash { + return newShake256() +} + +func newShake128Generic() *state { + return &state{rate: rate128, outputLen: 32, dsbyte: dsbyteShake} +} + +func newShake256Generic() *state { + return &state{rate: rate256, outputLen: 64, dsbyte: dsbyteShake} +} + +// NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash, +// a customizable variant of SHAKE128. +// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is +// desired. S is a customization byte string used for domain separation - two cSHAKE +// computations on same input with different S yield unrelated outputs. +// When N and S are both empty, this is equivalent to NewShake128. +func NewCShake128(N, S []byte) ShakeHash { + if len(N) == 0 && len(S) == 0 { + return NewShake128() + } + return newCShake(N, S, rate128, 32, dsbyteCShake) +} + +// NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash, +// a customizable variant of SHAKE256. +// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is +// desired. S is a customization byte string used for domain separation - two cSHAKE +// computations on same input with different S yield unrelated outputs. +// When N and S are both empty, this is equivalent to NewShake256. +func NewCShake256(N, S []byte) ShakeHash { + if len(N) == 0 && len(S) == 0 { + return NewShake256() + } + return newCShake(N, S, rate256, 64, dsbyteCShake) +} + +// ShakeSum128 writes an arbitrary-length digest of data into hash. +func ShakeSum128(hash, data []byte) { + h := NewShake128() + h.Write(data) + h.Read(hash) +} + +// ShakeSum256 writes an arbitrary-length digest of data into hash. +func ShakeSum256(hash, data []byte) { + h := NewShake256() + h.Write(data) + h.Read(hash) +} diff --git a/vendor/golang.org/x/crypto/sha3/shake_noasm.go b/vendor/golang.org/x/crypto/sha3/shake_noasm.go new file mode 100644 index 00000000..4276ba4a --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/shake_noasm.go @@ -0,0 +1,15 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !gc || purego || !s390x + +package sha3 + +func newShake128() *state { + return newShake128Generic() +} + +func newShake256() *state { + return newShake256Generic() +} diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go new file mode 100644 index 00000000..6ada5c95 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/xor.go @@ -0,0 +1,40 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +import ( + "crypto/subtle" + "encoding/binary" + "unsafe" + + "golang.org/x/sys/cpu" +) + +// xorIn xors the bytes in buf into the state. +func xorIn(d *state, buf []byte) { + if cpu.IsBigEndian { + for i := 0; len(buf) >= 8; i++ { + a := binary.LittleEndian.Uint64(buf) + d.a[i] ^= a + buf = buf[8:] + } + } else { + ab := (*[25 * 64 / 8]byte)(unsafe.Pointer(&d.a)) + subtle.XORBytes(ab[:], ab[:], buf) + } +} + +// copyOut copies uint64s to a byte buffer. 
+func copyOut(d *state, b []byte) { + if cpu.IsBigEndian { + for i := 0; len(b) >= 8; i++ { + binary.LittleEndian.PutUint64(b, d.a[i]) + b = b[8:] + } + } else { + ab := (*[25 * 64 / 8]byte)(unsafe.Pointer(&d.a)) + copy(b, ab[:]) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go index fecba8eb..106708d2 100644 --- a/vendor/golang.org/x/crypto/ssh/agent/client.go +++ b/vendor/golang.org/x/crypto/ssh/agent/client.go @@ -10,7 +10,7 @@ // References: // // [PROTOCOL.agent]: https://tools.ietf.org/html/draft-miller-ssh-agent-00 -package agent // import "golang.org/x/crypto/ssh/agent" +package agent import ( "bytes" diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go index 34bf089d..b9396101 100644 --- a/vendor/golang.org/x/crypto/ssh/client_auth.go +++ b/vendor/golang.org/x/crypto/ssh/client_auth.go @@ -71,6 +71,10 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { for auth := AuthMethod(new(noneAuth)); auth != nil; { ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand, extensions) if err != nil { + // On disconnect, return error immediately + if _, ok := err.(*disconnectMsg); ok { + return err + } // We return the error later if there is no other method left to // try. ok = authFailure @@ -404,10 +408,10 @@ func validateKey(key PublicKey, algo string, user string, c packetConn) (bool, e return false, err } - return confirmKeyAck(key, algo, c) + return confirmKeyAck(key, c) } -func confirmKeyAck(key PublicKey, algo string, c packetConn) (bool, error) { +func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { pubKey := key.Marshal() for { @@ -425,7 +429,15 @@ func confirmKeyAck(key PublicKey, algo string, c packetConn) (bool, error) { if err := Unmarshal(packet, &msg); err != nil { return false, err } - if msg.Algo != algo || !bytes.Equal(msg.PubKey, pubKey) { + // According to RFC 4252 Section 7 the algorithm in + // SSH_MSG_USERAUTH_PK_OK should match that of the request but some + // servers send the key type instead. OpenSSH allows any algorithm + // that matches the public key, so we do the same. + // https://github.com/openssh/openssh-portable/blob/86bdd385/sshconnect2.c#L709 + if !contains(algorithmsForKeyFormat(key.Type()), msg.Algo) { + return false, nil + } + if !bytes.Equal(msg.PubKey, pubKey) { return false, nil } return true, nil diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go index edbe6334..f5d352fe 100644 --- a/vendor/golang.org/x/crypto/ssh/doc.go +++ b/vendor/golang.org/x/crypto/ssh/doc.go @@ -20,4 +20,4 @@ References: This package does not fall under the stability promise of the Go language itself, so its API may be changed when pressing needs arise. */ -package ssh // import "golang.org/x/crypto/ssh" +package ssh diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go index df4ebdad..7967665f 100644 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -904,6 +904,10 @@ func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error { return errors.New("ssh: signature did not verify") } +func (k *skECDSAPublicKey) CryptoPublicKey() crypto.PublicKey { + return &k.PublicKey +} + type skEd25519PublicKey struct { // application is a URL-like string, typically "ssh:" for SSH. // see openssh/PROTOCOL.u2f for details. 
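// NOTE: illustrative sketch, not part of the vendored change. A client-side
// public-key login that exercises the confirmKeyAck path relaxed above
// (servers may answer SSH_MSG_USERAUTH_PK_OK with the key type rather than
// the requested algorithm). The address and key path are hypothetical.
package main

import (
	"log"
	"os"

	"golang.org/x/crypto/ssh"
)

func main() {
	keyBytes, err := os.ReadFile("/home/user/.ssh/id_ed25519") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.ParsePrivateKey(keyBytes)
	if err != nil {
		log.Fatal(err)
	}
	client, err := ssh.Dial("tcp", "example.com:22", &ssh.ClientConfig{
		User:            "builder",
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // demo only; verify host keys in production
	})
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}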
@@ -1000,6 +1004,10 @@ func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error { return nil } +func (k *skEd25519PublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + // NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, // *ecdsa.PrivateKey or any other crypto.Signer and returns a // corresponding Signer instance. ECDSA keys must use P-256, P-384 or diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index c2dfe326..3ca9e89e 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -426,6 +426,35 @@ func (l ServerAuthError) Error() string { return "[" + strings.Join(errs, ", ") + "]" } +// ServerAuthCallbacks defines server-side authentication callbacks. +type ServerAuthCallbacks struct { + // PasswordCallback behaves like [ServerConfig.PasswordCallback]. + PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) + + // PublicKeyCallback behaves like [ServerConfig.PublicKeyCallback]. + PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) + + // KeyboardInteractiveCallback behaves like [ServerConfig.KeyboardInteractiveCallback]. + KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) + + // GSSAPIWithMICConfig behaves like [ServerConfig.GSSAPIWithMICConfig]. + GSSAPIWithMICConfig *GSSAPIWithMICConfig +} + +// PartialSuccessError can be returned by any of the [ServerConfig] +// authentication callbacks to indicate to the client that authentication has +// partially succeeded, but further steps are required. +type PartialSuccessError struct { + // Next defines the authentication callbacks to apply to further steps. The + // available methods communicated to the client are based on the non-nil + // ServerAuthCallbacks fields. + Next ServerAuthCallbacks +} + +func (p *PartialSuccessError) Error() string { + return "ssh: authenticated with partial success" +} + // ErrNoAuth is the error value returned if no // authentication method has been passed yet. This happens as a normal // part of the authentication loop, since the client first tries @@ -433,14 +462,42 @@ func (l ServerAuthError) Error() string { // It is returned in ServerAuthError.Errors from NewServerConn. var ErrNoAuth = errors.New("ssh: no auth passed yet") +// BannerError is an error that can be returned by authentication handlers in +// ServerConfig to send a banner message to the client. +type BannerError struct { + Err error + Message string +} + +func (b *BannerError) Unwrap() error { + return b.Err +} + +func (b *BannerError) Error() string { + if b.Err == nil { + return b.Message + } + return b.Err.Error() +} + func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { sessionID := s.transport.getSessionID() var cache pubKeyCache var perms *Permissions authFailures := 0 + noneAuthCount := 0 var authErrs []error var displayedBanner bool + partialSuccessReturned := false + // Set the initial authentication callbacks from the config. They can be + // changed if a PartialSuccessError is returned. 
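// NOTE: illustrative sketch, not part of the vendored change. It wires the new
// PartialSuccessError and BannerError types into a two-step login: a password
// check followed by a keyboard-interactive second factor. checkPassword and
// checkOTP are hypothetical verifiers, stubbed out here.
package main

import (
	"errors"

	"golang.org/x/crypto/ssh"
)

func checkPassword(user string, pw []byte) bool { return false } // stub
func checkOTP(user, otp string) bool            { return false } // stub

func twoStepConfig() *ssh.ServerConfig {
	return &ssh.ServerConfig{
		PasswordCallback: func(conn ssh.ConnMetadata, pw []byte) (*ssh.Permissions, error) {
			if !checkPassword(conn.User(), pw) {
				// Sent to the client as an auth banner before the failure.
				return nil, &ssh.BannerError{Message: "invalid credentials"}
			}
			// Password accepted; require a second factor before finishing auth.
			return nil, &ssh.PartialSuccessError{Next: ssh.ServerAuthCallbacks{
				KeyboardInteractiveCallback: func(conn ssh.ConnMetadata, challenge ssh.KeyboardInteractiveChallenge) (*ssh.Permissions, error) {
					answers, err := challenge(conn.User(), "", []string{"OTP: "}, []bool{false})
					if err != nil || len(answers) != 1 || !checkOTP(conn.User(), answers[0]) {
						return nil, errors.New("second factor rejected")
					}
					return &ssh.Permissions{}, nil
				},
			}}
		},
	}
}

func main() { _ = twoStepConfig() }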
+ authConfig := ServerAuthCallbacks{ + PasswordCallback: config.PasswordCallback, + PublicKeyCallback: config.PublicKeyCallback, + KeyboardInteractiveCallback: config.KeyboardInteractiveCallback, + GSSAPIWithMICConfig: config.GSSAPIWithMICConfig, + } userAuthLoop: for { @@ -471,6 +528,11 @@ userAuthLoop: return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service) } + if s.user != userAuthReq.User && partialSuccessReturned { + return nil, fmt.Errorf("ssh: client changed the user after a partial success authentication, previous user %q, current user %q", + s.user, userAuthReq.User) + } + s.user = userAuthReq.User if !displayedBanner && config.BannerCallback != nil { @@ -491,20 +553,18 @@ userAuthLoop: switch userAuthReq.Method { case "none": - if config.NoClientAuth { + noneAuthCount++ + // We don't allow none authentication after a partial success + // response. + if config.NoClientAuth && !partialSuccessReturned { if config.NoClientAuthCallback != nil { perms, authErr = config.NoClientAuthCallback(s) } else { authErr = nil } } - - // allow initial attempt of 'none' without penalty - if authFailures == 0 { - authFailures-- - } case "password": - if config.PasswordCallback == nil { + if authConfig.PasswordCallback == nil { authErr = errors.New("ssh: password auth not configured") break } @@ -518,17 +578,17 @@ userAuthLoop: return nil, parseError(msgUserAuthRequest) } - perms, authErr = config.PasswordCallback(s, password) + perms, authErr = authConfig.PasswordCallback(s, password) case "keyboard-interactive": - if config.KeyboardInteractiveCallback == nil { + if authConfig.KeyboardInteractiveCallback == nil { authErr = errors.New("ssh: keyboard-interactive auth not configured") break } prompter := &sshClientKeyboardInteractive{s} - perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge) + perms, authErr = authConfig.KeyboardInteractiveCallback(s, prompter.Challenge) case "publickey": - if config.PublicKeyCallback == nil { + if authConfig.PublicKeyCallback == nil { authErr = errors.New("ssh: publickey auth not configured") break } @@ -562,11 +622,18 @@ userAuthLoop: if !ok { candidate.user = s.user candidate.pubKeyData = pubKeyData - candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey) - if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { - candidate.result = checkSourceAddress( + candidate.perms, candidate.result = authConfig.PublicKeyCallback(s, pubKey) + _, isPartialSuccessError := candidate.result.(*PartialSuccessError) + + if (candidate.result == nil || isPartialSuccessError) && + candidate.perms != nil && + candidate.perms.CriticalOptions != nil && + candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { + if err := checkSourceAddress( s.RemoteAddr(), - candidate.perms.CriticalOptions[sourceAddressCriticalOption]) + candidate.perms.CriticalOptions[sourceAddressCriticalOption]); err != nil { + candidate.result = err + } } cache.add(candidate) } @@ -578,8 +645,8 @@ userAuthLoop: if len(payload) > 0 { return nil, parseError(msgUserAuthRequest) } - - if candidate.result == nil { + _, isPartialSuccessError := candidate.result.(*PartialSuccessError) + if candidate.result == nil || isPartialSuccessError { okMsg := userAuthPubKeyOkMsg{ Algo: algo, PubKey: pubKeyData, @@ -629,11 +696,11 @@ userAuthLoop: perms = candidate.perms } case "gssapi-with-mic": - if config.GSSAPIWithMICConfig == 
nil { + if authConfig.GSSAPIWithMICConfig == nil { authErr = errors.New("ssh: gssapi-with-mic auth not configured") break } - gssapiConfig := config.GSSAPIWithMICConfig + gssapiConfig := authConfig.GSSAPIWithMICConfig userAuthRequestGSSAPI, err := parseGSSAPIPayload(userAuthReq.Payload) if err != nil { return nil, parseError(msgUserAuthRequest) @@ -685,53 +752,86 @@ userAuthLoop: config.AuthLogCallback(s, userAuthReq.Method, authErr) } + var bannerErr *BannerError + if errors.As(authErr, &bannerErr) { + if bannerErr.Message != "" { + bannerMsg := &userAuthBannerMsg{ + Message: bannerErr.Message, + } + if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { + return nil, err + } + } + } + if authErr == nil { break userAuthLoop } - authFailures++ - if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries { - // If we have hit the max attempts, don't bother sending the - // final SSH_MSG_USERAUTH_FAILURE message, since there are - // no more authentication methods which can be attempted, - // and this message may cause the client to re-attempt - // authentication while we send the disconnect message. - // Continue, and trigger the disconnect at the start of - // the loop. - // - // The SSH specification is somewhat confusing about this, - // RFC 4252 Section 5.1 requires each authentication failure - // be responded to with a respective SSH_MSG_USERAUTH_FAILURE - // message, but Section 4 says the server should disconnect - // after some number of attempts, but it isn't explicit which - // message should take precedence (i.e. should there be a failure - // message than a disconnect message, or if we are going to - // disconnect, should we only send that message.) - // - // Either way, OpenSSH disconnects immediately after the last - // failed authnetication attempt, and given they are typically - // considered the golden implementation it seems reasonable - // to match that behavior. - continue + var failureMsg userAuthFailureMsg + + if partialSuccess, ok := authErr.(*PartialSuccessError); ok { + // After a partial success error we don't allow changing the user + // name and execute the NoClientAuthCallback. + partialSuccessReturned = true + + // In case a partial success is returned, the server may send + // a new set of authentication methods. + authConfig = partialSuccess.Next + + // Reset pubkey cache, as the new PublicKeyCallback might + // accept a different set of public keys. + cache = pubKeyCache{} + + // Send back a partial success message to the user. + failureMsg.PartialSuccess = true + } else { + // Allow initial attempt of 'none' without penalty. + if authFailures > 0 || userAuthReq.Method != "none" || noneAuthCount != 1 { + authFailures++ + } + if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries { + // If we have hit the max attempts, don't bother sending the + // final SSH_MSG_USERAUTH_FAILURE message, since there are + // no more authentication methods which can be attempted, + // and this message may cause the client to re-attempt + // authentication while we send the disconnect message. + // Continue, and trigger the disconnect at the start of + // the loop. + // + // The SSH specification is somewhat confusing about this, + // RFC 4252 Section 5.1 requires each authentication failure + // be responded to with a respective SSH_MSG_USERAUTH_FAILURE + // message, but Section 4 says the server should disconnect + // after some number of attempts, but it isn't explicit which + // message should take precedence (i.e. 
should there be a failure + // message than a disconnect message, or if we are going to + // disconnect, should we only send that message.) + // + // Either way, OpenSSH disconnects immediately after the last + // failed authentication attempt, and given they are typically + // considered the golden implementation it seems reasonable + // to match that behavior. + continue + } } - var failureMsg userAuthFailureMsg - if config.PasswordCallback != nil { + if authConfig.PasswordCallback != nil { failureMsg.Methods = append(failureMsg.Methods, "password") } - if config.PublicKeyCallback != nil { + if authConfig.PublicKeyCallback != nil { failureMsg.Methods = append(failureMsg.Methods, "publickey") } - if config.KeyboardInteractiveCallback != nil { + if authConfig.KeyboardInteractiveCallback != nil { failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive") } - if config.GSSAPIWithMICConfig != nil && config.GSSAPIWithMICConfig.Server != nil && - config.GSSAPIWithMICConfig.AllowLogin != nil { + if authConfig.GSSAPIWithMICConfig != nil && authConfig.GSSAPIWithMICConfig.Server != nil && + authConfig.GSSAPIWithMICConfig.AllowLogin != nil { failureMsg.Methods = append(failureMsg.Methods, "gssapi-with-mic") } if len(failureMsg.Methods) == 0 { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") + return nil, errors.New("ssh: no authentication methods available") } if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil { diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE new file mode 100644 index 00000000..2a7cf70d --- /dev/null +++ b/vendor/golang.org/x/exp/LICENSE @@ -0,0 +1,27 @@ +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/exp/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/exp/constraints/BUILD.bazel b/vendor/golang.org/x/exp/constraints/BUILD.bazel new file mode 100644 index 00000000..fadad8b4 --- /dev/null +++ b/vendor/golang.org/x/exp/constraints/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "constraints", + srcs = ["constraints.go"], + importmap = "peridot.resf.org/vendor/golang.org/x/exp/constraints", + importpath = "golang.org/x/exp/constraints", + visibility = ["//visibility:public"], +) diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go new file mode 100644 index 00000000..2c033dff --- /dev/null +++ b/vendor/golang.org/x/exp/constraints/constraints.go @@ -0,0 +1,50 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package constraints defines a set of useful constraints to be used +// with type parameters. +package constraints + +// Signed is a constraint that permits any signed integer type. +// If future releases of Go add new predeclared signed integer types, +// this constraint will be modified to include them. +type Signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 +} + +// Unsigned is a constraint that permits any unsigned integer type. +// If future releases of Go add new predeclared unsigned integer types, +// this constraint will be modified to include them. +type Unsigned interface { + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +// Integer is a constraint that permits any integer type. +// If future releases of Go add new predeclared integer types, +// this constraint will be modified to include them. +type Integer interface { + Signed | Unsigned +} + +// Float is a constraint that permits any floating-point type. +// If future releases of Go add new predeclared floating-point types, +// this constraint will be modified to include them. +type Float interface { + ~float32 | ~float64 +} + +// Complex is a constraint that permits any complex numeric type. +// If future releases of Go add new predeclared complex numeric types, +// this constraint will be modified to include them. 
+type Complex interface { + ~complex64 | ~complex128 +} + +// Ordered is a constraint that permits any ordered type: any type +// that supports the operators < <= >= >. +// If future releases of Go add new ordered types, +// this constraint will be modified to include them. +type Ordered interface { + Integer | Float | ~string +} diff --git a/vendor/golang.org/x/exp/slices/BUILD.bazel b/vendor/golang.org/x/exp/slices/BUILD.bazel new file mode 100644 index 00000000..c2a47c25 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/BUILD.bazel @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "slices", + srcs = [ + "cmp.go", + "slices.go", + "sort.go", + "zsortanyfunc.go", + "zsortordered.go", + ], + importmap = "peridot.resf.org/vendor/golang.org/x/exp/slices", + importpath = "golang.org/x/exp/slices", + visibility = ["//visibility:public"], + deps = ["//vendor/golang.org/x/exp/constraints"], +) diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go new file mode 100644 index 00000000..fbf1934a --- /dev/null +++ b/vendor/golang.org/x/exp/slices/cmp.go @@ -0,0 +1,44 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// min is a version of the predeclared function from the Go 1.21 release. +func min[T constraints.Ordered](a, b T) T { + if a < b || isNaN(a) { + return a + } + return b +} + +// max is a version of the predeclared function from the Go 1.21 release. +func max[T constraints.Ordered](a, b T) T { + if a > b || isNaN(a) { + return a + } + return b +} + +// cmpLess is a copy of cmp.Less from the Go 1.21 release. +func cmpLess[T constraints.Ordered](x, y T) bool { + return (isNaN(x) && !isNaN(y)) || x < y +} + +// cmpCompare is a copy of cmp.Compare from the Go 1.21 release. +func cmpCompare[T constraints.Ordered](x, y T) int { + xNaN := isNaN(x) + yNaN := isNaN(y) + if xNaN && yNaN { + return 0 + } + if xNaN || x < y { + return -1 + } + if yNaN || x > y { + return +1 + } + return 0 +} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go new file mode 100644 index 00000000..46ceac34 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -0,0 +1,515 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package slices defines various functions useful with slices of any type. +package slices + +import ( + "unsafe" + + "golang.org/x/exp/constraints" +) + +// Equal reports whether two slices are equal: the same length and all +// elements equal. If the lengths are different, Equal returns false. +// Otherwise, the elements are compared in increasing index order, and the +// comparison stops at the first unequal pair. +// Floating point NaNs are not considered equal. +func Equal[S ~[]E, E comparable](s1, s2 S) bool { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + if s1[i] != s2[i] { + return false + } + } + return true +} + +// EqualFunc reports whether two slices are equal using an equality +// function on each pair of elements. If the lengths are different, +// EqualFunc returns false. Otherwise, the elements are compared in +// increasing index order, and the comparison stops at the first index +// for which eq returns false. 
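// NOTE: illustrative sketch, not part of the vendored change. The constraints
// package above is what user code (and the slices helpers below) build
// generics on; a typical ordered-type helper looks like this.
package main

import (
	"fmt"

	"golang.org/x/exp/constraints"
)

// Clamp limits v to the closed interval [lo, hi] for any ordered type.
func Clamp[T constraints.Ordered](v, lo, hi T) T {
	if v < lo {
		return lo
	}
	if v > hi {
		return hi
	}
	return v
}

func main() {
	fmt.Println(Clamp(17, 0, 10), Clamp("pear", "apple", "fig")) // 10 fig
}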
+func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool { + if len(s1) != len(s2) { + return false + } + for i, v1 := range s1 { + v2 := s2[i] + if !eq(v1, v2) { + return false + } + } + return true +} + +// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair +// of elements. The elements are compared sequentially, starting at index 0, +// until one element is not equal to the other. +// The result of comparing the first non-matching elements is returned. +// If both slices are equal until one of them ends, the shorter slice is +// considered less than the longer one. +// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. +func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int { + for i, v1 := range s1 { + if i >= len(s2) { + return +1 + } + v2 := s2[i] + if c := cmpCompare(v1, v2); c != 0 { + return c + } + } + if len(s1) < len(s2) { + return -1 + } + return 0 +} + +// CompareFunc is like [Compare] but uses a custom comparison function on each +// pair of elements. +// The result is the first non-zero result of cmp; if cmp always +// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), +// and +1 if len(s1) > len(s2). +func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int { + for i, v1 := range s1 { + if i >= len(s2) { + return +1 + } + v2 := s2[i] + if c := cmp(v1, v2); c != 0 { + return c + } + } + if len(s1) < len(s2) { + return -1 + } + return 0 +} + +// Index returns the index of the first occurrence of v in s, +// or -1 if not present. +func Index[S ~[]E, E comparable](s S, v E) int { + for i := range s { + if v == s[i] { + return i + } + } + return -1 +} + +// IndexFunc returns the first index i satisfying f(s[i]), +// or -1 if none do. +func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { + for i := range s { + if f(s[i]) { + return i + } + } + return -1 +} + +// Contains reports whether v is present in s. +func Contains[S ~[]E, E comparable](s S, v E) bool { + return Index(s, v) >= 0 +} + +// ContainsFunc reports whether at least one +// element e of s satisfies f(e). +func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { + return IndexFunc(s, f) >= 0 +} + +// Insert inserts the values v... into s at index i, +// returning the modified slice. +// The elements at s[i:] are shifted up to make room. +// In the returned slice r, r[i] == v[0], +// and r[i+len(v)] == value originally at r[i]. +// Insert panics if i is out of range. +// This function is O(len(s) + len(v)). +func Insert[S ~[]E, E any](s S, i int, v ...E) S { + m := len(v) + if m == 0 { + return s + } + n := len(s) + if i == n { + return append(s, v...) + } + if n+m > cap(s) { + // Use append rather than make so that we bump the size of + // the slice up to the next storage class. + // This is what Grow does but we don't call Grow because + // that might copy the values twice. + s2 := append(s[:i], make(S, n+m-i)...) + copy(s2[i:], v) + copy(s2[i+m:], s[i:]) + return s2 + } + s = s[:n+m] + + // before: + // s: aaaaaaaabbbbccccccccdddd + // ^ ^ ^ ^ + // i i+m n n+m + // after: + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // + // a are the values that don't move in s. + // v are the values copied in from v. + // b and c are the values from s that are shifted up in index. + // d are the values that get overwritten, never to be seen again. + + if !overlaps(v, s[i+m:]) { + // Easy case - v does not overlap either the c or d regions. 
+ // (It might be in some of a or b, or elsewhere entirely.) + // The data we copy up doesn't write to v at all, so just do it. + + copy(s[i+m:], s[i:]) + + // Now we have + // s: aaaaaaaabbbbbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // Note the b values are duplicated. + + copy(s[i:], v) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. + return s + } + + // The hard case - v overlaps c or d. We can't just shift up + // the data because we'd move or clobber the values we're trying + // to insert. + // So instead, write v on top of d, then rotate. + copy(s[n:], v) + + // Now we have + // s: aaaaaaaabbbbccccccccvvvv + // ^ ^ ^ ^ + // i i+m n n+m + + rotateRight(s[i:], m) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. + return s +} + +// clearSlice sets all elements up to the length of s to the zero value of E. +// We may use the builtin clear func instead, and remove clearSlice, when upgrading +// to Go 1.21+. +func clearSlice[S ~[]E, E any](s S) { + var zero E + for i := range s { + s[i] = zero + } +} + +// Delete removes the elements s[i:j] from s, returning the modified slice. +// Delete panics if j > len(s) or s[i:j] is not a valid slice of s. +// Delete is O(len(s)-i), so if many items must be deleted, it is better to +// make a single call deleting them all together than to delete one at a time. +// Delete zeroes the elements s[len(s)-(j-i):len(s)]. +func Delete[S ~[]E, E any](s S, i, j int) S { + _ = s[i:j:len(s)] // bounds check + + if i == j { + return s + } + + oldlen := len(s) + s = append(s[:i], s[j:]...) + clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC + return s +} + +// DeleteFunc removes any elements from s for which del returns true, +// returning the modified slice. +// DeleteFunc zeroes the elements between the new length and the original length. +func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { + i := IndexFunc(s, del) + if i == -1 { + return s + } + // Don't start copying elements until we find one to delete. + for j := i + 1; j < len(s); j++ { + if v := s[j]; !del(v) { + s[i] = v + i++ + } + } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC + return s[:i] +} + +// Replace replaces the elements s[i:j] by the given v, and returns the +// modified slice. Replace panics if s[i:j] is not a valid slice of s. +// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length. +func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { + _ = s[i:j] // verify that i:j is a valid subslice + + if i == j { + return Insert(s, i, v...) + } + if j == len(s) { + return append(s[:i], v...) + } + + tot := len(s[:i]) + len(v) + len(s[j:]) + if tot > cap(s) { + // Too big to fit, allocate and copy over. + s2 := append(s[:i], make(S, tot-i)...) // See Insert + copy(s2[i:], v) + copy(s2[i+len(v):], s[j:]) + return s2 + } + + r := s[:tot] + + if i+len(v) <= j { + // Easy, as v fits in the deleted portion. + copy(r[i:], v) + if i+len(v) != j { + copy(r[i+len(v):], s[j:]) + } + clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC + return r + } + + // We are expanding (v is bigger than j-i). 
+ // The situation is something like this: + // (example has i=4,j=8,len(s)=16,len(v)=6) + // s: aaaaxxxxbbbbbbbbyy + // ^ ^ ^ ^ + // i j len(s) tot + // a: prefix of s + // x: deleted range + // b: more of s + // y: area to expand into + + if !overlaps(r[i+len(v):], v) { + // Easy, as v is not clobbered by the first copy. + copy(r[i+len(v):], s[j:]) + copy(r[i:], v) + return r + } + + // This is a situation where we don't have a single place to which + // we can copy v. Parts of it need to go to two different places. + // We want to copy the prefix of v into y and the suffix into x, then + // rotate |y| spots to the right. + // + // v[2:] v[:2] + // | | + // s: aaaavvvvbbbbbbbbvv + // ^ ^ ^ ^ + // i j len(s) tot + // + // If either of those two destinations don't alias v, then we're good. + y := len(v) - (j - i) // length of y portion + + if !overlaps(r[i:j], v) { + copy(r[i:j], v[y:]) + copy(r[len(s):], v[:y]) + rotateRight(r[i:], y) + return r + } + if !overlaps(r[len(s):], v) { + copy(r[len(s):], v[:y]) + copy(r[i:j], v[y:]) + rotateRight(r[i:], y) + return r + } + + // Now we know that v overlaps both x and y. + // That means that the entirety of b is *inside* v. + // So we don't need to preserve b at all; instead we + // can copy v first, then copy the b part of v out of + // v to the right destination. + k := startIdx(v, s[j:]) + copy(r[i:], v) + copy(r[i+len(v):], r[i+k:]) + return r +} + +// Clone returns a copy of the slice. +// The elements are copied using assignment, so this is a shallow clone. +func Clone[S ~[]E, E any](s S) S { + // Preserve nil in case it matters. + if s == nil { + return nil + } + return append(S([]E{}), s...) +} + +// Compact replaces consecutive runs of equal elements with a single copy. +// This is like the uniq command found on Unix. +// Compact modifies the contents of the slice s and returns the modified slice, +// which may have a smaller length. +// Compact zeroes the elements between the new length and the original length. +func Compact[S ~[]E, E comparable](s S) S { + if len(s) < 2 { + return s + } + i := 1 + for k := 1; k < len(s); k++ { + if s[k] != s[k-1] { + if i != k { + s[i] = s[k] + } + i++ + } + } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC + return s[:i] +} + +// CompactFunc is like [Compact] but uses an equality function to compare elements. +// For runs of elements that compare equal, CompactFunc keeps the first one. +// CompactFunc zeroes the elements between the new length and the original length. +func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { + if len(s) < 2 { + return s + } + i := 1 + for k := 1; k < len(s); k++ { + if !eq(s[k], s[k-1]) { + if i != k { + s[i] = s[k] + } + i++ + } + } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC + return s[:i] +} + +// Grow increases the slice's capacity, if necessary, to guarantee space for +// another n elements. After Grow(n), at least n elements can be appended +// to the slice without another allocation. If n is negative or too large to +// allocate the memory, Grow panics. +func Grow[S ~[]E, E any](s S, n int) S { + if n < 0 { + panic("cannot be negative") + } + if n -= cap(s) - len(s); n > 0 { + // TODO(https://go.dev/issue/53888): Make using []E instead of S + // to workaround a compiler bug where the runtime.growslice optimization + // does not take effect. Revert when the compiler is fixed. 
+ s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] + } + return s +} + +// Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. +func Clip[S ~[]E, E any](s S) S { + return s[:len(s):len(s)] +} + +// Rotation algorithm explanation: +// +// rotate left by 2 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join first parts +// 89234567 01 +// recursively rotate first left part by 2 +// 23456789 01 +// join at the end +// 2345678901 +// +// rotate left by 8 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join last parts +// 89 23456701 +// recursively rotate second part left by 6 +// 89 01234567 +// join at the end +// 8901234567 + +// TODO: There are other rotate algorithms. +// This algorithm has the desirable property that it moves each element exactly twice. +// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes. +// The follow-cycles algorithm can be 1-write but it is not very cache friendly. + +// rotateLeft rotates b left by n spaces. +// s_final[i] = s_orig[i+r], wrapping around. +func rotateLeft[E any](s []E, r int) { + for r != 0 && r != len(s) { + if r*2 <= len(s) { + swap(s[:r], s[len(s)-r:]) + s = s[:len(s)-r] + } else { + swap(s[:len(s)-r], s[r:]) + s, r = s[len(s)-r:], r*2-len(s) + } + } +} +func rotateRight[E any](s []E, r int) { + rotateLeft(s, len(s)-r) +} + +// swap swaps the contents of x and y. x and y must be equal length and disjoint. +func swap[E any](x, y []E) { + for i := 0; i < len(x); i++ { + x[i], y[i] = y[i], x[i] + } +} + +// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap. +func overlaps[E any](a, b []E) bool { + if len(a) == 0 || len(b) == 0 { + return false + } + elemSize := unsafe.Sizeof(a[0]) + if elemSize == 0 { + return false + } + // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445. + // Also see crypto/internal/alias/alias.go:AnyOverlap + return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) && + uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1) +} + +// startIdx returns the index in haystack where the needle starts. +// prerequisite: the needle must be aliased entirely inside the haystack. +func startIdx[E any](haystack, needle []E) int { + p := &needle[0] + for i := range haystack { + if p == &haystack[i] { + return i + } + } + // TODO: what if the overlap is by a non-integral number of Es? + panic("needle not found") +} + +// Reverse reverses the elements of the slice in place. +func Reverse[S ~[]E, E any](s S) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go new file mode 100644 index 00000000..f58bbc7b --- /dev/null +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -0,0 +1,197 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp + +package slices + +import ( + "math/bits" + + "golang.org/x/exp/constraints" +) + +// Sort sorts a slice of any ordered type in ascending order. +// When sorting floating-point numbers, NaNs are ordered before other values. 
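// NOTE: illustrative sketch, not part of the vendored change: observable
// behavior of the slices mutation helpers defined above.
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	s := []int{1, 2, 4, 4, 5}
	s = slices.Insert(s, 2, 3)          // [1 2 3 4 4 5]: shifts s[2:] up
	s = slices.Delete(s, 3, 4)          // [1 2 3 4 5]: removes one duplicate 4
	s = slices.Compact(append(s, 5, 5)) // [1 2 3 4 5]: collapses the consecutive run of 5s
	fmt.Println(slices.Contains(s, 3), slices.Index(s, 4)) // true 3
	s = slices.Clip(s) // drop spare capacity before long-term storage
	fmt.Println(s, len(s) == cap(s))
}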
+func Sort[S ~[]E, E constraints.Ordered](x S) { + n := len(x) + pdqsortOrdered(x, 0, n, bits.Len(uint(n))) +} + +// SortFunc sorts the slice x in ascending order as determined by the cmp +// function. This sort is not guaranteed to be stable. +// cmp(a, b) should return a negative number when a < b, a positive number when +// a > b and zero when a == b or when a is not comparable to b in the sense +// of the formal definition of Strict Weak Ordering. +// +// SortFunc requires that cmp is a strict weak ordering. +// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. +// To indicate 'uncomparable', return 0 from the function. +func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { + n := len(x) + pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp) +} + +// SortStableFunc sorts the slice x while keeping the original order of equal +// elements, using cmp to compare elements in the same way as [SortFunc]. +func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { + stableCmpFunc(x, len(x), cmp) +} + +// IsSorted reports whether x is sorted in ascending order. +func IsSorted[S ~[]E, E constraints.Ordered](x S) bool { + for i := len(x) - 1; i > 0; i-- { + if cmpLess(x[i], x[i-1]) { + return false + } + } + return true +} + +// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the +// comparison function as defined by [SortFunc]. +func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool { + for i := len(x) - 1; i > 0; i-- { + if cmp(x[i], x[i-1]) < 0 { + return false + } + } + return true +} + +// Min returns the minimal value in x. It panics if x is empty. +// For floating-point numbers, Min propagates NaNs (any NaN value in x +// forces the output to be NaN). +func Min[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Min: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = min(m, x[i]) + } + return m +} + +// MinFunc returns the minimal value in x, using cmp to compare elements. +// It panics if x is empty. If there is more than one minimal element +// according to the cmp function, MinFunc returns the first one. +func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MinFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) < 0 { + m = x[i] + } + } + return m +} + +// Max returns the maximal value in x. It panics if x is empty. +// For floating-point E, Max propagates NaNs (any NaN value in x +// forces the output to be NaN). +func Max[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Max: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = max(m, x[i]) + } + return m +} + +// MaxFunc returns the maximal value in x, using cmp to compare elements. +// It panics if x is empty. If there is more than one maximal element +// according to the cmp function, MaxFunc returns the first one. +func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MaxFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) > 0 { + m = x[i] + } + } + return m +} + +// BinarySearch searches for target in a sorted slice and returns the position +// where target is found, or the position where target would appear in the +// sort order; it also returns a bool saying whether the target is really found +// in the slice. The slice must be sorted in increasing order. 
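// NOTE: illustrative sketch, not part of the vendored change: the ordered and
// comparator-based sort entry points defined above.
package main

import (
	"fmt"
	"strings"

	"golang.org/x/exp/slices"
)

func main() {
	xs := []string{"pear", "fig", "apple"}
	slices.Sort(xs) // natural (Ordered) order: [apple fig pear]

	// Custom order: by length, ties broken lexically. cmp must implement a
	// strict weak ordering and return negative/zero/positive, like strings.Compare.
	slices.SortFunc(xs, func(a, b string) int {
		if d := len(a) - len(b); d != 0 {
			return d
		}
		return strings.Compare(a, b)
	})
	fmt.Println(xs)                             // [fig pear apple]
	fmt.Println(slices.Min(xs), slices.Max(xs)) // apple pear
}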
+func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) { + // Inlining is faster than calling BinarySearchFunc with a lambda. + n := len(x) + // Define x[-1] < target and x[n] >= target. + // Invariant: x[i-1] < target, x[j] >= target. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if cmpLess(x[h], target) { + i = h + 1 // preserves x[i-1] < target + } else { + j = h // preserves x[j] >= target + } + } + // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. + return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target))) +} + +// BinarySearchFunc works like [BinarySearch], but uses a custom comparison +// function. The slice must be sorted in increasing order, where "increasing" +// is defined by cmp. cmp should return 0 if the slice element matches +// the target, a negative number if the slice element precedes the target, +// or a positive number if the slice element follows the target. +// cmp must implement the same ordering as the slice, such that if +// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. +func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) { + n := len(x) + // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . + // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if cmp(x[h], target) < 0 { + i = h + 1 // preserves cmp(x[i - 1], target) < 0 + } else { + j = h // preserves cmp(x[j], target) >= 0 + } + } + // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i. + return i, i < n && cmp(x[i], target) == 0 +} + +type sortedHint int // hint for pdqsort when choosing the pivot + +const ( + unknownHint sortedHint = iota + increasingHint + decreasingHint +) + +// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf +type xorshift uint64 + +func (r *xorshift) Next() uint64 { + *r ^= *r << 13 + *r ^= *r >> 17 + *r ^= *r << 5 + return uint64(*r) +} + +func nextPowerOfTwo(length int) uint { + return 1 << bits.Len(uint(length)) +} + +// isNaN reports whether x is a NaN without requiring the math package. +// This will always return false if T is not floating-point. +func isNaN[T constraints.Ordered](x T) bool { + return x != x +} diff --git a/vendor/golang.org/x/exp/slices/zsortanyfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go new file mode 100644 index 00000000..06f2c7a2 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortanyfunc.go @@ -0,0 +1,479 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +// insertionSortCmpFunc sorts data[a:b] using insertion sort. +func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownCmpFunc implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. 
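// NOTE: illustrative sketch, not part of the vendored change: BinarySearch on
// an ordered slice, and BinarySearchFunc against a struct key. Both require
// the slice to already be sorted by the same ordering.
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

type user struct {
	id   int
	name string
}

func main() {
	ids := []int{2, 5, 9}
	i, found := slices.BinarySearch(ids, 5)
	fmt.Println(i, found) // 1 true
	i, found = slices.BinarySearch(ids, 7)
	fmt.Println(i, found) // 2 false (insertion position)

	users := []user{{2, "ann"}, {5, "bob"}, {9, "cyn"}}
	j, ok := slices.BinarySearchFunc(users, 5, func(u user, id int) int {
		return u.id - id
	})
	fmt.Println(j, ok) // 1 true
}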
+func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) { + child++ + } + if !(cmp(data[first+root], data[first+child]) < 0) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownCmpFunc(data, i, hi, first, cmp) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownCmpFunc(data, lo, i, first, cmp) + } +} + +// pdqsortCmpFunc sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. +func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortCmpFunc(data, a, b, cmp) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortCmpFunc(data, a, b, cmp) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsCmpFunc(data, a, b, cmp) + limit-- + } + + pivot, hint := choosePivotCmpFunc(data, a, b, cmp) + if hint == decreasingHint { + reverseRangeCmpFunc(data, a, b, cmp) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortCmpFunc(data, a, b, cmp) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) { + mid := partitionEqualCmpFunc(data, a, b, pivot, cmp) + a = mid + continue + } + + mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortCmpFunc(data, a, mid, limit, cmp) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortCmpFunc(data, mid+1, b, limit, cmp) + b = mid + } + } +} + +// partitionCmpFunc does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>=p for i<newpivot and j>newpivot. +// On return, data[newpivot] = p +func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for i <= j && (cmp(data[i], data[a]) < 0) { + i++ + } + for i <= j && !(cmp(data[j], data[a]) < 0) { + j-- + } + if i > j { + data[j], data[a] = data[a], data[j] + return j, true + } + data[i], data[j] = data[j], data[i] + i++ + j-- + + for { + for i <= j && (cmp(data[i], data[a]) < 0) { + i++ + } + for i <= j && !(cmp(data[j], data[a]) < 0) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + data[j], data[a] = data[a], data[j] + return j, false +} + +// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. +func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for { + for i <= j && !(cmp(data[a], data[i]) < 0) { + i++ + } + for i <= j && (cmp(data[a], data[j]) < 0) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + return i +} + +// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end. +func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool { + const ( + maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted + shortestShifting = 50 // don't shift any elements on short arrays + ) + i := a + 1 + for j := 0; j < maxSteps; j++ { + for i < b && !(cmp(data[i], data[i-1]) < 0) { + i++ + } + + if i == b { + return true + } + + if b-a < shortestShifting { + return false + } + + data[i], data[i-1] = data[i-1], data[i] + + // Shift the smaller one to the left. + if i-a >= 2 { + for j := i - 1; j >= 1; j-- { + if !(cmp(data[j], data[j-1]) < 0) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + // Shift the greater one to the right. + if b-i >= 2 { + for j := i + 1; j < b; j++ { + if !(cmp(data[j], data[j-1]) < 0) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + } + return false +} + +// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns +// that might cause imbalanced partitions in quicksort. +func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { + length := b - a + if length >= 8 { + random := xorshift(length) + modulus := nextPowerOfTwo(length) + + for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { + other := int(uint(random.Next()) & (modulus - 1)) + if other >= length { + other -= length + } + data[idx], data[a+other] = data[a+other], data[idx] + } + } +} + +// choosePivotCmpFunc chooses a pivot in data[a:b]. +// +// [0,8): chooses a static pivot. +// [8,shortestNinther): uses the simple median-of-three method. +// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. + i = medianAdjacentCmpFunc(data, i, &swaps, cmp) + j = medianAdjacentCmpFunc(data, j, &swaps, cmp) + k = medianAdjacentCmpFunc(data, k, &swaps, cmp) + } + // Find the median among i, j, k and stores it into j. + j = medianCmpFunc(data, i, j, k, &swaps, cmp) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) { + if cmp(data[b], data[a]) < 0 { + *swaps++ + return b, a + } + return a, b +} + +// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int { + a, b = order2CmpFunc(data, a, b, swaps, cmp) + b, c = order2CmpFunc(data, b, c, swaps, cmp) + a, b = order2CmpFunc(data, a, b, swaps, cmp) + return b +} + +// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int { + return medianCmpFunc(data, a-1, a, a+1, swaps, cmp) +} + +func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortCmpFunc(data, a, b, cmp) + a = b + b += blockSize + } + insertionSortCmpFunc(data, a, n, cmp) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeCmpFunc(data, a, a+blockSize, b, cmp) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeCmpFunc(data, a, m, n, cmp) + } + blockSize *= 2 + } +} + +// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. +// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. 
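// NOTE: illustrative sketch, not part of the vendored change: SortStableFunc
// is the public entry point backed by the insertion-sort blocks plus
// symMergeCmpFunc above; equal elements keep their input order.
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	type rec struct{ key, seq int }
	s := []rec{{1, 0}, {0, 1}, {1, 2}, {0, 3}}
	slices.SortStableFunc(s, func(a, b rec) int { return a.key - b.key })
	fmt.Println(s) // [{0 1} {0 3} {1 0} {1 2}]: ties preserve seq order
}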
+func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if cmp(data[h], data[a]) < 0 { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !(cmp(data[m], data[h]) < 0) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !(cmp(data[p-c], data[c]) < 0) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateCmpFunc(data, start, m, end, cmp) + } + if a < start && start < mid { + symMergeCmpFunc(data, a, start, mid, cmp) + } + if mid < end && end < b { + symMergeCmpFunc(data, mid, end, b, cmp) + } +} + +// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeCmpFunc(data, m-i, m, j, cmp) + i -= j + } else { + swapRangeCmpFunc(data, m-i, m+j-i, i, cmp) + j -= i + } + } + // i == j + swapRangeCmpFunc(data, m-i, m, i, cmp) +} diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go new file mode 100644 index 00000000..99b47c39 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortordered.go @@ -0,0 +1,481 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// insertionSortOrdered sorts data[a:b] using insertion sort. +func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && cmpLess(data[j], data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownOrdered implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. 
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
+	root := lo
+	for {
+		child := 2*root + 1
+		if child >= hi {
+			break
+		}
+		if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) {
+			child++
+		}
+		if !cmpLess(data[first+root], data[first+child]) {
+			return
+		}
+		data[first+root], data[first+child] = data[first+child], data[first+root]
+		root = child
+	}
+}
+
+func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
+	first := a
+	lo := 0
+	hi := b - a
+
+	// Build heap with greatest element at top.
+	for i := (hi - 1) / 2; i >= 0; i-- {
+		siftDownOrdered(data, i, hi, first)
+	}
+
+	// Pop elements, largest first, into end of data.
+	for i := hi - 1; i >= 0; i-- {
+		data[first], data[first+i] = data[first+i], data[first]
+		siftDownOrdered(data, lo, i, first)
+	}
+}
+
+// pdqsortOrdered sorts data[a:b].
+// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
+	const maxInsertion = 12
+
+	var (
+		wasBalanced    = true // whether the last partitioning was reasonably balanced
+		wasPartitioned = true // whether the slice was already partitioned
+	)
+
+	for {
+		length := b - a
+
+		if length <= maxInsertion {
+			insertionSortOrdered(data, a, b)
+			return
+		}
+
+		// Fall back to heapsort if too many bad choices were made.
+		if limit == 0 {
+			heapSortOrdered(data, a, b)
+			return
+		}
+
+		// If the last partitioning was imbalanced, we need to break patterns.
+		if !wasBalanced {
+			breakPatternsOrdered(data, a, b)
+			limit--
+		}
+
+		pivot, hint := choosePivotOrdered(data, a, b)
+		if hint == decreasingHint {
+			reverseRangeOrdered(data, a, b)
+			// The chosen pivot was pivot-a elements after the start of the array.
+			// After reversing it is pivot-a elements before the end of the array.
+			// The idea came from Rust's implementation.
+			pivot = (b - 1) - (pivot - a)
+			hint = increasingHint
+		}
+
+		// The slice is likely already sorted.
+		if wasBalanced && wasPartitioned && hint == increasingHint {
+			if partialInsertionSortOrdered(data, a, b) {
+				return
+			}
+		}
+
+		// The slice probably contains many duplicate elements; partition it into
+		// elements equal to and elements greater than the pivot.
+		if a > 0 && !cmpLess(data[a-1], data[pivot]) {
+			mid := partitionEqualOrdered(data, a, b, pivot)
+			a = mid
+			continue
+		}
+
+		mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
+		wasPartitioned = alreadyPartitioned
+
+		leftLen, rightLen := mid-a, b-mid
+		balanceThreshold := length / 8
+		if leftLen < rightLen {
+			wasBalanced = leftLen >= balanceThreshold
+			pdqsortOrdered(data, a, mid, limit)
+			a = mid + 1
+		} else {
+			wasBalanced = rightLen >= balanceThreshold
+			pdqsortOrdered(data, mid+1, b, limit)
+			b = mid
+		}
+	}
+}
+
+// partitionOrdered does one quicksort partition.
+// Let p = data[pivot].
+// It moves elements in data[a:b] around so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p.
+func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
+	data[a], data[pivot] = data[pivot], data[a]
+	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+	for i <= j && cmpLess(data[i], data[a]) {
+		i++
+	}
+	for i <= j && !cmpLess(data[j], data[a]) {
+		j--
+	}
+	if i > j {
+		data[j], data[a] = data[a], data[j]
+		return j, true
+	}
+	data[i], data[j] = data[j], data[i]
+	i++
+	j--
+
+	for {
+		for i <= j && cmpLess(data[i], data[a]) {
+			i++
+		}
+		for i <= j && !cmpLess(data[j], data[a]) {
+			j--
+		}
+		if i > j {
+			break
+		}
+		data[i], data[j] = data[j], data[i]
+		i++
+		j--
+	}
+	data[j], data[a] = data[a], data[j]
+	return j, false
+}
+
+// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumes that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
+	data[a], data[pivot] = data[pivot], data[a]
+	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+	for {
+		for i <= j && !cmpLess(data[a], data[i]) {
+			i++
+		}
+		for i <= j && cmpLess(data[a], data[j]) {
+			j--
+		}
+		if i > j {
+			break
+		}
+		data[i], data[j] = data[j], data[i]
+		i++
+		j--
+	}
+	return i
+}
+
+// partialInsertionSortOrdered partially sorts a slice and returns true if the slice is sorted at the end.
+func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
+	const (
+		maxSteps         = 5  // maximum number of adjacent out-of-order pairs that will get shifted
+		shortestShifting = 50 // don't shift any elements on short arrays
+	)
+	i := a + 1
+	for j := 0; j < maxSteps; j++ {
+		for i < b && !cmpLess(data[i], data[i-1]) {
+			i++
+		}
+
+		if i == b {
+			return true
+		}
+
+		if b-a < shortestShifting {
+			return false
+		}
+
+		data[i], data[i-1] = data[i-1], data[i]
+
+		// Shift the smaller one to the left.
+		if i-a >= 2 {
+			for j := i - 1; j >= 1; j-- {
+				if !cmpLess(data[j], data[j-1]) {
+					break
+				}
+				data[j], data[j-1] = data[j-1], data[j]
+			}
+		}
+		// Shift the greater one to the right.
+		if b-i >= 2 {
+			for j := i + 1; j < b; j++ {
+				if !cmpLess(data[j], data[j-1]) {
+					break
+				}
+				data[j], data[j-1] = data[j-1], data[j]
+			}
+		}
+	}
+	return false
+}
+
+// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
+	length := b - a
+	if length >= 8 {
+		random := xorshift(length)
+		modulus := nextPowerOfTwo(length)
+
+		for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+			other := int(uint(random.Next()) & (modulus - 1))
+			if other >= length {
+				other -= length
+			}
+			data[idx], data[a+other] = data[a+other], data[idx]
+		}
+	}
+}
+
+// choosePivotOrdered chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
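+//
+// For example, on a 100-element range the ninther takes medians of three
+// adjacent elements near the 25%, 50% and 75% positions and then the
+// median of those medians. The swap count doubles as a cheap sortedness
+// probe: zero swaps hints at increasing data, the maximum (4*3 = 12)
+// hints at decreasing data.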
+func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
+	const (
+		shortestNinther = 50
+		maxSwaps        = 4 * 3
+	)
+
+	l := b - a
+
+	var (
+		swaps int
+		i     = a + l/4*1
+		j     = a + l/4*2
+		k     = a + l/4*3
+	)
+
+	if l >= 8 {
+		if l >= shortestNinther {
+			// Tukey ninther method, the idea came from Rust's implementation.
+			i = medianAdjacentOrdered(data, i, &swaps)
+			j = medianAdjacentOrdered(data, j, &swaps)
+			k = medianAdjacentOrdered(data, k, &swaps)
+		}
+		// Find the median among i, j, k and store it in j.
+		j = medianOrdered(data, i, j, k, &swaps)
+	}
+
+	switch swaps {
+	case 0:
+		return j, increasingHint
+	case maxSwaps:
+		return j, decreasingHint
+	default:
+		return j, unknownHint
+	}
+}
+
+// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
+	if cmpLess(data[b], data[a]) {
+		*swaps++
+		return b, a
+	}
+	return a, b
+}
+
+// medianOrdered returns x where data[x] is the median of data[a], data[b], data[c], where x is a, b, or c.
+func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
+	a, b = order2Ordered(data, a, b, swaps)
+	b, c = order2Ordered(data, b, c, swaps)
+	a, b = order2Ordered(data, a, b, swaps)
+	return b
+}
+
+// medianAdjacentOrdered finds the median of data[a-1], data[a], data[a+1] and returns its index.
+func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
+	return medianOrdered(data, a-1, a, a+1, swaps)
+}
+
+func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
+	i := a
+	j := b - 1
+	for i < j {
+		data[i], data[j] = data[j], data[i]
+		i++
+		j--
+	}
+}
+
+func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
+	for i := 0; i < n; i++ {
+		data[a+i], data[b+i] = data[b+i], data[a+i]
+	}
+}
+
+func stableOrdered[E constraints.Ordered](data []E, n int) {
+	blockSize := 20 // must be > 0
+	a, b := 0, blockSize
+	for b <= n {
+		insertionSortOrdered(data, a, b)
+		a = b
+		b += blockSize
+	}
+	insertionSortOrdered(data, a, n)
+
+	for blockSize < n {
+		a, b = 0, 2*blockSize
+		for b <= n {
+			symMergeOrdered(data, a, a+blockSize, b)
+			a = b
+			b += 2 * blockSize
+		}
+		if m := a + blockSize; m < n {
+			symMergeOrdered(data, a, m, n)
+		}
+		blockSize *= 2
+	}
+}
+
+// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-m. Without loss of generality, assume M < N.
+// The recursion depth is bounded by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if cmpLess(data[h], data[a]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !cmpLess(data[m], data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !cmpLess(data[p-c], data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateOrdered(data, start, m, end) + } + if a < start && start < mid { + symMergeOrdered(data, a, start, mid) + } + if mid < end && end < b { + symMergeOrdered(data, mid, end, b) + } +} + +// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeOrdered(data, m-i, m, j) + i -= j + } else { + swapRangeOrdered(data, m-i, m+j-i, i) + j -= i + } + } + // i == j + swapRangeOrdered(data, m-i, m, i) +} diff --git a/vendor/golang.org/x/exp/slog/BUILD.bazel b/vendor/golang.org/x/exp/slog/BUILD.bazel new file mode 100644 index 00000000..1ee0f706 --- /dev/null +++ b/vendor/golang.org/x/exp/slog/BUILD.bazel @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "slog", + srcs = [ + "attr.go", + "doc.go", + "handler.go", + "json_handler.go", + "level.go", + "logger.go", + "record.go", + "text_handler.go", + "value.go", + "value_119.go", + "value_120.go", + ], + importmap = "peridot.resf.org/vendor/golang.org/x/exp/slog", + importpath = "golang.org/x/exp/slog", + visibility = ["//visibility:public"], + deps = [ + "//vendor/golang.org/x/exp/slices", + "//vendor/golang.org/x/exp/slog/internal", + "//vendor/golang.org/x/exp/slog/internal/buffer", + ], +) diff --git a/vendor/golang.org/x/exp/slog/attr.go b/vendor/golang.org/x/exp/slog/attr.go new file mode 100644 index 00000000..a180d0e1 --- /dev/null +++ b/vendor/golang.org/x/exp/slog/attr.go @@ -0,0 +1,102 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package slog + +import ( + "fmt" + "time" +) + +// An Attr is a key-value pair. +type Attr struct { + Key string + Value Value +} + +// String returns an Attr for a string value. +func String(key, value string) Attr { + return Attr{key, StringValue(value)} +} + +// Int64 returns an Attr for an int64. +func Int64(key string, value int64) Attr { + return Attr{key, Int64Value(value)} +} + +// Int converts an int to an int64 and returns +// an Attr with that value. +func Int(key string, value int) Attr { + return Int64(key, int64(value)) +} + +// Uint64 returns an Attr for a uint64. +func Uint64(key string, v uint64) Attr { + return Attr{key, Uint64Value(v)} +} + +// Float64 returns an Attr for a floating-point number. +func Float64(key string, v float64) Attr { + return Attr{key, Float64Value(v)} +} + +// Bool returns an Attr for a bool. +func Bool(key string, v bool) Attr { + return Attr{key, BoolValue(v)} +} + +// Time returns an Attr for a time.Time. +// It discards the monotonic portion. +func Time(key string, v time.Time) Attr { + return Attr{key, TimeValue(v)} +} + +// Duration returns an Attr for a time.Duration. +func Duration(key string, v time.Duration) Attr { + return Attr{key, DurationValue(v)} +} + +// Group returns an Attr for a Group Value. +// The first argument is the key; the remaining arguments +// are converted to Attrs as in [Logger.Log]. +// +// Use Group to collect several key-value pairs under a single +// key on a log line, or as the result of LogValue +// in order to log a single value as multiple Attrs. +func Group(key string, args ...any) Attr { + return Attr{key, GroupValue(argsToAttrSlice(args)...)} +} + +func argsToAttrSlice(args []any) []Attr { + var ( + attr Attr + attrs []Attr + ) + for len(args) > 0 { + attr, args = argsToAttr(args) + attrs = append(attrs, attr) + } + return attrs +} + +// Any returns an Attr for the supplied value. +// See [Value.AnyValue] for how values are treated. +func Any(key string, value any) Attr { + return Attr{key, AnyValue(value)} +} + +// Equal reports whether a and b have equal keys and values. +func (a Attr) Equal(b Attr) bool { + return a.Key == b.Key && a.Value.Equal(b.Value) +} + +func (a Attr) String() string { + return fmt.Sprintf("%s=%s", a.Key, a.Value) +} + +// isEmpty reports whether a has an empty key and a nil value. +// That can be written as Attr{} or Any("", nil). +func (a Attr) isEmpty() bool { + return a.Key == "" && a.Value.num == 0 && a.Value.any == nil +} diff --git a/vendor/golang.org/x/exp/slog/doc.go b/vendor/golang.org/x/exp/slog/doc.go new file mode 100644 index 00000000..4beaf867 --- /dev/null +++ b/vendor/golang.org/x/exp/slog/doc.go @@ -0,0 +1,316 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package slog provides structured logging, +in which log records include a message, +a severity level, and various other attributes +expressed as key-value pairs. + +It defines a type, [Logger], +which provides several methods (such as [Logger.Info] and [Logger.Error]) +for reporting events of interest. + +Each Logger is associated with a [Handler]. +A Logger output method creates a [Record] from the method arguments +and passes it to the Handler, which decides how to handle it. +There is a default Logger accessible through top-level functions +(such as [Info] and [Error]) that call the corresponding Logger methods. 
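+
+For example, these two calls produce the same record, the first through the
+default Logger implicitly and the second explicitly (the message and
+arguments are only illustrative):
+
+	slog.Error("something failed", "id", 42)
+	slog.Default().Error("something failed", "id", 42)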
+ +A log record consists of a time, a level, a message, and a set of key-value +pairs, where the keys are strings and the values may be of any type. +As an example, + + slog.Info("hello", "count", 3) + +creates a record containing the time of the call, +a level of Info, the message "hello", and a single +pair with key "count" and value 3. + +The [Info] top-level function calls the [Logger.Info] method on the default Logger. +In addition to [Logger.Info], there are methods for Debug, Warn and Error levels. +Besides these convenience methods for common levels, +there is also a [Logger.Log] method which takes the level as an argument. +Each of these methods has a corresponding top-level function that uses the +default logger. + +The default handler formats the log record's message, time, level, and attributes +as a string and passes it to the [log] package. + + 2022/11/08 15:28:26 INFO hello count=3 + +For more control over the output format, create a logger with a different handler. +This statement uses [New] to create a new logger with a TextHandler +that writes structured records in text form to standard error: + + logger := slog.New(slog.NewTextHandler(os.Stderr, nil)) + +[TextHandler] output is a sequence of key=value pairs, easily and unambiguously +parsed by machine. This statement: + + logger.Info("hello", "count", 3) + +produces this output: + + time=2022-11-08T15:28:26.000-05:00 level=INFO msg=hello count=3 + +The package also provides [JSONHandler], whose output is line-delimited JSON: + + logger := slog.New(slog.NewJSONHandler(os.Stdout, nil)) + logger.Info("hello", "count", 3) + +produces this output: + + {"time":"2022-11-08T15:28:26.000000000-05:00","level":"INFO","msg":"hello","count":3} + +Both [TextHandler] and [JSONHandler] can be configured with [HandlerOptions]. +There are options for setting the minimum level (see Levels, below), +displaying the source file and line of the log call, and +modifying attributes before they are logged. + +Setting a logger as the default with + + slog.SetDefault(logger) + +will cause the top-level functions like [Info] to use it. +[SetDefault] also updates the default logger used by the [log] package, +so that existing applications that use [log.Printf] and related functions +will send log records to the logger's handler without needing to be rewritten. + +Some attributes are common to many log calls. +For example, you may wish to include the URL or trace identifier of a server request +with all log events arising from the request. +Rather than repeat the attribute with every log call, you can use [Logger.With] +to construct a new Logger containing the attributes: + + logger2 := logger.With("url", r.URL) + +The arguments to With are the same key-value pairs used in [Logger.Info]. +The result is a new Logger with the same handler as the original, but additional +attributes that will appear in the output of every call. + +# Levels + +A [Level] is an integer representing the importance or severity of a log event. +The higher the level, the more severe the event. +This package defines constants for the most common levels, +but any int can be used as a level. + +In an application, you may wish to log messages only at a certain level or greater. +One common configuration is to log messages at Info or higher levels, +suppressing debug logging until it is needed. +The built-in handlers can be configured with the minimum level to output by +setting [HandlerOptions.Level]. +The program's `main` function typically does this. 
+The default value is LevelInfo. + +Setting the [HandlerOptions.Level] field to a [Level] value +fixes the handler's minimum level throughout its lifetime. +Setting it to a [LevelVar] allows the level to be varied dynamically. +A LevelVar holds a Level and is safe to read or write from multiple +goroutines. +To vary the level dynamically for an entire program, first initialize +a global LevelVar: + + var programLevel = new(slog.LevelVar) // Info by default + +Then use the LevelVar to construct a handler, and make it the default: + + h := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: programLevel}) + slog.SetDefault(slog.New(h)) + +Now the program can change its logging level with a single statement: + + programLevel.Set(slog.LevelDebug) + +# Groups + +Attributes can be collected into groups. +A group has a name that is used to qualify the names of its attributes. +How this qualification is displayed depends on the handler. +[TextHandler] separates the group and attribute names with a dot. +[JSONHandler] treats each group as a separate JSON object, with the group name as the key. + +Use [Group] to create a Group attribute from a name and a list of key-value pairs: + + slog.Group("request", + "method", r.Method, + "url", r.URL) + +TextHandler would display this group as + + request.method=GET request.url=http://example.com + +JSONHandler would display it as + + "request":{"method":"GET","url":"http://example.com"} + +Use [Logger.WithGroup] to qualify all of a Logger's output +with a group name. Calling WithGroup on a Logger results in a +new Logger with the same Handler as the original, but with all +its attributes qualified by the group name. + +This can help prevent duplicate attribute keys in large systems, +where subsystems might use the same keys. +Pass each subsystem a different Logger with its own group name so that +potential duplicates are qualified: + + logger := slog.Default().With("id", systemID) + parserLogger := logger.WithGroup("parser") + parseInput(input, parserLogger) + +When parseInput logs with parserLogger, its keys will be qualified with "parser", +so even if it uses the common key "id", the log line will have distinct keys. + +# Contexts + +Some handlers may wish to include information from the [context.Context] that is +available at the call site. One example of such information +is the identifier for the current span when tracing is enabled. + +The [Logger.Log] and [Logger.LogAttrs] methods take a context as a first +argument, as do their corresponding top-level functions. + +Although the convenience methods on Logger (Info and so on) and the +corresponding top-level functions do not take a context, the alternatives ending +in "Context" do. For example, + + slog.InfoContext(ctx, "message") + +It is recommended to pass a context to an output method if one is available. + +# Attrs and Values + +An [Attr] is a key-value pair. The Logger output methods accept Attrs as well as +alternating keys and values. The statement + + slog.Info("hello", slog.Int("count", 3)) + +behaves the same as + + slog.Info("hello", "count", 3) + +There are convenience constructors for [Attr] such as [Int], [String], and [Bool] +for common types, as well as the function [Any] for constructing Attrs of any +type. + +The value part of an Attr is a type called [Value]. +Like an [any], a Value can hold any Go value, +but it can represent typical values, including all numbers and strings, +without an allocation. + +For the most efficient log output, use [Logger.LogAttrs]. 
+It is similar to [Logger.Log] but accepts only Attrs, not alternating
+keys and values; this allows it, too, to avoid allocation.
+
+The call
+
+	logger.LogAttrs(nil, slog.LevelInfo, "hello", slog.Int("count", 3))
+
+is the most efficient way to achieve the same output as
+
+	slog.Info("hello", "count", 3)
+
+# Customizing a type's logging behavior
+
+If a type implements the [LogValuer] interface, the [Value] returned from its LogValue
+method is used for logging. You can use this to control how values of the type
+appear in logs. For example, you can redact secret information like passwords,
+or gather a struct's fields in a Group. See the examples under [LogValuer] for
+details.
+
+A LogValue method may return a Value that itself implements [LogValuer]. The [Value.Resolve]
+method handles these cases carefully, avoiding infinite loops and unbounded recursion.
+Handler authors and others may wish to use Value.Resolve instead of calling LogValue directly.
+
+# Wrapping output methods
+
+The logger functions use reflection over the call stack to find the file name
+and line number of the logging call within the application. This can produce
+incorrect source information for functions that wrap slog. For instance, if you
+define this function in file mylog.go:
+
+	func Infof(format string, args ...any) {
+		slog.Default().Info(fmt.Sprintf(format, args...))
+	}
+
+and you call it like this in main.go:
+
+	Infof("hello, %s", "world")
+
+then slog will report the source file as mylog.go, not main.go.
+
+A correct implementation of Infof will obtain the source location
+(pc) and pass it to NewRecord.
+The Infof function in the package-level example called "wrapping"
+demonstrates how to do this.
+
+# Working with Records
+
+Sometimes a Handler will need to modify a Record
+before passing it on to another Handler or backend.
+A Record contains a mixture of simple public fields (e.g. Time, Level, Message)
+and hidden fields that refer to state (such as attributes) indirectly. This
+means that modifying a simple copy of a Record (e.g. by calling
+[Record.Add] or [Record.AddAttrs] to add attributes)
+may have unexpected effects on the original.
+Before modifying a Record, use [Record.Clone] to
+create a copy that shares no state with the original,
+or create a new Record with [NewRecord]
+and build up its Attrs by traversing the old ones with [Record.Attrs].
+
+# Performance considerations
+
+If profiling your application demonstrates that logging is taking significant time,
+the following suggestions may help.
+
+If many log lines have a common attribute, use [Logger.With] to create a Logger with
+that attribute. The built-in handlers will format that attribute only once, at the
+call to [Logger.With]. The [Handler] interface is designed to allow that optimization,
+and a well-written Handler should take advantage of it.
+
+The arguments to a log call are always evaluated, even if the log event is discarded.
+If possible, defer computation so that it happens only if the value is actually logged.
+For example, consider the call
+
+	slog.Info("starting request", "url", r.URL.String()) // may compute String unnecessarily
+
+The URL.String method will be called even if the logger discards Info-level events.
+Instead, pass the URL directly:
+
+	slog.Info("starting request", "url", &r.URL) // calls URL.String only if needed
+
+The built-in [TextHandler] will call its String method, but only
+if the log event is enabled.
+Avoiding the call to String also preserves the structure of the underlying value.
+For example [JSONHandler] emits the components of the parsed URL as a JSON object.
+If you want to avoid eagerly paying the cost of the String call
+without causing the handler to potentially inspect the structure of the value,
+wrap the value in a fmt.Stringer implementation that hides its Marshal methods.
+
+You can also use the [LogValuer] interface to avoid unnecessary work in disabled log
+calls. Say you need to log some expensive value:
+
+	slog.Debug("frobbing", "value", computeExpensiveValue(arg))
+
+Even if this line is disabled, computeExpensiveValue will be called.
+To avoid that, define a type implementing LogValuer:
+
+	type expensive struct { arg int }
+
+	func (e expensive) LogValue() slog.Value {
+		return slog.AnyValue(computeExpensiveValue(e.arg))
+	}
+
+Then use a value of that type in log calls:
+
+	slog.Debug("frobbing", "value", expensive{arg})
+
+Now computeExpensiveValue will only be called when the line is enabled.
+
+The built-in handlers acquire a lock before calling [io.Writer.Write]
+to ensure that each record is written in one piece. User-defined
+handlers are responsible for their own locking.
+*/
+package slog
diff --git a/vendor/golang.org/x/exp/slog/handler.go b/vendor/golang.org/x/exp/slog/handler.go
new file mode 100644
index 00000000..bd635cb8
--- /dev/null
+++ b/vendor/golang.org/x/exp/slog/handler.go
@@ -0,0 +1,577 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slog
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+	"sync"
+	"time"
+
+	"golang.org/x/exp/slices"
+	"golang.org/x/exp/slog/internal/buffer"
+)
+
+// A Handler handles log records produced by a Logger.
+//
+// A typical handler may print log records to standard error,
+// or write them to a file or database, or perhaps augment them
+// with additional attributes and pass them on to another handler.
+//
+// Any of the Handler's methods may be called concurrently with itself
+// or with other methods. It is the responsibility of the Handler to
+// manage this concurrency.
+//
+// Users of the slog package should not invoke Handler methods directly.
+// They should use the methods of [Logger] instead.
type Handler interface {
+	// Enabled reports whether the handler handles records at the given level.
+	// The handler ignores records whose level is lower.
+	// It is called early, before any arguments are processed,
+	// to save effort if the log event should be discarded.
+	// If called from a Logger method, the first argument is the context
+	// passed to that method, or context.Background() if nil was passed
+	// or the method does not take a context.
+	// The context is passed so Enabled can use its values
+	// to make a decision.
+	Enabled(context.Context, Level) bool
+
+	// Handle handles the Record.
+	// It will only be called when Enabled returns true.
+	// The Context argument is as for Enabled.
+	// It is present solely to provide Handlers access to the context's values.
+	// Canceling the context should not affect record processing.
+	// (Among other things, log messages may be necessary to debug a
+	// cancellation-related problem.)
+	//
+	// Handle methods that produce output should observe the following rules:
+	//   - If r.Time is the zero time, ignore the time.
+	//   - If r.PC is zero, ignore it.
+	//   - Attr's values should be resolved.
+ // - If an Attr's key and value are both the zero value, ignore the Attr. + // This can be tested with attr.Equal(Attr{}). + // - If a group's key is empty, inline the group's Attrs. + // - If a group has no Attrs (even if it has a non-empty key), + // ignore it. + Handle(context.Context, Record) error + + // WithAttrs returns a new Handler whose attributes consist of + // both the receiver's attributes and the arguments. + // The Handler owns the slice: it may retain, modify or discard it. + WithAttrs(attrs []Attr) Handler + + // WithGroup returns a new Handler with the given group appended to + // the receiver's existing groups. + // The keys of all subsequent attributes, whether added by With or in a + // Record, should be qualified by the sequence of group names. + // + // How this qualification happens is up to the Handler, so long as + // this Handler's attribute keys differ from those of another Handler + // with a different sequence of group names. + // + // A Handler should treat WithGroup as starting a Group of Attrs that ends + // at the end of the log event. That is, + // + // logger.WithGroup("s").LogAttrs(level, msg, slog.Int("a", 1), slog.Int("b", 2)) + // + // should behave like + // + // logger.LogAttrs(level, msg, slog.Group("s", slog.Int("a", 1), slog.Int("b", 2))) + // + // If the name is empty, WithGroup returns the receiver. + WithGroup(name string) Handler +} + +type defaultHandler struct { + ch *commonHandler + // log.Output, except for testing + output func(calldepth int, message string) error +} + +func newDefaultHandler(output func(int, string) error) *defaultHandler { + return &defaultHandler{ + ch: &commonHandler{json: false}, + output: output, + } +} + +func (*defaultHandler) Enabled(_ context.Context, l Level) bool { + return l >= LevelInfo +} + +// Collect the level, attributes and message in a string and +// write it with the default log.Logger. +// Let the log.Logger handle time and file/line. +func (h *defaultHandler) Handle(ctx context.Context, r Record) error { + buf := buffer.New() + buf.WriteString(r.Level.String()) + buf.WriteByte(' ') + buf.WriteString(r.Message) + state := h.ch.newHandleState(buf, true, " ", nil) + defer state.free() + state.appendNonBuiltIns(r) + + // skip [h.output, defaultHandler.Handle, handlerWriter.Write, log.Output] + return h.output(4, buf.String()) +} + +func (h *defaultHandler) WithAttrs(as []Attr) Handler { + return &defaultHandler{h.ch.withAttrs(as), h.output} +} + +func (h *defaultHandler) WithGroup(name string) Handler { + return &defaultHandler{h.ch.withGroup(name), h.output} +} + +// HandlerOptions are options for a TextHandler or JSONHandler. +// A zero HandlerOptions consists entirely of default values. +type HandlerOptions struct { + // AddSource causes the handler to compute the source code position + // of the log statement and add a SourceKey attribute to the output. + AddSource bool + + // Level reports the minimum record level that will be logged. + // The handler discards records with lower levels. + // If Level is nil, the handler assumes LevelInfo. + // The handler calls Level.Level for each record processed; + // to adjust the minimum level dynamically, use a LevelVar. + Level Leveler + + // ReplaceAttr is called to rewrite each non-group attribute before it is logged. + // The attribute's value has been resolved (see [Value.Resolve]). + // If ReplaceAttr returns an Attr with Key == "", the attribute is discarded. 
+ // + // The built-in attributes with keys "time", "level", "source", and "msg" + // are passed to this function, except that time is omitted + // if zero, and source is omitted if AddSource is false. + // + // The first argument is a list of currently open groups that contain the + // Attr. It must not be retained or modified. ReplaceAttr is never called + // for Group attributes, only their contents. For example, the attribute + // list + // + // Int("a", 1), Group("g", Int("b", 2)), Int("c", 3) + // + // results in consecutive calls to ReplaceAttr with the following arguments: + // + // nil, Int("a", 1) + // []string{"g"}, Int("b", 2) + // nil, Int("c", 3) + // + // ReplaceAttr can be used to change the default keys of the built-in + // attributes, convert types (for example, to replace a `time.Time` with the + // integer seconds since the Unix epoch), sanitize personal information, or + // remove attributes from the output. + ReplaceAttr func(groups []string, a Attr) Attr +} + +// Keys for "built-in" attributes. +const ( + // TimeKey is the key used by the built-in handlers for the time + // when the log method is called. The associated Value is a [time.Time]. + TimeKey = "time" + // LevelKey is the key used by the built-in handlers for the level + // of the log call. The associated value is a [Level]. + LevelKey = "level" + // MessageKey is the key used by the built-in handlers for the + // message of the log call. The associated value is a string. + MessageKey = "msg" + // SourceKey is the key used by the built-in handlers for the source file + // and line of the log call. The associated value is a string. + SourceKey = "source" +) + +type commonHandler struct { + json bool // true => output JSON; false => output text + opts HandlerOptions + preformattedAttrs []byte + groupPrefix string // for text: prefix of groups opened in preformatting + groups []string // all groups started from WithGroup + nOpenGroups int // the number of groups opened in preformattedAttrs + mu sync.Mutex + w io.Writer +} + +func (h *commonHandler) clone() *commonHandler { + // We can't use assignment because we can't copy the mutex. + return &commonHandler{ + json: h.json, + opts: h.opts, + preformattedAttrs: slices.Clip(h.preformattedAttrs), + groupPrefix: h.groupPrefix, + groups: slices.Clip(h.groups), + nOpenGroups: h.nOpenGroups, + w: h.w, + } +} + +// enabled reports whether l is greater than or equal to the +// minimum level. +func (h *commonHandler) enabled(l Level) bool { + minLevel := LevelInfo + if h.opts.Level != nil { + minLevel = h.opts.Level.Level() + } + return l >= minLevel +} + +func (h *commonHandler) withAttrs(as []Attr) *commonHandler { + h2 := h.clone() + // Pre-format the attributes as an optimization. + prefix := buffer.New() + defer prefix.Free() + prefix.WriteString(h.groupPrefix) + state := h2.newHandleState((*buffer.Buffer)(&h2.preformattedAttrs), false, "", prefix) + defer state.free() + if len(h2.preformattedAttrs) > 0 { + state.sep = h.attrSep() + } + state.openGroups() + for _, a := range as { + state.appendAttr(a) + } + // Remember the new prefix for later keys. + h2.groupPrefix = state.prefix.String() + // Remember how many opened groups are in preformattedAttrs, + // so we don't open them again when we handle a Record. 
+ h2.nOpenGroups = len(h2.groups) + return h2 +} + +func (h *commonHandler) withGroup(name string) *commonHandler { + if name == "" { + return h + } + h2 := h.clone() + h2.groups = append(h2.groups, name) + return h2 +} + +func (h *commonHandler) handle(r Record) error { + state := h.newHandleState(buffer.New(), true, "", nil) + defer state.free() + if h.json { + state.buf.WriteByte('{') + } + // Built-in attributes. They are not in a group. + stateGroups := state.groups + state.groups = nil // So ReplaceAttrs sees no groups instead of the pre groups. + rep := h.opts.ReplaceAttr + // time + if !r.Time.IsZero() { + key := TimeKey + val := r.Time.Round(0) // strip monotonic to match Attr behavior + if rep == nil { + state.appendKey(key) + state.appendTime(val) + } else { + state.appendAttr(Time(key, val)) + } + } + // level + key := LevelKey + val := r.Level + if rep == nil { + state.appendKey(key) + state.appendString(val.String()) + } else { + state.appendAttr(Any(key, val)) + } + // source + if h.opts.AddSource { + state.appendAttr(Any(SourceKey, r.source())) + } + key = MessageKey + msg := r.Message + if rep == nil { + state.appendKey(key) + state.appendString(msg) + } else { + state.appendAttr(String(key, msg)) + } + state.groups = stateGroups // Restore groups passed to ReplaceAttrs. + state.appendNonBuiltIns(r) + state.buf.WriteByte('\n') + + h.mu.Lock() + defer h.mu.Unlock() + _, err := h.w.Write(*state.buf) + return err +} + +func (s *handleState) appendNonBuiltIns(r Record) { + // preformatted Attrs + if len(s.h.preformattedAttrs) > 0 { + s.buf.WriteString(s.sep) + s.buf.Write(s.h.preformattedAttrs) + s.sep = s.h.attrSep() + } + // Attrs in Record -- unlike the built-in ones, they are in groups started + // from WithGroup. + s.prefix = buffer.New() + defer s.prefix.Free() + s.prefix.WriteString(s.h.groupPrefix) + s.openGroups() + r.Attrs(func(a Attr) bool { + s.appendAttr(a) + return true + }) + if s.h.json { + // Close all open groups. + for range s.h.groups { + s.buf.WriteByte('}') + } + // Close the top-level object. + s.buf.WriteByte('}') + } +} + +// attrSep returns the separator between attributes. +func (h *commonHandler) attrSep() string { + if h.json { + return "," + } + return " " +} + +// handleState holds state for a single call to commonHandler.handle. +// The initial value of sep determines whether to emit a separator +// before the next key, after which it stays true. +type handleState struct { + h *commonHandler + buf *buffer.Buffer + freeBuf bool // should buf be freed? + sep string // separator to write before next key + prefix *buffer.Buffer // for text: key prefix + groups *[]string // pool-allocated slice of active groups, for ReplaceAttr +} + +var groupPool = sync.Pool{New: func() any { + s := make([]string, 0, 10) + return &s +}} + +func (h *commonHandler) newHandleState(buf *buffer.Buffer, freeBuf bool, sep string, prefix *buffer.Buffer) handleState { + s := handleState{ + h: h, + buf: buf, + freeBuf: freeBuf, + sep: sep, + prefix: prefix, + } + if h.opts.ReplaceAttr != nil { + s.groups = groupPool.Get().(*[]string) + *s.groups = append(*s.groups, h.groups[:h.nOpenGroups]...) + } + return s +} + +func (s *handleState) free() { + if s.freeBuf { + s.buf.Free() + } + if gs := s.groups; gs != nil { + *gs = (*gs)[:0] + groupPool.Put(gs) + } +} + +func (s *handleState) openGroups() { + for _, n := range s.h.groups[s.h.nOpenGroups:] { + s.openGroup(n) + } +} + +// Separator for group names and keys. +const keyComponentSep = '.' 
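+
+// For example, with an open group "request" a text handler emits keys like
+// request.method (components joined by keyComponentSep), while a JSON
+// handler nests objects instead: {"request":{"method":...}}.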
+
+// openGroup starts a new group of attributes
+// with the given name.
+func (s *handleState) openGroup(name string) {
+	if s.h.json {
+		s.appendKey(name)
+		s.buf.WriteByte('{')
+		s.sep = ""
+	} else {
+		s.prefix.WriteString(name)
+		s.prefix.WriteByte(keyComponentSep)
+	}
+	// Collect group names for ReplaceAttr.
+	if s.groups != nil {
+		*s.groups = append(*s.groups, name)
+	}
+}
+
+// closeGroup ends the group with the given name.
+func (s *handleState) closeGroup(name string) {
+	if s.h.json {
+		s.buf.WriteByte('}')
+	} else {
+		(*s.prefix) = (*s.prefix)[:len(*s.prefix)-len(name)-1 /* for keyComponentSep */]
+	}
+	s.sep = s.h.attrSep()
+	if s.groups != nil {
+		*s.groups = (*s.groups)[:len(*s.groups)-1]
+	}
+}
+
+// appendAttr appends the Attr's key and value.
+// It handles replacement and checking for an empty key
+// (before and after replacement).
+func (s *handleState) appendAttr(a Attr) {
+	if rep := s.h.opts.ReplaceAttr; rep != nil && a.Value.Kind() != KindGroup {
+		var gs []string
+		if s.groups != nil {
+			gs = *s.groups
+		}
+		// Resolve before calling ReplaceAttr, so the user doesn't have to.
+		a.Value = a.Value.Resolve()
+		a = rep(gs, a)
+	}
+	a.Value = a.Value.Resolve()
+	// Elide empty Attrs.
+	if a.isEmpty() {
+		return
+	}
+	// Special case: Source.
+	if v := a.Value; v.Kind() == KindAny {
+		if src, ok := v.Any().(*Source); ok {
+			if s.h.json {
+				a.Value = src.group()
+			} else {
+				a.Value = StringValue(fmt.Sprintf("%s:%d", src.File, src.Line))
+			}
+		}
+	}
+	if a.Value.Kind() == KindGroup {
+		attrs := a.Value.Group()
+		// Output only non-empty groups.
+		if len(attrs) > 0 {
+			// Inline a group with an empty key.
+			if a.Key != "" {
+				s.openGroup(a.Key)
+			}
+			for _, aa := range attrs {
+				s.appendAttr(aa)
+			}
+			if a.Key != "" {
+				s.closeGroup(a.Key)
+			}
+		}
+	} else {
+		s.appendKey(a.Key)
+		s.appendValue(a.Value)
+	}
+}
+
+func (s *handleState) appendError(err error) {
+	s.appendString(fmt.Sprintf("!ERROR:%v", err))
+}
+
+func (s *handleState) appendKey(key string) {
+	s.buf.WriteString(s.sep)
+	if s.prefix != nil {
+		// TODO: optimize by avoiding allocation.
+		s.appendString(string(*s.prefix) + key)
+	} else {
+		s.appendString(key)
+	}
+	if s.h.json {
+		s.buf.WriteByte(':')
+	} else {
+		s.buf.WriteByte('=')
+	}
+	s.sep = s.h.attrSep()
+}
+
+func (s *handleState) appendString(str string) {
+	if s.h.json {
+		s.buf.WriteByte('"')
+		*s.buf = appendEscapedJSONString(*s.buf, str)
+		s.buf.WriteByte('"')
+	} else {
+		// text
+		if needsQuoting(str) {
+			*s.buf = strconv.AppendQuote(*s.buf, str)
+		} else {
+			s.buf.WriteString(str)
+		}
+	}
+}
+
+func (s *handleState) appendValue(v Value) {
+	defer func() {
+		if r := recover(); r != nil {
+			// If it panics with a nil pointer, the most likely cases are
+			// an encoding.TextMarshaler or error fails to guard against nil,
+			// in which case "<nil>" seems to be the feasible choice.
+			//
+			// Adapted from the code in fmt/print.go.
+			if v := reflect.ValueOf(v.any); v.Kind() == reflect.Pointer && v.IsNil() {
+				s.appendString("<nil>")
+				return
+			}
+
+			// Otherwise just print the original panic message.
+			s.appendString(fmt.Sprintf("!PANIC: %v", r))
+		}
+	}()
+
+	var err error
+	if s.h.json {
+		err = appendJSONValue(s, v)
+	} else {
+		err = appendTextValue(s, v)
+	}
+	if err != nil {
+		s.appendError(err)
+	}
+}
+
+func (s *handleState) appendTime(t time.Time) {
+	if s.h.json {
+		appendJSONTime(s, t)
+	} else {
+		writeTimeRFC3339Millis(s.buf, t)
+	}
+}
+
+// This takes half the time of Time.AppendFormat.
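+// It produces timestamps like 2022-11-08T15:28:26.000-05:00, i.e.
+// RFC 3339 truncated to millisecond precision.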
+func writeTimeRFC3339Millis(buf *buffer.Buffer, t time.Time) { + year, month, day := t.Date() + buf.WritePosIntWidth(year, 4) + buf.WriteByte('-') + buf.WritePosIntWidth(int(month), 2) + buf.WriteByte('-') + buf.WritePosIntWidth(day, 2) + buf.WriteByte('T') + hour, min, sec := t.Clock() + buf.WritePosIntWidth(hour, 2) + buf.WriteByte(':') + buf.WritePosIntWidth(min, 2) + buf.WriteByte(':') + buf.WritePosIntWidth(sec, 2) + ns := t.Nanosecond() + buf.WriteByte('.') + buf.WritePosIntWidth(ns/1e6, 3) + _, offsetSeconds := t.Zone() + if offsetSeconds == 0 { + buf.WriteByte('Z') + } else { + offsetMinutes := offsetSeconds / 60 + if offsetMinutes < 0 { + buf.WriteByte('-') + offsetMinutes = -offsetMinutes + } else { + buf.WriteByte('+') + } + buf.WritePosIntWidth(offsetMinutes/60, 2) + buf.WriteByte(':') + buf.WritePosIntWidth(offsetMinutes%60, 2) + } +} diff --git a/vendor/golang.org/x/exp/slog/internal/BUILD.bazel b/vendor/golang.org/x/exp/slog/internal/BUILD.bazel new file mode 100644 index 00000000..eef695f5 --- /dev/null +++ b/vendor/golang.org/x/exp/slog/internal/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "internal", + srcs = ["ignorepc.go"], + importmap = "peridot.resf.org/vendor/golang.org/x/exp/slog/internal", + importpath = "golang.org/x/exp/slog/internal", + visibility = ["//vendor/golang.org/x/exp/slog:__subpackages__"], +) diff --git a/vendor/golang.org/x/exp/slog/internal/buffer/BUILD.bazel b/vendor/golang.org/x/exp/slog/internal/buffer/BUILD.bazel new file mode 100644 index 00000000..a19c31b9 --- /dev/null +++ b/vendor/golang.org/x/exp/slog/internal/buffer/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "buffer", + srcs = ["buffer.go"], + importmap = "peridot.resf.org/vendor/golang.org/x/exp/slog/internal/buffer", + importpath = "golang.org/x/exp/slog/internal/buffer", + visibility = ["//vendor/golang.org/x/exp/slog:__subpackages__"], +) diff --git a/vendor/golang.org/x/exp/slog/internal/buffer/buffer.go b/vendor/golang.org/x/exp/slog/internal/buffer/buffer.go new file mode 100644 index 00000000..7786c166 --- /dev/null +++ b/vendor/golang.org/x/exp/slog/internal/buffer/buffer.go @@ -0,0 +1,84 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package buffer provides a pool-allocated byte buffer. +package buffer + +import ( + "sync" +) + +// Buffer adapted from go/src/fmt/print.go +type Buffer []byte + +// Having an initial size gives a dramatic speedup. +var bufPool = sync.Pool{ + New: func() any { + b := make([]byte, 0, 1024) + return (*Buffer)(&b) + }, +} + +func New() *Buffer { + return bufPool.Get().(*Buffer) +} + +func (b *Buffer) Free() { + // To reduce peak allocation, return only smaller buffers to the pool. + const maxBufferSize = 16 << 10 + if cap(*b) <= maxBufferSize { + *b = (*b)[:0] + bufPool.Put(b) + } +} + +func (b *Buffer) Reset() { + *b = (*b)[:0] +} + +func (b *Buffer) Write(p []byte) (int, error) { + *b = append(*b, p...) + return len(p), nil +} + +func (b *Buffer) WriteString(s string) { + *b = append(*b, s...) +} + +func (b *Buffer) WriteByte(c byte) { + *b = append(*b, c) +} + +func (b *Buffer) WritePosInt(i int) { + b.WritePosIntWidth(i, 0) +} + +// WritePosIntWidth writes non-negative integer i to the buffer, padded on the left +// by zeroes to the given width. Use a width of 0 to omit padding. 
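+// For example, WritePosIntWidth(3, 4) writes "0003", and a width of 0
+// writes just "3".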
+func (b *Buffer) WritePosIntWidth(i, width int) { + // Cheap integer to fixed-width decimal ASCII. + // Copied from log/log.go. + + if i < 0 { + panic("negative int") + } + + // Assemble decimal in reverse order. + var bb [20]byte + bp := len(bb) - 1 + for i >= 10 || width > 1 { + width-- + q := i / 10 + bb[bp] = byte('0' + i - q*10) + bp-- + i = q + } + // i < 10 + bb[bp] = byte('0' + i) + b.Write(bb[bp:]) +} + +func (b *Buffer) String() string { + return string(*b) +} diff --git a/vendor/golang.org/x/exp/slog/internal/ignorepc.go b/vendor/golang.org/x/exp/slog/internal/ignorepc.go new file mode 100644 index 00000000..d1256426 --- /dev/null +++ b/vendor/golang.org/x/exp/slog/internal/ignorepc.go @@ -0,0 +1,9 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +// If IgnorePC is true, do not invoke runtime.Callers to get the pc. +// This is solely for benchmarking the slowdown from runtime.Callers. +var IgnorePC = false diff --git a/vendor/golang.org/x/exp/slog/json_handler.go b/vendor/golang.org/x/exp/slog/json_handler.go new file mode 100644 index 00000000..157ada86 --- /dev/null +++ b/vendor/golang.org/x/exp/slog/json_handler.go @@ -0,0 +1,336 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slog + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "time" + "unicode/utf8" + + "golang.org/x/exp/slog/internal/buffer" +) + +// JSONHandler is a Handler that writes Records to an io.Writer as +// line-delimited JSON objects. +type JSONHandler struct { + *commonHandler +} + +// NewJSONHandler creates a JSONHandler that writes to w, +// using the given options. +// If opts is nil, the default options are used. +func NewJSONHandler(w io.Writer, opts *HandlerOptions) *JSONHandler { + if opts == nil { + opts = &HandlerOptions{} + } + return &JSONHandler{ + &commonHandler{ + json: true, + w: w, + opts: *opts, + }, + } +} + +// Enabled reports whether the handler handles records at the given level. +// The handler ignores records whose level is lower. +func (h *JSONHandler) Enabled(_ context.Context, level Level) bool { + return h.commonHandler.enabled(level) +} + +// WithAttrs returns a new JSONHandler whose attributes consists +// of h's attributes followed by attrs. +func (h *JSONHandler) WithAttrs(attrs []Attr) Handler { + return &JSONHandler{commonHandler: h.commonHandler.withAttrs(attrs)} +} + +func (h *JSONHandler) WithGroup(name string) Handler { + return &JSONHandler{commonHandler: h.commonHandler.withGroup(name)} +} + +// Handle formats its argument Record as a JSON object on a single line. +// +// If the Record's time is zero, the time is omitted. +// Otherwise, the key is "time" +// and the value is output as with json.Marshal. +// +// If the Record's level is zero, the level is omitted. +// Otherwise, the key is "level" +// and the value of [Level.String] is output. +// +// If the AddSource option is set and source information is available, +// the key is "source" +// and the value is output as "FILE:LINE". +// +// The message's key is "msg". +// +// To modify these or other attributes, or remove them from the output, use +// [HandlerOptions.ReplaceAttr]. +// +// Values are formatted as with an [encoding/json.Encoder] with SetEscapeHTML(false), +// with two exceptions. 
+// +// First, an Attr whose Value is of type error is formatted as a string, by +// calling its Error method. Only errors in Attrs receive this special treatment, +// not errors embedded in structs, slices, maps or other data structures that +// are processed by the encoding/json package. +// +// Second, an encoding failure does not cause Handle to return an error. +// Instead, the error message is formatted as a string. +// +// Each call to Handle results in a single serialized call to io.Writer.Write. +func (h *JSONHandler) Handle(_ context.Context, r Record) error { + return h.commonHandler.handle(r) +} + +// Adapted from time.Time.MarshalJSON to avoid allocation. +func appendJSONTime(s *handleState, t time.Time) { + if y := t.Year(); y < 0 || y >= 10000 { + // RFC 3339 is clear that years are 4 digits exactly. + // See golang.org/issue/4556#c15 for more discussion. + s.appendError(errors.New("time.Time year outside of range [0,9999]")) + } + s.buf.WriteByte('"') + *s.buf = t.AppendFormat(*s.buf, time.RFC3339Nano) + s.buf.WriteByte('"') +} + +func appendJSONValue(s *handleState, v Value) error { + switch v.Kind() { + case KindString: + s.appendString(v.str()) + case KindInt64: + *s.buf = strconv.AppendInt(*s.buf, v.Int64(), 10) + case KindUint64: + *s.buf = strconv.AppendUint(*s.buf, v.Uint64(), 10) + case KindFloat64: + // json.Marshal is funny about floats; it doesn't + // always match strconv.AppendFloat. So just call it. + // That's expensive, but floats are rare. + if err := appendJSONMarshal(s.buf, v.Float64()); err != nil { + return err + } + case KindBool: + *s.buf = strconv.AppendBool(*s.buf, v.Bool()) + case KindDuration: + // Do what json.Marshal does. + *s.buf = strconv.AppendInt(*s.buf, int64(v.Duration()), 10) + case KindTime: + s.appendTime(v.Time()) + case KindAny: + a := v.Any() + _, jm := a.(json.Marshaler) + if err, ok := a.(error); ok && !jm { + s.appendString(err.Error()) + } else { + return appendJSONMarshal(s.buf, a) + } + default: + panic(fmt.Sprintf("bad kind: %s", v.Kind())) + } + return nil +} + +func appendJSONMarshal(buf *buffer.Buffer, v any) error { + // Use a json.Encoder to avoid escaping HTML. + var bb bytes.Buffer + enc := json.NewEncoder(&bb) + enc.SetEscapeHTML(false) + if err := enc.Encode(v); err != nil { + return err + } + bs := bb.Bytes() + buf.Write(bs[:len(bs)-1]) // remove final newline + return nil +} + +// appendEscapedJSONString escapes s for JSON and appends it to buf. +// It does not surround the string in quotation marks. +// +// Modified from encoding/json/encode.go:encodeState.string, +// with escapeHTML set to false. +func appendEscapedJSONString(buf []byte, s string) []byte { + char := func(b byte) { buf = append(buf, b) } + str := func(s string) { buf = append(buf, s...) } + + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if safeSet[b] { + i++ + continue + } + if start < i { + str(s[start:i]) + } + char('\\') + switch b { + case '\\', '"': + char(b) + case '\n': + char('n') + case '\r': + char('r') + case '\t': + char('t') + default: + // This encodes bytes < 0x20 except for \t, \n and \r. + str(`u00`) + char(hex[b>>4]) + char(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + str(s[start:i]) + } + str(`\ufffd`) + i += size + start = i + continue + } + // U+2028 is LINE SEPARATOR. + // U+2029 is PARAGRAPH SEPARATOR. 
+ // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. + // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. + if c == '\u2028' || c == '\u2029' { + if start < i { + str(s[start:i]) + } + str(`\u202`) + char(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + str(s[start:]) + } + return buf +} + +var hex = "0123456789abcdef" + +// Copied from encoding/json/tables.go. +// +// safeSet holds the value true if the ASCII character with the given array +// position can be represented inside a JSON string without any further +// escaping. +// +// All values are true except for the ASCII control characters (0-31), the +// double quote ("), and the backslash character ("\"). +var safeSet = [utf8.RuneSelf]bool{ + ' ': true, + '!': true, + '"': false, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '(': true, + ')': true, + '*': true, + '+': true, + ',': true, + '-': true, + '.': true, + '/': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + ':': true, + ';': true, + '<': true, + '=': true, + '>': true, + '?': true, + '@': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'V': true, + 'W': true, + 'X': true, + 'Y': true, + 'Z': true, + '[': true, + '\\': false, + ']': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '{': true, + '|': true, + '}': true, + '~': true, + '\u007f': true, +} diff --git a/vendor/golang.org/x/exp/slog/level.go b/vendor/golang.org/x/exp/slog/level.go new file mode 100644 index 00000000..b2365f0a --- /dev/null +++ b/vendor/golang.org/x/exp/slog/level.go @@ -0,0 +1,201 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slog + +import ( + "errors" + "fmt" + "strconv" + "strings" + "sync/atomic" +) + +// A Level is the importance or severity of a log event. +// The higher the level, the more important or severe the event. +type Level int + +// Level numbers are inherently arbitrary, +// but we picked them to satisfy three constraints. +// Any system can map them to another numbering scheme if it wishes. +// +// First, we wanted the default level to be Info, Since Levels are ints, Info is +// the default value for int, zero. +// + +// Second, we wanted to make it easy to use levels to specify logger verbosity. +// Since a larger level means a more severe event, a logger that accepts events +// with smaller (or more negative) level means a more verbose logger. Logger +// verbosity is thus the negation of event severity, and the default verbosity +// of 0 accepts all events at least as severe as INFO. 
+// +// Third, we wanted some room between levels to accommodate schemes with named +// levels between ours. For example, Google Cloud Logging defines a Notice level +// between Info and Warn. Since there are only a few of these intermediate +// levels, the gap between the numbers need not be large. Our gap of 4 matches +// OpenTelemetry's mapping. Subtracting 9 from an OpenTelemetry level in the +// DEBUG, INFO, WARN and ERROR ranges converts it to the corresponding slog +// Level range. OpenTelemetry also has the names TRACE and FATAL, which slog +// does not. But those OpenTelemetry levels can still be represented as slog +// Levels by using the appropriate integers. +// +// Names for common levels. +const ( + LevelDebug Level = -4 + LevelInfo Level = 0 + LevelWarn Level = 4 + LevelError Level = 8 +) + +// String returns a name for the level. +// If the level has a name, then that name +// in uppercase is returned. +// If the level is between named values, then +// an integer is appended to the uppercased name. +// Examples: +// +// LevelWarn.String() => "WARN" +// (LevelInfo+2).String() => "INFO+2" +func (l Level) String() string { + str := func(base string, val Level) string { + if val == 0 { + return base + } + return fmt.Sprintf("%s%+d", base, val) + } + + switch { + case l < LevelInfo: + return str("DEBUG", l-LevelDebug) + case l < LevelWarn: + return str("INFO", l-LevelInfo) + case l < LevelError: + return str("WARN", l-LevelWarn) + default: + return str("ERROR", l-LevelError) + } +} + +// MarshalJSON implements [encoding/json.Marshaler] +// by quoting the output of [Level.String]. +func (l Level) MarshalJSON() ([]byte, error) { + // AppendQuote is sufficient for JSON-encoding all Level strings. + // They don't contain any runes that would produce invalid JSON + // when escaped. + return strconv.AppendQuote(nil, l.String()), nil +} + +// UnmarshalJSON implements [encoding/json.Unmarshaler] +// It accepts any string produced by [Level.MarshalJSON], +// ignoring case. +// It also accepts numeric offsets that would result in a different string on +// output. For example, "Error-8" would marshal as "INFO". +func (l *Level) UnmarshalJSON(data []byte) error { + s, err := strconv.Unquote(string(data)) + if err != nil { + return err + } + return l.parse(s) +} + +// MarshalText implements [encoding.TextMarshaler] +// by calling [Level.String]. +func (l Level) MarshalText() ([]byte, error) { + return []byte(l.String()), nil +} + +// UnmarshalText implements [encoding.TextUnmarshaler]. +// It accepts any string produced by [Level.MarshalText], +// ignoring case. +// It also accepts numeric offsets that would result in a different string on +// output. For example, "Error-8" would marshal as "INFO". +func (l *Level) UnmarshalText(data []byte) error { + return l.parse(string(data)) +} + +func (l *Level) parse(s string) (err error) { + defer func() { + if err != nil { + err = fmt.Errorf("slog: level string %q: %w", s, err) + } + }() + + name := s + offset := 0 + if i := strings.IndexAny(s, "+-"); i >= 0 { + name = s[:i] + offset, err = strconv.Atoi(s[i:]) + if err != nil { + return err + } + } + switch strings.ToUpper(name) { + case "DEBUG": + *l = LevelDebug + case "INFO": + *l = LevelInfo + case "WARN": + *l = LevelWarn + case "ERROR": + *l = LevelError + default: + return errors.New("unknown name") + } + *l += Level(offset) + return nil +} + +// Level returns the receiver. +// It implements Leveler. 
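The name-plus-offset scheme round-trips through parse; a small sketch (inside a main that imports fmt and this package as slog):

    var l slog.Level
    _ = l.UnmarshalText([]byte("Error-8")) // name is case-insensitive; the offset applies to it
    fmt.Println(l == slog.LevelInfo)       // true: ERROR(8) + (-8) == 0 == INFO
    fmt.Println((slog.LevelWarn + 2).String()) // "WARN+2": between WARN and ERROR
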
+func (l Level) Level() Level { return l } + +// A LevelVar is a Level variable, to allow a Handler level to change +// dynamically. +// It implements Leveler as well as a Set method, +// and it is safe for use by multiple goroutines. +// The zero LevelVar corresponds to LevelInfo. +type LevelVar struct { + val atomic.Int64 +} + +// Level returns v's level. +func (v *LevelVar) Level() Level { + return Level(int(v.val.Load())) +} + +// Set sets v's level to l. +func (v *LevelVar) Set(l Level) { + v.val.Store(int64(l)) +} + +func (v *LevelVar) String() string { + return fmt.Sprintf("LevelVar(%s)", v.Level()) +} + +// MarshalText implements [encoding.TextMarshaler] +// by calling [Level.MarshalText]. +func (v *LevelVar) MarshalText() ([]byte, error) { + return v.Level().MarshalText() +} + +// UnmarshalText implements [encoding.TextUnmarshaler] +// by calling [Level.UnmarshalText]. +func (v *LevelVar) UnmarshalText(data []byte) error { + var l Level + if err := l.UnmarshalText(data); err != nil { + return err + } + v.Set(l) + return nil +} + +// A Leveler provides a Level value. +// +// As Level itself implements Leveler, clients typically supply +// a Level value wherever a Leveler is needed, such as in HandlerOptions. +// Clients who need to vary the level dynamically can provide a more complex +// Leveler implementation such as *LevelVar. +type Leveler interface { + Level() Level +} diff --git a/vendor/golang.org/x/exp/slog/logger.go b/vendor/golang.org/x/exp/slog/logger.go new file mode 100644 index 00000000..e87ec993 --- /dev/null +++ b/vendor/golang.org/x/exp/slog/logger.go @@ -0,0 +1,343 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slog + +import ( + "context" + "log" + "runtime" + "sync/atomic" + "time" + + "golang.org/x/exp/slog/internal" +) + +var defaultLogger atomic.Value + +func init() { + defaultLogger.Store(New(newDefaultHandler(log.Output))) +} + +// Default returns the default Logger. +func Default() *Logger { return defaultLogger.Load().(*Logger) } + +// SetDefault makes l the default Logger. +// After this call, output from the log package's default Logger +// (as with [log.Print], etc.) will be logged at LevelInfo using l's Handler. +func SetDefault(l *Logger) { + defaultLogger.Store(l) + // If the default's handler is a defaultHandler, then don't use a handleWriter, + // or we'll deadlock as they both try to acquire the log default mutex. + // The defaultHandler will use whatever the log default writer is currently + // set to, which is correct. + // This can occur with SetDefault(Default()). + // See TestSetDefault. + if _, ok := l.Handler().(*defaultHandler); !ok { + capturePC := log.Flags()&(log.Lshortfile|log.Llongfile) != 0 + log.SetOutput(&handlerWriter{l.Handler(), LevelInfo, capturePC}) + log.SetFlags(0) // we want just the log message, no time or location + } +} + +// handlerWriter is an io.Writer that calls a Handler. +// It is used to link the default log.Logger to the default slog.Logger. +type handlerWriter struct { + h Handler + level Level + capturePC bool +} + +func (w *handlerWriter) Write(buf []byte) (int, error) { + if !w.h.Enabled(context.Background(), w.level) { + return 0, nil + } + var pc uintptr + if !internal.IgnorePC && w.capturePC { + // skip [runtime.Callers, w.Write, Logger.Output, log.Print] + var pcs [1]uintptr + runtime.Callers(4, pcs[:]) + pc = pcs[0] + } + + // Remove final newline. 
+ origLen := len(buf) // Report that the entire buf was written. + if len(buf) > 0 && buf[len(buf)-1] == '\n' { + buf = buf[:len(buf)-1] + } + r := NewRecord(time.Now(), w.level, string(buf), pc) + return origLen, w.h.Handle(context.Background(), r) +} + +// A Logger records structured information about each call to its +// Log, Debug, Info, Warn, and Error methods. +// For each call, it creates a Record and passes it to a Handler. +// +// To create a new Logger, call [New] or a Logger method +// that begins "With". +type Logger struct { + handler Handler // for structured logging +} + +func (l *Logger) clone() *Logger { + c := *l + return &c +} + +// Handler returns l's Handler. +func (l *Logger) Handler() Handler { return l.handler } + +// With returns a new Logger that includes the given arguments, converted to +// Attrs as in [Logger.Log]. +// The Attrs will be added to each output from the Logger. +// The new Logger shares the old Logger's context. +// The new Logger's handler is the result of calling WithAttrs on the receiver's +// handler. +func (l *Logger) With(args ...any) *Logger { + c := l.clone() + c.handler = l.handler.WithAttrs(argsToAttrSlice(args)) + return c +} + +// WithGroup returns a new Logger that starts a group. The keys of all +// attributes added to the Logger will be qualified by the given name. +// (How that qualification happens depends on the [Handler.WithGroup] +// method of the Logger's Handler.) +// The new Logger shares the old Logger's context. +// +// The new Logger's handler is the result of calling WithGroup on the receiver's +// handler. +func (l *Logger) WithGroup(name string) *Logger { + c := l.clone() + c.handler = l.handler.WithGroup(name) + return c + +} + +// New creates a new Logger with the given non-nil Handler and a nil context. +func New(h Handler) *Logger { + if h == nil { + panic("nil Handler") + } + return &Logger{handler: h} +} + +// With calls Logger.With on the default logger. +func With(args ...any) *Logger { + return Default().With(args...) +} + +// Enabled reports whether l emits log records at the given context and level. +func (l *Logger) Enabled(ctx context.Context, level Level) bool { + if ctx == nil { + ctx = context.Background() + } + return l.Handler().Enabled(ctx, level) +} + +// NewLogLogger returns a new log.Logger such that each call to its Output method +// dispatches a Record to the specified handler. The logger acts as a bridge from +// the older log API to newer structured logging handlers. +func NewLogLogger(h Handler, level Level) *log.Logger { + return log.New(&handlerWriter{h, level, true}, "", 0) +} + +// Log emits a log record with the current time and the given level and message. +// The Record's Attrs consist of the Logger's attributes followed by +// the Attrs specified by args. +// +// The attribute arguments are processed as follows: +// - If an argument is an Attr, it is used as is. +// - If an argument is a string and this is not the last argument, +// the following argument is treated as the value and the two are combined +// into an Attr. +// - Otherwise, the argument is treated as a value with key "!BADKEY". +func (l *Logger) Log(ctx context.Context, level Level, msg string, args ...any) { + l.log(ctx, level, msg, args...) +} + +// LogAttrs is a more efficient version of [Logger.Log] that accepts only Attrs. +func (l *Logger) LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) { + l.logAttrs(ctx, level, msg, attrs...) +} + +// Debug logs at LevelDebug. 
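The argument rules above, made concrete (a sketch, assuming logger is any *Logger and ctx any context.Context):

    logger.Log(ctx, slog.LevelInfo, "login",
        "user", "alice",         // string key followed by a value -> one Attr
        slog.Int("attempts", 3), // an Attr is used as is
        42,                      // value with no preceding string key -> key "!BADKEY"
    )
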
+func (l *Logger) Debug(msg string, args ...any) { + l.log(nil, LevelDebug, msg, args...) +} + +// DebugContext logs at LevelDebug with the given context. +func (l *Logger) DebugContext(ctx context.Context, msg string, args ...any) { + l.log(ctx, LevelDebug, msg, args...) +} + +// DebugCtx logs at LevelDebug with the given context. +// Deprecated: Use Logger.DebugContext. +func (l *Logger) DebugCtx(ctx context.Context, msg string, args ...any) { + l.log(ctx, LevelDebug, msg, args...) +} + +// Info logs at LevelInfo. +func (l *Logger) Info(msg string, args ...any) { + l.log(nil, LevelInfo, msg, args...) +} + +// InfoContext logs at LevelInfo with the given context. +func (l *Logger) InfoContext(ctx context.Context, msg string, args ...any) { + l.log(ctx, LevelInfo, msg, args...) +} + +// InfoCtx logs at LevelInfo with the given context. +// Deprecated: Use Logger.InfoContext. +func (l *Logger) InfoCtx(ctx context.Context, msg string, args ...any) { + l.log(ctx, LevelInfo, msg, args...) +} + +// Warn logs at LevelWarn. +func (l *Logger) Warn(msg string, args ...any) { + l.log(nil, LevelWarn, msg, args...) +} + +// WarnContext logs at LevelWarn with the given context. +func (l *Logger) WarnContext(ctx context.Context, msg string, args ...any) { + l.log(ctx, LevelWarn, msg, args...) +} + +// WarnCtx logs at LevelWarn with the given context. +// Deprecated: Use Logger.WarnContext. +func (l *Logger) WarnCtx(ctx context.Context, msg string, args ...any) { + l.log(ctx, LevelWarn, msg, args...) +} + +// Error logs at LevelError. +func (l *Logger) Error(msg string, args ...any) { + l.log(nil, LevelError, msg, args...) +} + +// ErrorContext logs at LevelError with the given context. +func (l *Logger) ErrorContext(ctx context.Context, msg string, args ...any) { + l.log(ctx, LevelError, msg, args...) +} + +// ErrorCtx logs at LevelError with the given context. +// Deprecated: Use Logger.ErrorContext. +func (l *Logger) ErrorCtx(ctx context.Context, msg string, args ...any) { + l.log(ctx, LevelError, msg, args...) +} + +// log is the low-level logging method for methods that take ...any. +// It must always be called directly by an exported logging method +// or function, because it uses a fixed call depth to obtain the pc. +func (l *Logger) log(ctx context.Context, level Level, msg string, args ...any) { + if !l.Enabled(ctx, level) { + return + } + var pc uintptr + if !internal.IgnorePC { + var pcs [1]uintptr + // skip [runtime.Callers, this function, this function's caller] + runtime.Callers(3, pcs[:]) + pc = pcs[0] + } + r := NewRecord(time.Now(), level, msg, pc) + r.Add(args...) + if ctx == nil { + ctx = context.Background() + } + _ = l.Handler().Handle(ctx, r) +} + +// logAttrs is like [Logger.log], but for methods that take ...Attr. +func (l *Logger) logAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) { + if !l.Enabled(ctx, level) { + return + } + var pc uintptr + if !internal.IgnorePC { + var pcs [1]uintptr + // skip [runtime.Callers, this function, this function's caller] + runtime.Callers(3, pcs[:]) + pc = pcs[0] + } + r := NewRecord(time.Now(), level, msg, pc) + r.AddAttrs(attrs...) + if ctx == nil { + ctx = context.Background() + } + _ = l.Handler().Handle(ctx, r) +} + +// Debug calls Logger.Debug on the default logger. +func Debug(msg string, args ...any) { + Default().log(nil, LevelDebug, msg, args...) +} + +// DebugContext calls Logger.DebugContext on the default logger. 
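The package-level functions in this file all route through Default(); a sketch of wiring them up, assuming NewJSONHandler (from this package's json_handler.go) takes the same (io.Writer, *HandlerOptions) shape as NewTextHandler does in this vendored version:

    slog.SetDefault(slog.New(slog.NewJSONHandler(os.Stderr, nil)))
    slog.Info("service started", "port", 8080)       // uses the default logger
    slog.WarnContext(ctx, "slow request", "ms", 950) // preferred over the deprecated WarnCtx
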
+func DebugContext(ctx context.Context, msg string, args ...any) { + Default().log(ctx, LevelDebug, msg, args...) +} + +// Info calls Logger.Info on the default logger. +func Info(msg string, args ...any) { + Default().log(nil, LevelInfo, msg, args...) +} + +// InfoContext calls Logger.InfoContext on the default logger. +func InfoContext(ctx context.Context, msg string, args ...any) { + Default().log(ctx, LevelInfo, msg, args...) +} + +// Warn calls Logger.Warn on the default logger. +func Warn(msg string, args ...any) { + Default().log(nil, LevelWarn, msg, args...) +} + +// WarnContext calls Logger.WarnContext on the default logger. +func WarnContext(ctx context.Context, msg string, args ...any) { + Default().log(ctx, LevelWarn, msg, args...) +} + +// Error calls Logger.Error on the default logger. +func Error(msg string, args ...any) { + Default().log(nil, LevelError, msg, args...) +} + +// ErrorContext calls Logger.ErrorContext on the default logger. +func ErrorContext(ctx context.Context, msg string, args ...any) { + Default().log(ctx, LevelError, msg, args...) +} + +// DebugCtx calls Logger.DebugContext on the default logger. +// Deprecated: call DebugContext. +func DebugCtx(ctx context.Context, msg string, args ...any) { + Default().log(ctx, LevelDebug, msg, args...) +} + +// InfoCtx calls Logger.InfoContext on the default logger. +// Deprecated: call InfoContext. +func InfoCtx(ctx context.Context, msg string, args ...any) { + Default().log(ctx, LevelInfo, msg, args...) +} + +// WarnCtx calls Logger.WarnContext on the default logger. +// Deprecated: call WarnContext. +func WarnCtx(ctx context.Context, msg string, args ...any) { + Default().log(ctx, LevelWarn, msg, args...) +} + +// ErrorCtx calls Logger.ErrorContext on the default logger. +// Deprecated: call ErrorContext. +func ErrorCtx(ctx context.Context, msg string, args ...any) { + Default().log(ctx, LevelError, msg, args...) +} + +// Log calls Logger.Log on the default logger. +func Log(ctx context.Context, level Level, msg string, args ...any) { + Default().log(ctx, level, msg, args...) +} + +// LogAttrs calls Logger.LogAttrs on the default logger. +func LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) { + Default().logAttrs(ctx, level, msg, attrs...) 
+} diff --git a/vendor/golang.org/x/exp/slog/noplog.bench b/vendor/golang.org/x/exp/slog/noplog.bench new file mode 100644 index 00000000..ed9296ff --- /dev/null +++ b/vendor/golang.org/x/exp/slog/noplog.bench @@ -0,0 +1,36 @@ +goos: linux +goarch: amd64 +pkg: golang.org/x/exp/slog +cpu: Intel(R) Xeon(R) CPU @ 2.20GHz +BenchmarkNopLog/attrs-8 1000000 1090 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/attrs-8 1000000 1097 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/attrs-8 1000000 1078 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/attrs-8 1000000 1095 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/attrs-8 1000000 1096 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/attrs-parallel-8 4007268 308.2 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/attrs-parallel-8 4016138 299.7 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/attrs-parallel-8 4020529 305.9 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/attrs-parallel-8 3977829 303.4 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/attrs-parallel-8 3225438 318.5 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/keys-values-8 1179256 994.2 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/keys-values-8 1000000 1002 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/keys-values-8 1216710 993.2 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/keys-values-8 1000000 1013 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/keys-values-8 1000000 1016 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/WithContext-8 989066 1163 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/WithContext-8 994116 1163 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/WithContext-8 1000000 1152 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/WithContext-8 991675 1165 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/WithContext-8 965268 1166 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/WithContext-parallel-8 3955503 303.3 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/WithContext-parallel-8 3861188 307.8 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/WithContext-parallel-8 3967752 303.9 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/WithContext-parallel-8 3955203 302.7 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/WithContext-parallel-8 3948278 301.1 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/Ctx-8 940622 1247 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/Ctx-8 936381 1257 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/Ctx-8 959730 1266 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/Ctx-8 943473 1290 ns/op 0 B/op 0 allocs/op +BenchmarkNopLog/Ctx-8 919414 1259 ns/op 0 B/op 0 allocs/op +PASS +ok golang.org/x/exp/slog 40.566s diff --git a/vendor/golang.org/x/exp/slog/record.go b/vendor/golang.org/x/exp/slog/record.go new file mode 100644 index 00000000..38b3440f --- /dev/null +++ b/vendor/golang.org/x/exp/slog/record.go @@ -0,0 +1,207 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slog + +import ( + "runtime" + "time" + + "golang.org/x/exp/slices" +) + +const nAttrsInline = 5 + +// A Record holds information about a log event. +// Copies of a Record share state. +// Do not modify a Record after handing out a copy to it. +// Use [Record.Clone] to create a copy with no shared state. +type Record struct { + // The time at which the output method (Log, Info, etc.) was called. + Time time.Time + + // The log message. + Message string + + // The level of the event. + Level Level + + // The program counter at the time the record was constructed, as determined + // by runtime.Callers. If zero, no program counter is available. 
+ // + // The only valid use for this value is as an argument to + // [runtime.CallersFrames]. In particular, it must not be passed to + // [runtime.FuncForPC]. + PC uintptr + + // Allocation optimization: an inline array sized to hold + // the majority of log calls (based on examination of open-source + // code). It holds the start of the list of Attrs. + front [nAttrsInline]Attr + + // The number of Attrs in front. + nFront int + + // The list of Attrs except for those in front. + // Invariants: + // - len(back) > 0 iff nFront == len(front) + // - Unused array elements are zero. Used to detect mistakes. + back []Attr +} + +// NewRecord creates a Record from the given arguments. +// Use [Record.AddAttrs] to add attributes to the Record. +// +// NewRecord is intended for logging APIs that want to support a [Handler] as +// a backend. +func NewRecord(t time.Time, level Level, msg string, pc uintptr) Record { + return Record{ + Time: t, + Message: msg, + Level: level, + PC: pc, + } +} + +// Clone returns a copy of the record with no shared state. +// The original record and the clone can both be modified +// without interfering with each other. +func (r Record) Clone() Record { + r.back = slices.Clip(r.back) // prevent append from mutating shared array + return r +} + +// NumAttrs returns the number of attributes in the Record. +func (r Record) NumAttrs() int { + return r.nFront + len(r.back) +} + +// Attrs calls f on each Attr in the Record. +// Iteration stops if f returns false. +func (r Record) Attrs(f func(Attr) bool) { + for i := 0; i < r.nFront; i++ { + if !f(r.front[i]) { + return + } + } + for _, a := range r.back { + if !f(a) { + return + } + } +} + +// AddAttrs appends the given Attrs to the Record's list of Attrs. +func (r *Record) AddAttrs(attrs ...Attr) { + n := copy(r.front[r.nFront:], attrs) + r.nFront += n + // Check if a copy was modified by slicing past the end + // and seeing if the Attr there is non-zero. + if cap(r.back) > len(r.back) { + end := r.back[:len(r.back)+1][len(r.back)] + if !end.isEmpty() { + panic("copies of a slog.Record were both modified") + } + } + r.back = append(r.back, attrs[n:]...) +} + +// Add converts the args to Attrs as described in [Logger.Log], +// then appends the Attrs to the Record's list of Attrs. +func (r *Record) Add(args ...any) { + var a Attr + for len(args) > 0 { + a, args = argsToAttr(args) + if r.nFront < len(r.front) { + r.front[r.nFront] = a + r.nFront++ + } else { + if r.back == nil { + r.back = make([]Attr, 0, countAttrs(args)) + } + r.back = append(r.back, a) + } + } + +} + +// countAttrs returns the number of Attrs that would be created from args. +func countAttrs(args []any) int { + n := 0 + for i := 0; i < len(args); i++ { + n++ + if _, ok := args[i].(string); ok { + i++ + } + } + return n +} + +const badKey = "!BADKEY" + +// argsToAttr turns a prefix of the nonempty args slice into an Attr +// and returns the unconsumed portion of the slice. +// If args[0] is an Attr, it returns it. +// If args[0] is a string, it treats the first two elements as +// a key-value pair. +// Otherwise, it treats args[0] as a value with a missing key. +func argsToAttr(args []any) (Attr, []any) { + switch x := args[0].(type) { + case string: + if len(args) == 1 { + return String(badKey, x), nil + } + return Any(x, args[1]), args[2:] + + case Attr: + return x, args[1:] + + default: + return Any(badKey, x), args[1:] + } +} + +// Source describes the location of a line of source code. 
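Because copies of a Record share the back slice, Clone is the safe way to hand a copy out; a sketch (inside a function that imports time and this package as slog):

    r := slog.NewRecord(time.Now(), slog.LevelInfo, "checkout", 0)
    r.AddAttrs(slog.String("user", "alice"))
    c := r.Clone()                   // clips back, so later appends cannot collide
    c.AddAttrs(slog.Int("items", 3)) // safe: r is unaffected
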
+type Source struct { + // Function is the package path-qualified function name containing the + // source line. If non-empty, this string uniquely identifies a single + // function in the program. This may be the empty string if not known. + Function string `json:"function"` + // File and Line are the file name and line number (1-based) of the source + // line. These may be the empty string and zero, respectively, if not known. + File string `json:"file"` + Line int `json:"line"` +} + +// attrs returns the non-zero fields of s as a slice of attrs. +// It is similar to a LogValue method, but we don't want Source +// to implement LogValuer because it would be resolved before +// the ReplaceAttr function was called. +func (s *Source) group() Value { + var as []Attr + if s.Function != "" { + as = append(as, String("function", s.Function)) + } + if s.File != "" { + as = append(as, String("file", s.File)) + } + if s.Line != 0 { + as = append(as, Int("line", s.Line)) + } + return GroupValue(as...) +} + +// source returns a Source for the log event. +// If the Record was created without the necessary information, +// or if the location is unavailable, it returns a non-nil *Source +// with zero fields. +func (r Record) source() *Source { + fs := runtime.CallersFrames([]uintptr{r.PC}) + f, _ := fs.Next() + return &Source{ + Function: f.Function, + File: f.File, + Line: f.Line, + } +} diff --git a/vendor/golang.org/x/exp/slog/text_handler.go b/vendor/golang.org/x/exp/slog/text_handler.go new file mode 100644 index 00000000..75b66b71 --- /dev/null +++ b/vendor/golang.org/x/exp/slog/text_handler.go @@ -0,0 +1,161 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slog + +import ( + "context" + "encoding" + "fmt" + "io" + "reflect" + "strconv" + "unicode" + "unicode/utf8" +) + +// TextHandler is a Handler that writes Records to an io.Writer as a +// sequence of key=value pairs separated by spaces and followed by a newline. +type TextHandler struct { + *commonHandler +} + +// NewTextHandler creates a TextHandler that writes to w, +// using the given options. +// If opts is nil, the default options are used. +func NewTextHandler(w io.Writer, opts *HandlerOptions) *TextHandler { + if opts == nil { + opts = &HandlerOptions{} + } + return &TextHandler{ + &commonHandler{ + json: false, + w: w, + opts: *opts, + }, + } +} + +// Enabled reports whether the handler handles records at the given level. +// The handler ignores records whose level is lower. +func (h *TextHandler) Enabled(_ context.Context, level Level) bool { + return h.commonHandler.enabled(level) +} + +// WithAttrs returns a new TextHandler whose attributes consists +// of h's attributes followed by attrs. +func (h *TextHandler) WithAttrs(attrs []Attr) Handler { + return &TextHandler{commonHandler: h.commonHandler.withAttrs(attrs)} +} + +func (h *TextHandler) WithGroup(name string) Handler { + return &TextHandler{commonHandler: h.commonHandler.withGroup(name)} +} + +// Handle formats its argument Record as a single line of space-separated +// key=value items. +// +// If the Record's time is zero, the time is omitted. +// Otherwise, the key is "time" +// and the value is output in RFC3339 format with millisecond precision. +// +// If the Record's level is zero, the level is omitted. +// Otherwise, the key is "level" +// and the value of [Level.String] is output. 
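Concretely, with default options a handled record comes out as a single line shaped like this (hypothetical values; msg is quoted because it contains a space, and request.id shows a dot-qualified group key):

    time=2023-05-01T20:33:05.221Z level=INFO msg="hello world" request.id=42
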
+// +// If the AddSource option is set and source information is available, +// the key is "source" and the value is output as FILE:LINE. +// +// The message's key is "msg". +// +// To modify these or other attributes, or remove them from the output, use +// [HandlerOptions.ReplaceAttr]. +// +// If a value implements [encoding.TextMarshaler], the result of MarshalText is +// written. Otherwise, the result of fmt.Sprint is written. +// +// Keys and values are quoted with [strconv.Quote] if they contain Unicode space +// characters, non-printing characters, '"' or '='. +// +// Keys inside groups consist of components (keys or group names) separated by +// dots. No further escaping is performed. +// Thus there is no way to determine from the key "a.b.c" whether there +// are two groups "a" and "b" and a key "c", or a single group "a.b" and a key "c", +// or single group "a" and a key "b.c". +// If it is necessary to reconstruct the group structure of a key +// even in the presence of dots inside components, use +// [HandlerOptions.ReplaceAttr] to encode that information in the key. +// +// Each call to Handle results in a single serialized call to +// io.Writer.Write. +func (h *TextHandler) Handle(_ context.Context, r Record) error { + return h.commonHandler.handle(r) +} + +func appendTextValue(s *handleState, v Value) error { + switch v.Kind() { + case KindString: + s.appendString(v.str()) + case KindTime: + s.appendTime(v.time()) + case KindAny: + if tm, ok := v.any.(encoding.TextMarshaler); ok { + data, err := tm.MarshalText() + if err != nil { + return err + } + // TODO: avoid the conversion to string. + s.appendString(string(data)) + return nil + } + if bs, ok := byteSlice(v.any); ok { + // As of Go 1.19, this only allocates for strings longer than 32 bytes. + s.buf.WriteString(strconv.Quote(string(bs))) + return nil + } + s.appendString(fmt.Sprintf("%+v", v.Any())) + default: + *s.buf = v.append(*s.buf) + } + return nil +} + +// byteSlice returns its argument as a []byte if the argument's +// underlying type is []byte, along with a second return value of true. +// Otherwise it returns nil, false. +func byteSlice(a any) ([]byte, bool) { + if bs, ok := a.([]byte); ok { + return bs, true + } + // Like Printf's %s, we allow both the slice type and the byte element type to be named. + t := reflect.TypeOf(a) + if t != nil && t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 { + return reflect.ValueOf(a).Bytes(), true + } + return nil, false +} + +func needsQuoting(s string) bool { + if len(s) == 0 { + return true + } + for i := 0; i < len(s); { + b := s[i] + if b < utf8.RuneSelf { + // Quote anything except a backslash that would need quoting in a + // JSON string, as well as space and '=' + if b != '\\' && (b == ' ' || b == '=' || !safeSet[b]) { + return true + } + i++ + continue + } + r, size := utf8.DecodeRuneInString(s[i:]) + if r == utf8.RuneError || unicode.IsSpace(r) || !unicode.IsPrint(r) { + return true + } + i += size + } + return false +} diff --git a/vendor/golang.org/x/exp/slog/value.go b/vendor/golang.org/x/exp/slog/value.go new file mode 100644 index 00000000..3550c46f --- /dev/null +++ b/vendor/golang.org/x/exp/slog/value.go @@ -0,0 +1,456 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package slog + +import ( + "fmt" + "math" + "runtime" + "strconv" + "strings" + "time" + "unsafe" + + "golang.org/x/exp/slices" +) + +// A Value can represent any Go value, but unlike type any, +// it can represent most small values without an allocation. +// The zero Value corresponds to nil. +type Value struct { + _ [0]func() // disallow == + // num holds the value for Kinds Int64, Uint64, Float64, Bool and Duration, + // the string length for KindString, and nanoseconds since the epoch for KindTime. + num uint64 + // If any is of type Kind, then the value is in num as described above. + // If any is of type *time.Location, then the Kind is Time and time.Time value + // can be constructed from the Unix nanos in num and the location (monotonic time + // is not preserved). + // If any is of type stringptr, then the Kind is String and the string value + // consists of the length in num and the pointer in any. + // Otherwise, the Kind is Any and any is the value. + // (This implies that Attrs cannot store values of type Kind, *time.Location + // or stringptr.) + any any +} + +// Kind is the kind of a Value. +type Kind int + +// The following list is sorted alphabetically, but it's also important that +// KindAny is 0 so that a zero Value represents nil. + +const ( + KindAny Kind = iota + KindBool + KindDuration + KindFloat64 + KindInt64 + KindString + KindTime + KindUint64 + KindGroup + KindLogValuer +) + +var kindStrings = []string{ + "Any", + "Bool", + "Duration", + "Float64", + "Int64", + "String", + "Time", + "Uint64", + "Group", + "LogValuer", +} + +func (k Kind) String() string { + if k >= 0 && int(k) < len(kindStrings) { + return kindStrings[k] + } + return "" +} + +// Unexported version of Kind, just so we can store Kinds in Values. +// (No user-provided value has this type.) +type kind Kind + +// Kind returns v's Kind. +func (v Value) Kind() Kind { + switch x := v.any.(type) { + case Kind: + return x + case stringptr: + return KindString + case timeLocation: + return KindTime + case groupptr: + return KindGroup + case LogValuer: + return KindLogValuer + case kind: // a kind is just a wrapper for a Kind + return KindAny + default: + return KindAny + } +} + +//////////////// Constructors + +// IntValue returns a Value for an int. +func IntValue(v int) Value { + return Int64Value(int64(v)) +} + +// Int64Value returns a Value for an int64. +func Int64Value(v int64) Value { + return Value{num: uint64(v), any: KindInt64} +} + +// Uint64Value returns a Value for a uint64. +func Uint64Value(v uint64) Value { + return Value{num: v, any: KindUint64} +} + +// Float64Value returns a Value for a floating-point number. +func Float64Value(v float64) Value { + return Value{num: math.Float64bits(v), any: KindFloat64} +} + +// BoolValue returns a Value for a bool. +func BoolValue(v bool) Value { + u := uint64(0) + if v { + u = 1 + } + return Value{num: u, any: KindBool} +} + +// Unexported version of *time.Location, just so we can store *time.Locations in +// Values. (No user-provided value has this type.) +type timeLocation *time.Location + +// TimeValue returns a Value for a time.Time. +// It discards the monotonic portion. +func TimeValue(v time.Time) Value { + if v.IsZero() { + // UnixNano on the zero time is undefined, so represent the zero time + // with a nil *time.Location instead. time.Time.Location method never + // returns nil, so a Value with any == timeLocation(nil) cannot be + // mistaken for any other Value, time.Time or otherwise. 
+ return Value{any: timeLocation(nil)} + } + return Value{num: uint64(v.UnixNano()), any: timeLocation(v.Location())} +} + +// DurationValue returns a Value for a time.Duration. +func DurationValue(v time.Duration) Value { + return Value{num: uint64(v.Nanoseconds()), any: KindDuration} +} + +// AnyValue returns a Value for the supplied value. +// +// If the supplied value is of type Value, it is returned +// unmodified. +// +// Given a value of one of Go's predeclared string, bool, or +// (non-complex) numeric types, AnyValue returns a Value of kind +// String, Bool, Uint64, Int64, or Float64. The width of the +// original numeric type is not preserved. +// +// Given a time.Time or time.Duration value, AnyValue returns a Value of kind +// KindTime or KindDuration. The monotonic time is not preserved. +// +// For nil, or values of all other types, including named types whose +// underlying type is numeric, AnyValue returns a value of kind KindAny. +func AnyValue(v any) Value { + switch v := v.(type) { + case string: + return StringValue(v) + case int: + return Int64Value(int64(v)) + case uint: + return Uint64Value(uint64(v)) + case int64: + return Int64Value(v) + case uint64: + return Uint64Value(v) + case bool: + return BoolValue(v) + case time.Duration: + return DurationValue(v) + case time.Time: + return TimeValue(v) + case uint8: + return Uint64Value(uint64(v)) + case uint16: + return Uint64Value(uint64(v)) + case uint32: + return Uint64Value(uint64(v)) + case uintptr: + return Uint64Value(uint64(v)) + case int8: + return Int64Value(int64(v)) + case int16: + return Int64Value(int64(v)) + case int32: + return Int64Value(int64(v)) + case float64: + return Float64Value(v) + case float32: + return Float64Value(float64(v)) + case []Attr: + return GroupValue(v...) + case Kind: + return Value{any: kind(v)} + case Value: + return v + default: + return Value{any: v} + } +} + +//////////////// Accessors + +// Any returns v's value as an any. +func (v Value) Any() any { + switch v.Kind() { + case KindAny: + if k, ok := v.any.(kind); ok { + return Kind(k) + } + return v.any + case KindLogValuer: + return v.any + case KindGroup: + return v.group() + case KindInt64: + return int64(v.num) + case KindUint64: + return v.num + case KindFloat64: + return v.float() + case KindString: + return v.str() + case KindBool: + return v.bool() + case KindDuration: + return v.duration() + case KindTime: + return v.time() + default: + panic(fmt.Sprintf("bad kind: %s", v.Kind())) + } +} + +// Int64 returns v's value as an int64. It panics +// if v is not a signed integer. +func (v Value) Int64() int64 { + if g, w := v.Kind(), KindInt64; g != w { + panic(fmt.Sprintf("Value kind is %s, not %s", g, w)) + } + return int64(v.num) +} + +// Uint64 returns v's value as a uint64. It panics +// if v is not an unsigned integer. +func (v Value) Uint64() uint64 { + if g, w := v.Kind(), KindUint64; g != w { + panic(fmt.Sprintf("Value kind is %s, not %s", g, w)) + } + return v.num +} + +// Bool returns v's value as a bool. It panics +// if v is not a bool. +func (v Value) Bool() bool { + if g, w := v.Kind(), KindBool; g != w { + panic(fmt.Sprintf("Value kind is %s, not %s", g, w)) + } + return v.bool() +} + +func (v Value) bool() bool { + return v.num == 1 +} + +// Duration returns v's value as a time.Duration. It panics +// if v is not a time.Duration. 
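A sketch of the AnyValue kind mapping described above (numeric width is not preserved, and unrecognized types fall through to KindAny; assumes fmt, io, and time are imported):

    fmt.Println(slog.AnyValue(3).Kind())           // Int64
    fmt.Println(slog.AnyValue(uint16(3)).Kind())   // Uint64
    fmt.Println(slog.AnyValue(2.5).Kind())         // Float64
    fmt.Println(slog.AnyValue(time.Second).Kind()) // Duration
    fmt.Println(slog.AnyValue(io.EOF).Kind())      // Any: errors are not special here
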
+func (v Value) Duration() time.Duration { + if g, w := v.Kind(), KindDuration; g != w { + panic(fmt.Sprintf("Value kind is %s, not %s", g, w)) + } + + return v.duration() +} + +func (v Value) duration() time.Duration { + return time.Duration(int64(v.num)) +} + +// Float64 returns v's value as a float64. It panics +// if v is not a float64. +func (v Value) Float64() float64 { + if g, w := v.Kind(), KindFloat64; g != w { + panic(fmt.Sprintf("Value kind is %s, not %s", g, w)) + } + + return v.float() +} + +func (v Value) float() float64 { + return math.Float64frombits(v.num) +} + +// Time returns v's value as a time.Time. It panics +// if v is not a time.Time. +func (v Value) Time() time.Time { + if g, w := v.Kind(), KindTime; g != w { + panic(fmt.Sprintf("Value kind is %s, not %s", g, w)) + } + return v.time() +} + +func (v Value) time() time.Time { + loc := v.any.(timeLocation) + if loc == nil { + return time.Time{} + } + return time.Unix(0, int64(v.num)).In(loc) +} + +// LogValuer returns v's value as a LogValuer. It panics +// if v is not a LogValuer. +func (v Value) LogValuer() LogValuer { + return v.any.(LogValuer) +} + +// Group returns v's value as a []Attr. +// It panics if v's Kind is not KindGroup. +func (v Value) Group() []Attr { + if sp, ok := v.any.(groupptr); ok { + return unsafe.Slice((*Attr)(sp), v.num) + } + panic("Group: bad kind") +} + +func (v Value) group() []Attr { + return unsafe.Slice((*Attr)(v.any.(groupptr)), v.num) +} + +//////////////// Other + +// Equal reports whether v and w represent the same Go value. +func (v Value) Equal(w Value) bool { + k1 := v.Kind() + k2 := w.Kind() + if k1 != k2 { + return false + } + switch k1 { + case KindInt64, KindUint64, KindBool, KindDuration: + return v.num == w.num + case KindString: + return v.str() == w.str() + case KindFloat64: + return v.float() == w.float() + case KindTime: + return v.time().Equal(w.time()) + case KindAny, KindLogValuer: + return v.any == w.any // may panic if non-comparable + case KindGroup: + return slices.EqualFunc(v.group(), w.group(), Attr.Equal) + default: + panic(fmt.Sprintf("bad kind: %s", k1)) + } +} + +// append appends a text representation of v to dst. +// v is formatted as with fmt.Sprint. +func (v Value) append(dst []byte) []byte { + switch v.Kind() { + case KindString: + return append(dst, v.str()...) + case KindInt64: + return strconv.AppendInt(dst, int64(v.num), 10) + case KindUint64: + return strconv.AppendUint(dst, v.num, 10) + case KindFloat64: + return strconv.AppendFloat(dst, v.float(), 'g', -1, 64) + case KindBool: + return strconv.AppendBool(dst, v.bool()) + case KindDuration: + return append(dst, v.duration().String()...) + case KindTime: + return append(dst, v.time().String()...) + case KindGroup: + return fmt.Append(dst, v.group()) + case KindAny, KindLogValuer: + return fmt.Append(dst, v.any) + default: + panic(fmt.Sprintf("bad kind: %s", v.Kind())) + } +} + +// A LogValuer is any Go value that can convert itself into a Value for logging. +// +// This mechanism may be used to defer expensive operations until they are +// needed, or to expand a single value into a sequence of components. +type LogValuer interface { + LogValue() Value +} + +const maxLogValues = 100 + +// Resolve repeatedly calls LogValue on v while it implements LogValuer, +// and returns the result. +// If v resolves to a group, the group's attributes' values are not recursively +// resolved. +// If the number of LogValue calls exceeds a threshold, a Value containing an +// error is returned. 
+// Resolve's return value is guaranteed not to be of Kind KindLogValuer. +func (v Value) Resolve() (rv Value) { + orig := v + defer func() { + if r := recover(); r != nil { + rv = AnyValue(fmt.Errorf("LogValue panicked\n%s", stack(3, 5))) + } + }() + + for i := 0; i < maxLogValues; i++ { + if v.Kind() != KindLogValuer { + return v + } + v = v.LogValuer().LogValue() + } + err := fmt.Errorf("LogValue called too many times on Value of type %T", orig.Any()) + return AnyValue(err) +} + +func stack(skip, nFrames int) string { + pcs := make([]uintptr, nFrames+1) + n := runtime.Callers(skip+1, pcs) + if n == 0 { + return "(no stack)" + } + frames := runtime.CallersFrames(pcs[:n]) + var b strings.Builder + i := 0 + for { + frame, more := frames.Next() + fmt.Fprintf(&b, "called from %s (%s:%d)\n", frame.Function, frame.File, frame.Line) + if !more { + break + } + i++ + if i >= nFrames { + fmt.Fprintf(&b, "(rest of stack elided)\n") + break + } + } + return b.String() +} diff --git a/vendor/golang.org/x/exp/slog/value_119.go b/vendor/golang.org/x/exp/slog/value_119.go new file mode 100644 index 00000000..29b0d732 --- /dev/null +++ b/vendor/golang.org/x/exp/slog/value_119.go @@ -0,0 +1,53 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.19 && !go1.20 + +package slog + +import ( + "reflect" + "unsafe" +) + +type ( + stringptr unsafe.Pointer // used in Value.any when the Value is a string + groupptr unsafe.Pointer // used in Value.any when the Value is a []Attr +) + +// StringValue returns a new Value for a string. +func StringValue(value string) Value { + hdr := (*reflect.StringHeader)(unsafe.Pointer(&value)) + return Value{num: uint64(hdr.Len), any: stringptr(hdr.Data)} +} + +func (v Value) str() string { + var s string + hdr := (*reflect.StringHeader)(unsafe.Pointer(&s)) + hdr.Data = uintptr(v.any.(stringptr)) + hdr.Len = int(v.num) + return s +} + +// String returns Value's value as a string, formatted like fmt.Sprint. Unlike +// the methods Int64, Float64, and so on, which panic if v is of the +// wrong kind, String never panics. +func (v Value) String() string { + if sp, ok := v.any.(stringptr); ok { + // Inlining this code makes a huge difference. + var s string + hdr := (*reflect.StringHeader)(unsafe.Pointer(&s)) + hdr.Data = uintptr(sp) + hdr.Len = int(v.num) + return s + } + return string(v.append(nil)) +} + +// GroupValue returns a new Value for a list of Attrs. +// The caller must not subsequently mutate the argument slice. +func GroupValue(as ...Attr) Value { + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&as)) + return Value{num: uint64(hdr.Len), any: groupptr(hdr.Data)} +} diff --git a/vendor/golang.org/x/exp/slog/value_120.go b/vendor/golang.org/x/exp/slog/value_120.go new file mode 100644 index 00000000..f7d4c093 --- /dev/null +++ b/vendor/golang.org/x/exp/slog/value_120.go @@ -0,0 +1,39 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.20 + +package slog + +import "unsafe" + +type ( + stringptr *byte // used in Value.any when the Value is a string + groupptr *Attr // used in Value.any when the Value is a []Attr +) + +// StringValue returns a new Value for a string. 
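A sketch of the LogValuer/Resolve contract, using a hypothetical apiKey type that redacts itself at logging time:

    type apiKey string

    // LogValue substitutes a safe value when the attribute is resolved.
    func (apiKey) LogValue() slog.Value { return slog.StringValue("REDACTED") }

    // elsewhere:
    v := slog.AnyValue(apiKey("hunter2")).Resolve()
    fmt.Println(v.Kind(), v.String()) // String REDACTED
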
+func StringValue(value string) Value { + return Value{num: uint64(len(value)), any: stringptr(unsafe.StringData(value))} +} + +// GroupValue returns a new Value for a list of Attrs. +// The caller must not subsequently mutate the argument slice. +func GroupValue(as ...Attr) Value { + return Value{num: uint64(len(as)), any: groupptr(unsafe.SliceData(as))} +} + +// String returns Value's value as a string, formatted like fmt.Sprint. Unlike +// the methods Int64, Float64, and so on, which panic if v is of the +// wrong kind, String never panics. +func (v Value) String() string { + if sp, ok := v.any.(stringptr); ok { + return unsafe.String(sp, v.num) + } + return string(v.append(nil)) +} + +func (v Value) str() string { + return unsafe.String(v.any.(stringptr), v.num) +} diff --git a/vendor/golang.org/x/net/http/httpguts/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go index 6e071e85..9b4de940 100644 --- a/vendor/golang.org/x/net/http/httpguts/httplex.go +++ b/vendor/golang.org/x/net/http/httpguts/httplex.go @@ -12,7 +12,7 @@ import ( "golang.org/x/net/idna" ) -var isTokenTable = [127]bool{ +var isTokenTable = [256]bool{ '!': true, '#': true, '$': true, @@ -93,12 +93,7 @@ var isTokenTable = [127]bool{ } func IsTokenRune(r rune) bool { - i := int(r) - return i < len(isTokenTable) && isTokenTable[i] -} - -func isNotToken(r rune) bool { - return !IsTokenRune(r) + return r < utf8.RuneSelf && isTokenTable[byte(r)] } // HeaderValuesContainsToken reports whether any string in values @@ -202,8 +197,8 @@ func ValidHeaderFieldName(v string) bool { if len(v) == 0 { return false } - for _, r := range v { - if !IsTokenRune(r) { + for i := 0; i < len(v); i++ { + if !isTokenTable[v[i]] { return false } } diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index e2b298d8..105c3b27 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -490,6 +490,9 @@ func terminalReadFrameError(err error) bool { // returned error is ErrFrameTooLarge. Other errors may be of type // ConnectionError, StreamError, or anything else from the underlying // reader. +// +// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID +// indicates the stream responsible for the error. func (fr *Framer) ReadFrame() (Frame, error) { fr.errDetail = nil if fr.lastFrame != nil { @@ -1521,7 +1524,7 @@ func (fr *Framer) maxHeaderStringLen() int { // readMetaFrame returns 0 or more CONTINUATION frames from fr and // merge them into the provided hf and returns a MetaHeadersFrame // with the decoded hpack values. -func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { +func (fr *Framer) readMetaFrame(hf *HeadersFrame) (Frame, error) { if fr.AllowIllegalReads { return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") } @@ -1564,6 +1567,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { if size > remainSize { hdec.SetEmitEnabled(false) mh.Truncated = true + remainSize = 0 return } remainSize -= size @@ -1576,8 +1580,38 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { var hc headersOrContinuation = hf for { frag := hc.HeaderBlockFragment() + + // Avoid parsing large amounts of headers that we will then discard. + // If the sender exceeds the max header list size by too much, + // skip parsing the fragment and close the connection. 
+ // + // "Too much" is either any CONTINUATION frame after we've already + // exceeded the max header list size (in which case remainSize is 0), + // or a frame whose encoded size is more than twice the remaining + // header list bytes we're willing to accept. + if int64(len(frag)) > int64(2*remainSize) { + if VerboseLogs { + log.Printf("http2: header list too large") + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. + return mh, ConnectionError(ErrCodeProtocol) + } + + // Also close the connection after any CONTINUATION frame following an + // invalid header, since we stop tracking the size of the headers after + // an invalid one. + if invalid != nil { + if VerboseLogs { + log.Printf("http2: invalid header: %v", invalid) + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. + return mh, ConnectionError(ErrCodeProtocol) + } + if _, err := hdec.Write(frag); err != nil { - return nil, ConnectionError(ErrCodeCompression) + return mh, ConnectionError(ErrCodeCompression) } if hc.HeadersEnded() { @@ -1594,7 +1628,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { mh.HeadersFrame.invalidate() if err := hdec.Close(); err != nil { - return nil, ConnectionError(ErrCodeCompression) + return mh, ConnectionError(ErrCodeCompression) } if invalid != nil { fr.errDetail = invalid diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 6f2df281..003e649f 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -17,6 +17,7 @@ package http2 // import "golang.org/x/net/http2" import ( "bufio" + "context" "crypto/tls" "fmt" "io" @@ -26,6 +27,7 @@ import ( "strconv" "strings" "sync" + "time" "golang.org/x/net/http/httpguts" ) @@ -210,12 +212,6 @@ type stringWriter interface { WriteString(s string) (n int, err error) } -// A gate lets two goroutines coordinate their activities. -type gate chan struct{} - -func (g gate) Done() { g <- struct{}{} } -func (g gate) Wait() { <-g } - // A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). type closeWaiter chan struct{} @@ -383,3 +379,14 @@ func validPseudoPath(v string) bool { // makes that struct also non-comparable, and generally doesn't add // any size (as long as it's first). type incomparable [0]func() + +// synctestGroupInterface is the methods of synctestGroup used by Server and Transport. +// It's defined as an interface here to let us keep synctestGroup entirely test-only +// and not a part of non-test builds. +type synctestGroupInterface interface { + Join() + Now() time.Time + NewTimer(d time.Duration) timer + AfterFunc(d time.Duration, f func()) timer + ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) +} diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go index 684d984f..3b9f06b9 100644 --- a/vendor/golang.org/x/net/http2/pipe.go +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -77,7 +77,10 @@ func (p *pipe) Read(d []byte) (n int, err error) { } } -var errClosedPipeWrite = errors.New("write on closed buffer") +var ( + errClosedPipeWrite = errors.New("write on closed buffer") + errUninitializedPipeWrite = errors.New("write on uninitialized buffer") +) // Write copies bytes from p into the buffer and wakes a reader. 
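To put numbers on the readMetaFrame guard above: with 1000 bytes of header-list budget remaining, a 2500-byte CONTINUATION fragment fails the len(frag) > 2*remainSize check (2500 > 2000) and the connection is closed without decoding the fragment; and once the budget is exhausted, the truncation path zeroes remainSize, so any subsequent fragment (len > 0) is rejected immediately.
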
// It is an error to write more data than the buffer can hold. @@ -91,6 +94,12 @@ func (p *pipe) Write(d []byte) (n int, err error) { if p.err != nil || p.breakErr != nil { return 0, errClosedPipeWrite } + // pipe.setBuffer is never invoked, leaving the buffer uninitialized. + // We shouldn't try to write to an uninitialized pipe, + // but returning an error is better than panicking. + if p.b == nil { + return 0, errUninitializedPipeWrite + } return p.b.Write(d) } diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index ae94c640..6c349f3e 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -124,6 +124,7 @@ type Server struct { // IdleTimeout specifies how long until idle clients should be // closed with a GOAWAY frame. PING frames are not considered // activity for the purposes of IdleTimeout. + // If zero or negative, there is no timeout. IdleTimeout time.Duration // MaxUploadBufferPerConnection is the size of the initial flow @@ -153,6 +154,39 @@ type Server struct { // so that we don't embed a Mutex in this struct, which will make the // struct non-copyable, which might break some callers. state *serverInternalState + + // Synchronization group used for testing. + // Outside of tests, this is nil. + group synctestGroupInterface +} + +func (s *Server) markNewGoroutine() { + if s.group != nil { + s.group.Join() + } +} + +func (s *Server) now() time.Time { + if s.group != nil { + return s.group.Now() + } + return time.Now() +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (s *Server) newTimer(d time.Duration) timer { + if s.group != nil { + return s.group.NewTimer(d) + } + return timeTimer{time.NewTimer(d)} +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (s *Server) afterFunc(d time.Duration, f func()) timer { + if s.group != nil { + return s.group.AfterFunc(d, f) + } + return timeTimer{time.AfterFunc(d, f)} } func (s *Server) initialConnRecvWindowSize() int32 { @@ -399,6 +433,10 @@ func (o *ServeConnOpts) handler() http.Handler { // // The opts parameter is optional. If nil, default values are used. func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + s.serveConn(c, opts, nil) +} + +func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverConn)) { baseCtx, cancel := serverConnBaseContext(c, opts) defer cancel() @@ -425,6 +463,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { pushEnabled: true, sawClientPreface: opts.SawClientPreface, } + if newf != nil { + newf(sc) + } s.state.registerConn(sc) defer s.state.unregisterConn(sc) @@ -434,7 +475,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { // passes the connection off to us with the deadline already set. // Write deadlines are set per stream in serverConn.newStream. // Disarm the net.Conn write deadline here. 
- if sc.hs.WriteTimeout != 0 { + if sc.hs.WriteTimeout > 0 { sc.conn.SetWriteDeadline(time.Time{}) } @@ -598,8 +639,8 @@ type serverConn struct { inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop needToSendGoAway bool // we need to schedule a GOAWAY frame write goAwayCode ErrCode - shutdownTimer *time.Timer // nil until used - idleTimer *time.Timer // nil if unused + shutdownTimer timer // nil until used + idleTimer timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -648,12 +689,12 @@ type stream struct { flow outflow // limits writing from Handler to client inflow inflow // what the client is allowed to POST/etc to us state streamState - resetQueued bool // RST_STREAM queued for write; set by sc.resetStream - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - readDeadline *time.Timer // nil if unused - writeDeadline *time.Timer // nil if unused - closeErr error // set before cw is closed + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + readDeadline timer // nil if unused + writeDeadline timer // nil if unused + closeErr error // set before cw is closed trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -731,11 +772,7 @@ func isClosedConnError(err error) bool { return false } - // TODO: remove this string search and be more like the Windows - // case below. That might involve modifying the standard library - // to return better error types. - str := err.Error() - if strings.Contains(str, "use of closed network connection") { + if errors.Is(err, net.ErrClosed) { return true } @@ -814,8 +851,9 @@ type readFrameResult struct { // consumer is done with the frame. // It's run on its own goroutine. func (sc *serverConn) readFrames() { - gate := make(gate) - gateDone := gate.Done + sc.srv.markNewGoroutine() + gate := make(chan struct{}) + gateDone := func() { gate <- struct{}{} } for { f, err := sc.framer.ReadFrame() select { @@ -846,6 +884,7 @@ type frameWriteResult struct { // At most one goroutine can be running writeFrameAsync at a time per // serverConn. func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) { + sc.srv.markNewGoroutine() var err error if wd == nil { err = wr.write.writeFrame(sc) @@ -924,14 +963,14 @@ func (sc *serverConn) serve() { sc.setConnState(http.StateActive) sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { - sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + if sc.srv.IdleTimeout > 0 { + sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } go sc.readFrames() // closed by defer sc.conn.Close above - settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) + settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() loopNum := 0 @@ -1060,10 +1099,10 @@ func (sc *serverConn) readPreface() error { errc <- nil } }() - timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? + timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server? 
defer timer.Stop() select { - case <-timer.C: + case <-timer.C(): return errPrefaceTimeout case err := <-errc: if err == nil { @@ -1428,7 +1467,7 @@ func (sc *serverConn) goAway(code ErrCode) { func (sc *serverConn) shutDownIn(d time.Duration) { sc.serveG.check() - sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) + sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer) } func (sc *serverConn) resetStream(se StreamError) { @@ -1481,6 +1520,11 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { sc.goAway(ErrCodeFlowControl) return true case ConnectionError: + if res.f != nil { + if id := res.f.Header().StreamID; id > sc.maxClientStreamID { + sc.maxClientStreamID = id + } + } sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) sc.goAway(ErrCode(ev)) return true // goAway will handle shutdown @@ -1637,7 +1681,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { delete(sc.streams, st.id) if len(sc.streams) == 0 { sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { + if sc.srv.IdleTimeout > 0 && sc.idleTimer != nil { sc.idleTimer.Reset(sc.srv.IdleTimeout) } if h1ServerKeepAlivesDisabled(sc.hs) { @@ -1659,6 +1703,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { } } st.closeErr = err + st.cancelCtx() st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc sc.writeSched.CloseStream(st.id) } @@ -2017,9 +2062,9 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // similar to how the http1 server works. Here it's // technically more like the http1 Server's ReadHeaderTimeout // (in Go 1.8), though. That's a more sane option anyway. - if sc.hs.ReadTimeout != 0 { + if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) - st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) + st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } return sc.scheduleHandler(id, rw, req, handler) @@ -2038,7 +2083,7 @@ func (sc *serverConn) upgradeRequest(req *http.Request) { // Disable any read deadline set by the net/http package // prior to the upgrade. - if sc.hs.ReadTimeout != 0 { + if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) } @@ -2116,8 +2161,8 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) st.inflow.init(sc.srv.initialStreamRecvWindowSize()) - if sc.hs.WriteTimeout != 0 { - st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + if sc.hs.WriteTimeout > 0 { + st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } sc.streams[id] = st @@ -2341,6 +2386,7 @@ func (sc *serverConn) handlerDone() { // Run on its own goroutine. func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + sc.srv.markNewGoroutine() defer sc.sendServeMsg(handlerDoneMsg) didPanic := true defer func() { @@ -2637,7 +2683,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { var date string if _, ok := rws.snapHeader["Date"]; !ok { // TODO(bradfitz): be faster here, like net/http? measure. 
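Several hunks above and below tighten timeout guards from != 0 to > 0, matching the new IdleTimeout doc comment ("If zero or negative, there is no timeout"): handing time.AfterFunc a negative duration fires it immediately, which is the opposite of "disabled". A small sketch of the rule; armIdleTimer is an illustrative helper, not the vendored API:

package main

import (
	"fmt"
	"time"
)

// armIdleTimer treats zero or negative durations as "no timeout" instead of
// arming a timer that would fire at once.
func armIdleTimer(d time.Duration, onIdle func()) *time.Timer {
	if d <= 0 {
		return nil
	}
	return time.AfterFunc(d, onIdle)
}

func main() {
	fmt.Println(armIdleTimer(-time.Second, func() {}) == nil) // true: disabled
	t := armIdleTimer(time.Hour, func() {})
	defer t.Stop()
	fmt.Println(t != nil) // true: armed
}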
- date = time.Now().UTC().Format(http.TimeFormat) + date = rws.conn.srv.now().UTC().Format(http.TimeFormat) } for _, v := range rws.snapHeader["Trailer"] { @@ -2759,7 +2805,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() { func (w *responseWriter) SetReadDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(time.Now()) { + if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { // If we're setting a deadline in the past, reset the stream immediately // so reads after SetReadDeadline returns will fail. st.onReadTimeout() return nil } @@ -2775,9 +2821,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { if deadline.IsZero() { st.readDeadline = nil } else if st.readDeadline == nil { - st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout) + st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout) } else { - st.readDeadline.Reset(deadline.Sub(time.Now())) + st.readDeadline.Reset(deadline.Sub(sc.srv.now())) } }) return nil @@ -2785,7 +2831,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(time.Now()) { + if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. st.onWriteTimeout() return nil } @@ -2801,9 +2847,9 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { if deadline.IsZero() { st.writeDeadline = nil } else if st.writeDeadline == nil { - st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout) + st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout) } else { - st.writeDeadline.Reset(deadline.Sub(time.Now())) + st.writeDeadline.Reset(deadline.Sub(sc.srv.now())) } }) return nil diff --git a/vendor/golang.org/x/net/http2/timer.go b/vendor/golang.org/x/net/http2/timer.go new file mode 100644 index 00000000..0b1c17b8 --- /dev/null +++ b/vendor/golang.org/x/net/http2/timer.go @@ -0,0 +1,20 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package http2 + +import "time" + +// A timer is a time.Timer, as an interface which can be replaced in tests. +type timer = interface { + C() <-chan time.Time + Reset(d time.Duration) bool + Stop() bool +} + +// timeTimer adapts a time.Timer to the timer interface. +type timeTimer struct { + *time.Timer +} + +func (t timeTimer) C() <-chan time.Time { return t.Timer.C } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index df578b86..61f511f9 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -147,6 +147,12 @@ type Transport struct { // waiting for their turn. StrictMaxConcurrentStreams bool + // IdleConnTimeout is the maximum amount of time an idle + // (keep-alive) connection will remain idle before closing + // itself. + // Zero means no limit. + IdleConnTimeout time.Duration + // ReadIdleTimeout is the timeout after which a health check using ping // frame will be carried out if no frame is received on the connection.
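SetReadDeadline and SetWriteDeadline above share one state machine: a deadline already in the past times out the stream synchronously, a zero deadline clears the timer, and any other deadline arms or re-arms the timer for deadline.Sub(now). A simplified sketch of that logic; setDeadline is an illustrative helper and uses the real clock rather than the server's injected one:

package main

import (
	"fmt"
	"time"
)

func setDeadline(tm *time.Timer, deadline time.Time, onTimeout func()) *time.Timer {
	now := time.Now()
	if !deadline.IsZero() && deadline.Before(now) {
		onTimeout() // already late: fire synchronously
		return tm
	}
	if deadline.IsZero() {
		if tm != nil {
			tm.Stop()
		}
		return nil // deadline cleared
	}
	if tm == nil {
		return time.AfterFunc(deadline.Sub(now), onTimeout)
	}
	tm.Reset(deadline.Sub(now))
	return tm
}

func main() {
	done := make(chan struct{})
	setDeadline(nil, time.Now().Add(-time.Second), func() { close(done) })
	<-done // a past deadline fires immediately
	fmt.Println("timed out")
}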
// Note that a ping response is considered a received frame, so if @@ -178,6 +184,46 @@ type Transport struct { connPoolOnce sync.Once connPoolOrDef ClientConnPool // non-nil version of ConnPool + + *transportTestHooks +} + +// Hook points used for testing. +// Outside of tests, t.transportTestHooks is nil and these all have minimal implementations. +// Inside tests, see the testSyncHooks function docs. + +type transportTestHooks struct { + newclientconn func(*ClientConn) + group synctestGroupInterface +} + +func (t *Transport) markNewGoroutine() { + if t != nil && t.transportTestHooks != nil { + t.transportTestHooks.group.Join() + } +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (t *Transport) newTimer(d time.Duration) timer { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.NewTimer(d) + } + return timeTimer{time.NewTimer(d)} +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (t *Transport) afterFunc(d time.Duration, f func()) timer { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.AfterFunc(d, f) + } + return timeTimer{time.AfterFunc(d, f)} +} + +func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.ContextWithTimeout(ctx, d) + } + return context.WithTimeout(ctx, d) } func (t *Transport) maxHeaderListSize() uint32 { @@ -302,7 +348,7 @@ type ClientConn struct { readerErr error // set before readerDone is closed idleTimeout time.Duration // or 0 for never - idleTimer *time.Timer + idleTimer timer mu sync.Mutex // guards following cond *sync.Cond // hold mu; broadcast on flow/closed changes @@ -446,6 +492,7 @@ func (cs *clientStream) closeReqBodyLocked() { cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed go func() { + cs.cc.t.markNewGoroutine() cs.reqBody.Close() close(reqBodyClosed) }() @@ -537,15 +584,6 @@ func authorityAddr(scheme string, authority string) (addr string) { return net.JoinHostPort(host, port) } -var retryBackoffHook func(time.Duration) *time.Timer - -func backoffNewTimer(d time.Duration) *time.Timer { - if retryBackoffHook != nil { - return retryBackoffHook(d) - } - return time.NewTimer(d) -} - // RoundTripOpt is like RoundTrip, but takes options.
func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { @@ -573,13 +611,13 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - timer := backoffNewTimer(d) + tm := t.newTimer(d) select { - case <-timer.C: + case <-tm.C(): t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue case <-req.Context().Done(): - timer.Stop() + tm.Stop() err = req.Context().Err() } } @@ -658,6 +696,9 @@ func canRetryError(err error) bool { } func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { + if t.transportTestHooks != nil { + return t.newClientConn(nil, singleUse) + } host, _, err := net.SplitHostPort(addr) if err != nil { return nil, err @@ -751,9 +792,10 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), } - if d := t.idleConnTimeout(); d != 0 { - cc.idleTimeout = d - cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) + if t.transportTestHooks != nil { + t.markNewGoroutine() + t.transportTestHooks.newclientconn(cc) + c = cc.tconn } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -785,10 +827,6 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) cc.peerMaxHeaderTableSize = initialHeaderTableSize - if t.AllowHTTP { - cc.nextStreamID = 3 - } - if cs, ok := c.(connectionStater); ok { state := cs.ConnectionState() cc.tlsState = &state @@ -818,6 +856,12 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro return nil, cc.werr } + // Start the idle timer after the connection is fully initialized. + if d := t.idleConnTimeout(); d != 0 { + cc.idleTimeout = d + cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout) + } + go cc.readLoop() return cc, nil } @@ -826,7 +870,7 @@ func (cc *ClientConn) healthCheck() { pingTimeout := cc.t.pingTimeout() // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. - ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) + ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -861,7 +905,20 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) { } last := f.LastStreamID for streamID, cs := range cc.streams { - if streamID > last { + if streamID <= last { + // The server's GOAWAY indicates that it received this stream. + // It will either finish processing it, or close the connection + // without doing so. Either way, leave the stream alone for now. + continue + } + if streamID == 1 && cc.goAway.ErrCode != ErrCodeNo { + // Don't retry the first stream on a connection if we get a non-NO error. + // If the server is sending an error on a new connection, + // retrying the request on a new one probably isn't going to work. 
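The retry path in RoundTripOpt above computes exponential backoff with jitter: the delay doubles per retry (1s, 2s, 4s, ...) plus up to 10% random noise. A standalone sketch of the computation; note it scales by float64(time.Second) before converting, so the fractional jitter survives, whereas the vendored time.Second * time.Duration(backoff) truncates it to whole seconds:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func retryBackoff(retry uint) time.Duration {
	backoff := float64(uint(1) << (retry - 1))  // 1, 2, 4, 8, ... seconds
	backoff += backoff * (0.1 * rand.Float64()) // up to +10% jitter
	return time.Duration(backoff * float64(time.Second))
}

func main() {
	for retry := uint(1); retry <= 4; retry++ {
		fmt.Println(retryBackoff(retry))
	}
}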
+ cs.abortStreamLocked(fmt.Errorf("http2: Transport received GOAWAY from server ErrCode:%v", cc.goAway.ErrCode)) + } else { + // Aborting the stream with errClientConnGotGoAway indicates that + // the request should be retried on a new connection. cs.abortStreamLocked(errClientConnGotGoAway) } } @@ -1057,6 +1114,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { done := make(chan struct{}) cancelled := false // guarded by cc.mu go func() { + cc.t.markNewGoroutine() cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1215,6 +1273,10 @@ func (cc *ClientConn) decrStreamReservationsLocked() { } func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + return cc.roundTrip(req, nil) +} + +func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) (*http.Response, error) { ctx := req.Context() cs := &clientStream{ cc: cc, @@ -1229,7 +1291,28 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { respHeaderRecv: make(chan struct{}), donec: make(chan struct{}), } - go cs.doRequest(req) + + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + !cs.isHead { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: https://zlib.net/zlib_faq.html#faq39 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + cs.requestedGzip = true + } + + go cs.doRequest(req, streamf) waitDone := func() error { select { @@ -1322,8 +1405,9 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { // doRequest runs for the duration of the request lifetime. // // It sends the request and performs post-request cleanup (closing Request.Body, etc.). -func (cs *clientStream) doRequest(req *http.Request) { - err := cs.writeRequest(req) +func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) { + cs.cc.t.markNewGoroutine() + err := cs.writeRequest(req, streamf) cs.cleanupWriteRequest(err) } @@ -1334,7 +1418,7 @@ func (cs *clientStream) doRequest(req *http.Request) { // // It returns non-nil if the request ends otherwise. // If the returned error is StreamError, the error Code may be used in resetting the stream. -func (cs *clientStream) writeRequest(req *http.Request) (err error) { +func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStream)) (err error) { cc := cs.cc ctx := cs.ctx @@ -1372,24 +1456,8 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { } cc.mu.Unlock() - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - !cs.isHead { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway.
- // See: https://zlib.net/zlib_faq.html#faq39 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. See https://golang.org/issue/8923 - cs.requestedGzip = true + if streamf != nil { + streamf(cs) } continueTimeout := cc.t.expectContinueTimeout() @@ -1452,9 +1520,9 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) + timer := cc.t.newTimer(d) defer timer.Stop() - respHeaderTimer = timer.C + respHeaderTimer = timer.C() respHeaderRecv = cs.respHeaderRecv } // Wait until the peer half-closes its end of the stream, @@ -1875,6 +1943,22 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) } } +func validateHeaders(hdrs http.Header) string { + for k, vv := range hdrs { + if !httpguts.ValidHeaderFieldName(k) { + return fmt.Sprintf("name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // Don't include the value in the error, + // because it may be sensitive. + return fmt.Sprintf("value for header %q", k) + } + } + } + return "" +} + var errNilRequestURL = errors.New("http2: Request.URI is nil") // requires cc.wmu be held. @@ -1912,19 +1996,14 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } } - // Check for any invalid headers and return an error before we + // Check for any invalid headers+trailers and return an error before we // potentially pollute our hpack state. (We want to be able to // continue to reuse the hpack encoder for future requests) - for k, vv := range req.Header { - if !httpguts.ValidHeaderFieldName(k) { - return nil, fmt.Errorf("invalid HTTP header name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - // Don't include the value in the error, because it may be sensitive. - return nil, fmt.Errorf("invalid HTTP header value for header %q", k) - } - } + if err := validateHeaders(req.Header); err != "" { + return nil, fmt.Errorf("invalid HTTP header %s", err) + } + if err := validateHeaders(req.Trailer); err != "" { + return nil, fmt.Errorf("invalid HTTP trailer %s", err) } enumerateHeaders := func(f func(name, value string)) { @@ -2165,6 +2244,7 @@ type clientConnReadLoop struct { // readLoop runs in its own goroutine and reads and dispatches frames. 
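validateHeaders above centralizes the httpguts checks so encodeHeaders can reject both invalid headers and invalid trailers before polluting reusable hpack state. A small usage sketch against the same validators; firstInvalid is an illustrative name:

package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/http/httpguts"
)

// firstInvalid reports the first invalid header name or value, or "".
func firstInvalid(hdrs http.Header) string {
	for k, vv := range hdrs {
		if !httpguts.ValidHeaderFieldName(k) {
			return fmt.Sprintf("name %q", k)
		}
		for _, v := range vv {
			if !httpguts.ValidHeaderFieldValue(v) {
				// The value itself is omitted: it may be sensitive.
				return fmt.Sprintf("value for header %q", k)
			}
		}
	}
	return ""
}

func main() {
	h := http.Header{"X-Ok": {"yes"}, "Bad\nName": {"v"}}
	fmt.Println(firstInvalid(h)) // reports the header name with the newline
}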
func (cc *ClientConn) readLoop() { + cc.t.markNewGoroutine() rl := &clientConnReadLoop{cc: cc} defer rl.cleanup() cc.readerErr = rl.run() @@ -2266,10 +2346,9 @@ func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false readIdleTimeout := cc.t.ReadIdleTimeout - var t *time.Timer + var t timer if readIdleTimeout != 0 { - t = time.AfterFunc(readIdleTimeout, cc.healthCheck) - defer t.Stop() + t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -2684,7 +2763,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { }) return nil } - if !cs.firstByte { + if !cs.pastHeaders { cc.logf("protocol error: received DATA before a HEADERS frame") rl.endStreamError(cs, StreamError{ StreamID: f.StreamID, @@ -2911,6 +2990,15 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { fl = &cs.flow } if !fl.add(int32(f.Increment)) { + // For a stream-level error, the sender sends RST_STREAM with an error code of FLOW_CONTROL_ERROR. + if cs != nil { + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeFlowControl, + }) + return nil + } + return ConnectionError(ErrCodeFlowControl) } cc.cond.Broadcast() @@ -2955,24 +3043,26 @@ func (cc *ClientConn) Ping(ctx context.Context) error { } cc.mu.Unlock() } - errc := make(chan error, 1) + var pingError error + errc := make(chan struct{}) go func() { + cc.t.markNewGoroutine() cc.wmu.Lock() defer cc.wmu.Unlock() - if err := cc.fr.WritePing(false, p); err != nil { - errc <- err + if pingError = cc.fr.WritePing(false, p); pingError != nil { + close(errc) return } - if err := cc.bw.Flush(); err != nil { - errc <- err + if pingError = cc.bw.Flush(); pingError != nil { + close(errc) return } }() select { case <-c: return nil - case err := <-errc: - return err + case <-errc: + return pingError case <-ctx.Done(): return ctx.Err() case <-cc.readerDone: @@ -3141,9 +3231,17 @@ func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err } func (t *Transport) idleConnTimeout() time.Duration { + // To keep things backwards compatible, we use non-zero values of + // IdleConnTimeout, followed by the IdleConnTimeout on the underlying + // http1 transport, followed by 0. + if t.IdleConnTimeout != 0 { + return t.IdleConnTimeout + } + if t.t1 != nil { return t.t1.IdleConnTimeout } + return 0 } diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go index 0a242c66..f6783339 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -443,8 +443,8 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max } func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { - for k := n.kids; k != nil; k = k.next { - k.setParent(n.parent) + for n.kids != nil { + n.kids.setParent(n.parent) } n.setParent(nil) delete(ws.nodes, n.id) diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go index 573fe79e..d7d4b8b6 100644 --- a/vendor/golang.org/x/net/proxy/per_host.go +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -137,9 +137,7 @@ func (p *PerHost) AddNetwork(net *net.IPNet) { // AddZone specifies a DNS suffix that will use the bypass proxy. A zone of // "example.com" matches "example.com" and all of its subdomains.
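The removeNode fix above matters because setParent unlinks a node from its old parent's kid list and relinks it under the new parent, so the old iteration read k.next after k had already been moved and could skip or rewalk siblings. Draining the list head (for n.kids != nil) reparents every child exactly once. A toy model of the same sibling-list bookkeeping; node and setParent here are a simplified sketch, not the scheduler's types:

package main

import "fmt"

type node struct {
	id         int
	parent     *node
	kids, next *node // first child; next sibling
}

// setParent unlinks n from its current parent's kid list, then prepends it
// to the new parent's list, mirroring the scheduler's bookkeeping.
func (n *node) setParent(p *node) {
	if n.parent != nil {
		prev := &n.parent.kids
		for *prev != n {
			prev = &(*prev).next
		}
		*prev = n.next
	}
	n.parent, n.next = p, nil
	if p != nil {
		n.next = p.kids
		p.kids = n
	}
}

func main() {
	root := &node{id: 0}
	mid := &node{id: 1}
	mid.setParent(root)
	for i := 2; i <= 4; i++ {
		(&node{id: i}).setParent(mid)
	}
	// Remove mid: reparent each of its kids to root by draining the list head.
	for mid.kids != nil {
		mid.kids.setParent(root)
	}
	for k := root.kids; k != nil; k = k.next {
		fmt.Println(k.id) // all former kids of mid, then mid itself
	}
}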
func (p *PerHost) AddZone(zone string) { - if strings.HasSuffix(zone, ".") { - zone = zone[:len(zone)-1] - } + zone = strings.TrimSuffix(zone, ".") if !strings.HasPrefix(zone, ".") { zone = "." + zone } @@ -148,8 +146,6 @@ func (p *PerHost) AddZone(zone string) { // AddHost specifies a host name that will use the bypass proxy. func (p *PerHost) AddHost(host string) { - if strings.HasSuffix(host, ".") { - host = host[:len(host)-1] - } + host = strings.TrimSuffix(host, ".") p.bypassHosts = append(p.bypassHosts, host) } diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go index feb1157b..564920bd 100644 --- a/vendor/golang.org/x/oauth2/google/appengine.go +++ b/vendor/golang.org/x/oauth2/google/appengine.go @@ -6,16 +6,13 @@ package google import ( "context" - "time" + "log" + "sync" "golang.org/x/oauth2" ) -// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible. -var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) - -// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible. -var appengineAppIDFunc func(c context.Context) string +var logOnce sync.Once // only spam about deprecation once // AppEngineTokenSource returns a token source that fetches tokens from either // the current application's service account or from the metadata server, @@ -23,8 +20,10 @@ var appengineAppIDFunc func(c context.Context) string // details. If you are implementing a 3-legged OAuth 2.0 flow on App Engine that // involves user accounts, see oauth2.Config instead. // -// First generation App Engine runtimes (<= Go 1.9): -// AppEngineTokenSource returns a token source that fetches tokens issued to the +// The current version of this library requires at least Go 1.17 to build, +// so first generation App Engine runtimes (<= Go 1.9) are unsupported. +// Previously, on first generation App Engine runtimes, AppEngineTokenSource +// returned a token source that fetches tokens issued to the // current App Engine application's service account. The provided context must have // come from appengine.NewContext. // @@ -34,5 +33,8 @@ var appengineAppIDFunc func(c context.Context) string // context and scopes are not used. Please use DefaultTokenSource (or ComputeTokenSource, // which DefaultTokenSource will use in this case) instead. func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - return appEngineTokenSource(ctx, scope...) + logOnce.Do(func() { + log.Print("google: AppEngineTokenSource is deprecated on App Engine standard second generation runtimes (>= Go 1.11) and App Engine flexible. Please use DefaultTokenSource or ComputeTokenSource.") + }) + return ComputeTokenSource("") } diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go deleted file mode 100644 index e6158794..00000000 --- a/vendor/golang.org/x/oauth2/google/appengine_gen1.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build appengine - -// This file applies to App Engine first generation runtimes (<= Go 1.9). 
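AppEngineTokenSource above now degrades to ComputeTokenSource and logs its deprecation warning exactly once per process via sync.Once, rather than on every call. The same one-shot logging pattern in isolation; warnOnce and deprecatedEntryPoint are illustrative names:

package main

import (
	"log"
	"sync"
)

var warnOnce sync.Once

func deprecatedEntryPoint() {
	warnOnce.Do(func() {
		log.Print("deprecatedEntryPoint: use the newer API instead") // logged once per process
	})
}

func main() {
	for i := 0; i < 3; i++ {
		deprecatedEntryPoint() // only the first call logs
	}
}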
- -package google - -import ( - "context" - "sort" - "strings" - "sync" - - "golang.org/x/oauth2" - "google.golang.org/appengine" -) - -func init() { - appengineTokenFunc = appengine.AccessToken - appengineAppIDFunc = appengine.AppID -} - -// See comment on AppEngineTokenSource in appengine.go. -func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - scopes := append([]string{}, scope...) - sort.Strings(scopes) - return &gaeTokenSource{ - ctx: ctx, - scopes: scopes, - key: strings.Join(scopes, " "), - } -} - -// aeTokens helps the fetched tokens to be reused until their expiration. -var ( - aeTokensMu sync.Mutex - aeTokens = make(map[string]*tokenLock) // key is space-separated scopes -) - -type tokenLock struct { - mu sync.Mutex // guards t; held while fetching or updating t - t *oauth2.Token -} - -type gaeTokenSource struct { - ctx context.Context - scopes []string - key string // to aeTokens map; space-separated scopes -} - -func (ts *gaeTokenSource) Token() (*oauth2.Token, error) { - aeTokensMu.Lock() - tok, ok := aeTokens[ts.key] - if !ok { - tok = &tokenLock{} - aeTokens[ts.key] = tok - } - aeTokensMu.Unlock() - - tok.mu.Lock() - defer tok.mu.Unlock() - if tok.t.Valid() { - return tok.t, nil - } - access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) - if err != nil { - return nil, err - } - tok.t = &oauth2.Token{ - AccessToken: access, - Expiry: exp, - } - return tok.t, nil -} diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go deleted file mode 100644 index 9c79aa0a..00000000 --- a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !appengine - -// This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. - -package google - -import ( - "context" - "log" - "sync" - - "golang.org/x/oauth2" -) - -var logOnce sync.Once // only spam about deprecation once - -// See comment on AppEngineTokenSource in appengine.go. -func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - logOnce.Do(func() { - log.Print("google: AppEngineTokenSource is deprecated on App Engine standard second generation runtimes (>= Go 1.11) and App Engine flexible. Please use DefaultTokenSource or ComputeTokenSource.") - }) - return ComputeTokenSource("") -} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index 02ccd08a..df958359 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -22,7 +22,7 @@ import ( const ( adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" - universeDomainDefault = "googleapis.com" + defaultUniverseDomain = "googleapis.com" ) // Credentials holds Google credentials, including "Application Default Credentials". @@ -42,6 +42,17 @@ type Credentials struct { // running on Google Cloud Platform. JSON []byte + // UniverseDomainProvider returns the default service domain for a given + // Cloud universe. Optional. + // + // On GCE, UniverseDomainProvider should return the universe domain value + // from Google Compute Engine (GCE)'s metadata server. 
See also [The attached service + // account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa). + // If the GCE metadata server returns a 404 error, the default universe + // domain value should be returned. If the GCE metadata server returns an + // error other than 404, the error should be returned. + UniverseDomainProvider func() (string, error) + udMu sync.Mutex // guards universeDomain // universeDomain is the default service domain for a given Cloud universe. universeDomain string @@ -58,60 +69,38 @@ type Credentials struct { // See also [The attached service account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa). func (c *Credentials) UniverseDomain() string { if c.universeDomain == "" { - return universeDomainDefault + return defaultUniverseDomain } return c.universeDomain } // GetUniverseDomain returns the default service domain for a given Cloud -// universe. +// universe. If present, UniverseDomainProvider will be invoked and its return +// value will be cached. // // The default value is "googleapis.com". -// -// It obtains the universe domain from the attached service account on GCE when -// authenticating via the GCE metadata server. See also [The attached service -// account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa). -// If the GCE metadata server returns a 404 error, the default value is -// returned. If the GCE metadata server returns an error other than 404, the -// error is returned. func (c *Credentials) GetUniverseDomain() (string, error) { c.udMu.Lock() defer c.udMu.Unlock() - if c.universeDomain == "" && metadata.OnGCE() { - // If we're on Google Compute Engine, an App Engine standard second - // generation runtime, or App Engine flexible, use the metadata server. - err := c.computeUniverseDomain() + if c.universeDomain == "" && c.UniverseDomainProvider != nil { + // On Google Compute Engine, an App Engine standard second generation + // runtime, or App Engine flexible, use an externally provided function + // to request the universe domain from the metadata server. + ud, err := c.UniverseDomainProvider() if err != nil { return "", err } + c.universeDomain = ud } - // If not on Google Compute Engine, or in case of any non-error path in - // computeUniverseDomain that did not set universeDomain, set the default - // universe domain. + // If no UniverseDomainProvider (meaning not on Google Compute Engine), or + // in case of any (non-error) empty return value from + // UniverseDomainProvider, set the default universe domain. if c.universeDomain == "" { - c.universeDomain = universeDomainDefault + c.universeDomain = defaultUniverseDomain } return c.universeDomain, nil } -// computeUniverseDomain fetches the default service domain for a given Cloud -// universe from Google Compute Engine (GCE)'s metadata server. It's only valid -// to use this method if your program is running on a GCE instance. -func (c *Credentials) computeUniverseDomain() error { - var err error - c.universeDomain, err = metadata.Get("universe/universe_domain") - if err != nil { - if _, ok := err.(metadata.NotDefinedError); ok { - // http.StatusNotFound (404) - c.universeDomain = universeDomainDefault - return nil - } else { - return err - } - } - return nil -} - // DefaultCredentials is the old name of Credentials. // // Deprecated: use Credentials instead. @@ -199,9 +188,7 @@ func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSourc // 2. 
A JSON file in a location known to the gcloud command-line tool. // On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. // On other systems, $HOME/.config/gcloud/application_default_credentials.json. -// 3. On Google App Engine standard first generation runtimes (<= Go 1.9) it uses -// the appengine.AccessToken function. -// 4. On Google Compute Engine, Google App Engine standard second generation runtimes +// 3. On Google Compute Engine, Google App Engine standard second generation runtimes // (>= Go 1.11), and Google App Engine flexible environment, it fetches // credentials from the metadata server. func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsParams) (*Credentials, error) { @@ -224,24 +211,27 @@ func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsPar return CredentialsFromJSONWithParams(ctx, b, params) } - // Third, if we're on a Google App Engine standard first generation runtime (<= Go 1.9) - // use those credentials. App Engine standard second generation runtimes (>= Go 1.11) - // and App Engine flexible use ComputeTokenSource and the metadata server. - if appengineTokenFunc != nil { - return &Credentials{ - ProjectID: appengineAppIDFunc(ctx), - TokenSource: AppEngineTokenSource(ctx, params.Scopes...), - }, nil - } - - // Fourth, if we're on Google Compute Engine, an App Engine standard second generation runtime, + // Third, if we're on Google Compute Engine, an App Engine standard second generation runtime, // or App Engine flexible, use the metadata server. if metadata.OnGCE() { id, _ := metadata.ProjectID() + universeDomainProvider := func() (string, error) { + universeDomain, err := metadata.Get("universe/universe_domain") + if err != nil { + if _, ok := err.(metadata.NotDefinedError); ok { + // http.StatusNotFound (404) + return defaultUniverseDomain, nil + } else { + return "", err + } + } + return universeDomain, nil + } return &Credentials{ - ProjectID: id, - TokenSource: computeTokenSource("", params.EarlyTokenRefresh, params.Scopes...), - universeDomain: params.UniverseDomain, + ProjectID: id, + TokenSource: computeTokenSource("", params.EarlyTokenRefresh, params.Scopes...), + UniverseDomainProvider: universeDomainProvider, + universeDomain: params.UniverseDomain, }, nil } @@ -287,7 +277,7 @@ func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params } // Authorized user credentials are only supported in the googleapis.com universe. if f.Type == userCredentialsKey { - universeDomain = universeDomainDefault + universeDomain = defaultUniverseDomain } ts, err := f.tokenSource(ctx, params) diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go index 03c42c6f..830d268c 100644 --- a/vendor/golang.org/x/oauth2/google/doc.go +++ b/vendor/golang.org/x/oauth2/google/doc.go @@ -22,91 +22,9 @@ // the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or // create an http.Client. // -// # Workload Identity Federation +// # Workload and Workforce Identity Federation // -// Using workload identity federation, your application can access Google Cloud -// resources from Amazon Web Services (AWS), Microsoft Azure or any identity -// provider that supports OpenID Connect (OIDC) or SAML 2.0. -// Traditionally, applications running outside Google Cloud have used service -// account keys to access Google Cloud resources. Using identity federation, -// you can allow your workload to impersonate a service account. 
-// This lets you access Google Cloud resources directly, eliminating the -// maintenance and security burden associated with service account keys. -// -// Follow the detailed instructions on how to configure Workload Identity Federation -// in various platforms: -// -// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#aws -// Microsoft Azure: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#azure -// OIDC identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#oidc -// SAML 2.0 identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#saml -// -// For OIDC and SAML providers, the library can retrieve tokens in three ways: -// from a local file location (file-sourced credentials), from a server -// (URL-sourced credentials), or from a local executable (executable-sourced -// credentials). -// For file-sourced credentials, a background process needs to be continuously -// refreshing the file location with a new OIDC/SAML token prior to expiration. -// For tokens with one hour lifetimes, the token needs to be updated in the file -// every hour. The token can be stored directly as plain text or in JSON format. -// For URL-sourced credentials, a local server needs to host a GET endpoint to -// return the OIDC/SAML token. The response can be in plain text or JSON. -// Additional required request headers can also be specified. -// For executable-sourced credentials, an application needs to be available to -// output the OIDC/SAML token and other information in a JSON format. -// For more information on how these work (and how to implement -// executable-sourced credentials), please check out: -// https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#create_a_credential_configuration -// -// Note that this library does not perform any validation on the token_url, token_info_url, -// or service_account_impersonation_url fields of the credential configuration. -// It is not recommended to use a credential configuration that you did not generate with -// the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. -// -// # Workforce Identity Federation -// -// Workforce identity federation lets you use an external identity provider (IdP) to -// authenticate and authorize a workforce—a group of users, such as employees, partners, -// and contractors—using IAM, so that the users can access Google Cloud services. -// Workforce identity federation extends Google Cloud's identity capabilities to support -// syncless, attribute-based single sign on. -// -// With workforce identity federation, your workforce can access Google Cloud resources -// using an external identity provider (IdP) that supports OpenID Connect (OIDC) or -// SAML 2.0 such as Azure Active Directory (Azure AD), Active Directory Federation -// Services (AD FS), Okta, and others. 
-// -// Follow the detailed instructions on how to configure Workload Identity Federation -// in various platforms: -// -// Azure AD: https://cloud.google.com/iam/docs/workforce-sign-in-azure-ad -// Okta: https://cloud.google.com/iam/docs/workforce-sign-in-okta -// OIDC identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#oidc -// SAML 2.0 identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#saml -// -// For workforce identity federation, the library can retrieve tokens in three ways: -// from a local file location (file-sourced credentials), from a server -// (URL-sourced credentials), or from a local executable (executable-sourced -// credentials). -// For file-sourced credentials, a background process needs to be continuously -// refreshing the file location with a new OIDC/SAML token prior to expiration. -// For tokens with one hour lifetimes, the token needs to be updated in the file -// every hour. The token can be stored directly as plain text or in JSON format. -// For URL-sourced credentials, a local server needs to host a GET endpoint to -// return the OIDC/SAML token. The response can be in plain text or JSON. -// Additional required request headers can also be specified. -// For executable-sourced credentials, an application needs to be available to -// output the OIDC/SAML token and other information in a JSON format. -// For more information on how these work (and how to implement -// executable-sourced credentials), please check out: -// https://cloud.google.com/iam/docs/workforce-obtaining-short-lived-credentials#generate_a_configuration_file_for_non-interactive_sign-in -// -// # Security considerations -// -// Note that this library does not perform any validation on the token_url, token_info_url, -// or service_account_impersonation_url fields of the credential configuration. -// It is not recommended to use a credential configuration that you did not generate with -// the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. +// For information on how to use Workload and Workforce Identity Federation, see [golang.org/x/oauth2/google/externalaccount]. // // # Credentials // diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/externalaccount/aws.go similarity index 77% rename from vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go rename to vendor/golang.org/x/oauth2/google/externalaccount/aws.go index bd4efd19..ca27c2e9 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go +++ b/vendor/golang.org/x/oauth2/google/externalaccount/aws.go @@ -26,22 +26,28 @@ import ( "golang.org/x/oauth2" ) -type awsSecurityCredentials struct { - AccessKeyID string `json:"AccessKeyID"` +// AwsSecurityCredentials models AWS security credentials. +type AwsSecurityCredentials struct { + // AccessKeyID is the AWS Access Key ID - Required. + AccessKeyID string `json:"AccessKeyID"` + // SecretAccessKey is the AWS Secret Access Key - Required. SecretAccessKey string `json:"SecretAccessKey"` - SecurityToken string `json:"Token"` + // SessionToken is the AWS Session token. This should be provided for temporary AWS security credentials - Optional. + SessionToken string `json:"Token"` } // awsRequestSigner is a utility class to sign http requests using an AWS V4 signature.
type awsRequestSigner struct { RegionName string - AwsSecurityCredentials awsSecurityCredentials + AwsSecurityCredentials *AwsSecurityCredentials } // getenv aliases os.Getenv for testing var getenv = os.Getenv const ( + defaultRegionalCredentialVerificationUrl = "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15" + // AWS Signature Version 4 signing algorithm identifier. awsAlgorithm = "AWS4-HMAC-SHA256" @@ -197,8 +203,8 @@ func (rs *awsRequestSigner) SignRequest(req *http.Request) error { signedRequest.Header.Add("host", requestHost(req)) - if rs.AwsSecurityCredentials.SecurityToken != "" { - signedRequest.Header.Add(awsSecurityTokenHeader, rs.AwsSecurityCredentials.SecurityToken) + if rs.AwsSecurityCredentials.SessionToken != "" { + signedRequest.Header.Add(awsSecurityTokenHeader, rs.AwsSecurityCredentials.SessionToken) } if signedRequest.Header.Get("date") == "" { @@ -251,16 +257,18 @@ func (rs *awsRequestSigner) generateAuthentication(req *http.Request, timestamp } type awsCredentialSource struct { - EnvironmentID string - RegionURL string - RegionalCredVerificationURL string - CredVerificationURL string - IMDSv2SessionTokenURL string - TargetResource string - requestSigner *awsRequestSigner - region string - ctx context.Context - client *http.Client + environmentID string + regionURL string + regionalCredVerificationURL string + credVerificationURL string + imdsv2SessionTokenURL string + targetResource string + requestSigner *awsRequestSigner + region string + ctx context.Context + client *http.Client + awsSecurityCredentialsSupplier AwsSecurityCredentialsSupplier + supplierOptions SupplierOptions } type awsRequestHeader struct { @@ -292,18 +300,25 @@ func canRetrieveSecurityCredentialFromEnvironment() bool { return getenv(awsAccessKeyId) != "" && getenv(awsSecretAccessKey) != "" } -func shouldUseMetadataServer() bool { - return !canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment() +func (cs awsCredentialSource) shouldUseMetadataServer() bool { + return cs.awsSecurityCredentialsSupplier == nil && (!canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment()) } func (cs awsCredentialSource) credentialSourceType() string { + if cs.awsSecurityCredentialsSupplier != nil { + return "programmatic" + } return "aws" } func (cs awsCredentialSource) subjectToken() (string, error) { + // Set Defaults + if cs.regionalCredVerificationURL == "" { + cs.regionalCredVerificationURL = defaultRegionalCredentialVerificationUrl + } if cs.requestSigner == nil { headers := make(map[string]string) - if shouldUseMetadataServer() { + if cs.shouldUseMetadataServer() { awsSessionToken, err := cs.getAWSSessionToken() if err != nil { return "", err @@ -318,8 +333,8 @@ func (cs awsCredentialSource) subjectToken() (string, error) { if err != nil { return "", err } - - if cs.region, err = cs.getRegion(headers); err != nil { + cs.region, err = cs.getRegion(headers) + if err != nil { return "", err } @@ -331,7 +346,7 @@ func (cs awsCredentialSource) subjectToken() (string, error) { // Generate the signed request to AWS STS GetCallerIdentity API. // Use the required regional endpoint. Otherwise, the request will fail. 
- req, err := http.NewRequest("POST", strings.Replace(cs.RegionalCredVerificationURL, "{region}", cs.region, 1), nil) + req, err := http.NewRequest("POST", strings.Replace(cs.regionalCredVerificationURL, "{region}", cs.region, 1), nil) if err != nil { return "", err } @@ -339,8 +354,8 @@ func (cs awsCredentialSource) subjectToken() (string, error) { // provider, with or without the HTTPS prefix. // Including this header as part of the signature is recommended to // ensure data integrity. - if cs.TargetResource != "" { - req.Header.Add("x-goog-cloud-target-resource", cs.TargetResource) + if cs.targetResource != "" { + req.Header.Add("x-goog-cloud-target-resource", cs.targetResource) } cs.requestSigner.SignRequest(req) @@ -387,11 +402,11 @@ func (cs awsCredentialSource) subjectToken() (string, error) { } func (cs *awsCredentialSource) getAWSSessionToken() (string, error) { - if cs.IMDSv2SessionTokenURL == "" { + if cs.imdsv2SessionTokenURL == "" { return "", nil } - req, err := http.NewRequest("PUT", cs.IMDSv2SessionTokenURL, nil) + req, err := http.NewRequest("PUT", cs.imdsv2SessionTokenURL, nil) if err != nil { return "", err } @@ -410,25 +425,29 @@ func (cs *awsCredentialSource) getAWSSessionToken() (string, error) { } if resp.StatusCode != 200 { - return "", fmt.Errorf("oauth2/google: unable to retrieve AWS session token - %s", string(respBody)) + return "", fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS session token - %s", string(respBody)) } return string(respBody), nil } func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, error) { + if cs.awsSecurityCredentialsSupplier != nil { + return cs.awsSecurityCredentialsSupplier.AwsRegion(cs.ctx, cs.supplierOptions) + } if canRetrieveRegionFromEnvironment() { if envAwsRegion := getenv(awsRegion); envAwsRegion != "" { + cs.region = envAwsRegion return envAwsRegion, nil } return getenv("AWS_DEFAULT_REGION"), nil } - if cs.RegionURL == "" { - return "", errors.New("oauth2/google: unable to determine AWS region") + if cs.regionURL == "" { + return "", errors.New("oauth2/google/externalaccount: unable to determine AWS region") } - req, err := http.NewRequest("GET", cs.RegionURL, nil) + req, err := http.NewRequest("GET", cs.regionURL, nil) if err != nil { return "", err } @@ -449,7 +468,7 @@ func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, err } if resp.StatusCode != 200 { - return "", fmt.Errorf("oauth2/google: unable to retrieve AWS region - %s", string(respBody)) + return "", fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS region - %s", string(respBody)) } // This endpoint will return the region in format: us-east-2b. 
@@ -461,12 +480,15 @@ func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, err return string(respBody[:respBodyEnd]), nil } -func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string) (result awsSecurityCredentials, err error) { +func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string) (result *AwsSecurityCredentials, err error) { + if cs.awsSecurityCredentialsSupplier != nil { + return cs.awsSecurityCredentialsSupplier.AwsSecurityCredentials(cs.ctx, cs.supplierOptions) + } if canRetrieveSecurityCredentialFromEnvironment() { - return awsSecurityCredentials{ + return &AwsSecurityCredentials{ AccessKeyID: getenv(awsAccessKeyId), SecretAccessKey: getenv(awsSecretAccessKey), - SecurityToken: getenv(awsSessionToken), + SessionToken: getenv(awsSessionToken), }, nil } @@ -481,24 +503,23 @@ func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string) } if credentials.AccessKeyID == "" { - return result, errors.New("oauth2/google: missing AccessKeyId credential") + return result, errors.New("oauth2/google/externalaccount: missing AccessKeyId credential") } if credentials.SecretAccessKey == "" { - return result, errors.New("oauth2/google: missing SecretAccessKey credential") + return result, errors.New("oauth2/google/externalaccount: missing SecretAccessKey credential") } - return credentials, nil + return &credentials, nil } -func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string, headers map[string]string) (awsSecurityCredentials, error) { - var result awsSecurityCredentials +func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string, headers map[string]string) (AwsSecurityCredentials, error) { + var result AwsSecurityCredentials - req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", cs.CredVerificationURL, roleName), nil) + req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", cs.credVerificationURL, roleName), nil) if err != nil { return result, err } - req.Header.Add("Content-Type", "application/json") for name, value := range headers { req.Header.Add(name, value) @@ -516,7 +537,7 @@ func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string, h } if resp.StatusCode != 200 { - return result, fmt.Errorf("oauth2/google: unable to retrieve AWS security credentials - %s", string(respBody)) + return result, fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS security credentials - %s", string(respBody)) } err = json.Unmarshal(respBody, &result) @@ -524,11 +545,11 @@ func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string, h } func (cs *awsCredentialSource) getMetadataRoleName(headers map[string]string) (string, error) { - if cs.CredVerificationURL == "" { - return "", errors.New("oauth2/google: unable to determine the AWS metadata server security credentials endpoint") + if cs.credVerificationURL == "" { + return "", errors.New("oauth2/google/externalaccount: unable to determine the AWS metadata server security credentials endpoint") } - req, err := http.NewRequest("GET", cs.CredVerificationURL, nil) + req, err := http.NewRequest("GET", cs.credVerificationURL, nil) if err != nil { return "", err } @@ -549,7 +570,7 @@ func (cs *awsCredentialSource) getMetadataRoleName(headers map[string]string) (s } if resp.StatusCode != 200 { - return "", fmt.Errorf("oauth2/google: unable to retrieve AWS role name - %s", string(respBody)) + return "", fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS role name - 
%s", string(respBody)) } return string(respBody), nil diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go new file mode 100644 index 00000000..6c81a687 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go @@ -0,0 +1,485 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package externalaccount provides support for creating workload identity +federation and workforce identity federation token sources that can be +used to access Google Cloud resources from external identity providers. + +# Workload Identity Federation + +Using workload identity federation, your application can access Google Cloud +resources from Amazon Web Services (AWS), Microsoft Azure or any identity +provider that supports OpenID Connect (OIDC) or SAML 2.0. +Traditionally, applications running outside Google Cloud have used service +account keys to access Google Cloud resources. Using identity federation, +you can allow your workload to impersonate a service account. +This lets you access Google Cloud resources directly, eliminating the +maintenance and security burden associated with service account keys. + +Follow the detailed instructions on how to configure Workload Identity Federation +in various platforms: + +Amazon Web Services (AWS): https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#aws +Microsoft Azure: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#azure +OIDC identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#oidc +SAML 2.0 identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#saml + +For OIDC and SAML providers, the library can retrieve tokens in fours ways: +from a local file location (file-sourced credentials), from a server +(URL-sourced credentials), from a local executable (executable-sourced +credentials), or from a user defined function that returns an OIDC or SAML token. +For file-sourced credentials, a background process needs to be continuously +refreshing the file location with a new OIDC/SAML token prior to expiration. +For tokens with one hour lifetimes, the token needs to be updated in the file +every hour. The token can be stored directly as plain text or in JSON format. +For URL-sourced credentials, a local server needs to host a GET endpoint to +return the OIDC/SAML token. The response can be in plain text or JSON. +Additional required request headers can also be specified. +For executable-sourced credentials, an application needs to be available to +output the OIDC/SAML token and other information in a JSON format. +For more information on how these work (and how to implement +executable-sourced credentials), please check out: +https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#create_a_credential_configuration + +To use a custom function to supply the token, define a struct that implements the [SubjectTokenSupplier] interface for OIDC/SAML providers, +or one that implements [AwsSecurityCredentialsSupplier] for AWS providers. This can then be used when building a [Config]. +The [golang.org/x/oauth2.TokenSource] created from the config using [NewTokenSource] can then be used to access Google +Cloud resources. 
For instance, you can create a new client from the +[cloud.google.com/go/storage] package and pass in option.WithTokenSource(yourTokenSource)) + +Note that this library does not perform any validation on the token_url, token_info_url, +or service_account_impersonation_url fields of the credential configuration. +It is not recommended to use a credential configuration that you did not generate with +the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. + +# Workforce Identity Federation + +Workforce identity federation lets you use an external identity provider (IdP) to +authenticate and authorize a workforce—a group of users, such as employees, partners, +and contractors—using IAM, so that the users can access Google Cloud services. +Workforce identity federation extends Google Cloud's identity capabilities to support +syncless, attribute-based single sign on. + +With workforce identity federation, your workforce can access Google Cloud resources +using an external identity provider (IdP) that supports OpenID Connect (OIDC) or +SAML 2.0 such as Azure Active Directory (Azure AD), Active Directory Federation +Services (AD FS), Okta, and others. + +Follow the detailed instructions on how to configure Workload Identity Federation +in various platforms: + +Azure AD: https://cloud.google.com/iam/docs/workforce-sign-in-azure-ad +Okta: https://cloud.google.com/iam/docs/workforce-sign-in-okta +OIDC identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#oidc +SAML 2.0 identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#saml + +For workforce identity federation, the library can retrieve tokens in four ways: +from a local file location (file-sourced credentials), from a server +(URL-sourced credentials), from a local executable (executable-sourced +credentials), or from a user supplied function that returns an OIDC or SAML token. +For file-sourced credentials, a background process needs to be continuously +refreshing the file location with a new OIDC/SAML token prior to expiration. +For tokens with one hour lifetimes, the token needs to be updated in the file +every hour. The token can be stored directly as plain text or in JSON format. +For URL-sourced credentials, a local server needs to host a GET endpoint to +return the OIDC/SAML token. The response can be in plain text or JSON. +Additional required request headers can also be specified. +For executable-sourced credentials, an application needs to be available to +output the OIDC/SAML token and other information in a JSON format. +For more information on how these work (and how to implement +executable-sourced credentials), please check out: +https://cloud.google.com/iam/docs/workforce-obtaining-short-lived-credentials#generate_a_configuration_file_for_non-interactive_sign-in + +To use a custom function to supply the token, define a struct that implements the [SubjectTokenSupplier] interface for OIDC/SAML providers. +This can then be used when building a [Config]. +The [golang.org/x/oauth2.TokenSource] created from the config using [NewTokenSource] can then be used access Google +Cloud resources. For instance, you can create a new client from the +[cloud.google.com/go/storage] package and pass in option.WithTokenSource(yourTokenSource)) + +# Security considerations + +Note that this library does not perform any validation on the token_url, token_info_url, +or service_account_impersonation_url fields of the credential configuration. 
+It is not recommended to use a credential configuration that you did not generate with
+the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain.
+*/
+package externalaccount
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/google/internal/impersonate"
+	"golang.org/x/oauth2/google/internal/stsexchange"
+)
+
+const (
+	universeDomainPlaceholder = "UNIVERSE_DOMAIN"
+	defaultTokenURL           = "https://sts.UNIVERSE_DOMAIN/v1/token"
+	defaultUniverseDomain     = "googleapis.com"
+)
+
+// now aliases time.Now for testing
+var now = func() time.Time {
+	return time.Now().UTC()
+}
+
+// Config stores the configuration for fetching tokens with external credentials.
+type Config struct {
+	// Audience is the Secure Token Service (STS) audience which contains the resource name for the workload
+	// identity pool or the workforce pool and the provider identifier in that pool. Required.
+	Audience string
+	// SubjectTokenType is the STS token type based on the OAuth 2.0 token exchange spec.
+	// Expected values include:
+	// “urn:ietf:params:oauth:token-type:jwt”
+	// “urn:ietf:params:oauth:token-type:id-token”
+	// “urn:ietf:params:oauth:token-type:saml2”
+	// “urn:ietf:params:aws:token-type:aws4_request”
+	// Required.
+	SubjectTokenType string
+	// TokenURL is the STS token exchange endpoint. If not provided, will default to
+	// https://sts.UNIVERSE_DOMAIN/v1/token, with UNIVERSE_DOMAIN set to the
+	// default service domain googleapis.com unless UniverseDomain is set.
+	// Optional.
+	TokenURL string
+	// TokenInfoURL is the token_info endpoint used to retrieve the account-related information
+	// (user attributes like account identifier, e.g. email, username, uid, etc.). This is
+	// needed for gCloud session account identification. Optional.
+	TokenInfoURL string
+	// ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only
+	// required for workload identity pools when APIs to be accessed have not integrated with UberMint. Optional.
+	ServiceAccountImpersonationURL string
+	// ServiceAccountImpersonationLifetimeSeconds is the number of seconds the service account impersonation
+	// token will be valid for. If not provided, it will default to 3600. Optional.
+	ServiceAccountImpersonationLifetimeSeconds int
+	// ClientSecret is currently only required if token_info endpoint also
+	// needs to be called with the generated GCP access token. When provided, STS will be
+	// called with additional basic authentication using ClientID as username and ClientSecret as password. Optional.
+	ClientSecret string
+	// ClientID is only required in conjunction with ClientSecret, as described above. Optional.
+	ClientID string
+	// CredentialSource contains the necessary information to retrieve the token itself, as well
+	// as some environmental information. One of SubjectTokenSupplier, AwsSecurityCredentialsSupplier or
+	// CredentialSource must be provided. Optional.
+	CredentialSource *CredentialSource
+	// QuotaProjectID is injected by gCloud. If the value is non-empty, the Auth libraries
+	// will set the x-goog-user-project header which overrides the project associated with the credentials. Optional.
+	QuotaProjectID string
+	// Scopes contains the desired scopes for the returned access token. Optional.
+	Scopes []string
+	// WorkforcePoolUserProject is the workforce pool user project number when the credential
+	// corresponds to a workforce pool and not a workload identity pool.
+	// The underlying principal must still have serviceusage.services.use IAM
+	// permission to use the project for billing/quota. Optional.
+	WorkforcePoolUserProject string
+	// SubjectTokenSupplier is an optional token supplier for OIDC/SAML credentials.
+	// One of SubjectTokenSupplier, AwsSecurityCredentialsSupplier or CredentialSource must be provided. Optional.
+	SubjectTokenSupplier SubjectTokenSupplier
+	// AwsSecurityCredentialsSupplier is an AWS Security Credential supplier for AWS credentials.
+	// One of SubjectTokenSupplier, AwsSecurityCredentialsSupplier or CredentialSource must be provided. Optional.
+	AwsSecurityCredentialsSupplier AwsSecurityCredentialsSupplier
+	// UniverseDomain is the default service domain for a given Cloud universe.
+	// This value will be used in the default STS token URL. The default value
+	// is "googleapis.com". It will not be used if TokenURL is set. Optional.
+	UniverseDomain string
+}
+
+var (
+	validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`)
+)
+
+func validateWorkforceAudience(input string) bool {
+	return validWorkforceAudiencePattern.MatchString(input)
+}
+
+// NewTokenSource returns an external account TokenSource using the provided external account config.
+func NewTokenSource(ctx context.Context, conf Config) (oauth2.TokenSource, error) {
+	if conf.Audience == "" {
+		return nil, fmt.Errorf("oauth2/google/externalaccount: Audience must be set")
+	}
+	if conf.SubjectTokenType == "" {
+		return nil, fmt.Errorf("oauth2/google/externalaccount: Subject token type must be set")
+	}
+	if conf.WorkforcePoolUserProject != "" {
+		valid := validateWorkforceAudience(conf.Audience)
+		if !valid {
+			return nil, fmt.Errorf("oauth2/google/externalaccount: Workforce pool user project should not be set for non-workforce pool credentials")
+		}
+	}
+	count := 0
+	if conf.CredentialSource != nil {
+		count++
+	}
+	if conf.SubjectTokenSupplier != nil {
+		count++
+	}
+	if conf.AwsSecurityCredentialsSupplier != nil {
+		count++
+	}
+	if count == 0 {
+		return nil, fmt.Errorf("oauth2/google/externalaccount: One of CredentialSource, SubjectTokenSupplier, or AwsSecurityCredentialsSupplier must be set")
+	}
+	if count > 1 {
+		return nil, fmt.Errorf("oauth2/google/externalaccount: Only one of CredentialSource, SubjectTokenSupplier, or AwsSecurityCredentialsSupplier must be set")
+	}
+	return conf.tokenSource(ctx, "https")
+}
+
+// tokenSource is a private function that's directly called by some of the tests,
+// because the unit test URLs are mocked, and would otherwise fail the
+// validity check.
+func (c *Config) tokenSource(ctx context.Context, scheme string) (oauth2.TokenSource, error) {
+
+	ts := tokenSource{
+		ctx:  ctx,
+		conf: c,
+	}
+	if c.ServiceAccountImpersonationURL == "" {
+		return oauth2.ReuseTokenSource(nil, ts), nil
+	}
+	scopes := c.Scopes
+	ts.conf.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"}
+	imp := impersonate.ImpersonateTokenSource{
+		Ctx:                  ctx,
+		URL:                  c.ServiceAccountImpersonationURL,
+		Scopes:               scopes,
+		Ts:                   oauth2.ReuseTokenSource(nil, ts),
+		TokenLifetimeSeconds: c.ServiceAccountImpersonationLifetimeSeconds,
+	}
+	return oauth2.ReuseTokenSource(nil, imp), nil
+}
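
To make the supplier-based flow above concrete, the following is a minimal sketch (not part of the vendored file) of a custom OIDC SubjectTokenSupplier wired into NewTokenSource. It uses only the API introduced in this file; the audience, pool and provider names, and token path are hypothetical placeholders.

	package main

	import (
		"context"
		"fmt"
		"os"

		"golang.org/x/oauth2/google/externalaccount"
	)

	// fileSupplier re-reads an OIDC token from disk on every call. A production
	// supplier should cache the token, since the token source does not.
	type fileSupplier struct{ path string }

	func (s fileSupplier) SubjectToken(ctx context.Context, opts externalaccount.SupplierOptions) (string, error) {
		b, err := os.ReadFile(s.path) // hypothetical token file path
		if err != nil {
			return "", err
		}
		return string(b), nil
	}

	func main() {
		ts, err := externalaccount.NewTokenSource(context.Background(), externalaccount.Config{
			// Hypothetical workload identity pool audience.
			Audience:             "//iam.googleapis.com/projects/123456/locations/global/workloadIdentityPools/my-pool/providers/my-provider",
			SubjectTokenType:     "urn:ietf:params:oauth:token-type:jwt",
			SubjectTokenSupplier: fileSupplier{path: "/var/run/secrets/oidc/token"},
		})
		if err != nil {
			panic(err)
		}
		tok, err := ts.Token()
		if err != nil {
			panic(err)
		}
		fmt.Println(tok.TokenType, tok.Expiry)
	}

Because NewTokenSource wraps the result in oauth2.ReuseTokenSource (see tokenSource above), SubjectToken is only re-invoked once the cached access token expires.
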
+// Subject token file types.
+const (
+	fileTypeText = "text"
+	fileTypeJSON = "json"
+)
+
+// Format contains information needed to retrieve a subject token for URL or File sourced credentials.
+type Format struct {
+	// Type should be either "text" or "json". This determines whether the file or URL sourced credentials
+	// expect a simple text subject token or if the subject token will be contained in a JSON object.
+	// When not provided "text" type is assumed.
+	Type string `json:"type"`
+	// SubjectTokenFieldName is only required for JSON format. This is the field name that the credentials will check
+	// for the subject token in the file or URL response. This would be "access_token" for Azure.
+	SubjectTokenFieldName string `json:"subject_token_field_name"`
+}
+
+// CredentialSource stores the information necessary to retrieve the credentials for the STS exchange.
+type CredentialSource struct {
+	// File is the location for file sourced credentials.
+	// One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question.
+	File string `json:"file"`
+
+	// URL is the URL to call for URL sourced credentials.
+	// One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question.
+	URL string `json:"url"`
+	// Headers are the headers to attach to the request for URL sourced credentials.
+	Headers map[string]string `json:"headers"`
+
+	// Executable is the configuration object for executable sourced credentials.
+	// One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question.
+	Executable *ExecutableConfig `json:"executable"`
+
+	// EnvironmentID is the EnvironmentID used for AWS sourced credentials. This should start with "AWS".
+	// One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question.
+	EnvironmentID string `json:"environment_id"`
+	// RegionURL is the metadata URL to retrieve the region from for EC2 AWS credentials.
+	RegionURL string `json:"region_url"`
+	// RegionalCredVerificationURL is the AWS regional credential verification URL; it will default to
+	// "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15" if not provided.
+	RegionalCredVerificationURL string `json:"regional_cred_verification_url"`
+	// IMDSv2SessionTokenURL is the URL to retrieve the session token when using IMDSv2 in AWS.
+	IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"`
+	// Format is the format type for the subject token. Used for File and URL sourced credentials. Expected values are "text" or "json".
+	Format Format `json:"format"`
+}
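
As a hedged sketch of the CredentialSource wiring above: the JSON tags on these fields mirror the keys in a gcloud-generated credential configuration, and the same source can also be built directly in Go. The audience, path, and field name below are hypothetical.

	package credexample

	import "golang.org/x/oauth2/google/externalaccount"

	// fileSourcedConfig builds a Config for a file-sourced credential whose
	// file holds JSON like {"access_token": "..."}; use Type "text" for a
	// file containing a bare token instead.
	func fileSourcedConfig() externalaccount.Config {
		return externalaccount.Config{
			Audience:         "//iam.googleapis.com/projects/123456/locations/global/workloadIdentityPools/my-pool/providers/my-provider",
			SubjectTokenType: "urn:ietf:params:oauth:token-type:jwt",
			CredentialSource: &externalaccount.CredentialSource{
				File: "/var/run/secrets/oidc/token",
				Format: externalaccount.Format{
					Type:                  "json",
					SubjectTokenFieldName: "access_token",
				},
			},
		}
	}
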
+// ExecutableConfig contains information needed for executable sourced credentials.
+type ExecutableConfig struct {
+	// Command is the full command to run to retrieve the subject token.
+	// This can include arguments. Must be an absolute path for the program. Required.
+	Command string `json:"command"`
+	// TimeoutMillis is the timeout duration, in milliseconds. Defaults to 30000 milliseconds when not provided. Optional.
+	TimeoutMillis *int `json:"timeout_millis"`
+	// OutputFile is the absolute path to the output file where the executable will cache the response.
+	// If specified the auth libraries will first check this location before running the executable. Optional.
+	OutputFile string `json:"output_file"`
+}
+
+// SubjectTokenSupplier can be used to supply a subject token to exchange for a GCP access token.
+type SubjectTokenSupplier interface {
+	// SubjectToken should return a valid subject token or an error.
+	// The external account token source does not cache the returned subject token, so caching
+	// logic should be implemented in the supplier to prevent multiple requests for the same subject token.
+	SubjectToken(ctx context.Context, options SupplierOptions) (string, error)
+}
+
+// AwsSecurityCredentialsSupplier can be used to supply AwsSecurityCredentials and an AWS Region to
+// exchange for a GCP access token.
+type AwsSecurityCredentialsSupplier interface {
+	// AwsRegion should return the AWS region or an error.
+	AwsRegion(ctx context.Context, options SupplierOptions) (string, error)
+	// AwsSecurityCredentials should return a valid set of AwsSecurityCredentials or an error.
+	// The external account token source does not cache the returned security credentials, so caching
+	// logic should be implemented in the supplier to prevent multiple requests for the same security credentials.
+	AwsSecurityCredentials(ctx context.Context, options SupplierOptions) (*AwsSecurityCredentials, error)
+}
+
+// SupplierOptions contains information about the requested subject token or AWS security credentials from the
+// Google external account credential.
+type SupplierOptions struct {
+	// Audience is the requested audience for the external account credential.
+	Audience string
+	// SubjectTokenType is the requested subject token type for the external account credential. Expected values include:
+	// “urn:ietf:params:oauth:token-type:jwt”
+	// “urn:ietf:params:oauth:token-type:id-token”
+	// “urn:ietf:params:oauth:token-type:saml2”
+	// “urn:ietf:params:aws:token-type:aws4_request”
+	SubjectTokenType string
+}
+
+// tokenURL returns the default STS token endpoint with the configured universe
+// domain.
+func (c *Config) tokenURL() string {
+	if c.UniverseDomain == "" {
+		return strings.Replace(defaultTokenURL, universeDomainPlaceholder, defaultUniverseDomain, 1)
+	}
+	return strings.Replace(defaultTokenURL, universeDomainPlaceholder, c.UniverseDomain, 1)
+}
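
A matching sketch for the AWS path, implementing AwsSecurityCredentialsSupplier with static values. The AwsSecurityCredentials struct is defined elsewhere in this package (aws.go), so its field names here are an assumption taken from that file; the region, keys, and audience are hypothetical.

	package credexample

	import (
		"context"

		"golang.org/x/oauth2/google/externalaccount"
	)

	// staticAwsSupplier returns fixed credentials. A real supplier should fetch
	// and cache rotating credentials, since the token source does not cache them.
	type staticAwsSupplier struct{}

	func (staticAwsSupplier) AwsRegion(ctx context.Context, opts externalaccount.SupplierOptions) (string, error) {
		return "us-east-2", nil // hypothetical region
	}

	func (staticAwsSupplier) AwsSecurityCredentials(ctx context.Context, opts externalaccount.SupplierOptions) (*externalaccount.AwsSecurityCredentials, error) {
		return &externalaccount.AwsSecurityCredentials{
			AccessKeyID:     "AKIDEXAMPLE",   // hypothetical
			SecretAccessKey: "SECRETEXAMPLE", // hypothetical
		}, nil
	}

	// awsConfig wires the supplier into a Config; parse (below) routes it to an
	// awsCredentialSource.
	func awsConfig() externalaccount.Config {
		return externalaccount.Config{
			Audience:                       "//iam.googleapis.com/projects/123456/locations/global/workloadIdentityPools/aws-pool/providers/aws-provider",
			SubjectTokenType:               "urn:ietf:params:aws:token-type:aws4_request",
			AwsSecurityCredentialsSupplier: staticAwsSupplier{},
		}
	}
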
+// parse determines the type of CredentialSource needed.
+func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) {
+	// Set defaults.
+	if c.TokenURL == "" {
+		c.TokenURL = c.tokenURL()
+	}
+	supplierOptions := SupplierOptions{Audience: c.Audience, SubjectTokenType: c.SubjectTokenType}
+
+	if c.AwsSecurityCredentialsSupplier != nil {
+		awsCredSource := awsCredentialSource{
+			awsSecurityCredentialsSupplier: c.AwsSecurityCredentialsSupplier,
+			targetResource:                 c.Audience,
+			supplierOptions:                supplierOptions,
+			ctx:                            ctx,
+		}
+		return awsCredSource, nil
+	} else if c.SubjectTokenSupplier != nil {
+		return programmaticRefreshCredentialSource{subjectTokenSupplier: c.SubjectTokenSupplier, supplierOptions: supplierOptions, ctx: ctx}, nil
+	} else if len(c.CredentialSource.EnvironmentID) > 3 && c.CredentialSource.EnvironmentID[:3] == "aws" {
+		if awsVersion, err := strconv.Atoi(c.CredentialSource.EnvironmentID[3:]); err == nil {
+			if awsVersion != 1 {
+				return nil, fmt.Errorf("oauth2/google/externalaccount: aws version '%d' is not supported in the current build", awsVersion)
+			}
+
+			awsCredSource := awsCredentialSource{
+				environmentID:               c.CredentialSource.EnvironmentID,
+				regionURL:                   c.CredentialSource.RegionURL,
+				regionalCredVerificationURL: c.CredentialSource.RegionalCredVerificationURL,
+				credVerificationURL:         c.CredentialSource.URL,
+				targetResource:              c.Audience,
+				ctx:                         ctx,
+			}
+			if c.CredentialSource.IMDSv2SessionTokenURL != "" {
+				awsCredSource.imdsv2SessionTokenURL = c.CredentialSource.IMDSv2SessionTokenURL
+			}
+
+			return awsCredSource, nil
+		}
+	} else if c.CredentialSource.File != "" {
+		return fileCredentialSource{File: c.CredentialSource.File, Format: c.CredentialSource.Format}, nil
+	} else if c.CredentialSource.URL != "" {
+		return urlCredentialSource{URL: c.CredentialSource.URL, Headers: c.CredentialSource.Headers, Format: c.CredentialSource.Format, ctx: ctx}, nil
+	} else if c.CredentialSource.Executable != nil {
+		return createExecutableCredential(ctx, c.CredentialSource.Executable, c)
+	}
+	return nil, fmt.Errorf("oauth2/google/externalaccount: unable to parse credential source")
+}
+
+type baseCredentialSource interface {
+	credentialSourceType() string
+	subjectToken() (string, error)
+}
+
+// tokenSource is the source that handles external credentials. It is used to retrieve Tokens.
+type tokenSource struct {
+	ctx  context.Context
+	conf *Config
+}
+
+func getMetricsHeaderValue(conf *Config, credSource baseCredentialSource) string {
+	return fmt.Sprintf("gl-go/%s auth/%s google-byoid-sdk source/%s sa-impersonation/%t config-lifetime/%t",
+		goVersion(),
+		"unknown",
+		credSource.credentialSourceType(),
+		conf.ServiceAccountImpersonationURL != "",
+		conf.ServiceAccountImpersonationLifetimeSeconds != 0)
+}
+
+// Token allows tokenSource to conform to the oauth2.TokenSource interface.
+func (ts tokenSource) Token() (*oauth2.Token, error) { + conf := ts.conf + + credSource, err := conf.parse(ts.ctx) + if err != nil { + return nil, err + } + subjectToken, err := credSource.subjectToken() + + if err != nil { + return nil, err + } + stsRequest := stsexchange.TokenExchangeRequest{ + GrantType: "urn:ietf:params:oauth:grant-type:token-exchange", + Audience: conf.Audience, + Scope: conf.Scopes, + RequestedTokenType: "urn:ietf:params:oauth:token-type:access_token", + SubjectToken: subjectToken, + SubjectTokenType: conf.SubjectTokenType, + } + header := make(http.Header) + header.Add("Content-Type", "application/x-www-form-urlencoded") + header.Add("x-goog-api-client", getMetricsHeaderValue(conf, credSource)) + clientAuth := stsexchange.ClientAuthentication{ + AuthStyle: oauth2.AuthStyleInHeader, + ClientID: conf.ClientID, + ClientSecret: conf.ClientSecret, + } + var options map[string]interface{} + // Do not pass workforce_pool_user_project when client authentication is used. + // The client ID is sufficient for determining the user project. + if conf.WorkforcePoolUserProject != "" && conf.ClientID == "" { + options = map[string]interface{}{ + "userProject": conf.WorkforcePoolUserProject, + } + } + stsResp, err := stsexchange.ExchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, options) + if err != nil { + return nil, err + } + + accessToken := &oauth2.Token{ + AccessToken: stsResp.AccessToken, + TokenType: stsResp.TokenType, + } + + // The RFC8693 doesn't define the explicit 0 of "expires_in" field behavior. + if stsResp.ExpiresIn <= 0 { + return nil, fmt.Errorf("oauth2/google/externalaccount: got invalid expiry from security token service") + } + accessToken.Expiry = now().Add(time.Duration(stsResp.ExpiresIn) * time.Second) + + if stsResp.RefreshToken != "" { + accessToken.RefreshToken = stsResp.RefreshToken + } + return accessToken, nil +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go b/vendor/golang.org/x/oauth2/google/externalaccount/executablecredsource.go similarity index 85% rename from vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go rename to vendor/golang.org/x/oauth2/google/externalaccount/executablecredsource.go index 843d1c33..dca5681a 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go +++ b/vendor/golang.org/x/oauth2/google/externalaccount/executablecredsource.go @@ -39,51 +39,51 @@ func (nce nonCacheableError) Error() string { } func missingFieldError(source, field string) error { - return fmt.Errorf("oauth2/google: %v missing `%q` field", source, field) + return fmt.Errorf("oauth2/google/externalaccount: %v missing `%q` field", source, field) } func jsonParsingError(source, data string) error { - return fmt.Errorf("oauth2/google: unable to parse %v\nResponse: %v", source, data) + return fmt.Errorf("oauth2/google/externalaccount: unable to parse %v\nResponse: %v", source, data) } func malformedFailureError() error { - return nonCacheableError{"oauth2/google: response must include `error` and `message` fields when unsuccessful"} + return nonCacheableError{"oauth2/google/externalaccount: response must include `error` and `message` fields when unsuccessful"} } func userDefinedError(code, message string) error { - return nonCacheableError{fmt.Sprintf("oauth2/google: response contains unsuccessful response: (%v) %v", code, message)} + return nonCacheableError{fmt.Sprintf("oauth2/google/externalaccount: response contains 
unsuccessful response: (%v) %v", code, message)} } func unsupportedVersionError(source string, version int) error { - return fmt.Errorf("oauth2/google: %v contains unsupported version: %v", source, version) + return fmt.Errorf("oauth2/google/externalaccount: %v contains unsupported version: %v", source, version) } func tokenExpiredError() error { - return nonCacheableError{"oauth2/google: the token returned by the executable is expired"} + return nonCacheableError{"oauth2/google/externalaccount: the token returned by the executable is expired"} } func tokenTypeError(source string) error { - return fmt.Errorf("oauth2/google: %v contains unsupported token type", source) + return fmt.Errorf("oauth2/google/externalaccount: %v contains unsupported token type", source) } func exitCodeError(exitCode int) error { - return fmt.Errorf("oauth2/google: executable command failed with exit code %v", exitCode) + return fmt.Errorf("oauth2/google/externalaccount: executable command failed with exit code %v", exitCode) } func executableError(err error) error { - return fmt.Errorf("oauth2/google: executable command failed: %v", err) + return fmt.Errorf("oauth2/google/externalaccount: executable command failed: %v", err) } func executablesDisallowedError() error { - return errors.New("oauth2/google: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run") + return errors.New("oauth2/google/externalaccount: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run") } func timeoutRangeError() error { - return errors.New("oauth2/google: invalid `timeout_millis` field — executable timeout must be between 5 and 120 seconds") + return errors.New("oauth2/google/externalaccount: invalid `timeout_millis` field — executable timeout must be between 5 and 120 seconds") } func commandMissingError() error { - return errors.New("oauth2/google: missing `command` field — executable command must be provided") + return errors.New("oauth2/google/externalaccount: missing `command` field — executable command must be provided") } type environment interface { @@ -146,7 +146,7 @@ type executableCredentialSource struct { // CreateExecutableCredential creates an executableCredentialSource given an ExecutableConfig. // It also performs defaulting and type conversions. 
-func CreateExecutableCredential(ctx context.Context, ec *ExecutableConfig, config *Config) (executableCredentialSource, error) { +func createExecutableCredential(ctx context.Context, ec *ExecutableConfig, config *Config) (executableCredentialSource, error) { if ec.Command == "" { return executableCredentialSource{}, commandMissingError() } diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go b/vendor/golang.org/x/oauth2/google/externalaccount/filecredsource.go similarity index 62% rename from vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go rename to vendor/golang.org/x/oauth2/google/externalaccount/filecredsource.go index f35f73c5..33766b97 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go +++ b/vendor/golang.org/x/oauth2/google/externalaccount/filecredsource.go @@ -16,7 +16,7 @@ import ( type fileCredentialSource struct { File string - Format format + Format Format } func (cs fileCredentialSource) credentialSourceType() string { @@ -26,12 +26,12 @@ func (cs fileCredentialSource) credentialSourceType() string { func (cs fileCredentialSource) subjectToken() (string, error) { tokenFile, err := os.Open(cs.File) if err != nil { - return "", fmt.Errorf("oauth2/google: failed to open credential file %q", cs.File) + return "", fmt.Errorf("oauth2/google/externalaccount: failed to open credential file %q", cs.File) } defer tokenFile.Close() tokenBytes, err := ioutil.ReadAll(io.LimitReader(tokenFile, 1<<20)) if err != nil { - return "", fmt.Errorf("oauth2/google: failed to read credential file: %v", err) + return "", fmt.Errorf("oauth2/google/externalaccount: failed to read credential file: %v", err) } tokenBytes = bytes.TrimSpace(tokenBytes) switch cs.Format.Type { @@ -39,15 +39,15 @@ func (cs fileCredentialSource) subjectToken() (string, error) { jsonData := make(map[string]interface{}) err = json.Unmarshal(tokenBytes, &jsonData) if err != nil { - return "", fmt.Errorf("oauth2/google: failed to unmarshal subject token file: %v", err) + return "", fmt.Errorf("oauth2/google/externalaccount: failed to unmarshal subject token file: %v", err) } val, ok := jsonData[cs.Format.SubjectTokenFieldName] if !ok { - return "", errors.New("oauth2/google: provided subject_token_field_name not found in credentials") + return "", errors.New("oauth2/google/externalaccount: provided subject_token_field_name not found in credentials") } token, ok := val.(string) if !ok { - return "", errors.New("oauth2/google: improperly formatted subject token") + return "", errors.New("oauth2/google/externalaccount: improperly formatted subject token") } return token, nil case "text": @@ -55,7 +55,7 @@ func (cs fileCredentialSource) subjectToken() (string, error) { case "": return string(tokenBytes), nil default: - return "", errors.New("oauth2/google: invalid credential_source file format type") + return "", errors.New("oauth2/google/externalaccount: invalid credential_source file format type") } } diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go b/vendor/golang.org/x/oauth2/google/externalaccount/header.go similarity index 100% rename from vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go rename to vendor/golang.org/x/oauth2/google/externalaccount/header.go diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/programmaticrefreshcredsource.go b/vendor/golang.org/x/oauth2/google/externalaccount/programmaticrefreshcredsource.go new file mode 100644 index 
00000000..6c1abdf2 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/programmaticrefreshcredsource.go @@ -0,0 +1,21 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import "context" + +type programmaticRefreshCredentialSource struct { + supplierOptions SupplierOptions + subjectTokenSupplier SubjectTokenSupplier + ctx context.Context +} + +func (cs programmaticRefreshCredentialSource) credentialSourceType() string { + return "programmatic" +} + +func (cs programmaticRefreshCredentialSource) subjectToken() (string, error) { + return cs.subjectTokenSupplier.SubjectToken(cs.ctx, cs.supplierOptions) +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go b/vendor/golang.org/x/oauth2/google/externalaccount/urlcredsource.go similarity index 61% rename from vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go rename to vendor/golang.org/x/oauth2/google/externalaccount/urlcredsource.go index 606bb4e8..71a7184e 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go +++ b/vendor/golang.org/x/oauth2/google/externalaccount/urlcredsource.go @@ -19,7 +19,7 @@ import ( type urlCredentialSource struct { URL string Headers map[string]string - Format format + Format Format ctx context.Context } @@ -31,7 +31,7 @@ func (cs urlCredentialSource) subjectToken() (string, error) { client := oauth2.NewClient(cs.ctx, nil) req, err := http.NewRequest("GET", cs.URL, nil) if err != nil { - return "", fmt.Errorf("oauth2/google: HTTP request for URL-sourced credential failed: %v", err) + return "", fmt.Errorf("oauth2/google/externalaccount: HTTP request for URL-sourced credential failed: %v", err) } req = req.WithContext(cs.ctx) @@ -40,16 +40,16 @@ func (cs urlCredentialSource) subjectToken() (string, error) { } resp, err := client.Do(req) if err != nil { - return "", fmt.Errorf("oauth2/google: invalid response when retrieving subject token: %v", err) + return "", fmt.Errorf("oauth2/google/externalaccount: invalid response when retrieving subject token: %v", err) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) if err != nil { - return "", fmt.Errorf("oauth2/google: invalid body in subject token URL query: %v", err) + return "", fmt.Errorf("oauth2/google/externalaccount: invalid body in subject token URL query: %v", err) } if c := resp.StatusCode; c < 200 || c > 299 { - return "", fmt.Errorf("oauth2/google: status code %d: %s", c, respBody) + return "", fmt.Errorf("oauth2/google/externalaccount: status code %d: %s", c, respBody) } switch cs.Format.Type { @@ -57,15 +57,15 @@ func (cs urlCredentialSource) subjectToken() (string, error) { jsonData := make(map[string]interface{}) err = json.Unmarshal(respBody, &jsonData) if err != nil { - return "", fmt.Errorf("oauth2/google: failed to unmarshal subject token file: %v", err) + return "", fmt.Errorf("oauth2/google/externalaccount: failed to unmarshal subject token file: %v", err) } val, ok := jsonData[cs.Format.SubjectTokenFieldName] if !ok { - return "", errors.New("oauth2/google: provided subject_token_field_name not found in credentials") + return "", errors.New("oauth2/google/externalaccount: provided subject_token_field_name not found in credentials") } token, ok := val.(string) if !ok { - return "", errors.New("oauth2/google: improperly formatted subject token") + return "", 
errors.New("oauth2/google/externalaccount: improperly formatted subject token") } return token, nil case "text": @@ -73,7 +73,7 @@ func (cs urlCredentialSource) subjectToken() (string, error) { case "": return string(respBody), nil default: - return "", errors.New("oauth2/google: invalid credential_source file format type") + return "", errors.New("oauth2/google/externalaccount: invalid credential_source file format type") } } diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index c66c5352..7b82e7a0 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -15,8 +15,9 @@ import ( "cloud.google.com/go/compute/metadata" "golang.org/x/oauth2" - "golang.org/x/oauth2/google/internal/externalaccount" + "golang.org/x/oauth2/google/externalaccount" "golang.org/x/oauth2/google/internal/externalaccountauthorizeduser" + "golang.org/x/oauth2/google/internal/impersonate" "golang.org/x/oauth2/jwt" ) @@ -200,12 +201,12 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar ServiceAccountImpersonationLifetimeSeconds: f.ServiceAccountImpersonation.TokenLifetimeSeconds, ClientSecret: f.ClientSecret, ClientID: f.ClientID, - CredentialSource: f.CredentialSource, + CredentialSource: &f.CredentialSource, QuotaProjectID: f.QuotaProjectID, Scopes: params.Scopes, WorkforcePoolUserProject: f.WorkforcePoolUserProject, } - return cfg.TokenSource(ctx) + return externalaccount.NewTokenSource(ctx, *cfg) case externalAccountAuthorizedUserKey: cfg := &externalaccountauthorizeduser.Config{ Audience: f.Audience, @@ -228,7 +229,7 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar if err != nil { return nil, err } - imp := externalaccount.ImpersonateTokenSource{ + imp := impersonate.ImpersonateTokenSource{ Ctx: ctx, URL: f.ServiceAccountImpersonationURL, Scopes: params.Scopes, @@ -251,7 +252,10 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar // Further information about retrieving access tokens from the GCE metadata // server can be found at https://cloud.google.com/compute/docs/authentication. func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource { - return computeTokenSource(account, 0, scope...) + // refresh 3 minutes and 45 seconds early. The shortest MDS cache is currently 4 minutes, so any + // refreshes earlier are a waste of compute. + earlyExpirySecs := 225 * time.Second + return computeTokenSource(account, earlyExpirySecs, scope...) } func computeTokenSource(account string, earlyExpiry time.Duration, scope ...string) oauth2.TokenSource { diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go deleted file mode 100644 index 33288d36..00000000 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go +++ /dev/null @@ -1,254 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package externalaccount - -import ( - "context" - "fmt" - "net/http" - "regexp" - "strconv" - "time" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/google/internal/stsexchange" -) - -// now aliases time.Now for testing -var now = func() time.Time { - return time.Now().UTC() -} - -// Config stores the configuration for fetching tokens with external credentials. 
-type Config struct { - // Audience is the Secure Token Service (STS) audience which contains the resource name for the workload - // identity pool or the workforce pool and the provider identifier in that pool. - Audience string - // SubjectTokenType is the STS token type based on the Oauth2.0 token exchange spec - // e.g. `urn:ietf:params:oauth:token-type:jwt`. - SubjectTokenType string - // TokenURL is the STS token exchange endpoint. - TokenURL string - // TokenInfoURL is the token_info endpoint used to retrieve the account related information ( - // user attributes like account identifier, eg. email, username, uid, etc). This is - // needed for gCloud session account identification. - TokenInfoURL string - // ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only - // required for workload identity pools when APIs to be accessed have not integrated with UberMint. - ServiceAccountImpersonationURL string - // ServiceAccountImpersonationLifetimeSeconds is the number of seconds the service account impersonation - // token will be valid for. - ServiceAccountImpersonationLifetimeSeconds int - // ClientSecret is currently only required if token_info endpoint also - // needs to be called with the generated GCP access token. When provided, STS will be - // called with additional basic authentication using client_id as username and client_secret as password. - ClientSecret string - // ClientID is only required in conjunction with ClientSecret, as described above. - ClientID string - // CredentialSource contains the necessary information to retrieve the token itself, as well - // as some environmental information. - CredentialSource CredentialSource - // QuotaProjectID is injected by gCloud. If the value is non-empty, the Auth libraries - // will set the x-goog-user-project which overrides the project associated with the credentials. - QuotaProjectID string - // Scopes contains the desired scopes for the returned access token. - Scopes []string - // The optional workforce pool user project number when the credential - // corresponds to a workforce pool and not a workload identity pool. - // The underlying principal must still have serviceusage.services.use IAM - // permission to use the project for billing/quota. - WorkforcePoolUserProject string -} - -var ( - validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`) -) - -func validateWorkforceAudience(input string) bool { - return validWorkforceAudiencePattern.MatchString(input) -} - -// TokenSource Returns an external account TokenSource struct. This is to be called by package google to construct a google.Credentials. -func (c *Config) TokenSource(ctx context.Context) (oauth2.TokenSource, error) { - return c.tokenSource(ctx, "https") -} - -// tokenSource is a private function that's directly called by some of the tests, -// because the unit test URLs are mocked, and would otherwise fail the -// validity check. 
-func (c *Config) tokenSource(ctx context.Context, scheme string) (oauth2.TokenSource, error) { - if c.WorkforcePoolUserProject != "" { - valid := validateWorkforceAudience(c.Audience) - if !valid { - return nil, fmt.Errorf("oauth2/google: workforce_pool_user_project should not be set for non-workforce pool credentials") - } - } - - ts := tokenSource{ - ctx: ctx, - conf: c, - } - if c.ServiceAccountImpersonationURL == "" { - return oauth2.ReuseTokenSource(nil, ts), nil - } - scopes := c.Scopes - ts.conf.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"} - imp := ImpersonateTokenSource{ - Ctx: ctx, - URL: c.ServiceAccountImpersonationURL, - Scopes: scopes, - Ts: oauth2.ReuseTokenSource(nil, ts), - TokenLifetimeSeconds: c.ServiceAccountImpersonationLifetimeSeconds, - } - return oauth2.ReuseTokenSource(nil, imp), nil -} - -// Subject token file types. -const ( - fileTypeText = "text" - fileTypeJSON = "json" -) - -type format struct { - // Type is either "text" or "json". When not provided "text" type is assumed. - Type string `json:"type"` - // SubjectTokenFieldName is only required for JSON format. This would be "access_token" for azure. - SubjectTokenFieldName string `json:"subject_token_field_name"` -} - -// CredentialSource stores the information necessary to retrieve the credentials for the STS exchange. -// One field amongst File, URL, and Executable should be filled, depending on the kind of credential in question. -// The EnvironmentID should start with AWS if being used for an AWS credential. -type CredentialSource struct { - File string `json:"file"` - - URL string `json:"url"` - Headers map[string]string `json:"headers"` - - Executable *ExecutableConfig `json:"executable"` - - EnvironmentID string `json:"environment_id"` - RegionURL string `json:"region_url"` - RegionalCredVerificationURL string `json:"regional_cred_verification_url"` - CredVerificationURL string `json:"cred_verification_url"` - IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"` - Format format `json:"format"` -} - -type ExecutableConfig struct { - Command string `json:"command"` - TimeoutMillis *int `json:"timeout_millis"` - OutputFile string `json:"output_file"` -} - -// parse determines the type of CredentialSource needed. 
-func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { - if len(c.CredentialSource.EnvironmentID) > 3 && c.CredentialSource.EnvironmentID[:3] == "aws" { - if awsVersion, err := strconv.Atoi(c.CredentialSource.EnvironmentID[3:]); err == nil { - if awsVersion != 1 { - return nil, fmt.Errorf("oauth2/google: aws version '%d' is not supported in the current build", awsVersion) - } - - awsCredSource := awsCredentialSource{ - EnvironmentID: c.CredentialSource.EnvironmentID, - RegionURL: c.CredentialSource.RegionURL, - RegionalCredVerificationURL: c.CredentialSource.RegionalCredVerificationURL, - CredVerificationURL: c.CredentialSource.URL, - TargetResource: c.Audience, - ctx: ctx, - } - if c.CredentialSource.IMDSv2SessionTokenURL != "" { - awsCredSource.IMDSv2SessionTokenURL = c.CredentialSource.IMDSv2SessionTokenURL - } - - return awsCredSource, nil - } - } else if c.CredentialSource.File != "" { - return fileCredentialSource{File: c.CredentialSource.File, Format: c.CredentialSource.Format}, nil - } else if c.CredentialSource.URL != "" { - return urlCredentialSource{URL: c.CredentialSource.URL, Headers: c.CredentialSource.Headers, Format: c.CredentialSource.Format, ctx: ctx}, nil - } else if c.CredentialSource.Executable != nil { - return CreateExecutableCredential(ctx, c.CredentialSource.Executable, c) - } - return nil, fmt.Errorf("oauth2/google: unable to parse credential source") -} - -type baseCredentialSource interface { - credentialSourceType() string - subjectToken() (string, error) -} - -// tokenSource is the source that handles external credentials. It is used to retrieve Tokens. -type tokenSource struct { - ctx context.Context - conf *Config -} - -func getMetricsHeaderValue(conf *Config, credSource baseCredentialSource) string { - return fmt.Sprintf("gl-go/%s auth/%s google-byoid-sdk source/%s sa-impersonation/%t config-lifetime/%t", - goVersion(), - "unknown", - credSource.credentialSourceType(), - conf.ServiceAccountImpersonationURL != "", - conf.ServiceAccountImpersonationLifetimeSeconds != 0) -} - -// Token allows tokenSource to conform to the oauth2.TokenSource interface. -func (ts tokenSource) Token() (*oauth2.Token, error) { - conf := ts.conf - - credSource, err := conf.parse(ts.ctx) - if err != nil { - return nil, err - } - subjectToken, err := credSource.subjectToken() - - if err != nil { - return nil, err - } - stsRequest := stsexchange.TokenExchangeRequest{ - GrantType: "urn:ietf:params:oauth:grant-type:token-exchange", - Audience: conf.Audience, - Scope: conf.Scopes, - RequestedTokenType: "urn:ietf:params:oauth:token-type:access_token", - SubjectToken: subjectToken, - SubjectTokenType: conf.SubjectTokenType, - } - header := make(http.Header) - header.Add("Content-Type", "application/x-www-form-urlencoded") - header.Add("x-goog-api-client", getMetricsHeaderValue(conf, credSource)) - clientAuth := stsexchange.ClientAuthentication{ - AuthStyle: oauth2.AuthStyleInHeader, - ClientID: conf.ClientID, - ClientSecret: conf.ClientSecret, - } - var options map[string]interface{} - // Do not pass workforce_pool_user_project when client authentication is used. - // The client ID is sufficient for determining the user project. 
- if conf.WorkforcePoolUserProject != "" && conf.ClientID == "" { - options = map[string]interface{}{ - "userProject": conf.WorkforcePoolUserProject, - } - } - stsResp, err := stsexchange.ExchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, options) - if err != nil { - return nil, err - } - - accessToken := &oauth2.Token{ - AccessToken: stsResp.AccessToken, - TokenType: stsResp.TokenType, - } - if stsResp.ExpiresIn < 0 { - return nil, fmt.Errorf("oauth2/google: got invalid expiry from security token service") - } else if stsResp.ExpiresIn >= 0 { - accessToken.Expiry = now().Add(time.Duration(stsResp.ExpiresIn) * time.Second) - } - - if stsResp.RefreshToken != "" { - accessToken.RefreshToken = stsResp.RefreshToken - } - return accessToken, nil -} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/err.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/err.go deleted file mode 100644 index 233a78ce..00000000 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/err.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package externalaccount - -import "fmt" - -// Error for handling OAuth related error responses as stated in rfc6749#5.2. -type Error struct { - Code string - URI string - Description string -} - -func (err *Error) Error() string { - return fmt.Sprintf("got error code %s from %s: %s", err.Code, err.URI, err.Description) -} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go b/vendor/golang.org/x/oauth2/google/internal/impersonate/impersonate.go similarity index 99% rename from vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go rename to vendor/golang.org/x/oauth2/google/internal/impersonate/impersonate.go index 54c8f209..6bc3af11 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go +++ b/vendor/golang.org/x/oauth2/google/internal/impersonate/impersonate.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package externalaccount +package impersonate import ( "bytes" diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go deleted file mode 100644 index d28140f7..00000000 --- a/vendor/golang.org/x/oauth2/internal/client_appengine.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build appengine - -package internal - -import "google.golang.org/appengine/urlfetch" - -func init() { - appengineClientHook = urlfetch.Client -} diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go index 572074a6..b9db01dd 100644 --- a/vendor/golang.org/x/oauth2/internal/transport.go +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -18,16 +18,11 @@ var HTTPClient ContextKey // because nobody else can create a ContextKey, being unexported. 
type ContextKey struct{} -var appengineClientHook func(context.Context) *http.Client - func ContextClient(ctx context.Context) *http.Client { if ctx != nil { if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { return hc } } - if appengineClientHook != nil { - return appengineClientHook(ctx) - } return http.DefaultClient } diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 90a2c3d6..09f6a49b 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -393,7 +393,7 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource { } } -// ReuseTokenSource returns a TokenSource that acts in the same manner as the +// ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same manner as the // TokenSource returned by ReuseTokenSource, except the expiry buffer is // configurable. The expiration time of a token is calculated as // t.Expiry.Add(-earlyExpiry). diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go index 30f632c5..b618162a 100644 --- a/vendor/golang.org/x/sync/semaphore/semaphore.go +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -35,11 +35,25 @@ type Weighted struct { // Acquire acquires the semaphore with a weight of n, blocking until resources // are available or ctx is done. On success, returns nil. On failure, returns // ctx.Err() and leaves the semaphore unchanged. -// -// If ctx is already done, Acquire may still succeed without blocking. func (s *Weighted) Acquire(ctx context.Context, n int64) error { + done := ctx.Done() + s.mu.Lock() + select { + case <-done: + // ctx becoming done has "happened before" acquiring the semaphore, + // whether it became done before the call began or while we were + // waiting for the mutex. We prefer to fail even if we could acquire + // the mutex without blocking. + s.mu.Unlock() + return ctx.Err() + default: + } if s.size-s.cur >= n && s.waiters.Len() == 0 { + // Since we hold s.mu and haven't synchronized since checking done, if + // ctx becomes done before we return here, it becoming done must have + // "happened concurrently" with this call - it cannot "happen before" + // we return in this branch. So, we're ok to always acquire here. s.cur += n s.mu.Unlock() return nil @@ -48,7 +62,7 @@ func (s *Weighted) Acquire(ctx context.Context, n int64) error { if n > s.size { // Don't make other Acquire calls block on one that's doomed to fail. s.mu.Unlock() - <-ctx.Done() + <-done return ctx.Err() } @@ -58,14 +72,14 @@ func (s *Weighted) Acquire(ctx context.Context, n int64) error { s.mu.Unlock() select { - case <-ctx.Done(): - err := ctx.Err() + case <-done: s.mu.Lock() select { case <-ready: - // Acquired the semaphore after we were canceled. Rather than trying to - // fix up the queue, just pretend we didn't notice the cancelation. - err = nil + // Acquired the semaphore after we were canceled. + // Pretend we didn't and put the tokens back. + s.cur -= n + s.notifyWaiters() default: isFront := s.waiters.Front() == elem s.waiters.Remove(elem) @@ -75,9 +89,19 @@ func (s *Weighted) Acquire(ctx context.Context, n int64) error { } } s.mu.Unlock() - return err + return ctx.Err() case <-ready: + // Acquired the semaphore. Check that ctx isn't already done. + // We check the done channel instead of calling ctx.Err because we + // already have the channel, and ctx.Err is O(n) with the nesting + // depth of ctx. 
+ select { + case <-done: + s.Release(n) + return ctx.Err() + default: + } return nil } } diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 4756ad5f..8fa707aa 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -103,6 +103,7 @@ var ARM64 struct { HasASIMDDP bool // Advanced SIMD double precision instruction set HasSHA512 bool // SHA512 hardware implementation HasSVE bool // Scalable Vector Extensions + HasSVE2 bool // Scalable Vector Extensions 2 HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 _ CacheLinePad } diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go index f3eb993b..0e27a21e 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -28,6 +28,7 @@ func initOptions() { {Name: "sm3", Feature: &ARM64.HasSM3}, {Name: "sm4", Feature: &ARM64.HasSM4}, {Name: "sve", Feature: &ARM64.HasSVE}, + {Name: "sve2", Feature: &ARM64.HasSVE2}, {Name: "crc32", Feature: &ARM64.HasCRC32}, {Name: "atomics", Feature: &ARM64.HasATOMICS}, {Name: "asimdhp", Feature: &ARM64.HasASIMDHP}, @@ -164,6 +165,15 @@ func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { switch extractBits(pfr0, 32, 35) { case 1: ARM64.HasSVE = true + + parseARM64SVERegister(getzfr0()) + } +} + +func parseARM64SVERegister(zfr0 uint64) { + switch extractBits(zfr0, 0, 3) { + case 1: + ARM64.HasSVE2 = true } } diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s index fcb9a388..22cc9984 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.s +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -29,3 +29,11 @@ TEXT ·getpfr0(SB),NOSPLIT,$0-8 WORD $0xd5380400 MOVD R0, ret+0(FP) RET + +// func getzfr0() uint64 +TEXT ·getzfr0(SB),NOSPLIT,$0-8 + // get SVE Feature Register 0 into x0 + // mrs x0, ID_AA64ZFR0_EL1 = d5380480 + WORD $0xd5380480 + MOVD R0, ret+0(FP) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go index a8acd3e3..6ac6e1ef 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -9,3 +9,4 @@ package cpu func getisar0() uint64 func getisar1() uint64 func getpfr0() uint64 +func getzfr0() uint64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go index a968b80f..3d386d0f 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -35,6 +35,8 @@ const ( hwcap_SHA512 = 1 << 21 hwcap_SVE = 1 << 22 hwcap_ASIMDFHM = 1 << 23 + + hwcap2_SVE2 = 1 << 1 ) // linuxKernelCanEmulateCPUID reports whether we're running @@ -104,6 +106,9 @@ func doinit() { ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) ARM64.HasSVE = isSet(hwCap, hwcap_SVE) ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) + + // HWCAP2 feature bits + ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2) } func isSet(hwc uint, value uint) bool { diff --git a/vendor/golang.org/x/sys/execabs/BUILD.bazel b/vendor/golang.org/x/sys/execabs/BUILD.bazel new file mode 100644 index 00000000..0f5b00e6 --- /dev/null +++ b/vendor/golang.org/x/sys/execabs/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "execabs", + srcs = [ + "execabs.go", + "execabs_go118.go", + "execabs_go119.go", + ], + importmap = "peridot.resf.org/vendor/golang.org/x/sys/execabs", + importpath = "golang.org/x/sys/execabs", + visibility = 
["//visibility:public"], +) diff --git a/vendor/golang.org/x/sys/execabs/execabs.go b/vendor/golang.org/x/sys/execabs/execabs.go new file mode 100644 index 00000000..3bf40fdf --- /dev/null +++ b/vendor/golang.org/x/sys/execabs/execabs.go @@ -0,0 +1,102 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package execabs is a drop-in replacement for os/exec +// that requires PATH lookups to find absolute paths. +// That is, execabs.Command("cmd") runs the same PATH lookup +// as exec.Command("cmd"), but if the result is a path +// which is relative, the Run and Start methods will report +// an error instead of running the executable. +// +// See https://blog.golang.org/path-security for more information +// about when it may be necessary or appropriate to use this package. +package execabs + +import ( + "context" + "fmt" + "os/exec" + "path/filepath" + "reflect" + "unsafe" +) + +// ErrNotFound is the error resulting if a path search failed to find an executable file. +// It is an alias for exec.ErrNotFound. +var ErrNotFound = exec.ErrNotFound + +// Cmd represents an external command being prepared or run. +// It is an alias for exec.Cmd. +type Cmd = exec.Cmd + +// Error is returned by LookPath when it fails to classify a file as an executable. +// It is an alias for exec.Error. +type Error = exec.Error + +// An ExitError reports an unsuccessful exit by a command. +// It is an alias for exec.ExitError. +type ExitError = exec.ExitError + +func relError(file, path string) error { + return fmt.Errorf("%s resolves to executable in current directory (.%c%s)", file, filepath.Separator, path) +} + +// LookPath searches for an executable named file in the directories +// named by the PATH environment variable. If file contains a slash, +// it is tried directly and the PATH is not consulted. The result will be +// an absolute path. +// +// LookPath differs from exec.LookPath in its handling of PATH lookups, +// which are used for file names without slashes. If exec.LookPath's +// PATH lookup would have returned an executable from the current directory, +// LookPath instead returns an error. +func LookPath(file string) (string, error) { + path, err := exec.LookPath(file) + if err != nil && !isGo119ErrDot(err) { + return "", err + } + if filepath.Base(file) == file && !filepath.IsAbs(path) { + return "", relError(file, path) + } + return path, nil +} + +func fixCmd(name string, cmd *exec.Cmd) { + if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) && !isGo119ErrFieldSet(cmd) { + // exec.Command was called with a bare binary name and + // exec.LookPath returned a path which is not absolute. + // Set cmd.lookPathErr and clear cmd.Path so that it + // cannot be run. + lookPathErr := (*error)(unsafe.Pointer(reflect.ValueOf(cmd).Elem().FieldByName("lookPathErr").Addr().Pointer())) + if *lookPathErr == nil { + *lookPathErr = relError(name, cmd.Path) + } + cmd.Path = "" + } +} + +// CommandContext is like Command but includes a context. +// +// The provided context is used to kill the process (by calling os.Process.Kill) +// if the context becomes done before the command completes on its own. +func CommandContext(ctx context.Context, name string, arg ...string) *exec.Cmd { + cmd := exec.CommandContext(ctx, name, arg...) + fixCmd(name, cmd) + return cmd + +} + +// Command returns the Cmd struct to execute the named program with the given arguments. +// See exec.Command for most details. 
+// +// Command differs from exec.Command in its handling of PATH lookups, +// which are used when the program name contains no slashes. +// If exec.Command would have returned an exec.Cmd configured to run an +// executable from the current directory, Command instead +// returns an exec.Cmd that will return an error from Start or Run. +func Command(name string, arg ...string) *exec.Cmd { + cmd := exec.Command(name, arg...) + fixCmd(name, cmd) + return cmd +} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go118.go b/vendor/golang.org/x/sys/execabs/execabs_go118.go new file mode 100644 index 00000000..5627d70e --- /dev/null +++ b/vendor/golang.org/x/sys/execabs/execabs_go118.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.19 + +package execabs + +import "os/exec" + +func isGo119ErrDot(err error) bool { + return false +} + +func isGo119ErrFieldSet(cmd *exec.Cmd) bool { + return false +} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go new file mode 100644 index 00000000..d60ab1b4 --- /dev/null +++ b/vendor/golang.org/x/sys/execabs/execabs_go119.go @@ -0,0 +1,20 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.19 + +package execabs + +import ( + "errors" + "os/exec" +) + +func isGo119ErrDot(err error) bool { + return errors.Is(err, exec.ErrDot) +} + +func isGo119ErrFieldSet(cmd *exec.Cmd) bool { + return cmd.Err != nil +} diff --git a/vendor/golang.org/x/sys/unix/BUILD.bazel b/vendor/golang.org/x/sys/unix/BUILD.bazel index be45b9e1..f03cfb4d 100644 --- a/vendor/golang.org/x/sys/unix/BUILD.bazel +++ b/vendor/golang.org/x/sys/unix/BUILD.bazel @@ -21,6 +21,7 @@ go_library( "asm_linux_s390x.s", "asm_solaris_amd64.s", "bluetooth_linux.go", + "bpxsvc_zos.s", "cap_freebsd.go", "constants.go", "dev_aix_ppc64.go", @@ -56,6 +57,7 @@ go_library( "sockcmsg_linux.go", "sockcmsg_unix.go", "sockcmsg_unix_other.go", + "sockcmsg_zos.go", "syscall.go", "syscall_aix.go", "syscall_aix_ppc64.go", diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go index e7d3df4b..b0e41985 100644 --- a/vendor/golang.org/x/sys/unix/aliases.go +++ b/vendor/golang.org/x/sys/unix/aliases.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos package unix diff --git a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s index 2f67ba86..813dfad7 100644 --- a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s +++ b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s @@ -9,9 +9,11 @@ #define PSALAA 1208(R0) #define GTAB64(x) 80(x) #define LCA64(x) 88(x) +#define SAVSTACK_ASYNC(x) 336(x) // in the LCA #define CAA(x) 8(x) -#define EDCHPXV(x) 1016(x) // in the CAA -#define SAVSTACK_ASYNC(x) 336(x) // in the LCA +#define CEECAATHDID(x) 976(x) // in the CAA +#define EDCHPXV(x) 1016(x) // in the CAA +#define GOCB(x) 1104(x) // in the CAA // SS_*, where x=SAVSTACK_ASYNC #define SS_LE(x) 0(x) @@ -19,394 +21,125 @@ #define SS_ERRNO(x) 16(x) #define SS_ERRNOJR(x) 20(x) -#define LE_CALL BYTE $0x0D; BYTE $0x76; // BL R7, R6 +// Function Descriptor Offsets +#define __errno 0x156*16 +#define __err2ad 0x16C*16 -TEXT ·clearErrno(SB),NOSPLIT,$0-0 - BL addrerrno<>(SB) - MOVD $0, 0(R3) +// Call Instructions +#define LE_CALL BYTE $0x0D; BYTE $0x76 // BL R7, R6 +#define SVC_LOAD BYTE $0x0A; BYTE $0x08 // SVC 08 LOAD +#define SVC_DELETE BYTE $0x0A; BYTE $0x09 // SVC 09 DELETE + +DATA zosLibVec<>(SB)/8, $0 +GLOBL zosLibVec<>(SB), NOPTR, $8 + +TEXT ·initZosLibVec(SB), NOSPLIT|NOFRAME, $0-0 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD CAA(R8), R8 + MOVD EDCHPXV(R8), R8 + MOVD R8, zosLibVec<>(SB) + RET + +TEXT ·GetZosLibVec(SB), NOSPLIT|NOFRAME, $0-0 + MOVD zosLibVec<>(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·clearErrno(SB), NOSPLIT, $0-0 + BL addrerrno<>(SB) + MOVD $0, 0(R3) RET // Returns the address of errno in R3. -TEXT addrerrno<>(SB),NOSPLIT|NOFRAME,$0-0 +TEXT addrerrno<>(SB), NOSPLIT|NOFRAME, $0-0 // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 // Get __errno FuncDesc. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - ADD $(0x156*16), R9 - LMG 0(R9), R5, R6 + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + ADD $(__errno), R9 + LMG 0(R9), R5, R6 // Switch to saved LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) // Call __errno function. LE_CALL NOPH // Switch back to Go stack. - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - RET - -TEXT ·syscall_syscall(SB),NOSPLIT,$0-56 - BL runtime·entersyscall(SB) - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 - - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Call function. - LE_CALL - NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+32(FP) - MOVD R0, r2+40(FP) - MOVD R0, err+48(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+48(FP) -done: - BL runtime·exitsyscall(SB) - RET - -TEXT ·syscall_rawsyscall(SB),NOSPLIT,$0-56 - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. 
- MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 - - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Call function. - LE_CALL - NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+32(FP) - MOVD R0, r2+40(FP) - MOVD R0, err+48(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+48(FP) -done: - RET - -TEXT ·syscall_syscall6(SB),NOSPLIT,$0-80 - BL runtime·entersyscall(SB) - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 - - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. - MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - - // Call function. - LE_CALL - NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+56(FP) - MOVD R0, r2+64(FP) - MOVD R0, err+72(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+72(FP) -done: - BL runtime·exitsyscall(SB) - RET - -TEXT ·syscall_rawsyscall6(SB),NOSPLIT,$0-80 - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 - - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. - MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - - // Call function. - LE_CALL - NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+56(FP) - MOVD R0, r2+64(FP) - MOVD R0, err+72(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL ·rrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+72(FP) -done: - RET - -TEXT ·syscall_syscall9(SB),NOSPLIT,$0 - BL runtime·entersyscall(SB) - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 - - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. - MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - MOVD a7+56(FP), R12 - MOVD R12, (2176+48)(R4) - MOVD a8+64(FP), R12 - MOVD R12, (2176+56)(R4) - MOVD a9+72(FP), R12 - MOVD R12, (2176+64)(R4) - - // Call function. - LE_CALL - NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+80(FP) - MOVD R0, r2+88(FP) - MOVD R0, err+96(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+96(FP) -done: - BL runtime·exitsyscall(SB) - RET - -TEXT ·syscall_rawsyscall9(SB),NOSPLIT,$0 - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). 
- MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 - - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. - MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - MOVD a7+56(FP), R12 - MOVD R12, (2176+48)(R4) - MOVD a8+64(FP), R12 - MOVD R12, (2176+56)(R4) - MOVD a9+72(FP), R12 - MOVD R12, (2176+64)(R4) - - // Call function. - LE_CALL - NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+80(FP) - MOVD R0, r2+88(FP) - MOVD R0, err+96(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+96(FP) -done: + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. RET // func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64) -TEXT ·svcCall(SB),NOSPLIT,$0 - BL runtime·save_g(SB) // Save g and stack pointer - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD R15, 0(R9) +TEXT ·svcCall(SB), NOSPLIT, $0 + BL runtime·save_g(SB) // Save g and stack pointer + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD R15, 0(R9) - MOVD argv+8(FP), R1 // Move function arguments into registers - MOVD dsa+16(FP), g - MOVD fnptr+0(FP), R15 + MOVD argv+8(FP), R1 // Move function arguments into registers + MOVD dsa+16(FP), g + MOVD fnptr+0(FP), R15 - BYTE $0x0D // Branch to function - BYTE $0xEF + BYTE $0x0D // Branch to function + BYTE $0xEF - BL runtime·load_g(SB) // Restore g and stack pointer - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R15 + BL runtime·load_g(SB) // Restore g and stack pointer + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R15 RET // func svcLoad(name *byte) unsafe.Pointer -TEXT ·svcLoad(SB),NOSPLIT,$0 - MOVD R15, R2 // Save go stack pointer - MOVD name+0(FP), R0 // Move SVC args into registers - MOVD $0x80000000, R1 - MOVD $0, R15 - BYTE $0x0A // SVC 08 LOAD - BYTE $0x08 - MOVW R15, R3 // Save return code from SVC - MOVD R2, R15 // Restore go stack pointer - CMP R3, $0 // Check SVC return code - BNE error +TEXT ·svcLoad(SB), NOSPLIT, $0 + MOVD R15, R2 // Save go stack pointer + MOVD name+0(FP), R0 // Move SVC args into registers + MOVD $0x80000000, R1 + MOVD $0, R15 + SVC_LOAD + MOVW R15, R3 // Save return code from SVC + MOVD R2, R15 // Restore go stack pointer + CMP R3, $0 // Check SVC return code + BNE error - MOVD $-2, R3 // Reset last bit of entry point to zero - AND R0, R3 - MOVD R3, addr+8(FP) // Return entry point returned by SVC - CMP R0, R3 // Check if last bit of entry point was set - BNE done + MOVD $-2, R3 // Reset last bit of entry point to zero + AND R0, R3 + MOVD R3, ret+8(FP) // Return entry point returned by SVC + CMP R0, R3 // Check if last bit of entry point was set + BNE done - MOVD R15, R2 // Save go stack pointer - MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08) - BYTE $0x0A // SVC 09 DELETE - BYTE $0x09 - MOVD R2, R15 // Restore go stack pointer + MOVD R15, R2 // Save go stack pointer + MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08) + SVC_DELETE + MOVD R2, R15 // Restore go stack pointer error: - MOVD $0, addr+8(FP) // Return 0 on failure + MOVD $0, ret+8(FP) // Return 0 on failure + done: - XOR R0, R0 
// Reset r0 to 0 + XOR R0, R0 // Reset r0 to 0 RET // func svcUnload(name *byte, fnptr unsafe.Pointer) int64 -TEXT ·svcUnload(SB),NOSPLIT,$0 - MOVD R15, R2 // Save go stack pointer - MOVD name+0(FP), R0 // Move SVC args into registers - MOVD addr+8(FP), R15 - BYTE $0x0A // SVC 09 - BYTE $0x09 - XOR R0, R0 // Reset r0 to 0 - MOVD R15, R1 // Save SVC return code - MOVD R2, R15 // Restore go stack pointer - MOVD R1, rc+0(FP) // Return SVC return code +TEXT ·svcUnload(SB), NOSPLIT, $0 + MOVD R15, R2 // Save go stack pointer + MOVD name+0(FP), R0 // Move SVC args into registers + MOVD fnptr+8(FP), R15 + SVC_DELETE + XOR R0, R0 // Reset r0 to 0 + MOVD R15, R1 // Save SVC return code + MOVD R2, R15 // Restore go stack pointer + MOVD R1, ret+16(FP) // Return SVC return code RET // func gettid() uint64 @@ -417,7 +150,233 @@ TEXT ·gettid(SB), NOSPLIT, $0 // Get CEECAATHDID MOVD CAA(R8), R9 - MOVD 0x3D0(R9), R9 + MOVD CEECAATHDID(R9), R9 MOVD R9, ret+0(FP) RET + +// +// Call LE function, if the return is -1 +// errno and errno2 is retrieved +// +TEXT ·CallLeFuncWithErr(SB), NOSPLIT, $0 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD CAA(R8), R9 + MOVD g, GOCB(R9) + + // Restore LE stack. + MOVD SAVSTACK_ASYNC(R8), R9 // R9-> LE stack frame saving address + MOVD 0(R9), R4 // R4-> restore previously saved stack frame pointer + + MOVD parms_base+8(FP), R7 // R7 -> argument array + MOVD parms_len+16(FP), R8 // R8 number of arguments + + // arg 1 ---> R1 + CMP R8, $0 + BEQ docall + SUB $1, R8 + MOVD 0(R7), R1 + + // arg 2 ---> R2 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R2 + + // arg 3 --> R3 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R3 + + CMP R8, $0 + BEQ docall + MOVD $2176+16, R6 // starting LE stack address-8 to store 4th argument + +repeat: + ADD $8, R7 + MOVD 0(R7), R0 // advance arg pointer by 8 byte + ADD $8, R6 // advance LE argument address by 8 byte + MOVD R0, (R4)(R6*1) // copy argument from go-slice to le-frame + SUB $1, R8 + CMP R8, $0 + BNE repeat + +docall: + MOVD funcdesc+0(FP), R8 // R8-> function descriptor + LMG 0(R8), R5, R6 + MOVD $0, 0(R9) // R9 address of SAVSTACK_ASYNC + LE_CALL // balr R7, R6 (return #1) + NOPH + MOVD R3, ret+32(FP) + CMP R3, $-1 // compare result to -1 + BNE done + + // retrieve errno and errno2 + MOVD zosLibVec<>(SB), R8 + ADD $(__errno), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __errno (return #3) + NOPH + MOVWZ 0(R3), R3 + MOVD R3, err+48(FP) + MOVD zosLibVec<>(SB), R8 + ADD $(__err2ad), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __err2ad (return #2) + NOPH + MOVW (R3), R2 // retrieve errno2 + MOVD R2, errno2+40(FP) // store in return area + +done: + MOVD R4, 0(R9) // Save stack pointer. + RET + +// +// Call LE function, if the return is 0 +// errno and errno2 is retrieved +// +TEXT ·CallLeFuncWithPtrReturn(SB), NOSPLIT, $0 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD CAA(R8), R9 + MOVD g, GOCB(R9) + + // Restore LE stack. 
+	MOVD SAVSTACK_ASYNC(R8), R9 // R9-> LE stack frame saving address
+	MOVD 0(R9), R4              // R4-> restore previously saved stack frame pointer
+
+	MOVD parms_base+8(FP), R7 // R7 -> argument array
+	MOVD parms_len+16(FP), R8 // R8 number of arguments
+
+	// arg 1 ---> R1
+	CMP R8, $0
+	BEQ docall
+	SUB $1, R8
+	MOVD 0(R7), R1
+
+	// arg 2 ---> R2
+	CMP R8, $0
+	BEQ docall
+	SUB $1, R8
+	ADD $8, R7
+	MOVD 0(R7), R2
+
+	// arg 3 --> R3
+	CMP R8, $0
+	BEQ docall
+	SUB $1, R8
+	ADD $8, R7
+	MOVD 0(R7), R3
+
+	CMP R8, $0
+	BEQ docall
+	MOVD $2176+16, R6 // starting LE stack address-8 to store 4th argument
+
+repeat:
+	ADD $8, R7
+	MOVD 0(R7), R0      // advance arg pointer by 8 byte
+	ADD $8, R6          // advance LE argument address by 8 byte
+	MOVD R0, (R4)(R6*1) // copy argument from go-slice to le-frame
+	SUB $1, R8
+	CMP R8, $0
+	BNE repeat
+
+docall:
+	MOVD funcdesc+0(FP), R8 // R8-> function descriptor
+	LMG 0(R8), R5, R6
+	MOVD $0, 0(R9) // R9 address of SAVSTACK_ASYNC
+	LE_CALL        // balr R7, R6 (return #1)
+	NOPH
+	MOVD R3, ret+32(FP)
+	CMP R3, $0 // compare result to 0
+	BNE done
+
+	// retrieve errno and errno2
+	MOVD zosLibVec<>(SB), R8
+	ADD $(__errno), R8
+	LMG 0(R8), R5, R6
+	LE_CALL // balr R7, R6 __errno (return #3)
+	NOPH
+	MOVWZ 0(R3), R3
+	MOVD R3, err+48(FP)
+	MOVD zosLibVec<>(SB), R8
+	ADD $(__err2ad), R8
+	LMG 0(R8), R5, R6
+	LE_CALL // balr R7, R6 __err2ad (return #2)
+	NOPH
+	MOVW (R3), R2          // retrieve errno2
+	MOVD R2, errno2+40(FP) // store in return area
+	XOR R2, R2
+	MOVWZ R2, (R3) // clear errno2
+
+done:
+	MOVD R4, 0(R9) // Save stack pointer.
+	RET
+
+//
+// function to test if a pointer can be safely dereferenced (content read)
+// return 0 for success
+//
+TEXT ·ptrtest(SB), NOSPLIT, $0-16
+	MOVD arg+0(FP), R10 // test pointer in R10
+
+	// set up R2 to point to CEECAADMC
+	BYTE $0xE3; BYTE $0x20; BYTE $0x04; BYTE $0xB8; BYTE $0x00; BYTE $0x17 // llgt 2,1208
+	BYTE $0xB9; BYTE $0x17; BYTE $0x00; BYTE $0x22                         // llgtr 2,2
+	BYTE $0xA5; BYTE $0x26; BYTE $0x7F; BYTE $0xFF                         // nilh 2,32767
+	BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x58; BYTE $0x00; BYTE $0x04 // lg 2,88(2)
+	BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x08; BYTE $0x00; BYTE $0x04 // lg 2,8(2)
+	BYTE $0x41; BYTE $0x22; BYTE $0x03; BYTE $0x68                         // la 2,872(2)
+
+	// set up R5 to point to the "shunt" path which sets R3 to 1 (failure)
+	BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x33 // xgr 3,3
+	BYTE $0xA7; BYTE $0x55; BYTE $0x00; BYTE $0x04 // bras 5,lbl1
+	BYTE $0xA7; BYTE $0x39; BYTE $0x00; BYTE $0x01 // lghi 3,1
+
+	// if r3 is not zero (failed) then branch to finish
+	BYTE $0xB9; BYTE $0x02; BYTE $0x00; BYTE $0x33 // lbl1 ltgr 3,3
+	BYTE $0xA7; BYTE $0x74; BYTE $0x00; BYTE $0x08 // brc b'0111',lbl2
+
+	// atomic store shunt address in R5 into CEECAADMC
+	BYTE $0xE3; BYTE $0x52; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 5,0(2)
+
+	// now try reading from the test pointer in R10, if it fails it branches to the "lghi" instruction above
+	BYTE $0xE3; BYTE $0x9A; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x04 // lg 9,0(10)
+
+	// finish here, restore 0 into CEECAADMC
+	BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x99                         // lbl2 xgr 9,9
+	BYTE $0xE3; BYTE $0x92; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 9,0(2)
+	MOVD R3, ret+8(FP) // result in R3
+	RET
+
+//
+// function to test if a uintptr can be loaded from a pointer
+// return 1: the 8-byte content
+//        2: 0 for success, 1 for failure
+//
+// func safeload(ptr uintptr) ( value uintptr, error uintptr)
+TEXT ·safeload(SB), NOSPLIT, $0-24
+	MOVD ptr+0(FP), R10 
// test pointer in R10 + MOVD $0x0, R6 + BYTE $0xE3; BYTE $0x20; BYTE $0x04; BYTE $0xB8; BYTE $0x00; BYTE $0x17 // llgt 2,1208 + BYTE $0xB9; BYTE $0x17; BYTE $0x00; BYTE $0x22 // llgtr 2,2 + BYTE $0xA5; BYTE $0x26; BYTE $0x7F; BYTE $0xFF // nilh 2,32767 + BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x58; BYTE $0x00; BYTE $0x04 // lg 2,88(2) + BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x08; BYTE $0x00; BYTE $0x04 // lg 2,8(2) + BYTE $0x41; BYTE $0x22; BYTE $0x03; BYTE $0x68 // la 2,872(2) + BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x33 // xgr 3,3 + BYTE $0xA7; BYTE $0x55; BYTE $0x00; BYTE $0x04 // bras 5,lbl1 + BYTE $0xA7; BYTE $0x39; BYTE $0x00; BYTE $0x01 // lghi 3,1 + BYTE $0xB9; BYTE $0x02; BYTE $0x00; BYTE $0x33 // lbl1 ltgr 3,3 + BYTE $0xA7; BYTE $0x74; BYTE $0x00; BYTE $0x08 // brc b'0111',lbl2 + BYTE $0xE3; BYTE $0x52; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 5,0(2) + BYTE $0xE3; BYTE $0x6A; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x04 // lg 6,0(10) + BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x99 // lbl2 xgr 9,9 + BYTE $0xE3; BYTE $0x92; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 9,0(2) + MOVD R6, value+8(FP) // result in R6 + MOVD R3, error+16(FP) // error in R3 + RET diff --git a/vendor/golang.org/x/sys/unix/bpxsvc_zos.go b/vendor/golang.org/x/sys/unix/bpxsvc_zos.go new file mode 100644 index 00000000..39d647d8 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/bpxsvc_zos.go @@ -0,0 +1,657 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos + +package unix + +import ( + "bytes" + "fmt" + "unsafe" +) + +//go:noescape +func bpxcall(plist []unsafe.Pointer, bpx_offset int64) + +//go:noescape +func A2e([]byte) + +//go:noescape +func E2a([]byte) + +const ( + BPX4STA = 192 // stat + BPX4FST = 104 // fstat + BPX4LST = 132 // lstat + BPX4OPN = 156 // open + BPX4CLO = 72 // close + BPX4CHR = 500 // chattr + BPX4FCR = 504 // fchattr + BPX4LCR = 1180 // lchattr + BPX4CTW = 492 // cond_timed_wait + BPX4GTH = 1056 // __getthent + BPX4PTQ = 412 // pthread_quiesc + BPX4PTR = 320 // ptrace +) + +const ( + //options + //byte1 + BPX_OPNFHIGH = 0x80 + //byte2 + BPX_OPNFEXEC = 0x80 + //byte3 + BPX_O_NOLARGEFILE = 0x08 + BPX_O_LARGEFILE = 0x04 + BPX_O_ASYNCSIG = 0x02 + BPX_O_SYNC = 0x01 + //byte4 + BPX_O_CREXCL = 0xc0 + BPX_O_CREAT = 0x80 + BPX_O_EXCL = 0x40 + BPX_O_NOCTTY = 0x20 + BPX_O_TRUNC = 0x10 + BPX_O_APPEND = 0x08 + BPX_O_NONBLOCK = 0x04 + BPX_FNDELAY = 0x04 + BPX_O_RDWR = 0x03 + BPX_O_RDONLY = 0x02 + BPX_O_WRONLY = 0x01 + BPX_O_ACCMODE = 0x03 + BPX_O_GETFL = 0x0f + + //mode + // byte1 (file type) + BPX_FT_DIR = 1 + BPX_FT_CHARSPEC = 2 + BPX_FT_REGFILE = 3 + BPX_FT_FIFO = 4 + BPX_FT_SYMLINK = 5 + BPX_FT_SOCKET = 6 + //byte3 + BPX_S_ISUID = 0x08 + BPX_S_ISGID = 0x04 + BPX_S_ISVTX = 0x02 + BPX_S_IRWXU1 = 0x01 + BPX_S_IRUSR = 0x01 + //byte4 + BPX_S_IRWXU2 = 0xc0 + BPX_S_IWUSR = 0x80 + BPX_S_IXUSR = 0x40 + BPX_S_IRWXG = 0x38 + BPX_S_IRGRP = 0x20 + BPX_S_IWGRP = 0x10 + BPX_S_IXGRP = 0x08 + BPX_S_IRWXOX = 0x07 + BPX_S_IROTH = 0x04 + BPX_S_IWOTH = 0x02 + BPX_S_IXOTH = 0x01 + + CW_INTRPT = 1 + CW_CONDVAR = 32 + CW_TIMEOUT = 64 + + PGTHA_NEXT = 2 + PGTHA_CURRENT = 1 + PGTHA_FIRST = 0 + PGTHA_LAST = 3 + PGTHA_PROCESS = 0x80 + PGTHA_CONTTY = 0x40 + PGTHA_PATH = 0x20 + PGTHA_COMMAND = 0x10 + PGTHA_FILEDATA = 0x08 + PGTHA_THREAD = 0x04 + PGTHA_PTAG = 0x02 + PGTHA_COMMANDLONG = 0x01 + PGTHA_THREADFAST = 0x80 + PGTHA_FILEPATH = 0x40 + PGTHA_THDSIGMASK = 
0x20 + // thread quiece mode + QUIESCE_TERM int32 = 1 + QUIESCE_FORCE int32 = 2 + QUIESCE_QUERY int32 = 3 + QUIESCE_FREEZE int32 = 4 + QUIESCE_UNFREEZE int32 = 5 + FREEZE_THIS_THREAD int32 = 6 + FREEZE_EXIT int32 = 8 + QUIESCE_SRB int32 = 9 +) + +type Pgtha struct { + Pid uint32 // 0 + Tid0 uint32 // 4 + Tid1 uint32 + Accesspid byte // C + Accesstid byte // D + Accessasid uint16 // E + Loginname [8]byte // 10 + Flag1 byte // 18 + Flag1b2 byte // 19 +} + +type Bpxystat_t struct { // DSECT BPXYSTAT + St_id [4]uint8 // 0 + St_length uint16 // 0x4 + St_version uint16 // 0x6 + St_mode uint32 // 0x8 + St_ino uint32 // 0xc + St_dev uint32 // 0x10 + St_nlink uint32 // 0x14 + St_uid uint32 // 0x18 + St_gid uint32 // 0x1c + St_size uint64 // 0x20 + St_atime uint32 // 0x28 + St_mtime uint32 // 0x2c + St_ctime uint32 // 0x30 + St_rdev uint32 // 0x34 + St_auditoraudit uint32 // 0x38 + St_useraudit uint32 // 0x3c + St_blksize uint32 // 0x40 + St_createtime uint32 // 0x44 + St_auditid [4]uint32 // 0x48 + St_res01 uint32 // 0x58 + Ft_ccsid uint16 // 0x5c + Ft_flags uint16 // 0x5e + St_res01a [2]uint32 // 0x60 + St_res02 uint32 // 0x68 + St_blocks uint32 // 0x6c + St_opaque [3]uint8 // 0x70 + St_visible uint8 // 0x73 + St_reftime uint32 // 0x74 + St_fid uint64 // 0x78 + St_filefmt uint8 // 0x80 + St_fspflag2 uint8 // 0x81 + St_res03 [2]uint8 // 0x82 + St_ctimemsec uint32 // 0x84 + St_seclabel [8]uint8 // 0x88 + St_res04 [4]uint8 // 0x90 + // end of version 1 + _ uint32 // 0x94 + St_atime64 uint64 // 0x98 + St_mtime64 uint64 // 0xa0 + St_ctime64 uint64 // 0xa8 + St_createtime64 uint64 // 0xb0 + St_reftime64 uint64 // 0xb8 + _ uint64 // 0xc0 + St_res05 [16]uint8 // 0xc8 + // end of version 2 +} + +type BpxFilestatus struct { + Oflag1 byte + Oflag2 byte + Oflag3 byte + Oflag4 byte +} + +type BpxMode struct { + Ftype byte + Mode1 byte + Mode2 byte + Mode3 byte +} + +// Thr attribute structure for extended attributes +type Bpxyatt_t struct { // DSECT BPXYATT + Att_id [4]uint8 + Att_version uint16 + Att_res01 [2]uint8 + Att_setflags1 uint8 + Att_setflags2 uint8 + Att_setflags3 uint8 + Att_setflags4 uint8 + Att_mode uint32 + Att_uid uint32 + Att_gid uint32 + Att_opaquemask [3]uint8 + Att_visblmaskres uint8 + Att_opaque [3]uint8 + Att_visibleres uint8 + Att_size_h uint32 + Att_size_l uint32 + Att_atime uint32 + Att_mtime uint32 + Att_auditoraudit uint32 + Att_useraudit uint32 + Att_ctime uint32 + Att_reftime uint32 + // end of version 1 + Att_filefmt uint8 + Att_res02 [3]uint8 + Att_filetag uint32 + Att_res03 [8]uint8 + // end of version 2 + Att_atime64 uint64 + Att_mtime64 uint64 + Att_ctime64 uint64 + Att_reftime64 uint64 + Att_seclabel [8]uint8 + Att_ver3res02 [8]uint8 + // end of version 3 +} + +func BpxOpen(name string, options *BpxFilestatus, mode *BpxMode) (rv int32, rc int32, rn int32) { + if len(name) < 1024 { + var namebuf [1024]byte + sz := int32(copy(namebuf[:], name)) + A2e(namebuf[:sz]) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(options) + parms[3] = unsafe.Pointer(mode) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4OPN) + return rv, rc, rn + } + return -1, -1, -1 +} + +func BpxClose(fd int32) (rv int32, rc int32, rn int32) { + var parms [4]unsafe.Pointer + parms[0] = unsafe.Pointer(&fd) + parms[1] = unsafe.Pointer(&rv) + parms[2] = unsafe.Pointer(&rc) + parms[3] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4CLO) + return rv, rc, rn +} + 
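Reviewer note (illustrative only, not part of the diff): a minimal sketch of how the BPX4OPN/BPX4CLO wrappers above compose on a z/OS build. Per the wrapper signatures, rv carries the new descriptor on success while rc/rn hold the errno and reason code; the specific flag byte chosen below is an assumption for the example.

	package main

	import (
		"fmt"

		"golang.org/x/sys/unix"
	)

	func main() {
		var opts unix.BpxFilestatus
		opts.Oflag4 = unix.BPX_O_RDONLY // read-only bit lives in the fourth option byte
		var mode unix.BpxMode
		fd, errno, reason := unix.BpxOpen("/etc/profile", &opts, &mode)
		if fd < 0 {
			fmt.Printf("BPX4OPN failed: errno=%d reason=0x%x\n", errno, reason)
			return
		}
		defer unix.BpxClose(fd)
		fmt.Println("opened fd", fd)
	}
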
+func BpxFileFStat(fd int32, st *Bpxystat_t) (rv int32, rc int32, rn int32) { + st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3} + st.St_version = 2 + stat_sz := uint32(unsafe.Sizeof(*st)) + var parms [6]unsafe.Pointer + parms[0] = unsafe.Pointer(&fd) + parms[1] = unsafe.Pointer(&stat_sz) + parms[2] = unsafe.Pointer(st) + parms[3] = unsafe.Pointer(&rv) + parms[4] = unsafe.Pointer(&rc) + parms[5] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4FST) + return rv, rc, rn +} + +func BpxFileStat(name string, st *Bpxystat_t) (rv int32, rc int32, rn int32) { + if len(name) < 1024 { + var namebuf [1024]byte + sz := int32(copy(namebuf[:], name)) + A2e(namebuf[:sz]) + st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3} + st.St_version = 2 + stat_sz := uint32(unsafe.Sizeof(*st)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&stat_sz) + parms[3] = unsafe.Pointer(st) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4STA) + return rv, rc, rn + } + return -1, -1, -1 +} + +func BpxFileLStat(name string, st *Bpxystat_t) (rv int32, rc int32, rn int32) { + if len(name) < 1024 { + var namebuf [1024]byte + sz := int32(copy(namebuf[:], name)) + A2e(namebuf[:sz]) + st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3} + st.St_version = 2 + stat_sz := uint32(unsafe.Sizeof(*st)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&stat_sz) + parms[3] = unsafe.Pointer(st) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4LST) + return rv, rc, rn + } + return -1, -1, -1 +} + +func BpxChattr(path string, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) { + if len(path) >= 1024 { + return -1, -1, -1 + } + var namebuf [1024]byte + sz := int32(copy(namebuf[:], path)) + A2e(namebuf[:sz]) + attr_sz := uint32(unsafe.Sizeof(*attr)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&attr_sz) + parms[3] = unsafe.Pointer(attr) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4CHR) + return rv, rc, rn +} + +func BpxLchattr(path string, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) { + if len(path) >= 1024 { + return -1, -1, -1 + } + var namebuf [1024]byte + sz := int32(copy(namebuf[:], path)) + A2e(namebuf[:sz]) + attr_sz := uint32(unsafe.Sizeof(*attr)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&attr_sz) + parms[3] = unsafe.Pointer(attr) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4LCR) + return rv, rc, rn +} + +func BpxFchattr(fd int32, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) { + attr_sz := uint32(unsafe.Sizeof(*attr)) + var parms [6]unsafe.Pointer + parms[0] = unsafe.Pointer(&fd) + parms[1] = unsafe.Pointer(&attr_sz) + parms[2] = unsafe.Pointer(attr) + parms[3] = unsafe.Pointer(&rv) + parms[4] = unsafe.Pointer(&rc) + parms[5] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4FCR) + return rv, rc, rn +} + +func BpxCondTimedWait(sec uint32, nsec uint32, events uint32, secrem *uint32, nsecrem *uint32) (rv int32, rc int32, rn int32) { + var parms [8]unsafe.Pointer + parms[0] = unsafe.Pointer(&sec) + parms[1] = 
unsafe.Pointer(&nsec) + parms[2] = unsafe.Pointer(&events) + parms[3] = unsafe.Pointer(secrem) + parms[4] = unsafe.Pointer(nsecrem) + parms[5] = unsafe.Pointer(&rv) + parms[6] = unsafe.Pointer(&rc) + parms[7] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4CTW) + return rv, rc, rn +} +func BpxGetthent(in *Pgtha, outlen *uint32, out unsafe.Pointer) (rv int32, rc int32, rn int32) { + var parms [7]unsafe.Pointer + inlen := uint32(26) // nothing else will work. Go says Pgtha is 28-byte because of alignment, but Pgtha is "packed" and must be 26-byte + parms[0] = unsafe.Pointer(&inlen) + parms[1] = unsafe.Pointer(&in) + parms[2] = unsafe.Pointer(outlen) + parms[3] = unsafe.Pointer(&out) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4GTH) + return rv, rc, rn +} +func ZosJobname() (jobname string, err error) { + var pgtha Pgtha + pgtha.Pid = uint32(Getpid()) + pgtha.Accesspid = PGTHA_CURRENT + pgtha.Flag1 = PGTHA_PROCESS + var out [256]byte + var outlen uint32 + outlen = 256 + rv, rc, rn := BpxGetthent(&pgtha, &outlen, unsafe.Pointer(&out[0])) + if rv == 0 { + gthc := []byte{0x87, 0xa3, 0x88, 0x83} // 'gthc' in ebcdic + ix := bytes.Index(out[:], gthc) + if ix == -1 { + err = fmt.Errorf("BPX4GTH: gthc return data not found") + return + } + jn := out[ix+80 : ix+88] // we didn't declare Pgthc, but jobname is 8-byte at offset 80 + E2a(jn) + jobname = string(bytes.TrimRight(jn, " ")) + + } else { + err = fmt.Errorf("BPX4GTH: rc=%d errno=%d reason=code=0x%x", rv, rc, rn) + } + return +} +func Bpx4ptq(code int32, data string) (rv int32, rc int32, rn int32) { + var userdata [8]byte + var parms [5]unsafe.Pointer + copy(userdata[:], data+" ") + A2e(userdata[:]) + parms[0] = unsafe.Pointer(&code) + parms[1] = unsafe.Pointer(&userdata[0]) + parms[2] = unsafe.Pointer(&rv) + parms[3] = unsafe.Pointer(&rc) + parms[4] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4PTQ) + return rv, rc, rn +} + +const ( + PT_TRACE_ME = 0 // Debug this process + PT_READ_I = 1 // Read a full word + PT_READ_D = 2 // Read a full word + PT_READ_U = 3 // Read control info + PT_WRITE_I = 4 //Write a full word + PT_WRITE_D = 5 //Write a full word + PT_CONTINUE = 7 //Continue the process + PT_KILL = 8 //Terminate the process + PT_READ_GPR = 11 // Read GPR, CR, PSW + PT_READ_FPR = 12 // Read FPR + PT_READ_VR = 13 // Read VR + PT_WRITE_GPR = 14 // Write GPR, CR, PSW + PT_WRITE_FPR = 15 // Write FPR + PT_WRITE_VR = 16 // Write VR + PT_READ_BLOCK = 17 // Read storage + PT_WRITE_BLOCK = 19 // Write storage + PT_READ_GPRH = 20 // Read GPRH + PT_WRITE_GPRH = 21 // Write GPRH + PT_REGHSET = 22 // Read all GPRHs + PT_ATTACH = 30 // Attach to a process + PT_DETACH = 31 // Detach from a process + PT_REGSET = 32 // Read all GPRs + PT_REATTACH = 33 // Reattach to a process + PT_LDINFO = 34 // Read loader info + PT_MULTI = 35 // Multi process mode + PT_LD64INFO = 36 // RMODE64 Info Area + PT_BLOCKREQ = 40 // Block request + PT_THREAD_INFO = 60 // Read thread info + PT_THREAD_MODIFY = 61 + PT_THREAD_READ_FOCUS = 62 + PT_THREAD_WRITE_FOCUS = 63 + PT_THREAD_HOLD = 64 + PT_THREAD_SIGNAL = 65 + PT_EXPLAIN = 66 + PT_EVENTS = 67 + PT_THREAD_INFO_EXTENDED = 68 + PT_REATTACH2 = 71 + PT_CAPTURE = 72 + PT_UNCAPTURE = 73 + PT_GET_THREAD_TCB = 74 + PT_GET_ALET = 75 + PT_SWAPIN = 76 + PT_EXTENDED_EVENT = 98 + PT_RECOVER = 99 // Debug a program check + PT_GPR0 = 0 // General purpose register 0 + PT_GPR1 = 1 // General purpose register 1 + PT_GPR2 = 2 // General purpose register 2 + PT_GPR3 = 3 // 
General purpose register 3 + PT_GPR4 = 4 // General purpose register 4 + PT_GPR5 = 5 // General purpose register 5 + PT_GPR6 = 6 // General purpose register 6 + PT_GPR7 = 7 // General purpose register 7 + PT_GPR8 = 8 // General purpose register 8 + PT_GPR9 = 9 // General purpose register 9 + PT_GPR10 = 10 // General purpose register 10 + PT_GPR11 = 11 // General purpose register 11 + PT_GPR12 = 12 // General purpose register 12 + PT_GPR13 = 13 // General purpose register 13 + PT_GPR14 = 14 // General purpose register 14 + PT_GPR15 = 15 // General purpose register 15 + PT_FPR0 = 16 // Floating point register 0 + PT_FPR1 = 17 // Floating point register 1 + PT_FPR2 = 18 // Floating point register 2 + PT_FPR3 = 19 // Floating point register 3 + PT_FPR4 = 20 // Floating point register 4 + PT_FPR5 = 21 // Floating point register 5 + PT_FPR6 = 22 // Floating point register 6 + PT_FPR7 = 23 // Floating point register 7 + PT_FPR8 = 24 // Floating point register 8 + PT_FPR9 = 25 // Floating point register 9 + PT_FPR10 = 26 // Floating point register 10 + PT_FPR11 = 27 // Floating point register 11 + PT_FPR12 = 28 // Floating point register 12 + PT_FPR13 = 29 // Floating point register 13 + PT_FPR14 = 30 // Floating point register 14 + PT_FPR15 = 31 // Floating point register 15 + PT_FPC = 32 // Floating point control register + PT_PSW = 40 // PSW + PT_PSW0 = 40 // Left half of the PSW + PT_PSW1 = 41 // Right half of the PSW + PT_CR0 = 42 // Control register 0 + PT_CR1 = 43 // Control register 1 + PT_CR2 = 44 // Control register 2 + PT_CR3 = 45 // Control register 3 + PT_CR4 = 46 // Control register 4 + PT_CR5 = 47 // Control register 5 + PT_CR6 = 48 // Control register 6 + PT_CR7 = 49 // Control register 7 + PT_CR8 = 50 // Control register 8 + PT_CR9 = 51 // Control register 9 + PT_CR10 = 52 // Control register 10 + PT_CR11 = 53 // Control register 11 + PT_CR12 = 54 // Control register 12 + PT_CR13 = 55 // Control register 13 + PT_CR14 = 56 // Control register 14 + PT_CR15 = 57 // Control register 15 + PT_GPRH0 = 58 // GP High register 0 + PT_GPRH1 = 59 // GP High register 1 + PT_GPRH2 = 60 // GP High register 2 + PT_GPRH3 = 61 // GP High register 3 + PT_GPRH4 = 62 // GP High register 4 + PT_GPRH5 = 63 // GP High register 5 + PT_GPRH6 = 64 // GP High register 6 + PT_GPRH7 = 65 // GP High register 7 + PT_GPRH8 = 66 // GP High register 8 + PT_GPRH9 = 67 // GP High register 9 + PT_GPRH10 = 68 // GP High register 10 + PT_GPRH11 = 69 // GP High register 11 + PT_GPRH12 = 70 // GP High register 12 + PT_GPRH13 = 71 // GP High register 13 + PT_GPRH14 = 72 // GP High register 14 + PT_GPRH15 = 73 // GP High register 15 + PT_VR0 = 74 // Vector register 0 + PT_VR1 = 75 // Vector register 1 + PT_VR2 = 76 // Vector register 2 + PT_VR3 = 77 // Vector register 3 + PT_VR4 = 78 // Vector register 4 + PT_VR5 = 79 // Vector register 5 + PT_VR6 = 80 // Vector register 6 + PT_VR7 = 81 // Vector register 7 + PT_VR8 = 82 // Vector register 8 + PT_VR9 = 83 // Vector register 9 + PT_VR10 = 84 // Vector register 10 + PT_VR11 = 85 // Vector register 11 + PT_VR12 = 86 // Vector register 12 + PT_VR13 = 87 // Vector register 13 + PT_VR14 = 88 // Vector register 14 + PT_VR15 = 89 // Vector register 15 + PT_VR16 = 90 // Vector register 16 + PT_VR17 = 91 // Vector register 17 + PT_VR18 = 92 // Vector register 18 + PT_VR19 = 93 // Vector register 19 + PT_VR20 = 94 // Vector register 20 + PT_VR21 = 95 // Vector register 21 + PT_VR22 = 96 // Vector register 22 + PT_VR23 = 97 // Vector register 23 + PT_VR24 = 98 // Vector register 24 + 
PT_VR25 = 99 // Vector register 25 + PT_VR26 = 100 // Vector register 26 + PT_VR27 = 101 // Vector register 27 + PT_VR28 = 102 // Vector register 28 + PT_VR29 = 103 // Vector register 29 + PT_VR30 = 104 // Vector register 30 + PT_VR31 = 105 // Vector register 31 + PT_PSWG = 106 // PSWG + PT_PSWG0 = 106 // Bytes 0-3 + PT_PSWG1 = 107 // Bytes 4-7 + PT_PSWG2 = 108 // Bytes 8-11 (IA high word) + PT_PSWG3 = 109 // Bytes 12-15 (IA low word) +) + +func Bpx4ptr(request int32, pid int32, addr unsafe.Pointer, data unsafe.Pointer, buffer unsafe.Pointer) (rv int32, rc int32, rn int32) { + var parms [8]unsafe.Pointer + parms[0] = unsafe.Pointer(&request) + parms[1] = unsafe.Pointer(&pid) + parms[2] = unsafe.Pointer(&addr) + parms[3] = unsafe.Pointer(&data) + parms[4] = unsafe.Pointer(&buffer) + parms[5] = unsafe.Pointer(&rv) + parms[6] = unsafe.Pointer(&rc) + parms[7] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4PTR) + return rv, rc, rn +} + +func copyU8(val uint8, dest []uint8) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} + +func copyU8Arr(src, dest []uint8) int { + if len(dest) < len(src) { + return 0 + } + for i, v := range src { + dest[i] = v + } + return len(src) +} + +func copyU16(val uint16, dest []uint16) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} + +func copyU32(val uint32, dest []uint32) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} + +func copyU32Arr(src, dest []uint32) int { + if len(dest) < len(src) { + return 0 + } + for i, v := range src { + dest[i] = v + } + return len(src) +} + +func copyU64(val uint64, dest []uint64) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} diff --git a/vendor/golang.org/x/sys/unix/bpxsvc_zos.s b/vendor/golang.org/x/sys/unix/bpxsvc_zos.s new file mode 100644 index 00000000..4bd4a179 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/bpxsvc_zos.s @@ -0,0 +1,192 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "go_asm.h" +#include "textflag.h" + +// function to call USS assembly language services +// +// doc: https://www.ibm.com/support/knowledgecenter/en/SSLTBW_3.1.0/com.ibm.zos.v3r1.bpxb100/bit64env.htm +// +// arg1 unsafe.Pointer array that ressembles an OS PLIST +// +// arg2 function offset as in +// doc: https://www.ibm.com/support/knowledgecenter/en/SSLTBW_3.1.0/com.ibm.zos.v3r1.bpxb100/bpx2cr_List_of_offsets.htm +// +// func bpxcall(plist []unsafe.Pointer, bpx_offset int64) + +TEXT ·bpxcall(SB), NOSPLIT|NOFRAME, $0 + MOVD plist_base+0(FP), R1 // r1 points to plist + MOVD bpx_offset+24(FP), R2 // r2 offset to BPX vector table + MOVD R14, R7 // save r14 + MOVD R15, R8 // save r15 + MOVWZ 16(R0), R9 + MOVWZ 544(R9), R9 + MOVWZ 24(R9), R9 // call vector in r9 + ADD R2, R9 // add offset to vector table + MOVWZ (R9), R9 // r9 points to entry point + BYTE $0x0D // BL R14,R9 --> basr r14,r9 + BYTE $0xE9 // clobbers 0,1,14,15 + MOVD R8, R15 // restore 15 + JMP R7 // return via saved return address + +// func A2e(arr [] byte) +// code page conversion from 819 to 1047 +TEXT ·A2e(SB), NOSPLIT|NOFRAME, $0 + MOVD arg_base+0(FP), R2 // pointer to arry of characters + MOVD arg_len+8(FP), R3 // count + XOR R0, R0 + XOR R1, R1 + BYTE $0xA7; BYTE $0x15; BYTE $0x00; BYTE $0x82 // BRAS 1,(2+(256/2)) + + // ASCII -> EBCDIC conversion table: + BYTE $0x00; BYTE $0x01; BYTE $0x02; BYTE $0x03 + BYTE $0x37; BYTE $0x2d; BYTE $0x2e; BYTE $0x2f + BYTE $0x16; BYTE $0x05; BYTE $0x15; BYTE $0x0b + BYTE $0x0c; BYTE $0x0d; BYTE $0x0e; BYTE $0x0f + BYTE $0x10; BYTE $0x11; BYTE $0x12; BYTE $0x13 + BYTE $0x3c; BYTE $0x3d; BYTE $0x32; BYTE $0x26 + BYTE $0x18; BYTE $0x19; BYTE $0x3f; BYTE $0x27 + BYTE $0x1c; BYTE $0x1d; BYTE $0x1e; BYTE $0x1f + BYTE $0x40; BYTE $0x5a; BYTE $0x7f; BYTE $0x7b + BYTE $0x5b; BYTE $0x6c; BYTE $0x50; BYTE $0x7d + BYTE $0x4d; BYTE $0x5d; BYTE $0x5c; BYTE $0x4e + BYTE $0x6b; BYTE $0x60; BYTE $0x4b; BYTE $0x61 + BYTE $0xf0; BYTE $0xf1; BYTE $0xf2; BYTE $0xf3 + BYTE $0xf4; BYTE $0xf5; BYTE $0xf6; BYTE $0xf7 + BYTE $0xf8; BYTE $0xf9; BYTE $0x7a; BYTE $0x5e + BYTE $0x4c; BYTE $0x7e; BYTE $0x6e; BYTE $0x6f + BYTE $0x7c; BYTE $0xc1; BYTE $0xc2; BYTE $0xc3 + BYTE $0xc4; BYTE $0xc5; BYTE $0xc6; BYTE $0xc7 + BYTE $0xc8; BYTE $0xc9; BYTE $0xd1; BYTE $0xd2 + BYTE $0xd3; BYTE $0xd4; BYTE $0xd5; BYTE $0xd6 + BYTE $0xd7; BYTE $0xd8; BYTE $0xd9; BYTE $0xe2 + BYTE $0xe3; BYTE $0xe4; BYTE $0xe5; BYTE $0xe6 + BYTE $0xe7; BYTE $0xe8; BYTE $0xe9; BYTE $0xad + BYTE $0xe0; BYTE $0xbd; BYTE $0x5f; BYTE $0x6d + BYTE $0x79; BYTE $0x81; BYTE $0x82; BYTE $0x83 + BYTE $0x84; BYTE $0x85; BYTE $0x86; BYTE $0x87 + BYTE $0x88; BYTE $0x89; BYTE $0x91; BYTE $0x92 + BYTE $0x93; BYTE $0x94; BYTE $0x95; BYTE $0x96 + BYTE $0x97; BYTE $0x98; BYTE $0x99; BYTE $0xa2 + BYTE $0xa3; BYTE $0xa4; BYTE $0xa5; BYTE $0xa6 + BYTE $0xa7; BYTE $0xa8; BYTE $0xa9; BYTE $0xc0 + BYTE $0x4f; BYTE $0xd0; BYTE $0xa1; BYTE $0x07 + BYTE $0x20; BYTE $0x21; BYTE $0x22; BYTE $0x23 + BYTE $0x24; BYTE $0x25; BYTE $0x06; BYTE $0x17 + BYTE $0x28; BYTE $0x29; BYTE $0x2a; BYTE $0x2b + BYTE $0x2c; BYTE $0x09; BYTE $0x0a; BYTE $0x1b + BYTE $0x30; BYTE $0x31; BYTE $0x1a; BYTE $0x33 + BYTE $0x34; BYTE $0x35; BYTE $0x36; BYTE $0x08 + BYTE $0x38; BYTE $0x39; BYTE $0x3a; BYTE $0x3b + BYTE $0x04; BYTE $0x14; BYTE $0x3e; BYTE $0xff + BYTE $0x41; BYTE $0xaa; BYTE $0x4a; BYTE $0xb1 + BYTE $0x9f; BYTE $0xb2; BYTE $0x6a; BYTE $0xb5 + BYTE $0xbb; BYTE $0xb4; BYTE $0x9a; BYTE $0x8a + BYTE $0xb0; BYTE $0xca; BYTE $0xaf; BYTE $0xbc + BYTE $0x90; BYTE $0x8f; BYTE $0xea; BYTE 
$0xfa + BYTE $0xbe; BYTE $0xa0; BYTE $0xb6; BYTE $0xb3 + BYTE $0x9d; BYTE $0xda; BYTE $0x9b; BYTE $0x8b + BYTE $0xb7; BYTE $0xb8; BYTE $0xb9; BYTE $0xab + BYTE $0x64; BYTE $0x65; BYTE $0x62; BYTE $0x66 + BYTE $0x63; BYTE $0x67; BYTE $0x9e; BYTE $0x68 + BYTE $0x74; BYTE $0x71; BYTE $0x72; BYTE $0x73 + BYTE $0x78; BYTE $0x75; BYTE $0x76; BYTE $0x77 + BYTE $0xac; BYTE $0x69; BYTE $0xed; BYTE $0xee + BYTE $0xeb; BYTE $0xef; BYTE $0xec; BYTE $0xbf + BYTE $0x80; BYTE $0xfd; BYTE $0xfe; BYTE $0xfb + BYTE $0xfc; BYTE $0xba; BYTE $0xae; BYTE $0x59 + BYTE $0x44; BYTE $0x45; BYTE $0x42; BYTE $0x46 + BYTE $0x43; BYTE $0x47; BYTE $0x9c; BYTE $0x48 + BYTE $0x54; BYTE $0x51; BYTE $0x52; BYTE $0x53 + BYTE $0x58; BYTE $0x55; BYTE $0x56; BYTE $0x57 + BYTE $0x8c; BYTE $0x49; BYTE $0xcd; BYTE $0xce + BYTE $0xcb; BYTE $0xcf; BYTE $0xcc; BYTE $0xe1 + BYTE $0x70; BYTE $0xdd; BYTE $0xde; BYTE $0xdb + BYTE $0xdc; BYTE $0x8d; BYTE $0x8e; BYTE $0xdf + +retry: + WORD $0xB9931022 // TROO 2,2,b'0001' + BVS retry + RET + +// func e2a(arr [] byte) +// code page conversion from 1047 to 819 +TEXT ·E2a(SB), NOSPLIT|NOFRAME, $0 + MOVD arg_base+0(FP), R2 // pointer to arry of characters + MOVD arg_len+8(FP), R3 // count + XOR R0, R0 + XOR R1, R1 + BYTE $0xA7; BYTE $0x15; BYTE $0x00; BYTE $0x82 // BRAS 1,(2+(256/2)) + + // EBCDIC -> ASCII conversion table: + BYTE $0x00; BYTE $0x01; BYTE $0x02; BYTE $0x03 + BYTE $0x9c; BYTE $0x09; BYTE $0x86; BYTE $0x7f + BYTE $0x97; BYTE $0x8d; BYTE $0x8e; BYTE $0x0b + BYTE $0x0c; BYTE $0x0d; BYTE $0x0e; BYTE $0x0f + BYTE $0x10; BYTE $0x11; BYTE $0x12; BYTE $0x13 + BYTE $0x9d; BYTE $0x0a; BYTE $0x08; BYTE $0x87 + BYTE $0x18; BYTE $0x19; BYTE $0x92; BYTE $0x8f + BYTE $0x1c; BYTE $0x1d; BYTE $0x1e; BYTE $0x1f + BYTE $0x80; BYTE $0x81; BYTE $0x82; BYTE $0x83 + BYTE $0x84; BYTE $0x85; BYTE $0x17; BYTE $0x1b + BYTE $0x88; BYTE $0x89; BYTE $0x8a; BYTE $0x8b + BYTE $0x8c; BYTE $0x05; BYTE $0x06; BYTE $0x07 + BYTE $0x90; BYTE $0x91; BYTE $0x16; BYTE $0x93 + BYTE $0x94; BYTE $0x95; BYTE $0x96; BYTE $0x04 + BYTE $0x98; BYTE $0x99; BYTE $0x9a; BYTE $0x9b + BYTE $0x14; BYTE $0x15; BYTE $0x9e; BYTE $0x1a + BYTE $0x20; BYTE $0xa0; BYTE $0xe2; BYTE $0xe4 + BYTE $0xe0; BYTE $0xe1; BYTE $0xe3; BYTE $0xe5 + BYTE $0xe7; BYTE $0xf1; BYTE $0xa2; BYTE $0x2e + BYTE $0x3c; BYTE $0x28; BYTE $0x2b; BYTE $0x7c + BYTE $0x26; BYTE $0xe9; BYTE $0xea; BYTE $0xeb + BYTE $0xe8; BYTE $0xed; BYTE $0xee; BYTE $0xef + BYTE $0xec; BYTE $0xdf; BYTE $0x21; BYTE $0x24 + BYTE $0x2a; BYTE $0x29; BYTE $0x3b; BYTE $0x5e + BYTE $0x2d; BYTE $0x2f; BYTE $0xc2; BYTE $0xc4 + BYTE $0xc0; BYTE $0xc1; BYTE $0xc3; BYTE $0xc5 + BYTE $0xc7; BYTE $0xd1; BYTE $0xa6; BYTE $0x2c + BYTE $0x25; BYTE $0x5f; BYTE $0x3e; BYTE $0x3f + BYTE $0xf8; BYTE $0xc9; BYTE $0xca; BYTE $0xcb + BYTE $0xc8; BYTE $0xcd; BYTE $0xce; BYTE $0xcf + BYTE $0xcc; BYTE $0x60; BYTE $0x3a; BYTE $0x23 + BYTE $0x40; BYTE $0x27; BYTE $0x3d; BYTE $0x22 + BYTE $0xd8; BYTE $0x61; BYTE $0x62; BYTE $0x63 + BYTE $0x64; BYTE $0x65; BYTE $0x66; BYTE $0x67 + BYTE $0x68; BYTE $0x69; BYTE $0xab; BYTE $0xbb + BYTE $0xf0; BYTE $0xfd; BYTE $0xfe; BYTE $0xb1 + BYTE $0xb0; BYTE $0x6a; BYTE $0x6b; BYTE $0x6c + BYTE $0x6d; BYTE $0x6e; BYTE $0x6f; BYTE $0x70 + BYTE $0x71; BYTE $0x72; BYTE $0xaa; BYTE $0xba + BYTE $0xe6; BYTE $0xb8; BYTE $0xc6; BYTE $0xa4 + BYTE $0xb5; BYTE $0x7e; BYTE $0x73; BYTE $0x74 + BYTE $0x75; BYTE $0x76; BYTE $0x77; BYTE $0x78 + BYTE $0x79; BYTE $0x7a; BYTE $0xa1; BYTE $0xbf + BYTE $0xd0; BYTE $0x5b; BYTE $0xde; BYTE $0xae + BYTE $0xac; BYTE $0xa3; BYTE $0xa5; BYTE $0xb7 + BYTE 
$0xa9; BYTE $0xa7; BYTE $0xb6; BYTE $0xbc + BYTE $0xbd; BYTE $0xbe; BYTE $0xdd; BYTE $0xa8 + BYTE $0xaf; BYTE $0x5d; BYTE $0xb4; BYTE $0xd7 + BYTE $0x7b; BYTE $0x41; BYTE $0x42; BYTE $0x43 + BYTE $0x44; BYTE $0x45; BYTE $0x46; BYTE $0x47 + BYTE $0x48; BYTE $0x49; BYTE $0xad; BYTE $0xf4 + BYTE $0xf6; BYTE $0xf2; BYTE $0xf3; BYTE $0xf5 + BYTE $0x7d; BYTE $0x4a; BYTE $0x4b; BYTE $0x4c + BYTE $0x4d; BYTE $0x4e; BYTE $0x4f; BYTE $0x50 + BYTE $0x51; BYTE $0x52; BYTE $0xb9; BYTE $0xfb + BYTE $0xfc; BYTE $0xf9; BYTE $0xfa; BYTE $0xff + BYTE $0x5c; BYTE $0xf7; BYTE $0x53; BYTE $0x54 + BYTE $0x55; BYTE $0x56; BYTE $0x57; BYTE $0x58 + BYTE $0x59; BYTE $0x5a; BYTE $0xb2; BYTE $0xd4 + BYTE $0xd6; BYTE $0xd2; BYTE $0xd3; BYTE $0xd5 + BYTE $0x30; BYTE $0x31; BYTE $0x32; BYTE $0x33 + BYTE $0x34; BYTE $0x35; BYTE $0x36; BYTE $0x37 + BYTE $0x38; BYTE $0x39; BYTE $0xb3; BYTE $0xdb + BYTE $0xdc; BYTE $0xd9; BYTE $0xda; BYTE $0x9f + +retry: + WORD $0xB9931022 // TROO 2,2,b'0001' + BVS retry + RET diff --git a/vendor/golang.org/x/sys/unix/epoll_zos.go b/vendor/golang.org/x/sys/unix/epoll_zos.go deleted file mode 100644 index 7753fdde..00000000 --- a/vendor/golang.org/x/sys/unix/epoll_zos.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build zos && s390x - -package unix - -import ( - "sync" -) - -// This file simulates epoll on z/OS using poll. - -// Analogous to epoll_event on Linux. -// TODO(neeilan): Pad is because the Linux kernel expects a 96-bit struct. We never pass this to the kernel; remove? -type EpollEvent struct { - Events uint32 - Fd int32 - Pad int32 -} - -const ( - EPOLLERR = 0x8 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDNORM = 0x40 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - // The following constants are part of the epoll API, but represent - // currently unsupported functionality on z/OS. - // EPOLL_CLOEXEC = 0x80000 - // EPOLLET = 0x80000000 - // EPOLLONESHOT = 0x40000000 - // EPOLLRDHUP = 0x2000 // Typically used with edge-triggered notis - // EPOLLEXCLUSIVE = 0x10000000 // Exclusive wake-up mode - // EPOLLWAKEUP = 0x20000000 // Relies on Linux's BLOCK_SUSPEND capability -) - -// TODO(neeilan): We can eliminate these epToPoll / pToEpoll calls by using identical mask values for POLL/EPOLL -// constants where possible The lower 16 bits of epoll events (uint32) can fit any system poll event (int16). - -// epToPollEvt converts epoll event field to poll equivalent. -// In epoll, Events is a 32-bit field, while poll uses 16 bits. -func epToPollEvt(events uint32) int16 { - var ep2p = map[uint32]int16{ - EPOLLIN: POLLIN, - EPOLLOUT: POLLOUT, - EPOLLHUP: POLLHUP, - EPOLLPRI: POLLPRI, - EPOLLERR: POLLERR, - } - - var pollEvts int16 = 0 - for epEvt, pEvt := range ep2p { - if (events & epEvt) != 0 { - pollEvts |= pEvt - } - } - - return pollEvts -} - -// pToEpollEvt converts 16 bit poll event bitfields to 32-bit epoll event fields. -func pToEpollEvt(revents int16) uint32 { - var p2ep = map[int16]uint32{ - POLLIN: EPOLLIN, - POLLOUT: EPOLLOUT, - POLLHUP: EPOLLHUP, - POLLPRI: EPOLLPRI, - POLLERR: EPOLLERR, - } - - var epollEvts uint32 = 0 - for pEvt, epEvt := range p2ep { - if (revents & pEvt) != 0 { - epollEvts |= epEvt - } - } - - return epollEvts -} - -// Per-process epoll implementation. 
-type epollImpl struct { - mu sync.Mutex - epfd2ep map[int]*eventPoll - nextEpfd int -} - -// eventPoll holds a set of file descriptors being watched by the process. A process can have multiple epoll instances. -// On Linux, this is an in-kernel data structure accessed through a fd. -type eventPoll struct { - mu sync.Mutex - fds map[int]*EpollEvent -} - -// epoll impl for this process. -var impl epollImpl = epollImpl{ - epfd2ep: make(map[int]*eventPoll), - nextEpfd: 0, -} - -func (e *epollImpl) epollcreate(size int) (epfd int, err error) { - e.mu.Lock() - defer e.mu.Unlock() - epfd = e.nextEpfd - e.nextEpfd++ - - e.epfd2ep[epfd] = &eventPoll{ - fds: make(map[int]*EpollEvent), - } - return epfd, nil -} - -func (e *epollImpl) epollcreate1(flag int) (fd int, err error) { - return e.epollcreate(4) -} - -func (e *epollImpl) epollctl(epfd int, op int, fd int, event *EpollEvent) (err error) { - e.mu.Lock() - defer e.mu.Unlock() - - ep, ok := e.epfd2ep[epfd] - if !ok { - - return EBADF - } - - switch op { - case EPOLL_CTL_ADD: - // TODO(neeilan): When we make epfds and fds disjoint, detect epoll - // loops here (instances watching each other) and return ELOOP. - if _, ok := ep.fds[fd]; ok { - return EEXIST - } - ep.fds[fd] = event - case EPOLL_CTL_MOD: - if _, ok := ep.fds[fd]; !ok { - return ENOENT - } - ep.fds[fd] = event - case EPOLL_CTL_DEL: - if _, ok := ep.fds[fd]; !ok { - return ENOENT - } - delete(ep.fds, fd) - - } - return nil -} - -// Must be called while holding ep.mu -func (ep *eventPoll) getFds() []int { - fds := make([]int, len(ep.fds)) - for fd := range ep.fds { - fds = append(fds, fd) - } - return fds -} - -func (e *epollImpl) epollwait(epfd int, events []EpollEvent, msec int) (n int, err error) { - e.mu.Lock() // in [rare] case of concurrent epollcreate + epollwait - ep, ok := e.epfd2ep[epfd] - - if !ok { - e.mu.Unlock() - return 0, EBADF - } - - pollfds := make([]PollFd, 4) - for fd, epollevt := range ep.fds { - pollfds = append(pollfds, PollFd{Fd: int32(fd), Events: epToPollEvt(epollevt.Events)}) - } - e.mu.Unlock() - - n, err = Poll(pollfds, msec) - if err != nil { - return n, err - } - - i := 0 - for _, pFd := range pollfds { - if pFd.Revents != 0 { - events[i] = EpollEvent{Fd: pFd.Fd, Events: pToEpollEvt(pFd.Revents)} - i++ - } - - if i == n { - break - } - } - - return n, nil -} - -func EpollCreate(size int) (fd int, err error) { - return impl.epollcreate(size) -} - -func EpollCreate1(flag int) (fd int, err error) { - return impl.epollcreate1(flag) -} - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - return impl.epollctl(epfd, op, fd, event) -} - -// Because EpollWait mutates events, the caller is expected to coordinate -// concurrent access if calling with the same epfd from multiple goroutines. -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - return impl.epollwait(epfd, events, msec) -} diff --git a/vendor/golang.org/x/sys/unix/fstatfs_zos.go b/vendor/golang.org/x/sys/unix/fstatfs_zos.go deleted file mode 100644 index c8bde601..00000000 --- a/vendor/golang.org/x/sys/unix/fstatfs_zos.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build zos && s390x - -package unix - -import ( - "unsafe" -) - -// This file simulates fstatfs on z/OS using fstatvfs and w_getmntent. 
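Reviewer note (illustrative only, not part of the diff): the shim deleted below mapped fstatvfs results into a Linux-style Statfs_t before probing w_getmntent for the filesystem type. A minimal sketch of that field mapping, mirroring the assignments in the deleted Fstatfs and assuming a z/OS build of golang.org/x/sys/unix:

	package main

	import "golang.org/x/sys/unix"

	func statfsFromStatvfs(fd int) (unix.Statfs_t, error) {
		var sv unix.Statvfs_t
		var st unix.Statfs_t
		if err := unix.Fstatvfs(fd, &sv); err != nil {
			return st, err
		}
		// Only the fields with direct statvfs equivalents; Type still
		// needs a separate mount-entry lookup, as the deleted code shows.
		st.Bsize = sv.Bsize
		st.Blocks = sv.Blocks
		st.Bfree = sv.Bfree
		st.Bavail = sv.Bavail
		st.Files = sv.Files
		st.Ffree = sv.Ffree
		st.Fsid = sv.Fsid
		st.Namelen = sv.Namemax
		st.Frsize = sv.Frsize
		st.Flags = sv.Flag
		return st, nil
	}
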
- -func Fstatfs(fd int, stat *Statfs_t) (err error) { - var stat_v Statvfs_t - err = Fstatvfs(fd, &stat_v) - if err == nil { - // populate stat - stat.Type = 0 - stat.Bsize = stat_v.Bsize - stat.Blocks = stat_v.Blocks - stat.Bfree = stat_v.Bfree - stat.Bavail = stat_v.Bavail - stat.Files = stat_v.Files - stat.Ffree = stat_v.Ffree - stat.Fsid = stat_v.Fsid - stat.Namelen = stat_v.Namemax - stat.Frsize = stat_v.Frsize - stat.Flags = stat_v.Flag - for passn := 0; passn < 5; passn++ { - switch passn { - case 0: - err = tryGetmntent64(stat) - break - case 1: - err = tryGetmntent128(stat) - break - case 2: - err = tryGetmntent256(stat) - break - case 3: - err = tryGetmntent512(stat) - break - case 4: - err = tryGetmntent1024(stat) - break - default: - break - } - //proceed to return if: err is nil (found), err is nonnil but not ERANGE (another error occurred) - if err == nil || err != nil && err != ERANGE { - break - } - } - } - return err -} - -func tryGetmntent64(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [64]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent128(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [128]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent256(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [256]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent512(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [512]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent1024(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [1024]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), 
buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index fdcaa974..4ed2e488 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -263,6 +263,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -549,6 +550,7 @@ ccflags="$@" $2 !~ "NLA_TYPE_MASK" && $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || + $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ || $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || diff --git a/vendor/golang.org/x/sys/unix/mmap_nomremap.go b/vendor/golang.org/x/sys/unix/mmap_nomremap.go index 4b68e597..7f602ffd 100644 --- a/vendor/golang.org/x/sys/unix/mmap_nomremap.go +++ b/vendor/golang.org/x/sys/unix/mmap_nomremap.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris +//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris || zos package unix diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go index fd45fe52..3a5e776f 100644 --- a/vendor/golang.org/x/sys/unix/mremap.go +++ b/vendor/golang.org/x/sys/unix/mremap.go @@ -50,3 +50,8 @@ func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data [ func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { return mapper.Mremap(oldData, newLength, flags) } + +func MremapPtr(oldAddr unsafe.Pointer, oldSize uintptr, newAddr unsafe.Pointer, newSize uintptr, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mremap(uintptr(oldAddr), oldSize, newSize, flags, uintptr(newAddr)) + return unsafe.Pointer(xaddr), err +} diff --git a/vendor/golang.org/x/sys/unix/pagesize_unix.go b/vendor/golang.org/x/sys/unix/pagesize_unix.go index 4d0a3430..0482408d 100644 --- a/vendor/golang.org/x/sys/unix/pagesize_unix.go +++ b/vendor/golang.org/x/sys/unix/pagesize_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos // For Unix, get the pagesize from the runtime. diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go index 130398b6..b903c006 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin +//go:build darwin || zos package unix diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_zos.go b/vendor/golang.org/x/sys/unix/sockcmsg_zos.go new file mode 100644 index 00000000..3e53dbc0 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/sockcmsg_zos.go @@ -0,0 +1,58 @@ +// Copyright 2024 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Socket control messages + +package unix + +import "unsafe" + +// UnixCredentials encodes credentials into a socket control message +// for sending to another process. This can be used for +// authentication. +func UnixCredentials(ucred *Ucred) []byte { + b := make([]byte, CmsgSpace(SizeofUcred)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_SOCKET + h.Type = SCM_CREDENTIALS + h.SetLen(CmsgLen(SizeofUcred)) + *(*Ucred)(h.data(0)) = *ucred + return b +} + +// ParseUnixCredentials decodes a socket control message that contains +// credentials in a Ucred structure. To receive such a message, the +// SO_PASSCRED option must be enabled on the socket. +func ParseUnixCredentials(m *SocketControlMessage) (*Ucred, error) { + if m.Header.Level != SOL_SOCKET { + return nil, EINVAL + } + if m.Header.Type != SCM_CREDENTIALS { + return nil, EINVAL + } + ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0])) + return &ucred, nil +} + +// PktInfo4 encodes Inet4Pktinfo into a socket control message of type IP_PKTINFO. +func PktInfo4(info *Inet4Pktinfo) []byte { + b := make([]byte, CmsgSpace(SizeofInet4Pktinfo)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_IP + h.Type = IP_PKTINFO + h.SetLen(CmsgLen(SizeofInet4Pktinfo)) + *(*Inet4Pktinfo)(h.data(0)) = *info + return b +} + +// PktInfo6 encodes Inet6Pktinfo into a socket control message of type IPV6_PKTINFO. +func PktInfo6(info *Inet6Pktinfo) []byte { + b := make([]byte, CmsgSpace(SizeofInet6Pktinfo)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_IPV6 + h.Type = IPV6_PKTINFO + h.SetLen(CmsgLen(SizeofInet6Pktinfo)) + *(*Inet6Pktinfo)(h.data(0)) = *info + return b +} diff --git a/vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s b/vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s new file mode 100644 index 00000000..3c4f33cb --- /dev/null +++ b/vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s @@ -0,0 +1,75 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x && gc + +#include "textflag.h" + +// provide the address of function variable to be fixed up. 
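Reviewer note (illustrative only, not part of the diff): each TEXT stub below simply returns the address of a package-level function variable (·Pipe2, ·Flock, and so on) so that startup code can patch an implementation through that pointer once symbol probing has decided what the system provides. A minimal Go-side sketch of the same pattern, with hypothetical names:

	package main

	// Foo is a function variable; callers always invoke it indirectly.
	var Foo func(x int) int

	// getFooAddr stands in for an assembly stub like ·getPipe2Addr above,
	// which hands &Foo to the fixup code.
	func getFooAddr() *func(x int) int { return &Foo }

	func init() {
		// Fixup: store the implementation selected at startup.
		*getFooAddr() = func(x int) int { return x + 1 }
	}

	func main() { _ = Foo(41) }
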
+ +TEXT ·getPipe2Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Pipe2(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_FlockAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Flock(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_GetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Getxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_NanosleepAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Nanosleep(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_SetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Setxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_Wait4Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Wait4(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_MountAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mount(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_UnmountAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Unmount(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_UtimesNanoAtAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·UtimesNanoAt(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_UtimesNanoAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·UtimesNano(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_MkfifoatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mkfifoat(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_ChtagAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Chtag(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_ReadlinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Readlinkat(SB), R8 + MOVD R8, ret+0(FP) + RET + diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 59542a89..4cc7b005 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -542,6 +542,18 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { } } +//sys pthread_chdir_np(path string) (err error) + +func PthreadChdir(path string) (err error) { + return pthread_chdir_np(path) +} + +//sys pthread_fchdir_np(fd int) (err error) + +func PthreadFchdir(fd int) (err error) { + return pthread_fchdir_np(fd) +} + //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index 16dc6993..2f0fa76e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin && go1.12 +//go:build darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 64d1bb4d..2b57e0f7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -13,6 +13,7 @@ package unix import ( + "errors" "sync" "unsafe" ) @@ -169,25 +170,26 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) - if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil { + // Suppress ENOMEM errors to be compatible with the C library __xuname() implementation. 
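    // Context: FreeBSD's sysctl(2) copies out as much data as fits and then
    // fails with ENOMEM when the destination buffer is too small; libc's
    // __xuname() keeps the truncated value, which is the behaviour the
    // relaxed checks below now match.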
+ if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_HOSTNAME} n = unsafe.Sizeof(uname.Nodename) - if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_OSRELEASE} n = unsafe.Sizeof(uname.Release) - if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_VERSION} n = unsafe.Sizeof(uname.Version) - if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } @@ -205,7 +207,7 @@ func Uname(uname *Utsname) error { mib = []_C_int{CTL_HW, HW_MACHINE} n = unsafe.Sizeof(uname.Machine) - if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 0f85e29e..5682e262 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1849,6 +1849,105 @@ func Dup2(oldfd, newfd int) error { //sys Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) //sys Fsopen(fsName string, flags int) (fd int, err error) //sys Fspick(dirfd int, pathName string, flags int) (fd int, err error) + +//sys fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) + +func fsconfigCommon(fd int, cmd uint, key string, value *byte, aux int) (err error) { + var keyp *byte + if keyp, err = BytePtrFromString(key); err != nil { + return + } + return fsconfig(fd, cmd, keyp, value, aux) +} + +// FsconfigSetFlag is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FLAG. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +func FsconfigSetFlag(fd int, key string) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FLAG, key, nil, 0) +} + +// FsconfigSetString is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_STRING. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is the parameter value to set. +func FsconfigSetString(fd int, key string, value string) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(value); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_STRING, key, valuep, 0) +} + +// FsconfigSetBinary is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_BINARY. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is the parameter value to set. +func FsconfigSetBinary(fd int, key string, value []byte) (err error) { + if len(value) == 0 { + return EINVAL + } + return fsconfigCommon(fd, FSCONFIG_SET_BINARY, key, &value[0], len(value)) +} + +// FsconfigSetPath is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// path is a non-empty path for specified key. +// atfd is a file descriptor at which to start lookup from or AT_FDCWD. 
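    // As with the other *at calls, passing AT_FDCWD for atfd resolves path
    // relative to the current working directory.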
+func FsconfigSetPath(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH, key, valuep, atfd) +} + +// FsconfigSetPathEmpty is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH_EMPTY. The same as +// FconfigSetPath but with AT_PATH_EMPTY implied. +func FsconfigSetPathEmpty(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH_EMPTY, key, valuep, atfd) +} + +// FsconfigSetFd is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FD. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is a file descriptor to be assigned to specified key. +func FsconfigSetFd(fd int, key string, value int) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FD, key, nil, value) +} + +// FsconfigCreate is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_CREATE. +// +// fd is the filesystem context to act upon. +func FsconfigCreate(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_CREATE, nil, nil, 0) +} + +// FsconfigReconfigure is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_RECONFIGURE. +// +// fd is the filesystem context to act upon. +func FsconfigReconfigure(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_RECONFIGURE, nil, nil, 0) +} + //sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64 //sysnb Getpgid(pid int) (pgid int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 77081de8..4e92e5aa 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -154,6 +154,15 @@ func Munmap(b []byte) (err error) { return mapper.Munmap(b) } +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset) + return unsafe.Pointer(xaddr), err +} + +func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) { + return mapper.munmap(uintptr(addr), length) +} + func Read(fd int, p []byte) (n int, err error) { n, err = read(fd, p) if raceenabled { diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index b473038c..312ae6ac 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -4,11 +4,21 @@ //go:build zos && s390x +// Many of the following syscalls are not available on all versions of z/OS. +// Some missing calls have legacy implementations/simulations but others +// will be missing completely. To achieve consistent failing behaviour on +// legacy systems, we first test the function pointer via a safeloading +// mechanism to see if the function exists on a given system. Then execution +// is branched to either continue the function call, or return an error. 
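Before the z/OS file continues: a short aside on the Linux Fsconfig* wrappers added a few hunks up. They are thin shims over fsconfig(2) and chain with Fsopen/Fsmount/MoveMount to drive the new-style mount API end to end. A minimal sketch of the intended sequence, assuming Linux 5.2+ with the new mount API; the tmpfs parameters and the /mnt/scratch target are illustrative placeholders, not anything this patch prescribes:

    package mountdemo

    import "golang.org/x/sys/unix"

    func mountScratchTmpfs() error {
        // Open a filesystem context for tmpfs.
        fsfd, err := unix.Fsopen("tmpfs", unix.FSOPEN_CLOEXEC)
        if err != nil {
            return err
        }
        defer unix.Close(fsfd)

        // FSCONFIG_SET_STRING: set a parameter on the context.
        if err := unix.FsconfigSetString(fsfd, "size", "16M"); err != nil {
            return err
        }
        // FSCONFIG_CMD_CREATE: instantiate the superblock.
        if err := unix.FsconfigCreate(fsfd); err != nil {
            return err
        }

        // Turn the configured context into a detached mount, then attach it.
        mfd, err := unix.Fsmount(fsfd, unix.FSMOUNT_CLOEXEC, 0)
        if err != nil {
            return err
        }
        defer unix.Close(mfd)
        return unix.MoveMount(mfd, "", unix.AT_FDCWD, "/mnt/scratch",
            unix.MOVE_MOUNT_F_EMPTY_PATH)
    }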
+ package unix import ( "bytes" "fmt" + "os" + "reflect" + "regexp" "runtime" "sort" "strings" @@ -17,17 +27,205 @@ import ( "unsafe" ) +//go:noescape +func initZosLibVec() + +//go:noescape +func GetZosLibVec() uintptr + +func init() { + initZosLibVec() + r0, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS_____GETENV_A<<4, uintptr(unsafe.Pointer(&([]byte("__ZOS_XSYSTRACE\x00"))[0]))) + if r0 != 0 { + n, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___ATOI_A<<4, r0) + ZosTraceLevel = int(n) + r0, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS_____GETENV_A<<4, uintptr(unsafe.Pointer(&([]byte("__ZOS_XSYSTRACEFD\x00"))[0]))) + if r0 != 0 { + fd, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___ATOI_A<<4, r0) + f := os.NewFile(fd, "zostracefile") + if f != nil { + ZosTracefile = f + } + } + + } +} + +//go:noescape +func CallLeFuncWithErr(funcdesc uintptr, parms ...uintptr) (ret, errno2 uintptr, err Errno) + +//go:noescape +func CallLeFuncWithPtrReturn(funcdesc uintptr, parms ...uintptr) (ret, errno2 uintptr, err Errno) + +// ------------------------------- +// pointer validity test +// good pointer returns 0 +// bad pointer returns 1 +// +//go:nosplit +func ptrtest(uintptr) uint64 + +// Load memory at ptr location with error handling if the location is invalid +// +//go:noescape +func safeload(ptr uintptr) (value uintptr, error uintptr) + const ( - O_CLOEXEC = 0 // Dummy value (not supported). - AF_LOCAL = AF_UNIX // AF_LOCAL is an alias for AF_UNIX + entrypointLocationOffset = 8 // From function descriptor + + xplinkEyecatcher = 0x00c300c500c500f1 // ".C.E.E.1" + eyecatcherOffset = 16 // From function entrypoint (negative) + ppa1LocationOffset = 8 // From function entrypoint (negative) + + nameLenOffset = 0x14 // From PPA1 start + nameOffset = 0x16 // From PPA1 start ) -func syscall_syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) -func syscall_rawsyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) -func syscall_syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscall_rawsyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscall_syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) -func syscall_rawsyscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) +func getPpaOffset(funcptr uintptr) int64 { + entrypoint, err := safeload(funcptr + entrypointLocationOffset) + if err != 0 { + return -1 + } + + // XPLink functions have ".C.E.E.1" as the first 8 bytes (EBCDIC) + val, err := safeload(entrypoint - eyecatcherOffset) + if err != 0 { + return -1 + } + if val != xplinkEyecatcher { + return -1 + } + + ppaoff, err := safeload(entrypoint - ppa1LocationOffset) + if err != 0 { + return -1 + } + + ppaoff >>= 32 + return int64(ppaoff) +} + +//------------------------------- +// function descriptor pointer validity test +// good pointer returns 0 +// bad pointer returns 1 + +// TODO: currently mksyscall_zos_s390x.go generate empty string for funcName +// have correct funcName pass to the funcptrtest function +func funcptrtest(funcptr uintptr, funcName string) uint64 { + entrypoint, err := safeload(funcptr + entrypointLocationOffset) + if err != 0 { + return 1 + } + + ppaoff := getPpaOffset(funcptr) + if ppaoff == -1 { + return 1 + } + + // PPA1 offset value is from the start of the entire function block, not the entrypoint + ppa1 := (entrypoint - eyecatcherOffset) + uintptr(ppaoff) + + nameLen, err := safeload(ppa1 + nameLenOffset) + if err != 
0 { + return 1 + } + + nameLen >>= 48 + if nameLen > 128 { + return 1 + } + + // no function name input to argument end here + if funcName == "" { + return 0 + } + + var funcname [128]byte + for i := 0; i < int(nameLen); i += 8 { + v, err := safeload(ppa1 + nameOffset + uintptr(i)) + if err != 0 { + return 1 + } + funcname[i] = byte(v >> 56) + funcname[i+1] = byte(v >> 48) + funcname[i+2] = byte(v >> 40) + funcname[i+3] = byte(v >> 32) + funcname[i+4] = byte(v >> 24) + funcname[i+5] = byte(v >> 16) + funcname[i+6] = byte(v >> 8) + funcname[i+7] = byte(v) + } + + runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, // __e2a_l + []uintptr{uintptr(unsafe.Pointer(&funcname[0])), nameLen}) + + name := string(funcname[:nameLen]) + if name != funcName { + return 1 + } + + return 0 +} + +// For detection of capabilities on a system. +// Is function descriptor f a valid function? +func isValidLeFunc(f uintptr) error { + ret := funcptrtest(f, "") + if ret != 0 { + return fmt.Errorf("Bad pointer, not an LE function ") + } + return nil +} + +// Retrieve function name from descriptor +func getLeFuncName(f uintptr) (string, error) { + // assume it has been checked, only check ppa1 validity here + entry := ((*[2]uintptr)(unsafe.Pointer(f)))[1] + preamp := ((*[4]uint32)(unsafe.Pointer(entry - eyecatcherOffset))) + + offsetPpa1 := preamp[2] + if offsetPpa1 > 0x0ffff { + return "", fmt.Errorf("PPA1 offset seems too big 0x%x\n", offsetPpa1) + } + + ppa1 := uintptr(unsafe.Pointer(preamp)) + uintptr(offsetPpa1) + res := ptrtest(ppa1) + if res != 0 { + return "", fmt.Errorf("PPA1 address not valid") + } + + size := *(*uint16)(unsafe.Pointer(ppa1 + nameLenOffset)) + if size > 128 { + return "", fmt.Errorf("Function name seems too long, length=%d\n", size) + } + + var name [128]byte + funcname := (*[128]byte)(unsafe.Pointer(ppa1 + nameOffset)) + copy(name[0:size], funcname[0:size]) + + runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, // __e2a_l + []uintptr{uintptr(unsafe.Pointer(&name[0])), uintptr(size)}) + + return string(name[:size]), nil +} + +// Check z/OS version +func zosLeVersion() (version, release uint32) { + p1 := (*(*uintptr)(unsafe.Pointer(uintptr(1208)))) >> 32 + p1 = *(*uintptr)(unsafe.Pointer(uintptr(p1 + 88))) + p1 = *(*uintptr)(unsafe.Pointer(uintptr(p1 + 8))) + p1 = *(*uintptr)(unsafe.Pointer(uintptr(p1 + 984))) + vrm := *(*uint32)(unsafe.Pointer(p1 + 80)) + version = (vrm & 0x00ff0000) >> 16 + release = (vrm & 0x0000ff00) >> 8 + return +} + +// returns a zos C FILE * for stdio fd 0, 1, 2 +func ZosStdioFilep(fd int32) uintptr { + return uintptr(*(*uint64)(unsafe.Pointer(uintptr(*(*uint64)(unsafe.Pointer(uintptr(*(*uint64)(unsafe.Pointer(uintptr(uint64(*(*uint32)(unsafe.Pointer(uintptr(1208)))) + 80))) + uint64((fd+2)<<3)))))))) +} func copyStat(stat *Stat_t, statLE *Stat_LE_t) { stat.Dev = uint64(statLE.Dev) @@ -65,6 +263,21 @@ func (d *Dirent) NameString() string { } } +func DecodeData(dest []byte, sz int, val uint64) { + for i := 0; i < sz; i++ { + dest[sz-1-i] = byte((val >> (uint64(i * 8))) & 0xff) + } +} + +func EncodeData(data []byte) uint64 { + var value uint64 + sz := len(data) + for i := 0; i < sz; i++ { + value |= uint64(data[i]) << uint64(((sz - i - 1) * 8)) + } + return value +} + func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { if sa.Port < 0 || sa.Port > 0xFFFF { return nil, 0, EINVAL @@ -74,7 +287,9 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port 
>> 8) p[1] = byte(sa.Port) - sa.raw.Addr = sa.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -88,7 +303,9 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - sa.raw.Addr = sa.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -146,7 +363,9 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - sa.Addr = pp.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } return sa, nil case AF_INET6: @@ -155,7 +374,9 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - sa.Addr = pp.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } return sa, nil } return nil, EAFNOSUPPORT @@ -177,6 +398,43 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { return } +func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + nfd, err = accept4(fd, &rsa, &len, flags) + if err != nil { + return + } + if len > SizeofSockaddrAny { + panic("RawSockaddrAny too small") + } + // TODO(neeilan): Remove 0 in call + sa, err = anyToSockaddr(0, &rsa) + if err != nil { + Close(nfd) + nfd = 0 + } + return +} + +func Ctermid() (tty string, err error) { + var termdev [1025]byte + runtime.EnterSyscall() + r0, err2, err1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___CTERMID_A<<4, uintptr(unsafe.Pointer(&termdev[0]))) + runtime.ExitSyscall() + if r0 == 0 { + return "", fmt.Errorf("%s (errno2=0x%x)\n", err1.Error(), err2) + } + s := string(termdev[:]) + idx := strings.Index(s, string(rune(0))) + if idx == -1 { + tty = s + } else { + tty = s[:idx] + } + return +} + func (iov *Iovec) SetLen(length int) { iov.Len = uint64(length) } @@ -190,10 +448,16 @@ func (cmsg *Cmsghdr) SetLen(length int) { } //sys fcntl(fd int, cmd int, arg int) (val int, err error) +//sys Flistxattr(fd int, dest []byte) (sz int, err error) = SYS___FLISTXATTR_A +//sys Fremovexattr(fd int, attr string) (err error) = SYS___FREMOVEXATTR_A //sys read(fd int, p []byte) (n int, err error) //sys write(fd int, p []byte) (n int, err error) +//sys Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) = SYS___FGETXATTR_A +//sys Fsetxattr(fd int, attr string, data []byte, flag int) (err error) = SYS___FSETXATTR_A + //sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) = SYS___ACCEPT_A +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) = SYS___ACCEPT4_A //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = SYS___BIND_A //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = SYS___CONNECT_A //sysnb getgroups(n int, list *_Gid_t) (nn int, err error) @@ -204,6 +468,7 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) //sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = SYS___GETPEERNAME_A //sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = SYS___GETSOCKNAME_A +//sys Removexattr(path string, attr string) (err 
error) = SYS___REMOVEXATTR_A //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) = SYS___RECVFROM_A //sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) = SYS___SENDTO_A //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = SYS___RECVMSG_A @@ -212,6 +477,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sys munmap(addr uintptr, length uintptr) (err error) = SYS_MUNMAP //sys ioctl(fd int, req int, arg uintptr) (err error) = SYS_IOCTL //sys ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) = SYS_IOCTL +//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) = SYS_SHMAT +//sys shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) = SYS_SHMCTL64 +//sys shmdt(addr uintptr) (err error) = SYS_SHMDT +//sys shmget(key int, size int, flag int) (id int, err error) = SYS_SHMGET //sys Access(path string, mode uint32) (err error) = SYS___ACCESS_A //sys Chdir(path string) (err error) = SYS___CHDIR_A @@ -220,14 +489,31 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sys Creat(path string, mode uint32) (fd int, err error) = SYS___CREAT_A //sys Dup(oldfd int) (fd int, err error) //sys Dup2(oldfd int, newfd int) (err error) +//sys Dup3(oldfd int, newfd int, flags int) (err error) = SYS_DUP3 +//sys Dirfd(dirp uintptr) (fd int, err error) = SYS_DIRFD +//sys EpollCreate(size int) (fd int, err error) = SYS_EPOLL_CREATE +//sys EpollCreate1(flags int) (fd int, err error) = SYS_EPOLL_CREATE1 +//sys EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) = SYS_EPOLL_CTL +//sys EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) = SYS_EPOLL_PWAIT +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_WAIT //sys Errno2() (er2 int) = SYS___ERRNO2 -//sys Err2ad() (eadd *int) = SYS___ERR2AD +//sys Eventfd(initval uint, flags int) (fd int, err error) = SYS_EVENTFD //sys Exit(code int) +//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) = SYS___FACCESSAT_A + +func Faccessat2(dirfd int, path string, mode uint32, flags int) (err error) { + return Faccessat(dirfd, path, mode, flags) +} + //sys Fchdir(fd int) (err error) //sys Fchmod(fd int, mode uint32) (err error) +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) = SYS___FCHMODAT_A //sys Fchown(fd int, uid int, gid int) (err error) +//sys Fchownat(fd int, path string, uid int, gid int, flags int) (err error) = SYS___FCHOWNAT_A //sys FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) = SYS_FCNTL +//sys Fdatasync(fd int) (err error) = SYS_FDATASYNC //sys fstat(fd int, stat *Stat_LE_t) (err error) +//sys fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) = SYS___FSTATAT_A func Fstat(fd int, stat *Stat_t) (err error) { var statLE Stat_LE_t @@ -236,28 +522,208 @@ func Fstat(fd int, stat *Stat_t) (err error) { return } +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { + var statLE Stat_LE_t + err = fstatat(dirfd, path, &statLE, flags) + copyStat(stat, &statLE) + return +} + +func impl_Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, e2, e1 := 
CallLeFuncWithErr(GetZosLibVec()+SYS___GETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest))) + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_GetxattrAddr() *(func(path string, attr string, dest []byte) (sz int, err error)) + +var Getxattr = enter_Getxattr + +func enter_Getxattr(path string, attr string, dest []byte) (sz int, err error) { + funcref := get_GetxattrAddr() + if validGetxattr() { + *funcref = impl_Getxattr + } else { + *funcref = error_Getxattr + } + return (*funcref)(path, attr, dest) +} + +func error_Getxattr(path string, attr string, dest []byte) (sz int, err error) { + return -1, ENOSYS +} + +func validGetxattr() bool { + if funcptrtest(GetZosLibVec()+SYS___GETXATTR_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___GETXATTR_A<<4); err == nil { + return name == "__getxattr_a" + } + } + return false +} + +//sys Lgetxattr(link string, attr string, dest []byte) (sz int, err error) = SYS___LGETXATTR_A +//sys Lsetxattr(path string, attr string, data []byte, flags int) (err error) = SYS___LSETXATTR_A + +func impl_Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_SetxattrAddr() *(func(path string, attr string, data []byte, flags int) (err error)) + +var Setxattr = enter_Setxattr + +func enter_Setxattr(path string, attr string, data []byte, flags int) (err error) { + funcref := get_SetxattrAddr() + if validSetxattr() { + *funcref = impl_Setxattr + } else { + *funcref = error_Setxattr + } + return (*funcref)(path, attr, data, flags) +} + +func error_Setxattr(path string, attr string, data []byte, flags int) (err error) { + return ENOSYS +} + +func validSetxattr() bool { + if funcptrtest(GetZosLibVec()+SYS___SETXATTR_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___SETXATTR_A<<4); err == nil { + return name == "__setxattr_a" + } + } + return false +} + +//sys Fstatfs(fd int, buf *Statfs_t) (err error) = SYS_FSTATFS //sys Fstatvfs(fd int, stat *Statvfs_t) (err error) = SYS_FSTATVFS //sys Fsync(fd int) (err error) +//sys Futimes(fd int, tv []Timeval) (err error) = SYS_FUTIMES +//sys Futimesat(dirfd int, path string, tv []Timeval) (err error) = SYS___FUTIMESAT_A //sys Ftruncate(fd int, length int64) (err error) -//sys Getpagesize() (pgsize int) = SYS_GETPAGESIZE +//sys Getrandom(buf []byte, flags int) (n int, err error) = SYS_GETRANDOM +//sys InotifyInit() (fd int, err error) = SYS_INOTIFY_INIT +//sys InotifyInit1(flags int) (fd int, err error) = SYS_INOTIFY_INIT1 +//sys InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) = SYS___INOTIFY_ADD_WATCH_A +//sys InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) = SYS_INOTIFY_RM_WATCH +//sys Listxattr(path string, dest []byte) (sz int, err error) = SYS___LISTXATTR_A +//sys Llistxattr(path string, dest []byte) (sz int, err error) = SYS___LLISTXATTR_A +//sys 
Lremovexattr(path string, attr string) (err error) = SYS___LREMOVEXATTR_A +//sys Lutimes(path string, tv []Timeval) (err error) = SYS___LUTIMES_A //sys Mprotect(b []byte, prot int) (err error) = SYS_MPROTECT //sys Msync(b []byte, flags int) (err error) = SYS_MSYNC +//sys Console2(cmsg *ConsMsg2, modstr *byte, concmd *uint32) (err error) = SYS___CONSOLE2 + +// Pipe2 begin + +//go:nosplit +func getPipe2Addr() *(func([]int, int) error) + +var Pipe2 = pipe2Enter + +func pipe2Enter(p []int, flags int) (err error) { + if funcptrtest(GetZosLibVec()+SYS_PIPE2<<4, "") == 0 { + *getPipe2Addr() = pipe2Impl + } else { + *getPipe2Addr() = pipe2Error + } + return (*getPipe2Addr())(p, flags) +} + +func pipe2Impl(p []int, flags int) (err error) { + var pp [2]_C_int + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PIPE2<<4, uintptr(unsafe.Pointer(&pp[0])), uintptr(flags)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } else { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } + return +} +func pipe2Error(p []int, flags int) (err error) { + return fmt.Errorf("Pipe2 is not available on this system") +} + +// Pipe2 end + //sys Poll(fds []PollFd, timeout int) (n int, err error) = SYS_POLL + +func Readdir(dir uintptr) (dirent *Dirent, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___READDIR_A<<4, uintptr(dir)) + runtime.ExitSyscall() + dirent = (*Dirent)(unsafe.Pointer(r0)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//sys Readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) = SYS___READDIR_R_A +//sys Statfs(path string, buf *Statfs_t) (err error) = SYS___STATFS_A +//sys Syncfs(fd int) (err error) = SYS_SYNCFS //sys Times(tms *Tms) (ticks uintptr, err error) = SYS_TIMES //sys W_Getmntent(buff *byte, size int) (lastsys int, err error) = SYS_W_GETMNTENT //sys W_Getmntent_A(buff *byte, size int) (lastsys int, err error) = SYS___W_GETMNTENT_A //sys mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) = SYS___MOUNT_A -//sys unmount(filesystem string, mtm int) (err error) = SYS___UMOUNT_A +//sys unmount_LE(filesystem string, mtm int) (err error) = SYS___UMOUNT_A //sys Chroot(path string) (err error) = SYS___CHROOT_A //sys Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) = SYS_SELECT -//sysnb Uname(buf *Utsname) (err error) = SYS___UNAME_A +//sysnb Uname(buf *Utsname) (err error) = SYS_____OSNAME_A +//sys Unshare(flags int) (err error) = SYS_UNSHARE func Ptsname(fd int) (name string, err error) { - r0, _, e1 := syscall_syscall(SYS___PTSNAME_A, uintptr(fd), 0, 0) - name = u2s(unsafe.Pointer(r0)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___PTSNAME_A<<4, uintptr(fd)) + runtime.ExitSyscall() + if r0 == 0 { + err = errnoErr2(e1, e2) + } else { + name = u2s(unsafe.Pointer(r0)) } return } @@ -272,13 +738,19 @@ func u2s(cstr unsafe.Pointer) string { } func Close(fd int) (err error) { - _, _, e1 := syscall_syscall(SYS_CLOSE, uintptr(fd), 0, 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_CLOSE<<4, uintptr(fd)) + runtime.ExitSyscall() for i := 0; e1 == EAGAIN && i < 10; i++ { - _, _, _ = syscall_syscall(SYS_USLEEP, uintptr(10), 0, 0) - _, _, e1 = syscall_syscall(SYS_CLOSE, uintptr(fd), 0, 0) + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec()+SYS_USLEEP<<4, uintptr(10)) + runtime.ExitSyscall() + runtime.EnterSyscall() + r0, e2, 
e1 = CallLeFuncWithErr(GetZosLibVec()+SYS_CLOSE<<4, uintptr(fd)) + runtime.ExitSyscall() } - if e1 != 0 { - err = errnoErr(e1) + if r0 != 0 { + err = errnoErr2(e1, e2) } return } @@ -288,9 +760,15 @@ func Madvise(b []byte, advice int) (err error) { return } +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} + //sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A -//sysnb Getegid() (egid int) -//sysnb Geteuid() (uid int) //sysnb Getgid() (gid int) //sysnb Getpid() (pid int) //sysnb Getpgid(pid int) (pgid int, err error) = SYS_GETPGID @@ -317,11 +795,14 @@ func Getrusage(who int, rusage *Rusage) (err error) { return } +//sys Getegid() (egid int) = SYS_GETEGID +//sys Geteuid() (euid int) = SYS_GETEUID //sysnb Getsid(pid int) (sid int, err error) = SYS_GETSID //sysnb Getuid() (uid int) //sysnb Kill(pid int, sig Signal) (err error) //sys Lchown(path string, uid int, gid int) (err error) = SYS___LCHOWN_A //sys Link(path string, link string) (err error) = SYS___LINK_A +//sys Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) = SYS___LINKAT_A //sys Listen(s int, n int) (err error) //sys lstat(path string, stat *Stat_LE_t) (err error) = SYS___LSTAT_A @@ -332,15 +813,150 @@ func Lstat(path string, stat *Stat_t) (err error) { return } +// for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/ +func isSpecialPath(path []byte) (v bool) { + var special = [4][8]byte{ + [8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, + [8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, + [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, + [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} + + var i, j int + for i = 0; i < len(special); i++ { + for j = 0; j < len(special[i]); j++ { + if path[j] != special[i][j] { + break + } + } + if j == len(special[i]) { + return true + } + } + return false +} + +func realpath(srcpath string, abspath []byte) (pathlen int, errno int) { + var source [1024]byte + copy(source[:], srcpath) + source[len(srcpath)] = 0 + ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___REALPATH_A<<4, //__realpath_a() + []uintptr{uintptr(unsafe.Pointer(&source[0])), + uintptr(unsafe.Pointer(&abspath[0]))}) + if ret != 0 { + index := bytes.IndexByte(abspath[:], byte(0)) + if index != -1 { + return index, 0 + } + } else { + errptr := (*int)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, []uintptr{}))) //__errno() + return 0, *errptr + } + return 0, 245 // EBADDATA 245 +} + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + n = int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___READLINK_A<<4, + []uintptr{uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))})) + runtime.KeepAlive(unsafe.Pointer(_p0)) + if n == -1 { + value := *(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, []uintptr{}))) + err = errnoErr(Errno(value)) + } else { + if buf[0] == '$' { + if isSpecialPath(buf[1:9]) { + cnt, err1 := realpath(path, buf) + if err1 == 0 { + n = cnt + } + } + } + } + return +} + +func impl_Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___READLINKAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + return n, err + } else { + if buf[0] == '$' { + if isSpecialPath(buf[1:9]) { + cnt, err1 := realpath(path, buf) + if err1 == 0 { + n = cnt + } + } + } + } + return +} + +//go:nosplit +func get_ReadlinkatAddr() *(func(dirfd int, path string, buf []byte) (n int, err error)) + +var Readlinkat = enter_Readlinkat + +func enter_Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + funcref := get_ReadlinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___READLINKAT_A<<4, "") == 0 { + *funcref = impl_Readlinkat + } else { + *funcref = error_Readlinkat + } + return (*funcref)(dirfd, path, buf) +} + +func error_Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + n = -1 + err = ENOSYS + return +} + //sys Mkdir(path string, mode uint32) (err error) = SYS___MKDIR_A +//sys Mkdirat(dirfd int, path string, mode uint32) (err error) = SYS___MKDIRAT_A //sys Mkfifo(path string, mode uint32) (err error) = SYS___MKFIFO_A //sys Mknod(path string, mode uint32, dev int) (err error) = SYS___MKNOD_A +//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) = SYS___MKNODAT_A +//sys PivotRoot(newroot string, oldroot string) (err error) = SYS___PIVOT_ROOT_A //sys Pread(fd int, p []byte, offset int64) (n int, err error) //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) -//sys Readlink(path string, buf []byte) (n int, err error) = SYS___READLINK_A +//sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) = SYS___PRCTL_A +//sysnb Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT //sys Rename(from string, to string) (err error) = SYS___RENAME_A +//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) = SYS___RENAMEAT_A +//sys Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) = SYS___RENAMEAT2_A //sys Rmdir(path string) (err error) = SYS___RMDIR_A //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK +//sys Setegid(egid int) (err error) = SYS_SETEGID +//sys Seteuid(euid int) (err error) = SYS_SETEUID +//sys Sethostname(p []byte) (err error) = SYS___SETHOSTNAME_A +//sys Setns(fd int, nstype int) (err error) = SYS_SETNS //sys Setpriority(which int, who int, prio int) (err error) //sysnb Setpgid(pid int, pgid int) (err error) = SYS_SETPGID //sysnb Setrlimit(resource int, lim *Rlimit) (err error) @@ -360,32 +976,57 @@ func Stat(path string, sta *Stat_t) (err error) { } //sys Symlink(path string, link string) (err error) = SYS___SYMLINK_A +//sys Symlinkat(oldPath string, dirfd int, newPath string) (err error) = SYS___SYMLINKAT_A //sys Sync() = SYS_SYNC //sys Truncate(path string, length int64) (err error) = SYS___TRUNCATE_A //sys Tcgetattr(fildes int, termptr *Termios) (err error) = SYS_TCGETATTR //sys Tcsetattr(fildes int, when int, termptr *Termios) (err error) = SYS_TCSETATTR //sys Umask(mask int) (oldmask int) //sys Unlink(path string) (err error) = SYS___UNLINK_A +//sys Unlinkat(dirfd int, path string, flags int) (err error) = SYS___UNLINKAT_A //sys Utime(path 
string, utim *Utimbuf) (err error) = SYS___UTIME_A //sys open(path string, mode int, perm uint32) (fd int, err error) = SYS___OPEN_A func Open(path string, mode int, perm uint32) (fd int, err error) { + if mode&O_ACCMODE == 0 { + mode |= O_RDONLY + } return open(path, mode, perm) } -func Mkfifoat(dirfd int, path string, mode uint32) (err error) { - wd, err := Getwd() - if err != nil { - return err - } +//sys openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) = SYS___OPENAT_A - if err := Fchdir(dirfd); err != nil { - return err +func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + if flags&O_ACCMODE == 0 { + flags |= O_RDONLY } - defer Chdir(wd) + return openat(dirfd, path, flags, mode) +} - return Mkfifo(path, mode) +//sys openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) = SYS___OPENAT2_A + +func Openat2(dirfd int, path string, how *OpenHow) (fd int, err error) { + if how.Flags&O_ACCMODE == 0 { + how.Flags |= O_RDONLY + } + return openat2(dirfd, path, how, SizeofOpenHow) +} + +func ZosFdToPath(dirfd int) (path string, err error) { + var buffer [1024]byte + runtime.EnterSyscall() + ret, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_W_IOCTL<<4, uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))) + runtime.ExitSyscall() + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + CallLeFuncWithErr(GetZosLibVec()+SYS___E2A_L<<4, uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)) + return string(buffer[:zb]), nil + } + return "", errnoErr2(e1, e2) } //sys remove(path string) (err error) @@ -403,10 +1044,12 @@ func Getcwd(buf []byte) (n int, err error) { } else { p = unsafe.Pointer(&_zero) } - _, _, e := syscall_syscall(SYS___GETCWD_A, uintptr(p), uintptr(len(buf)), 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___GETCWD_A<<4, uintptr(p), uintptr(len(buf))) + runtime.ExitSyscall() n = clen(buf) + 1 - if e != 0 { - err = errnoErr(e) + if r0 == 0 { + err = errnoErr2(e1, e2) } return } @@ -520,9 +1163,41 @@ func (w WaitStatus) StopSignal() Signal { func (w WaitStatus) TrapCause() int { return -1 } +//sys waitid(idType int, id int, info *Siginfo, options int) (err error) + +func Waitid(idType int, id int, info *Siginfo, options int, rusage *Rusage) (err error) { + return waitid(idType, id, info, options) +} + //sys waitpid(pid int, wstatus *_C_int, options int) (wpid int, err error) -func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { +func impl_Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WAIT4<<4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage))) + runtime.ExitSyscall() + wpid = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_Wait4Addr() *(func(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error)) + +var Wait4 = enter_Wait4 + +func enter_Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { + funcref := get_Wait4Addr() + if funcptrtest(GetZosLibVec()+SYS_WAIT4<<4, "") == 0 { + *funcref = impl_Wait4 + } else { + *funcref = legacyWait4 + } + return (*funcref)(pid, wstatus, options, rusage) +} + +func legacyWait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { // 
TODO(mundaym): z/OS doesn't have wait4. I don't think getrusage does what we want. // At the moment rusage will not be touched. var status _C_int @@ -571,23 +1246,62 @@ func Pipe(p []int) (err error) { } var pp [2]_C_int err = pipe(&pp) - if err == nil { - p[0] = int(pp[0]) - p[1] = int(pp[1]) - } + p[0] = int(pp[0]) + p[1] = int(pp[1]) return } //sys utimes(path string, timeval *[2]Timeval) (err error) = SYS___UTIMES_A func Utimes(path string, tv []Timeval) (err error) { + if tv == nil { + return utimes(path, nil) + } if len(tv) != 2 { return EINVAL } return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } -func UtimesNano(path string, ts []Timespec) error { +//sys utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) = SYS___UTIMENSAT_A + +func validUtimensat() bool { + if funcptrtest(GetZosLibVec()+SYS___UTIMENSAT_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___UTIMENSAT_A<<4); err == nil { + return name == "__utimensat_a" + } + } + return false +} + +// Begin UtimesNano + +//go:nosplit +func get_UtimesNanoAddr() *(func(path string, ts []Timespec) (err error)) + +var UtimesNano = enter_UtimesNano + +func enter_UtimesNano(path string, ts []Timespec) (err error) { + funcref := get_UtimesNanoAddr() + if validUtimensat() { + *funcref = utimesNanoImpl + } else { + *funcref = legacyUtimesNano + } + return (*funcref)(path, ts) +} + +func utimesNanoImpl(path string, ts []Timespec) (err error) { + if ts == nil { + return utimensat(AT_FDCWD, path, nil, 0) + } + if len(ts) != 2 { + return EINVAL + } + return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + +func legacyUtimesNano(path string, ts []Timespec) (err error) { if len(ts) != 2 { return EINVAL } @@ -600,6 +1314,70 @@ func UtimesNano(path string, ts []Timespec) error { return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } +// End UtimesNano + +// Begin UtimesNanoAt + +//go:nosplit +func get_UtimesNanoAtAddr() *(func(dirfd int, path string, ts []Timespec, flags int) (err error)) + +var UtimesNanoAt = enter_UtimesNanoAt + +func enter_UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) (err error) { + funcref := get_UtimesNanoAtAddr() + if validUtimensat() { + *funcref = utimesNanoAtImpl + } else { + *funcref = legacyUtimesNanoAt + } + return (*funcref)(dirfd, path, ts, flags) +} + +func utimesNanoAtImpl(dirfd int, path string, ts []Timespec, flags int) (err error) { + if ts == nil { + return utimensat(dirfd, path, nil, flags) + } + if len(ts) != 2 { + return EINVAL + } + return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags) +} + +func legacyUtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) (err error) { + if path[0] != '/' { + dirPath, err := ZosFdToPath(dirfd) + if err != nil { + return err + } + path = dirPath + "/" + path + } + if flags == AT_SYMLINK_NOFOLLOW { + if len(ts) != 2 { + return EINVAL + } + + if ts[0].Nsec >= 5e8 { + ts[0].Sec++ + } + ts[0].Nsec = 0 + if ts[1].Nsec >= 5e8 { + ts[1].Sec++ + } + ts[1].Nsec = 0 + + // Not as efficient as it could be because Timespec and + // Timeval have different types in the different OSes + tv := []Timeval{ + NsecToTimeval(TimespecToNsec(ts[0])), + NsecToTimeval(TimespecToNsec(ts[1])), + } + return Lutimes(path, tv) + } + return UtimesNano(path, ts) +} + +// End UtimesNanoAt + func Getsockname(fd int) (sa Sockaddr, err error) { var rsa RawSockaddrAny var len _Socklen = SizeofSockaddrAny @@ -1191,10 +1969,13 @@ func Opendir(name string) (uintptr, error) { if err != nil 
{ return 0, err } - dir, _, e := syscall_syscall(SYS___OPENDIR_A, uintptr(unsafe.Pointer(p)), 0, 0) + err = nil + runtime.EnterSyscall() + dir, e2, e1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___OPENDIR_A<<4, uintptr(unsafe.Pointer(p))) + runtime.ExitSyscall() runtime.KeepAlive(unsafe.Pointer(p)) - if e != 0 { - err = errnoErr(e) + if dir == 0 { + err = errnoErr2(e1, e2) } return dir, err } @@ -1202,51 +1983,27 @@ func Opendir(name string) (uintptr, error) { // clearsyscall.Errno resets the errno value to 0. func clearErrno() -func Readdir(dir uintptr) (*Dirent, error) { - var ent Dirent - var res uintptr - // __readdir_r_a returns errno at the end of the directory stream, rather than 0. - // Therefore to avoid false positives we clear errno before calling it. - - // TODO(neeilan): Commented this out to get sys/unix compiling on z/OS. Uncomment and fix. Error: "undefined: clearsyscall" - //clearsyscall.Errno() // TODO(mundaym): check pre-emption rules. - - e, _, _ := syscall_syscall(SYS___READDIR_R_A, dir, uintptr(unsafe.Pointer(&ent)), uintptr(unsafe.Pointer(&res))) - var err error - if e != 0 { - err = errnoErr(Errno(e)) - } - if res == 0 { - return nil, err - } - return &ent, err -} - -func readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) { - r0, _, e1 := syscall_syscall(SYS___READDIR_R_A, dirp, uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) - if int64(r0) == -1 { - err = errnoErr(Errno(e1)) - } - return -} - func Closedir(dir uintptr) error { - _, _, e := syscall_syscall(SYS_CLOSEDIR, dir, 0, 0) - if e != 0 { - return errnoErr(e) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_CLOSEDIR<<4, dir) + runtime.ExitSyscall() + if r0 != 0 { + return errnoErr2(e1, e2) } return nil } func Seekdir(dir uintptr, pos int) { - _, _, _ = syscall_syscall(SYS_SEEKDIR, dir, uintptr(pos), 0) + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec()+SYS_SEEKDIR<<4, dir, uintptr(pos)) + runtime.ExitSyscall() } func Telldir(dir uintptr) (int, error) { - p, _, e := syscall_syscall(SYS_TELLDIR, dir, 0, 0) + p, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TELLDIR<<4, dir) pos := int(p) - if pos == -1 { - return pos, errnoErr(e) + if int64(p) == -1 { + return pos, errnoErr2(e1, e2) } return pos, nil } @@ -1261,19 +2018,55 @@ func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { *(*int64)(unsafe.Pointer(&flock[4])) = lk.Start *(*int64)(unsafe.Pointer(&flock[12])) = lk.Len *(*int32)(unsafe.Pointer(&flock[20])) = lk.Pid - _, _, errno := syscall_syscall(SYS_FCNTL, fd, uintptr(cmd), uintptr(unsafe.Pointer(&flock))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, fd, uintptr(cmd), uintptr(unsafe.Pointer(&flock))) + runtime.ExitSyscall() lk.Type = *(*int16)(unsafe.Pointer(&flock[0])) lk.Whence = *(*int16)(unsafe.Pointer(&flock[2])) lk.Start = *(*int64)(unsafe.Pointer(&flock[4])) lk.Len = *(*int64)(unsafe.Pointer(&flock[12])) lk.Pid = *(*int32)(unsafe.Pointer(&flock[20])) - if errno == 0 { + if r0 == 0 { return nil } - return errno + return errnoErr2(e1, e2) } -func Flock(fd int, how int) error { +func impl_Flock(fd int, how int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FLOCK<<4, uintptr(fd), uintptr(how)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FlockAddr() *(func(fd int, how int) (err error)) + +var Flock = enter_Flock + +func validFlock(fp uintptr) bool { + if 
funcptrtest(GetZosLibVec()+SYS_FLOCK<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS_FLOCK<<4); err == nil { + return name == "flock" + } + } + return false +} + +func enter_Flock(fd int, how int) (err error) { + funcref := get_FlockAddr() + if validFlock(GetZosLibVec() + SYS_FLOCK<<4) { + *funcref = impl_Flock + } else { + *funcref = legacyFlock + } + return (*funcref)(fd, how) +} + +func legacyFlock(fd int, how int) error { var flock_type int16 var fcntl_cmd int @@ -1307,41 +2100,51 @@ func Flock(fd int, how int) error { } func Mlock(b []byte) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_NONSWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Mlock2(b []byte, flags int) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_NONSWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Mlockall(flags int) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_NONSWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Munlock(b []byte) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_SWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_SWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Munlockall() (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_SWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_SWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } @@ -1372,15 +2175,104 @@ func ClockGettime(clockid int32, ts *Timespec) error { return nil } -func Statfs(path string, stat *Statfs_t) (err error) { - fd, err := open(path, O_RDONLY, 0) - defer Close(fd) - if err != nil { - return err +// Chtag + +//go:nosplit +func get_ChtagAddr() *(func(path string, ccsid uint64, textbit uint64) error) + +var Chtag = enter_Chtag + +func enter_Chtag(path string, ccsid uint64, textbit uint64) error { + funcref := get_ChtagAddr() + if validSetxattr() { + *funcref = impl_Chtag + } else { + *funcref = legacy_Chtag } - return Fstatfs(fd, stat) + return (*funcref)(path, ccsid, textbit) } +func legacy_Chtag(path string, ccsid uint64, textbit uint64) error { + tag := ccsid<<16 | textbit<<15 + var tag_buff [8]byte + DecodeData(tag_buff[:], 8, tag) + return Setxattr(path, "filetag", tag_buff[:], XATTR_REPLACE) +} + +func impl_Chtag(path string, ccsid uint64, textbit uint64) error { + tag := ccsid<<16 | textbit<<15 + var tag_buff [4]byte + DecodeData(tag_buff[:], 4, tag) + return Setxattr(path, "system.filetag", tag_buff[:], XATTR_REPLACE) +} + +// End of Chtag + +// Nanosleep + +//go:nosplit +func get_NanosleepAddr() *(func(time *Timespec, leftover *Timespec) error) + +var Nanosleep = enter_Nanosleep + +func enter_Nanosleep(time *Timespec, leftover *Timespec) error { + funcref := get_NanosleepAddr() + if funcptrtest(GetZosLibVec()+SYS_NANOSLEEP<<4, "") 
== 0 { + *funcref = impl_Nanosleep + } else { + *funcref = legacyNanosleep + } + return (*funcref)(time, leftover) +} + +func impl_Nanosleep(time *Timespec, leftover *Timespec) error { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_NANOSLEEP<<4, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover))) + runtime.ExitSyscall() + if int64(r0) == -1 { + return errnoErr2(e1, e2) + } + return nil +} + +func legacyNanosleep(time *Timespec, leftover *Timespec) error { + t0 := runtime.Nanotime1() + var secrem uint32 + var nsecrem uint32 + total := time.Sec*1000000000 + time.Nsec + elapsed := runtime.Nanotime1() - t0 + var rv int32 + var rc int32 + var err error + // repeatedly sleep for 1 second until less than 1 second left + for total-elapsed > 1000000000 { + rv, rc, _ = BpxCondTimedWait(uint32(1), uint32(0), uint32(CW_CONDVAR), &secrem, &nsecrem) + if rv != 0 && rc != 112 { // 112 is EAGAIN + if leftover != nil && rc == 120 { // 120 is EINTR + leftover.Sec = int64(secrem) + leftover.Nsec = int64(nsecrem) + } + err = Errno(rc) + return err + } + elapsed = runtime.Nanotime1() - t0 + } + // sleep the remainder + if total > elapsed { + rv, rc, _ = BpxCondTimedWait(uint32(0), uint32(total-elapsed), uint32(CW_CONDVAR), &secrem, &nsecrem) + } + if leftover != nil && rc == 120 { + leftover.Sec = int64(secrem) + leftover.Nsec = int64(nsecrem) + } + if rv != 0 && rc != 112 { + err = Errno(rc) + } + return err +} + +// End of Nanosleep + var ( Stdin = 0 Stdout = 1 @@ -1395,6 +2287,9 @@ var ( errENOENT error = syscall.ENOENT ) +var ZosTraceLevel int +var ZosTracefile *os.File + var ( signalNameMapOnce sync.Once signalNameMap map[string]syscall.Signal @@ -1416,6 +2311,56 @@ func errnoErr(e Errno) error { return e } +var reg *regexp.Regexp + +// enhanced with zos specific errno2 +func errnoErr2(e Errno, e2 uintptr) error { + switch e { + case 0: + return nil + case EAGAIN: + return errEAGAIN + /* + Allow the retrieval of errno2 for EINVAL and ENOENT on zos + case EINVAL: + return errEINVAL + case ENOENT: + return errENOENT + */ + } + if ZosTraceLevel > 0 { + var name string + if reg == nil { + reg = regexp.MustCompile("(^unix\\.[^/]+$|.*\\/unix\\.[^/]+$)") + } + i := 1 + pc, file, line, ok := runtime.Caller(i) + if ok { + name = runtime.FuncForPC(pc).Name() + } + for ok && reg.MatchString(runtime.FuncForPC(pc).Name()) { + i += 1 + pc, file, line, ok = runtime.Caller(i) + } + if ok { + if ZosTracefile == nil { + ZosConsolePrintf("From %s:%d\n", file, line) + ZosConsolePrintf("%s: %s (errno2=0x%x)\n", name, e.Error(), e2) + } else { + fmt.Fprintf(ZosTracefile, "From %s:%d\n", file, line) + fmt.Fprintf(ZosTracefile, "%s: %s (errno2=0x%x)\n", name, e.Error(), e2) + } + } else { + if ZosTracefile == nil { + ZosConsolePrintf("%s (errno2=0x%x)\n", e.Error(), e2) + } else { + fmt.Fprintf(ZosTracefile, "%s (errno2=0x%x)\n", e.Error(), e2) + } + } + } + return e +} + // ErrnoName returns the error name for error number e. func ErrnoName(e Errno) string { i := sort.Search(len(errorList), func(i int) bool { @@ -1474,6 +2419,9 @@ func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (d return nil, EINVAL } + // Set __MAP_64 by default + flags |= __MAP_64 + // Map the requested memory. 
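    // Note: __MAP_64 requests 64-bit addressable storage on z/OS (a mapping
    // above the 2 GiB bar), presumably so mapping sizes are not constrained
    // by 31-bit addressing.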
addr, errno := m.mmap(0, uintptr(length), prot, flags, fd, offset) if errno != nil { @@ -1778,83 +2726,170 @@ func Exec(argv0 string, argv []string, envv []string) error { return syscall.Exec(argv0, argv, envv) } -func Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { - if needspace := 8 - len(fstype); needspace <= 0 { - fstype = fstype[:8] +func Getag(path string) (ccsid uint16, flag uint16, err error) { + var val [8]byte + sz, err := Getxattr(path, "ccsid", val[:]) + if err != nil { + return + } + ccsid = uint16(EncodeData(val[0:sz])) + sz, err = Getxattr(path, "flags", val[:]) + if err != nil { + return + } + flag = uint16(EncodeData(val[0:sz]) >> 15) + return +} + +// Mount begin +func impl_Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + var _p3 *byte + _p3, err = BytePtrFromString(data) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MOUNT1_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(_p3))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MountAddr() *(func(source string, target string, fstype string, flags uintptr, data string) (err error)) + +var Mount = enter_Mount + +func enter_Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + funcref := get_MountAddr() + if validMount() { + *funcref = impl_Mount } else { - fstype += " "[:needspace] + *funcref = legacyMount + } + return (*funcref)(source, target, fstype, flags, data) +} + +func legacyMount(source string, target string, fstype string, flags uintptr, data string) (err error) { + if needspace := 8 - len(fstype); needspace <= 0 { + fstype = fstype[0:8] + } else { + fstype += " "[0:needspace] } return mount_LE(target, source, fstype, uint32(flags), int32(len(data)), data) } -func Unmount(name string, mtm int) (err error) { +func validMount() bool { + if funcptrtest(GetZosLibVec()+SYS___MOUNT1_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___MOUNT1_A<<4); err == nil { + return name == "__mount1_a" + } + } + return false +} + +// Mount end + +// Unmount begin +func impl_Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UMOUNT2_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_UnmountAddr() *(func(target string, flags int) (err error)) + +var Unmount = enter_Unmount + +func enter_Unmount(target string, flags int) (err error) { + funcref := get_UnmountAddr() + if funcptrtest(GetZosLibVec()+SYS___UMOUNT2_A<<4, "") == 0 { + *funcref = impl_Unmount + } else { + *funcref = legacyUnmount + } + return (*funcref)(target, flags) +} + +func legacyUnmount(name string, mtm int) (err error) { // mountpoint is always a full path and starts with a '/' // check if input string is not a mountpoint but a filesystem name if name[0] != '/' { - return 
unmount(name, mtm) + return unmount_LE(name, mtm) } // treat name as mountpoint b2s := func(arr []byte) string { - nulli := bytes.IndexByte(arr, 0) - if nulli == -1 { - return string(arr) - } else { - return string(arr[:nulli]) + var str string + for i := 0; i < len(arr); i++ { + if arr[i] == 0 { + str = string(arr[:i]) + break + } } + return str } var buffer struct { header W_Mnth fsinfo [64]W_Mntent } - fsCount, err := W_Getmntent_A((*byte)(unsafe.Pointer(&buffer)), int(unsafe.Sizeof(buffer))) - if err != nil { - return err - } - if fsCount == 0 { - return EINVAL - } - for i := 0; i < fsCount; i++ { - if b2s(buffer.fsinfo[i].Mountpoint[:]) == name { - err = unmount(b2s(buffer.fsinfo[i].Fsname[:]), mtm) - break + fs_count, err := W_Getmntent_A((*byte)(unsafe.Pointer(&buffer)), int(unsafe.Sizeof(buffer))) + if err == nil { + err = EINVAL + for i := 0; i < fs_count; i++ { + if b2s(buffer.fsinfo[i].Mountpoint[:]) == name { + err = unmount_LE(b2s(buffer.fsinfo[i].Fsname[:]), mtm) + break + } } + } else if fs_count == 0 { + err = EINVAL } return err } -func fdToPath(dirfd int) (path string, err error) { - var buffer [1024]byte - // w_ctrl() - ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_W_IOCTL<<4, - []uintptr{uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))}) - if ret == 0 { - zb := bytes.IndexByte(buffer[:], 0) - if zb == -1 { - zb = len(buffer) - } - // __e2a_l() - runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, - []uintptr{uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)}) - return string(buffer[:zb]), nil - } - // __errno() - errno := int(*(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, - []uintptr{})))) - // __errno2() - errno2 := int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO2<<4, - []uintptr{})) - // strerror_r() - ret = runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_STRERROR_R<<4, - []uintptr{uintptr(errno), uintptr(unsafe.Pointer(&buffer[0])), 1024}) - if ret == 0 { - zb := bytes.IndexByte(buffer[:], 0) - if zb == -1 { - zb = len(buffer) - } - return "", fmt.Errorf("%s (errno2=0x%x)", buffer[:zb], errno2) - } else { - return "", fmt.Errorf("fdToPath errno %d (errno2=0x%x)", errno, errno2) +// Unmount end + +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + reclen, ok := direntReclen(buf) + if !ok { + return 0, false } + return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true } func direntLeToDirentUnix(dirent *direntLE, dir uintptr, path string) (Dirent, error) { @@ -1896,7 +2931,7 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { } // Get path from fd to avoid unavailable call (fdopendir) - path, err := fdToPath(fd) + path, err := ZosFdToPath(fd) if err != nil { return 0, err } @@ -1910,7 +2945,7 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { for { var entryLE direntLE var entrypLE *direntLE - e := readdir_r(d, &entryLE, &entrypLE) + e := Readdir_r(d, &entryLE, &entrypLE) if e != nil { return n, e } @@ -1956,23 +2991,127 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { return n, nil } -func ReadDirent(fd int, buf []byte) (n int, err error) { - var base = (*uintptr)(unsafe.Pointer(new(uint64))) - return Getdirentries(fd, buf, base) 
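+// Hedged usage sketch. ZosTraceLevel, ZosTracefile, and errnoErr2 are the
+// names this patch adds; the enabling sequence below is illustrative, not
+// from the patch. With ZosTraceLevel > 0, every failing wrapper that
+// reports through errnoErr2 also logs the calling site and the z/OS errno2
+// value, to the console by default or to ZosTracefile when it is non-nil:
+//
+//	unix.ZosTraceLevel = 1
+//	if f, err := os.Create("/tmp/unix-errno2.log"); err == nil {
+//		unix.ZosTracefile = f // route traces to a file instead of the console
+//	}
+//	// A subsequent failing call, e.g. unix.Chdir("/does/not/exist"),
+//	// now records "<func>: <error> (errno2=0x...)" with file:line context.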
+func Err2ad() (eadd *int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS___ERR2AD<<4) + eadd = (*int)(unsafe.Pointer(r0)) + return } -func direntIno(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) -} - -func direntReclen(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) -} - -func direntNamlen(buf []byte) (uint64, bool) { - reclen, ok := direntReclen(buf) - if !ok { - return 0, false +func ZosConsolePrintf(format string, v ...interface{}) (int, error) { + type __cmsg struct { + _ uint16 + _ [2]uint8 + __msg_length uint32 + __msg uintptr + _ [4]uint8 } - return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true + msg := fmt.Sprintf(format, v...) + strptr := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&msg)).Data) + len := (*reflect.StringHeader)(unsafe.Pointer(&msg)).Len + cmsg := __cmsg{__msg_length: uint32(len), __msg: uintptr(strptr)} + cmd := uint32(0) + runtime.EnterSyscall() + rc, err2, err1 := CallLeFuncWithErr(GetZosLibVec()+SYS_____CONSOLE_A<<4, uintptr(unsafe.Pointer(&cmsg)), 0, uintptr(unsafe.Pointer(&cmd))) + runtime.ExitSyscall() + if rc != 0 { + return 0, fmt.Errorf("%s (errno2=0x%x)\n", err1.Error(), err2) + } + return 0, nil } +func ZosStringToEbcdicBytes(str string, nullterm bool) (ebcdicBytes []byte) { + if nullterm { + ebcdicBytes = []byte(str + "\x00") + } else { + ebcdicBytes = []byte(str) + } + A2e(ebcdicBytes) + return +} +func ZosEbcdicBytesToString(b []byte, trimRight bool) (str string) { + res := make([]byte, len(b)) + copy(res, b) + E2a(res) + if trimRight { + str = string(bytes.TrimRight(res, " \x00")) + } else { + str = string(res) + } + return +} + +func fdToPath(dirfd int) (path string, err error) { + var buffer [1024]byte + // w_ctrl() + ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_W_IOCTL<<4, + []uintptr{uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))}) + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + // __e2a_l() + runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, + []uintptr{uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)}) + return string(buffer[:zb]), nil + } + // __errno() + errno := int(*(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, + []uintptr{})))) + // __errno2() + errno2 := int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO2<<4, + []uintptr{})) + // strerror_r() + ret = runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_STRERROR_R<<4, + []uintptr{uintptr(errno), uintptr(unsafe.Pointer(&buffer[0])), 1024}) + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + return "", fmt.Errorf("%s (errno2=0x%x)", buffer[:zb], errno2) + } else { + return "", fmt.Errorf("fdToPath errno %d (errno2=0x%x)", errno, errno2) + } +} + +func impl_Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKFIFOAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MkfifoatAddr() *(func(dirfd int, path string, mode uint32) (err error)) + +var Mkfifoat = enter_Mkfifoat + +func enter_Mkfifoat(dirfd int, path string, mode uint32) (err error) { + funcref := 
get_MkfifoatAddr() + if funcptrtest(GetZosLibVec()+SYS___MKFIFOAT_A<<4, "") == 0 { + *funcref = impl_Mkfifoat + } else { + *funcref = legacy_Mkfifoat + } + return (*funcref)(dirfd, path, mode) +} + +func legacy_Mkfifoat(dirfd int, path string, mode uint32) (err error) { + dirname, err := ZosFdToPath(dirfd) + if err != nil { + return err + } + return Mkfifo(dirname+"/"+path, mode) +} + +//sys Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT +//sys Grantpt(fildes int) (rc int, err error) = SYS_GRANTPT +//sys Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix.go b/vendor/golang.org/x/sys/unix/sysvshm_unix.go index 79a84f18..672d6b0a 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (darwin && !ios) || linux +//go:build (darwin && !ios) || linux || zos package unix diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go index 9eb0db66..8b7977a2 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin && !ios +//go:build (darwin && !ios) || zos package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 36bf8399..877a62b4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -491,6 +491,7 @@ const ( BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_TEST_REG_INVARIANTS = 0x80 BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 BPF_F_TEST_STATE_FREQ = 0x8 @@ -501,6 +502,7 @@ const ( BPF_IMM = 0x0 BPF_IND = 0x40 BPF_JA = 0x0 + BPF_JCOND = 0xe0 BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 @@ -656,6 +658,9 @@ const ( CAN_NPROTO = 0x8 CAN_RAW = 0x1 CAN_RAW_FILTER_MAX = 0x200 + CAN_RAW_XL_VCID_RX_FILTER = 0x4 + CAN_RAW_XL_VCID_TX_PASS = 0x2 + CAN_RAW_XL_VCID_TX_SET = 0x1 CAN_RTR_FLAG = 0x40000000 CAN_SFF_ID_BITS = 0xb CAN_SFF_MASK = 0x7ff @@ -1338,6 +1343,7 @@ const ( F_OFD_SETLK = 0x25 F_OFD_SETLKW = 0x26 F_OK = 0x0 + F_SEAL_EXEC = 0x20 F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 @@ -1626,6 +1632,7 @@ const ( IP_FREEBIND = 0xf IP_HDRINCL = 0x3 IP_IPSEC_POLICY = 0x10 + IP_LOCAL_PORT_RANGE = 0x33 IP_MAXPACKET = 0xffff IP_MAX_MEMBERSHIPS = 0x14 IP_MF = 0x2000 @@ -1652,6 +1659,7 @@ const ( IP_PMTUDISC_OMIT = 0x5 IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 + IP_PROTOCOL = 0x34 IP_RECVERR = 0xb IP_RECVERR_RFC4884 = 0x1a IP_RECVFRAGSIZE = 0x19 @@ -1697,6 +1705,7 @@ const ( KEXEC_ARCH_S390 = 0x160000 KEXEC_ARCH_SH = 0x2a0000 KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_DEBUG = 0x8 KEXEC_FILE_NO_INITRAMFS = 0x4 KEXEC_FILE_ON_CRASH = 0x2 KEXEC_FILE_UNLOAD = 0x1 @@ -1898,6 +1907,7 @@ const ( MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MNT_ID_REQ_SIZE_VER0 = 0x18 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -2166,7 +2176,7 @@ const ( NFT_SECMARK_CTX_MAXLEN = 0x100 NFT_SET_MAXNAMELEN = 0x100 NFT_SOCKET_MAX = 0x3 - NFT_TABLE_F_MASK = 0x3 + NFT_TABLE_F_MASK = 0x7 NFT_TABLE_MAXNAMELEN = 0x100 NFT_TRACETYPE_MAX = 0x3 NFT_TUNNEL_F_MASK = 0x7 @@ -2302,6 
+2312,7 @@ const ( PERF_AUX_FLAG_PARTIAL = 0x4 PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK = 0xff00 PERF_AUX_FLAG_TRUNCATED = 0x1 + PERF_BRANCH_ENTRY_INFO_BITS_MAX = 0x21 PERF_BR_ARM64_DEBUG_DATA = 0x7 PERF_BR_ARM64_DEBUG_EXIT = 0x5 PERF_BR_ARM64_DEBUG_HALT = 0x4 @@ -2399,6 +2410,7 @@ const ( PERF_RECORD_MISC_USER = 0x2 PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 PERF_SAMPLE_WEIGHT_TYPE = 0x1004000 + PID_FS_MAGIC = 0x50494446 PIPEFS_MAGIC = 0x50495045 PPPIOCGNPMODE = 0xc008744c PPPIOCNEWUNIT = 0xc004743e @@ -2892,8 +2904,9 @@ const ( RWF_APPEND = 0x10 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 + RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x1f + RWF_SUPPORTED = 0x3f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 @@ -2914,7 +2927,9 @@ const ( SCHED_RESET_ON_FORK = 0x40000000 SCHED_RR = 0x2 SCM_CREDENTIALS = 0x2 + SCM_PIDFD = 0x4 SCM_RIGHTS = 0x1 + SCM_SECURITY = 0x3 SCM_TIMESTAMP = 0x1d SC_LOG_FLUSH = 0x100000 SECCOMP_ADDFD_FLAG_SEND = 0x2 @@ -3047,6 +3062,8 @@ const ( SIOCSMIIREG = 0x8949 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SK_DIAG_BPF_STORAGE_MAX = 0x3 + SK_DIAG_BPF_STORAGE_REQ_MAX = 0x1 SMACK_MAGIC = 0x43415d53 SMART_AUTOSAVE = 0xd2 SMART_AUTO_OFFLINE = 0xdb @@ -3067,6 +3084,8 @@ const ( SOCKFS_MAGIC = 0x534f434b SOCK_BUF_LOCK_MASK = 0x3 SOCK_DCCP = 0x6 + SOCK_DESTROY = 0x15 + SOCK_DIAG_BY_FAMILY = 0x14 SOCK_IOC_TYPE = 0x89 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -3168,6 +3187,7 @@ const ( STATX_GID = 0x10 STATX_INO = 0x100 STATX_MNT_ID = 0x1000 + STATX_MNT_ID_UNIQUE = 0x4000 STATX_MODE = 0x2 STATX_MTIME = 0x40 STATX_NLINK = 0x4 @@ -3255,6 +3275,7 @@ const ( TCP_MAX_WINSHIFT = 0xe TCP_MD5SIG = 0xe TCP_MD5SIG_EXT = 0x20 + TCP_MD5SIG_FLAG_IFINDEX = 0x2 TCP_MD5SIG_FLAG_PREFIX = 0x1 TCP_MD5SIG_MAXKEYLEN = 0x50 TCP_MSS = 0x200 @@ -3562,12 +3583,16 @@ const ( XDP_RX_RING = 0x2 XDP_SHARED_UMEM = 0x1 XDP_STATISTICS = 0x7 + XDP_TXMD_FLAGS_CHECKSUM = 0x2 + XDP_TXMD_FLAGS_TIMESTAMP = 0x1 + XDP_TX_METADATA = 0x2 XDP_TX_RING = 0x3 XDP_UMEM_COMPLETION_RING = 0x6 XDP_UMEM_FILL_RING = 0x5 XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 XDP_UMEM_PGOFF_FILL_RING = 0x100000000 XDP_UMEM_REG = 0x4 + XDP_UMEM_TX_SW_CSUM = 0x2 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 XDP_USE_SG = 0x10 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 42ff8c3c..e4bc0bd5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -118,6 +118,7 @@ const ( IXOFF = 0x1000 IXON = 0x400 MAP_32BIT = 0x40 + MAP_ABOVE4G = 0x80 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 MAP_DENYWRITE = 0x800 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index dca43600..689317af 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -118,6 +118,7 @@ const ( IXOFF = 0x1000 IXON = 0x400 MAP_32BIT = 0x40 + MAP_ABOVE4G = 0x80 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 MAP_DENYWRITE = 0x800 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index d8cae6d1..14270508 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -87,6 +87,7 @@ const ( FICLONE = 0x40049409 FICLONERANGE = 0x4020940d FLUSHO = 0x1000 + FPMR_MAGIC = 0x46504d52 FPSIMD_MAGIC = 0x46508001 FS_IOC_ENABLE_VERITY = 0x40806685 FS_IOC_GETFLAGS = 0x80086601 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index 4dfd2e05..da08b2ab 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -10,41 +10,99 @@ package unix const ( - BRKINT = 0x0001 - CLOCK_MONOTONIC = 0x1 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_THREAD_CPUTIME_ID = 0x3 - CS8 = 0x0030 - CSIZE = 0x0030 - ECHO = 0x00000008 - ECHONL = 0x00000001 - FD_CLOEXEC = 0x01 - FD_CLOFORK = 0x02 - FNDELAY = 0x04 - F_CLOSFD = 9 - F_CONTROL_CVT = 13 - F_DUPFD = 0 - F_DUPFD2 = 8 - F_GETFD = 1 - F_GETFL = 259 - F_GETLK = 5 - F_GETOWN = 10 - F_OK = 0x0 - F_RDLCK = 1 - F_SETFD = 2 - F_SETFL = 4 - F_SETLK = 6 - F_SETLKW = 7 - F_SETOWN = 11 - F_SETTAG = 12 - F_UNLCK = 3 - F_WRLCK = 2 - FSTYPE_ZFS = 0xe9 //"Z" - FSTYPE_HFS = 0xc8 //"H" - FSTYPE_NFS = 0xd5 //"N" - FSTYPE_TFS = 0xe3 //"T" - FSTYPE_AUTOMOUNT = 0xc1 //"A" + BRKINT = 0x0001 + CLOCAL = 0x1 + CLOCK_MONOTONIC = 0x1 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLONE_NEWIPC = 0x08000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x00020000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUTS = 0x04000000 + CLONE_PARENT = 0x00008000 + CS8 = 0x0030 + CSIZE = 0x0030 + ECHO = 0x00000008 + ECHONL = 0x00000001 + EFD_SEMAPHORE = 0x00002000 + EFD_CLOEXEC = 0x00001000 + EFD_NONBLOCK = 0x00000004 + EPOLL_CLOEXEC = 0x00001000 + EPOLL_CTL_ADD = 0 + EPOLL_CTL_MOD = 1 + EPOLL_CTL_DEL = 2 + EPOLLRDNORM = 0x0001 + EPOLLRDBAND = 0x0002 + EPOLLIN = 0x0003 + EPOLLOUT = 0x0004 + EPOLLWRBAND = 0x0008 + EPOLLPRI = 0x0010 + EPOLLERR = 0x0020 + EPOLLHUP = 0x0040 + EPOLLEXCLUSIVE = 0x20000000 + EPOLLONESHOT = 0x40000000 + FD_CLOEXEC = 0x01 + FD_CLOFORK = 0x02 + FD_SETSIZE = 0x800 + FNDELAY = 0x04 + F_CLOSFD = 9 + F_CONTROL_CVT = 13 + F_DUPFD = 0 + F_DUPFD2 = 8 + F_GETFD = 1 + F_GETFL = 259 + F_GETLK = 5 + F_GETOWN = 10 + F_OK = 0x0 + F_RDLCK = 1 + F_SETFD = 2 + F_SETFL = 4 + F_SETLK = 6 + F_SETLKW = 7 + F_SETOWN = 11 + F_SETTAG = 12 + F_UNLCK = 3 + F_WRLCK = 2 + FSTYPE_ZFS = 0xe9 //"Z" + FSTYPE_HFS = 0xc8 //"H" + FSTYPE_NFS = 0xd5 //"N" + FSTYPE_TFS = 0xe3 //"T" + FSTYPE_AUTOMOUNT = 0xc1 //"A" + GRND_NONBLOCK = 1 + GRND_RANDOM = 2 + HUPCL = 0x0100 // Hang up on last close + IN_CLOEXEC = 0x00001000 + IN_NONBLOCK = 0x00000004 + IN_ACCESS = 0x00000001 + IN_MODIFY = 0x00000002 + IN_ATTRIB = 0x00000004 + IN_CLOSE_WRITE = 0x00000008 + IN_CLOSE_NOWRITE = 0x00000010 + IN_OPEN = 0x00000020 + IN_MOVED_FROM = 0x00000040 + IN_MOVED_TO = 0x00000080 + IN_CREATE = 0x00000100 + IN_DELETE = 0x00000200 + IN_DELETE_SELF = 0x00000400 + IN_MOVE_SELF = 0x00000800 + IN_UNMOUNT = 0x00002000 + IN_Q_OVERFLOW = 0x00004000 + IN_IGNORED = 0x00008000 + IN_CLOSE = (IN_CLOSE_WRITE | IN_CLOSE_NOWRITE) + IN_MOVE = (IN_MOVED_FROM | IN_MOVED_TO) + IN_ALL_EVENTS = (IN_ACCESS | IN_MODIFY | IN_ATTRIB | + IN_CLOSE | IN_OPEN | IN_MOVE | + IN_CREATE | IN_DELETE | IN_DELETE_SELF | + IN_MOVE_SELF) + IN_ONLYDIR = 0x01000000 + IN_DONT_FOLLOW = 0x02000000 + IN_EXCL_UNLINK = 0x04000000 + IN_MASK_CREATE = 0x10000000 + IN_MASK_ADD = 0x20000000 + IN_ISDIR = 0x40000000 + IN_ONESHOT = 0x80000000 IP6F_MORE_FRAG = 0x0001 IP6F_OFF_MASK = 0xfff8 IP6F_RESERVED_MASK = 0x0006 @@ -152,10 +210,18 @@ const ( IP_PKTINFO = 101 IP_RECVPKTINFO = 102 IP_TOS = 2 - IP_TTL = 3 + IP_TTL = 14 IP_UNBLOCK_SOURCE = 11 + ICMP6_FILTER = 1 + MCAST_INCLUDE = 0 + MCAST_EXCLUDE = 1 + MCAST_JOIN_GROUP = 40 + MCAST_LEAVE_GROUP = 41 + MCAST_JOIN_SOURCE_GROUP = 42 + 
MCAST_LEAVE_SOURCE_GROUP = 43 + MCAST_BLOCK_SOURCE = 44 + MCAST_UNBLOCK_SOURCE = 46 ICANON = 0x0010 - ICMP6_FILTER = 0x26 ICRNL = 0x0002 IEXTEN = 0x0020 IGNBRK = 0x0004 @@ -165,10 +231,10 @@ const ( ISTRIP = 0x0080 IXON = 0x0200 IXOFF = 0x0100 - LOCK_SH = 0x1 // Not exist on zOS - LOCK_EX = 0x2 // Not exist on zOS - LOCK_NB = 0x4 // Not exist on zOS - LOCK_UN = 0x8 // Not exist on zOS + LOCK_SH = 0x1 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_UN = 0x8 POLLIN = 0x0003 POLLOUT = 0x0004 POLLPRI = 0x0010 @@ -182,15 +248,29 @@ const ( MAP_PRIVATE = 0x1 // changes are private MAP_SHARED = 0x2 // changes are shared MAP_FIXED = 0x4 // place exactly - MCAST_JOIN_GROUP = 40 - MCAST_LEAVE_GROUP = 41 - MCAST_JOIN_SOURCE_GROUP = 42 - MCAST_LEAVE_SOURCE_GROUP = 43 - MCAST_BLOCK_SOURCE = 44 - MCAST_UNBLOCK_SOURCE = 45 + __MAP_MEGA = 0x8 + __MAP_64 = 0x10 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 MS_SYNC = 0x1 // msync - synchronous writes MS_ASYNC = 0x2 // asynchronous writes MS_INVALIDATE = 0x4 // invalidate mappings + MS_BIND = 0x00001000 + MS_MOVE = 0x00002000 + MS_NOSUID = 0x00000002 + MS_PRIVATE = 0x00040000 + MS_REC = 0x00004000 + MS_REMOUNT = 0x00008000 + MS_RDONLY = 0x00000001 + MS_UNBINDABLE = 0x00020000 + MNT_DETACH = 0x00000004 + ZOSDSFS_SUPER_MAGIC = 0x44534653 // zOS DSFS + NFS_SUPER_MAGIC = 0x6969 // NFS + NSFS_MAGIC = 0x6e736673 // PROCNS + PROC_SUPER_MAGIC = 0x9fa0 // proc FS + ZOSTFS_SUPER_MAGIC = 0x544653 // zOS TFS + ZOSUFS_SUPER_MAGIC = 0x554653 // zOS UFS + ZOSZFS_SUPER_MAGIC = 0x5A4653 // zOS ZFS MTM_RDONLY = 0x80000000 MTM_RDWR = 0x40000000 MTM_UMOUNT = 0x10000000 @@ -205,13 +285,20 @@ const ( MTM_REMOUNT = 0x00000100 MTM_NOSECURITY = 0x00000080 NFDBITS = 0x20 + ONLRET = 0x0020 // NL performs CR function O_ACCMODE = 0x03 O_APPEND = 0x08 O_ASYNCSIG = 0x0200 O_CREAT = 0x80 + O_DIRECT = 0x00002000 + O_NOFOLLOW = 0x00004000 + O_DIRECTORY = 0x00008000 + O_PATH = 0x00080000 + O_CLOEXEC = 0x00001000 O_EXCL = 0x40 O_GETFL = 0x0F O_LARGEFILE = 0x0400 + O_NDELAY = 0x4 O_NONBLOCK = 0x04 O_RDONLY = 0x02 O_RDWR = 0x03 @@ -248,6 +335,7 @@ const ( AF_IUCV = 17 AF_LAT = 14 AF_LINK = 18 + AF_LOCAL = AF_UNIX // AF_LOCAL is an alias for AF_UNIX AF_MAX = 30 AF_NBS = 7 AF_NDD = 23 @@ -285,15 +373,33 @@ const ( RLIMIT_AS = 5 RLIMIT_NOFILE = 6 RLIMIT_MEMLIMIT = 7 + RLIMIT_MEMLOCK = 0x8 RLIM_INFINITY = 2147483647 + SCHED_FIFO = 0x2 + SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x01 SF_CLOSE = 0x00000002 SF_REUSE = 0x00000001 + SHM_RND = 0x2 + SHM_RDONLY = 0x1 + SHMLBA = 0x1000 + IPC_STAT = 0x3 + IPC_SET = 0x2 + IPC_RMID = 0x1 + IPC_PRIVATE = 0x0 + IPC_CREAT = 0x1000000 + __IPC_MEGA = 0x4000000 + __IPC_SHAREAS = 0x20000000 + __IPC_BELOWBAR = 0x10000000 + IPC_EXCL = 0x2000000 + __IPC_GIGA = 0x8000000 SHUT_RD = 0 SHUT_RDWR = 2 SHUT_WR = 1 + SOCK_CLOEXEC = 0x00001000 SOCK_CONN_DGRAM = 6 SOCK_DGRAM = 2 + SOCK_NONBLOCK = 0x800 SOCK_RAW = 3 SOCK_RDM = 4 SOCK_SEQPACKET = 5 @@ -378,8 +484,6 @@ const ( S_IFMST = 0x00FF0000 TCP_KEEPALIVE = 0x8 TCP_NODELAY = 0x1 - TCP_INFO = 0xb - TCP_USER_TIMEOUT = 0x1 TIOCGWINSZ = 0x4008a368 TIOCSWINSZ = 0x8008a367 TIOCSBRK = 0x2000a77b @@ -427,7 +531,10 @@ const ( VSUSP = 9 VTIME = 10 WCONTINUED = 0x4 + WEXITED = 0x8 WNOHANG = 0x1 + WNOWAIT = 0x20 + WSTOPPED = 0x10 WUNTRACED = 0x2 _BPX_SWAP = 1 _BPX_NONSWAP = 2 @@ -452,8 +559,28 @@ const ( MADV_FREE = 15 // for Linux compatibility -- no zos semantics MADV_WIPEONFORK = 16 // for Linux compatibility -- no zos semantics MADV_KEEPONFORK = 17 // for Linux compatibility -- no zos semantics - AT_SYMLINK_NOFOLLOW = 1 // for Unix compatibility -- no zos 
semantics - AT_FDCWD = 2 // for Unix compatibility -- no zos semantics + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x100 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + P_PID = 0 + P_PGID = 1 + P_ALL = 2 + PR_SET_NAME = 15 + PR_GET_NAME = 16 + PR_SET_NO_NEW_PRIVS = 38 + PR_GET_NO_NEW_PRIVS = 39 + PR_SET_DUMPABLE = 4 + PR_GET_DUMPABLE = 3 + PR_SET_PDEATHSIG = 1 + PR_GET_PDEATHSIG = 2 + PR_SET_CHILD_SUBREAPER = 36 + PR_GET_CHILD_SUBREAPER = 37 + AT_FDCWD = -100 + AT_EACCESS = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_REMOVEDIR = 0x200 + RENAME_NOREPLACE = 1 << 0 ) const ( @@ -476,6 +603,7 @@ const ( EMLINK = Errno(125) ENAMETOOLONG = Errno(126) ENFILE = Errno(127) + ENOATTR = Errno(265) ENODEV = Errno(128) ENOENT = Errno(129) ENOEXEC = Errno(130) @@ -700,7 +828,7 @@ var errorList = [...]struct { {145, "EDC5145I", "The parameter list is too long, or the message to receive was too large for the buffer."}, {146, "EDC5146I", "Too many levels of symbolic links."}, {147, "EDC5147I", "Illegal byte sequence."}, - {148, "", ""}, + {148, "EDC5148I", "The named attribute or data not available."}, {149, "EDC5149I", "Value Overflow Error."}, {150, "EDC5150I", "UNIX System Services is not active."}, {151, "EDC5151I", "Dynamic allocation error."}, @@ -743,6 +871,7 @@ var errorList = [...]struct { {259, "EDC5259I", "A CUN_RS_NO_CONVERSION error was issued by Unicode Services."}, {260, "EDC5260I", "A CUN_RS_TABLE_NOT_ALIGNED error was issued by Unicode Services."}, {262, "EDC5262I", "An iconv() function encountered an unexpected error while using Unicode Services."}, + {265, "EDC5265I", "The named attribute not available."}, {1000, "EDC8000I", "A bad socket-call constant was found in the IUCV header."}, {1001, "EDC8001I", "An error was found in the IUCV header."}, {1002, "EDC8002I", "A socket descriptor is out of range."}, diff --git a/vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s b/vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s new file mode 100644 index 00000000..b77ff5db --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s @@ -0,0 +1,364 @@ +// go run mksyscall_zos_s390x.go -o_sysnum zsysnum_zos_s390x.go -o_syscall zsyscall_zos_s390x.go -i_syscall syscall_zos_s390x.go -o_asm zsymaddr_zos_s390x.s +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build zos && s390x +#include "textflag.h" + +// provide the address of function variable to be fixed up. 
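+// Hedged explanation, inferred from the Go side of this patch (for example
+// enter_Mkfifoat earlier in this diff): each exported name such as Mkfifoat
+// is a package-level function variable initialized to its enter_* stub. On
+// first use the stub probes the LE function vector with funcptrtest and then
+// patches the variable, through the matching get_*Addr routine in this file,
+// to either the direct impl_* wrapper or a legacy_*/error_* fallback, so
+// later calls skip the probe. Go-side shape, as a sketch:
+//
+//	var Mkfifoat = enter_Mkfifoat
+//
+//	func enter_Mkfifoat(dirfd int, path string, mode uint32) error {
+//		funcref := get_MkfifoatAddr() // assembly returns &Mkfifoat
+//		if funcptrtest(GetZosLibVec()+SYS___MKFIFOAT_A<<4, "") == 0 {
+//			*funcref = impl_Mkfifoat // __mkfifoat_a is available
+//		} else {
+//			*funcref = legacy_Mkfifoat // emulate via ZosFdToPath + Mkfifo
+//		}
+//		return (*funcref)(dirfd, path, mode)
+//	}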
+ +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FlistxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Flistxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FremovexattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fremovexattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FgetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fgetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FsetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fsetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_accept4Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·accept4(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_RemovexattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Removexattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_Dup3Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Dup3(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_DirfdAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Dirfd(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollCreateAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollCreate(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollCreate1Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollCreate1(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollCtlAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollCtl(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollPwaitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollPwait(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollWaitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollWait(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EventfdAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Eventfd(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FaccessatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Faccessat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FchmodatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fchmodat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FchownatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fchownat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FdatasyncAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fdatasync(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_fstatatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·fstatat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LgetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lgetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT + +TEXT ·get_LsetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lsetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FstatfsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fstatfs(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FutimesAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Futimes(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FutimesatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Futimesat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_GetrandomAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Getrandom(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyInitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyInit(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyInit1Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyInit1(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyAddWatchAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyAddWatch(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyRmWatchAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyRmWatch(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_ListxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Listxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LlistxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Llistxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LremovexattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lremovexattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LutimesAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lutimes(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_StatfsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Statfs(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SyncfsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Syncfs(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_UnshareAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Unshare(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Linkat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_MkdiratAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mkdirat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_MknodatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mknodat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_PivotRootAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·PivotRoot(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT 
·get_PrctlAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Prctl(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_PrlimitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Prlimit(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_RenameatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Renameat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_Renameat2Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Renameat2(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SethostnameAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Sethostname(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SetnsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Setns(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SymlinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Symlinkat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_UnlinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Unlinkat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_openatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·openat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_openat2Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·openat2(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_utimensatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·utimensat(SB), R8 + MOVD R8, ret+0(FP) + RET diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index ccb02f24..07642c30 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -760,6 +760,39 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s 
b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 8b8bb284..923e08cb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -228,6 +228,16 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 1b40b997..7d73dda6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -760,6 +760,39 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 08362c1a..05770011 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -228,6 +228,16 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, 
$libc_pthread_fchdir_np_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1488d271..87d8612a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -906,6 +906,16 @@ func Fspick(dirfd int, pathName string, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) { + _, _, e1 := Syscall6(SYS_FSCONFIG, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(value)), uintptr(aux), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index 94f01123..7ccf66b7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -tags zos,s390x syscall_zos_s390x.go +// go run mksyscall_zos_s390x.go -o_sysnum zsysnum_zos_s390x.go -o_syscall zsyscall_zos_s390x.go -i_syscall syscall_zos_s390x.go -o_asm zsymaddr_zos_s390x.s // Code generated by the command above; see README.md. DO NOT EDIT. //go:build zos && s390x @@ -6,22 +6,105 @@ package unix import ( + "runtime" + "syscall" "unsafe" ) +var _ syscall.Errno + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), uintptr(arg)) + runtime.ExitSyscall() val = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func impl_Flistxattr(fd int, dest []byte) (sz int, err error) { + var _p0 unsafe.Pointer + if len(dest) > 0 { + _p0 = unsafe.Pointer(&dest[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FLISTXATTR_A<<4, uintptr(fd), uintptr(_p0), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FlistxattrAddr() *(func(fd int, dest []byte) (sz int, err error)) + +var Flistxattr = enter_Flistxattr + +func enter_Flistxattr(fd int, dest []byte) (sz int, err error) { + funcref := get_FlistxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FLISTXATTR_A<<4, "") == 0 { + *funcref = impl_Flistxattr + } else { + *funcref = error_Flistxattr + } + return (*funcref)(fd, dest) +} + +func error_Flistxattr(fd int, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fremovexattr(fd int, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FREMOVEXATTR_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0))) + 
runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FremovexattrAddr() *(func(fd int, attr string) (err error)) + +var Fremovexattr = enter_Fremovexattr + +func enter_Fremovexattr(fd int, attr string) (err error) { + funcref := get_FremovexattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FREMOVEXATTR_A<<4, "") == 0 { + *funcref = impl_Fremovexattr + } else { + *funcref = error_Fremovexattr + } + return (*funcref)(fd, attr) +} + +func error_Fremovexattr(fd int, attr string) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -29,10 +112,12 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_READ<<4, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -46,31 +131,159 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WRITE<<4, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func impl_Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FGETXATTR_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FgetxattrAddr() *(func(fd int, attr string, dest []byte) (sz int, err error)) + +var Fgetxattr = enter_Fgetxattr + +func enter_Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + funcref := get_FgetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FGETXATTR_A<<4, "") == 0 { + *funcref = impl_Fgetxattr + } else { + *funcref = error_Fgetxattr + } + return (*funcref)(fd, attr, dest) +} + +func error_Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fsetxattr(fd int, attr string, data []byte, flag int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(data) > 0 { + _p1 = unsafe.Pointer(&data[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FSETXATTR_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(data)), uintptr(flag)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FsetxattrAddr() *(func(fd int, attr string, data []byte, flag 
int) (err error)) + +var Fsetxattr = enter_Fsetxattr + +func enter_Fsetxattr(fd int, attr string, data []byte, flag int) (err error) { + funcref := get_FsetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FSETXATTR_A<<4, "") == 0 { + *funcref = impl_Fsetxattr + } else { + *funcref = error_Fsetxattr + } + return (*funcref)(fd, attr, data, flag) +} + +func error_Fsetxattr(fd int, attr string, data []byte, flag int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := syscall_syscall(SYS___ACCEPT_A, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___ACCEPT_A<<4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func impl_accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___ACCEPT4_A<<4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_accept4Addr() *(func(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)) + +var accept4 = enter_accept4 + +func enter_accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + funcref := get_accept4Addr() + if funcptrtest(GetZosLibVec()+SYS___ACCEPT4_A<<4, "") == 0 { + *funcref = impl_accept4 + } else { + *funcref = error_accept4 + } + return (*funcref)(s, rsa, addrlen, flags) +} + +func error_accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + fd = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(SYS___BIND_A, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___BIND_A<<4, uintptr(s), uintptr(addr), uintptr(addrlen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -78,9 +291,11 @@ func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(SYS___CONNECT_A, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CONNECT_A<<4, uintptr(s), uintptr(addr), uintptr(addrlen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -88,10 +303,10 @@ func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(n int, list *_Gid_t) (nn int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETGROUPS<<4, uintptr(n), 
uintptr(unsafe.Pointer(list))) nn = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -99,9 +314,9 @@ func getgroups(n int, list *_Gid_t) (nn int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(n int, list *_Gid_t) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETGROUPS<<4, uintptr(n), uintptr(unsafe.Pointer(list))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -109,9 +324,11 @@ func setgroups(n int, list *_Gid_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := syscall_syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETSOCKOPT<<4, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -119,9 +336,11 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := syscall_syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETSOCKOPT<<4, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -129,10 +348,10 @@ func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SOCKET<<4, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -140,9 +359,9 @@ func socket(domain int, typ int, proto int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := syscall_rawsyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SOCKETPAIR<<4, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -150,9 +369,9 @@ func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawsyscall(SYS___GETPEERNAME_A, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := 
CallLeFuncWithErr(GetZosLibVec()+SYS___GETPEERNAME_A<<4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -160,15 +379,57 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawsyscall(SYS___GETSOCKNAME_A, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETSOCKNAME_A<<4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func impl_Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___REMOVEXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_RemovexattrAddr() *(func(path string, attr string) (err error)) + +var Removexattr = enter_Removexattr + +func enter_Removexattr(path string, attr string) (err error) { + funcref := get_RemovexattrAddr() + if funcptrtest(GetZosLibVec()+SYS___REMOVEXATTR_A<<4, "") == 0 { + *funcref = impl_Removexattr + } else { + *funcref = error_Removexattr + } + return (*funcref)(path, attr) +} + +func error_Removexattr(path string, attr string) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -176,10 +437,12 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(SYS___RECVFROM_A, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RECVFROM_A<<4, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -193,9 +456,11 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall6(SYS___SENDTO_A, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SENDTO_A<<4, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -203,10 +468,12 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err 
error) { - r0, _, e1 := syscall_syscall(SYS___RECVMSG_A, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RECVMSG_A<<4, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -214,10 +481,12 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(SYS___SENDMSG_A, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SENDMSG_A<<4, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -225,10 +494,12 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := syscall_syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MMAP<<4, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + runtime.ExitSyscall() ret = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -236,9 +507,11 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := syscall_syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MUNMAP<<4, uintptr(addr), uintptr(length)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -246,9 +519,11 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req int, arg uintptr) (err error) { - _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_IOCTL<<4, uintptr(fd), uintptr(req), uintptr(arg)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -256,9 +531,62 @@ func ioctl(fd int, req int, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { - _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_IOCTL<<4, uintptr(fd), uintptr(req), uintptr(arg)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmat(id int, addr uintptr, flag int) (ret uintptr, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMAT<<4, 
uintptr(id), uintptr(addr), uintptr(flag)) + runtime.ExitSyscall() + ret = uintptr(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMCTL64<<4, uintptr(id), uintptr(cmd), uintptr(unsafe.Pointer(buf))) + runtime.ExitSyscall() + result = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmdt(addr uintptr) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMDT<<4, uintptr(addr)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmget(key int, size int, flag int) (id int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMGET<<4, uintptr(key), uintptr(size), uintptr(flag)) + runtime.ExitSyscall() + id = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -271,9 +599,11 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___ACCESS_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___ACCESS_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -286,9 +616,11 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHDIR_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHDIR_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -301,9 +633,11 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHOWN_A, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHOWN_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -316,9 +650,11 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHMOD_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHMOD_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -331,10 +667,12 @@ func Creat(path string, mode uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(SYS___CREAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CREAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -342,10 +680,12 @@ func Creat(path string, mode uint32) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
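The hunks above all apply the same mechanical rewrite: instead of trampolining through syscall_syscall and a syscall number, each wrapper now dispatches through the z/OS Language Environment function vector, and blocking calls are bracketed with runtime.EnterSyscall/ExitSyscall. A condensed sketch of the new shape, written against the package-internal helpers visible in this diff (Foo and SYS___FOO_A are placeholders for any wrapped function, and the 16-byte-slot reading of the <<4 scaling is an assumption, not something the diff states):

func Foo(path string) (err error) {
	var _p0 *byte
	_p0, err = BytePtrFromString(path) // NUL-terminated copy; rejects embedded NULs
	if err != nil {
		return
	}
	runtime.EnterSyscall() // the call may block; let the scheduler release the P
	// SYS___FOO_A<<4 turns the ordinal into a byte offset into the LE vector,
	// which suggests 16-byte slots; the _A entry points are the ASCII variants.
	r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FOO_A<<4, uintptr(unsafe.Pointer(_p0)))
	runtime.ExitSyscall()
	if int64(r0) == -1 { // failure is signalled by the return value,
		err = errnoErr2(e1, e2) // and decoded from errno plus the errno2 reason code
	}
	return
}

Two side effects are visible throughout: the zero-padding arguments of the old three- and six-argument trampolines disappear, and every wrapper gains a real return value to test instead of inferring failure from a nonzero errno word alone.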
func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := syscall_syscall(SYS_DUP, uintptr(oldfd), 0, 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DUP<<4, uintptr(oldfd)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -353,42 +693,359 @@ func Dup(oldfd int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(oldfd int, newfd int) (err error) { - _, _, e1 := syscall_syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DUP2<<4, uintptr(oldfd), uintptr(newfd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Errno2() (er2 int) { - uer2, _, _ := syscall_syscall(SYS___ERRNO2, 0, 0, 0) - er2 = int(uer2) +func impl_Dup3(oldfd int, newfd int, flags int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DUP3<<4, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_Dup3Addr() *(func(oldfd int, newfd int, flags int) (err error)) + +var Dup3 = enter_Dup3 + +func enter_Dup3(oldfd int, newfd int, flags int) (err error) { + funcref := get_Dup3Addr() + if funcptrtest(GetZosLibVec()+SYS_DUP3<<4, "") == 0 { + *funcref = impl_Dup3 + } else { + *funcref = error_Dup3 + } + return (*funcref)(oldfd, newfd, flags) +} + +func error_Dup3(oldfd int, newfd int, flags int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Err2ad() (eadd *int) { - ueadd, _, _ := syscall_syscall(SYS___ERR2AD, 0, 0, 0) - eadd = (*int)(unsafe.Pointer(ueadd)) +func impl_Dirfd(dirp uintptr) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DIRFD<<4, uintptr(dirp)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_DirfdAddr() *(func(dirp uintptr) (fd int, err error)) + +var Dirfd = enter_Dirfd + +func enter_Dirfd(dirp uintptr) (fd int, err error) { + funcref := get_DirfdAddr() + if funcptrtest(GetZosLibVec()+SYS_DIRFD<<4, "") == 0 { + *funcref = impl_Dirfd + } else { + *funcref = error_Dirfd + } + return (*funcref)(dirp) +} + +func error_Dirfd(dirp uintptr) (fd int, err error) { + fd = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_EpollCreate(size int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_CREATE<<4, uintptr(size)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_EpollCreateAddr() *(func(size int) (fd int, err error)) + +var EpollCreate = enter_EpollCreate + +func enter_EpollCreate(size int) (fd int, err error) { + funcref := get_EpollCreateAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_CREATE<<4, "") == 0 { + *funcref = impl_EpollCreate + } else { + *funcref = error_EpollCreate + } + return (*funcref)(size) +} + +func error_EpollCreate(size int) (fd int, err error) { + fd = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
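Dup3, Dirfd and EpollCreate above (and most of the new wrappers below) add a second layer for functions that may be missing from older z/OS levels: the exported name is a function variable, initially bound to a probe that tests the vector slot once and rebinds the variable. A sketch of that dispatch, again with a hypothetical Foo; funcptrtest and the assembly-backed //go:nosplit get_FooAddr accessor are assumed to behave as the generated file uses them:

var Foo = enter_Foo // rebound on first call

func enter_Foo(fd int) (err error) {
	funcref := get_FooAddr() // address of the Foo variable itself
	if funcptrtest(GetZosLibVec()+SYS_FOO<<4, "") == 0 {
		*funcref = impl_Foo // symbol present: call straight through the LE vector
	} else {
		*funcref = error_Foo // symbol absent: fail fast from now on
	}
	return (*funcref)(fd)
}

func error_Foo(fd int) (err error) {
	err = ENOSYS
	return
}

impl_Foo is an ordinary CallLeFuncWithErr wrapper as in the previous sketch, so after the first call every invocation skips the probe entirely.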
+ +func impl_EpollCreate1(flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_CREATE1<<4, uintptr(flags)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_EpollCreate1Addr() *(func(flags int) (fd int, err error)) + +var EpollCreate1 = enter_EpollCreate1 + +func enter_EpollCreate1(flags int) (fd int, err error) { + funcref := get_EpollCreate1Addr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_CREATE1<<4, "") == 0 { + *funcref = impl_EpollCreate1 + } else { + *funcref = error_EpollCreate1 + } + return (*funcref)(flags) +} + +func error_EpollCreate1(flags int) (fd int, err error) { + fd = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_CTL<<4, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_EpollCtlAddr() *(func(epfd int, op int, fd int, event *EpollEvent) (err error)) + +var EpollCtl = enter_EpollCtl + +func enter_EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + funcref := get_EpollCtlAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_CTL<<4, "") == 0 { + *funcref = impl_EpollCtl + } else { + *funcref = error_EpollCtl + } + return (*funcref)(epfd, op, fd, event) +} + +func error_EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_PWAIT<<4, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), uintptr(unsafe.Pointer(sigmask))) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_EpollPwaitAddr() *(func(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error)) + +var EpollPwait = enter_EpollPwait + +func enter_EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) { + funcref := get_EpollPwaitAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_PWAIT<<4, "") == 0 { + *funcref = impl_EpollPwait + } else { + *funcref = error_EpollPwait + } + return (*funcref)(epfd, events, msec, sigmask) +} + +func error_EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) { + n = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_WAIT<<4, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_EpollWaitAddr() *(func(epfd int, events 
[]EpollEvent, msec int) (n int, err error)) + +var EpollWait = enter_EpollWait + +func enter_EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + funcref := get_EpollWaitAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_WAIT<<4, "") == 0 { + *funcref = impl_EpollWait + } else { + *funcref = error_EpollWait + } + return (*funcref)(epfd, events, msec) +} + +func error_EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + n = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Errno2() (er2 int) { + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS___ERRNO2<<4) + runtime.ExitSyscall() + er2 = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Eventfd(initval uint, flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EVENTFD<<4, uintptr(initval), uintptr(flags)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_EventfdAddr() *(func(initval uint, flags int) (fd int, err error)) + +var Eventfd = enter_Eventfd + +func enter_Eventfd(initval uint, flags int) (fd int, err error) { + funcref := get_EventfdAddr() + if funcptrtest(GetZosLibVec()+SYS_EVENTFD<<4, "") == 0 { + *funcref = impl_Eventfd + } else { + *funcref = error_Eventfd + } + return (*funcref)(initval, flags) +} + +func error_Eventfd(initval uint, flags int) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - syscall_syscall(SYS_EXIT, uintptr(code), 0, 0) + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec()+SYS_EXIT<<4, uintptr(code)) + runtime.ExitSyscall() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FACCESSAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FaccessatAddr() *(func(dirfd int, path string, mode uint32, flags int) (err error)) + +var Faccessat = enter_Faccessat + +func enter_Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + funcref := get_FaccessatAddr() + if funcptrtest(GetZosLibVec()+SYS___FACCESSAT_A<<4, "") == 0 { + *funcref = impl_Faccessat + } else { + *funcref = error_Faccessat + } + return (*funcref)(dirfd, path, mode, flags) +} + +func error_Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := syscall_syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCHDIR<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -396,50 +1053,333 @@ func Fchdir(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := syscall_syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - 
err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCHMOD<<4, uintptr(fd), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func impl_Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FCHMODAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FchmodatAddr() *(func(dirfd int, path string, mode uint32, flags int) (err error)) + +var Fchmodat = enter_Fchmodat + +func enter_Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + funcref := get_FchmodatAddr() + if funcptrtest(GetZosLibVec()+SYS___FCHMODAT_A<<4, "") == 0 { + *funcref = impl_Fchmodat + } else { + *funcref = error_Fchmodat + } + return (*funcref)(dirfd, path, mode, flags) +} + +func error_Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := syscall_syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCHOWN<<4, uintptr(fd), uintptr(uid), uintptr(gid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func impl_Fchownat(fd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FCHOWNAT_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FchownatAddr() *(func(fd int, path string, uid int, gid int, flags int) (err error)) + +var Fchownat = enter_Fchownat + +func enter_Fchownat(fd int, path string, uid int, gid int, flags int) (err error) { + funcref := get_FchownatAddr() + if funcptrtest(GetZosLibVec()+SYS___FCHOWNAT_A<<4, "") == 0 { + *funcref = impl_Fchownat + } else { + *funcref = error_Fchownat + } + return (*funcref)(fd, path, uid, gid, flags) +} + +func error_Fchownat(fd int, path string, uid int, gid int, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) { - r0, _, e1 := syscall_syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), uintptr(arg)) + runtime.ExitSyscall() retval = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func impl_Fdatasync(fd int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FDATASYNC<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = 
errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FdatasyncAddr() *(func(fd int) (err error)) + +var Fdatasync = enter_Fdatasync + +func enter_Fdatasync(fd int) (err error) { + funcref := get_FdatasyncAddr() + if funcptrtest(GetZosLibVec()+SYS_FDATASYNC<<4, "") == 0 { + *funcref = impl_Fdatasync + } else { + *funcref = error_Fdatasync + } + return (*funcref)(fd) +} + +func error_Fdatasync(fd int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func fstat(fd int, stat *Stat_LE_t) (err error) { - _, _, e1 := syscall_syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSTAT<<4, uintptr(fd), uintptr(unsafe.Pointer(stat))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func impl_fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FSTATAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_fstatatAddr() *(func(dirfd int, path string, stat *Stat_LE_t, flags int) (err error)) + +var fstatat = enter_fstatat + +func enter_fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) { + funcref := get_fstatatAddr() + if funcptrtest(GetZosLibVec()+SYS___FSTATAT_A<<4, "") == 0 { + *funcref = impl_fstatat + } else { + *funcref = error_fstatat + } + return (*funcref)(dirfd, path, stat, flags) +} + +func error_fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LGETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LgetxattrAddr() *(func(link string, attr string, dest []byte) (sz int, err error)) + +var Lgetxattr = enter_Lgetxattr + +func enter_Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { + funcref := get_LgetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LGETXATTR_A<<4, "") == 0 { + *funcref = impl_Lgetxattr + } else { + *funcref = error_Lgetxattr + } + return (*funcref)(link, attr, dest) +} + +func error_Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = 
BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LSETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LsetxattrAddr() *(func(path string, attr string, data []byte, flags int) (err error)) + +var Lsetxattr = enter_Lsetxattr + +func enter_Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + funcref := get_LsetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LSETXATTR_A<<4, "") == 0 { + *funcref = impl_Lsetxattr + } else { + *funcref = error_Lsetxattr + } + return (*funcref)(path, attr, data, flags) +} + +func error_Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fstatfs(fd int, buf *Statfs_t) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSTATFS<<4, uintptr(fd), uintptr(unsafe.Pointer(buf))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FstatfsAddr() *(func(fd int, buf *Statfs_t) (err error)) + +var Fstatfs = enter_Fstatfs + +func enter_Fstatfs(fd int, buf *Statfs_t) (err error) { + funcref := get_FstatfsAddr() + if funcptrtest(GetZosLibVec()+SYS_FSTATFS<<4, "") == 0 { + *funcref = impl_Fstatfs + } else { + *funcref = error_Fstatfs + } + return (*funcref)(fd, buf) +} + +func error_Fstatfs(fd int, buf *Statfs_t) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatvfs(fd int, stat *Statvfs_t) (err error) { - _, _, e1 := syscall_syscall(SYS_FSTATVFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSTATVFS<<4, uintptr(fd), uintptr(unsafe.Pointer(stat))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -447,28 +1387,461 @@ func Fstatvfs(fd int, stat *Statvfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := syscall_syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSYNC<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func impl_Futimes(fd int, tv []Timeval) (err error) { + var _p0 unsafe.Pointer + if len(tv) > 0 { + _p0 = unsafe.Pointer(&tv[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FUTIMES<<4, uintptr(fd), uintptr(_p0), uintptr(len(tv))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FutimesAddr() *(func(fd int, tv []Timeval) (err error)) + +var Futimes = enter_Futimes + +func enter_Futimes(fd int, tv []Timeval) (err error) { + funcref := get_FutimesAddr() + if funcptrtest(GetZosLibVec()+SYS_FUTIMES<<4, "") == 0 { + *funcref = impl_Futimes + } else { + *funcref = error_Futimes 
+ } + return (*funcref)(fd, tv) +} + +func error_Futimes(fd int, tv []Timeval) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Futimesat(dirfd int, path string, tv []Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(tv) > 0 { + _p1 = unsafe.Pointer(&tv[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FUTIMESAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(tv))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FutimesatAddr() *(func(dirfd int, path string, tv []Timeval) (err error)) + +var Futimesat = enter_Futimesat + +func enter_Futimesat(dirfd int, path string, tv []Timeval) (err error) { + funcref := get_FutimesatAddr() + if funcptrtest(GetZosLibVec()+SYS___FUTIMESAT_A<<4, "") == 0 { + *funcref = impl_Futimesat + } else { + *funcref = error_Futimesat + } + return (*funcref)(dirfd, path, tv) +} + +func error_Futimesat(dirfd int, path string, tv []Timeval) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := syscall_syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FTRUNCATE<<4, uintptr(fd), uintptr(length)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getpagesize() (pgsize int) { - r0, _, _ := syscall_syscall(SYS_GETPAGESIZE, 0, 0, 0) - pgsize = int(r0) +func impl_Getrandom(buf []byte, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETRANDOM<<4, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_GetrandomAddr() *(func(buf []byte, flags int) (n int, err error)) + +var Getrandom = enter_Getrandom + +func enter_Getrandom(buf []byte, flags int) (n int, err error) { + funcref := get_GetrandomAddr() + if funcptrtest(GetZosLibVec()+SYS_GETRANDOM<<4, "") == 0 { + *funcref = impl_Getrandom + } else { + *funcref = error_Getrandom + } + return (*funcref)(buf, flags) +} + +func error_Getrandom(buf []byte, flags int) (n int, err error) { + n = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_InotifyInit() (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec() + SYS_INOTIFY_INIT<<4) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyInitAddr() *(func() (fd int, err error)) + +var InotifyInit = enter_InotifyInit + +func enter_InotifyInit() (fd int, err error) { + funcref := get_InotifyInitAddr() + if funcptrtest(GetZosLibVec()+SYS_INOTIFY_INIT<<4, "") == 0 { + *funcref = impl_InotifyInit + } else { + *funcref = error_InotifyInit + } + return (*funcref)() +} + +func error_InotifyInit() (fd int, err error) { + fd = -1 + err = 
ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_InotifyInit1(flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_INOTIFY_INIT1<<4, uintptr(flags)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyInit1Addr() *(func(flags int) (fd int, err error)) + +var InotifyInit1 = enter_InotifyInit1 + +func enter_InotifyInit1(flags int) (fd int, err error) { + funcref := get_InotifyInit1Addr() + if funcptrtest(GetZosLibVec()+SYS_INOTIFY_INIT1<<4, "") == 0 { + *funcref = impl_InotifyInit1 + } else { + *funcref = error_InotifyInit1 + } + return (*funcref)(flags) +} + +func error_InotifyInit1(flags int) (fd int, err error) { + fd = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___INOTIFY_ADD_WATCH_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + runtime.ExitSyscall() + watchdesc = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyAddWatchAddr() *(func(fd int, pathname string, mask uint32) (watchdesc int, err error)) + +var InotifyAddWatch = enter_InotifyAddWatch + +func enter_InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + funcref := get_InotifyAddWatchAddr() + if funcptrtest(GetZosLibVec()+SYS___INOTIFY_ADD_WATCH_A<<4, "") == 0 { + *funcref = impl_InotifyAddWatch + } else { + *funcref = error_InotifyAddWatch + } + return (*funcref)(fd, pathname, mask) +} + +func error_InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + watchdesc = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_INOTIFY_RM_WATCH<<4, uintptr(fd), uintptr(watchdesc)) + runtime.ExitSyscall() + success = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyRmWatchAddr() *(func(fd int, watchdesc uint32) (success int, err error)) + +var InotifyRmWatch = enter_InotifyRmWatch + +func enter_InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + funcref := get_InotifyRmWatchAddr() + if funcptrtest(GetZosLibVec()+SYS_INOTIFY_RM_WATCH<<4, "") == 0 { + *funcref = impl_InotifyRmWatch + } else { + *funcref = error_InotifyRmWatch + } + return (*funcref)(fd, watchdesc) +} + +func error_InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + success = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LISTXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if 
int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_ListxattrAddr() *(func(path string, dest []byte) (sz int, err error)) + +var Listxattr = enter_Listxattr + +func enter_Listxattr(path string, dest []byte) (sz int, err error) { + funcref := get_ListxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LISTXATTR_A<<4, "") == 0 { + *funcref = impl_Listxattr + } else { + *funcref = error_Listxattr + } + return (*funcref)(path, dest) +} + +func error_Listxattr(path string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LLISTXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LlistxattrAddr() *(func(path string, dest []byte) (sz int, err error)) + +var Llistxattr = enter_Llistxattr + +func enter_Llistxattr(path string, dest []byte) (sz int, err error) { + funcref := get_LlistxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LLISTXATTR_A<<4, "") == 0 { + *funcref = impl_Llistxattr + } else { + *funcref = error_Llistxattr + } + return (*funcref)(path, dest) +} + +func error_Llistxattr(path string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LREMOVEXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LremovexattrAddr() *(func(path string, attr string) (err error)) + +var Lremovexattr = enter_Lremovexattr + +func enter_Lremovexattr(path string, attr string) (err error) { + funcref := get_LremovexattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LREMOVEXATTR_A<<4, "") == 0 { + *funcref = impl_Lremovexattr + } else { + *funcref = error_Lremovexattr + } + return (*funcref)(path, attr) +} + +func error_Lremovexattr(path string, attr string) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lutimes(path string, tv []Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(tv) > 0 { + _p1 = unsafe.Pointer(&tv[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LUTIMES_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(tv))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LutimesAddr() *(func(path string, tv []Timeval) (err error)) + +var Lutimes = enter_Lutimes + +func enter_Lutimes(path string, tv []Timeval) (err error) { + funcref := 
get_LutimesAddr() + if funcptrtest(GetZosLibVec()+SYS___LUTIMES_A<<4, "") == 0 { + *funcref = impl_Lutimes + } else { + *funcref = error_Lutimes + } + return (*funcref)(path, tv) +} + +func error_Lutimes(path string, tv []Timeval) (err error) { + err = ENOSYS return } @@ -481,9 +1854,11 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MPROTECT<<4, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -497,9 +1872,23 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MSYNC<<4, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Console2(cmsg *ConsMsg2, modstr *byte, concmd *uint32) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CONSOLE2<<4, uintptr(unsafe.Pointer(cmsg)), uintptr(unsafe.Pointer(modstr)), uintptr(unsafe.Pointer(concmd))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -513,21 +1902,106 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(SYS_POLL, uintptr(_p0), uintptr(len(fds)), uintptr(timeout)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_POLL<<4, uintptr(_p0), uintptr(len(fds)), uintptr(timeout)) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___READDIR_R_A<<4, uintptr(dirp), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___STATFS_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_StatfsAddr() *(func(path string, buf *Statfs_t) (err error)) + +var Statfs = enter_Statfs + +func enter_Statfs(path string, buf *Statfs_t) (err error) { + funcref := get_StatfsAddr() + if funcptrtest(GetZosLibVec()+SYS___STATFS_A<<4, "") == 0 { + *funcref = impl_Statfs + } else { + *funcref = error_Statfs + } + return (*funcref)(path, buf) +} + +func error_Statfs(path string, buf *Statfs_t) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Syncfs(fd int) (err error) { + runtime.EnterSyscall() + r0, 
e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SYNCFS<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_SyncfsAddr() *(func(fd int) (err error)) + +var Syncfs = enter_Syncfs + +func enter_Syncfs(fd int) (err error) { + funcref := get_SyncfsAddr() + if funcptrtest(GetZosLibVec()+SYS_SYNCFS<<4, "") == 0 { + *funcref = impl_Syncfs + } else { + *funcref = error_Syncfs + } + return (*funcref)(fd) +} + +func error_Syncfs(fd int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := syscall_syscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TIMES<<4, uintptr(unsafe.Pointer(tms))) + runtime.ExitSyscall() ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -535,10 +2009,12 @@ func Times(tms *Tms) (ticks uintptr, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func W_Getmntent(buff *byte, size int) (lastsys int, err error) { - r0, _, e1 := syscall_syscall(SYS_W_GETMNTENT, uintptr(unsafe.Pointer(buff)), uintptr(size), 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_W_GETMNTENT<<4, uintptr(unsafe.Pointer(buff)), uintptr(size)) + runtime.ExitSyscall() lastsys = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -546,10 +2022,12 @@ func W_Getmntent(buff *byte, size int) (lastsys int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func W_Getmntent_A(buff *byte, size int) (lastsys int, err error) { - r0, _, e1 := syscall_syscall(SYS___W_GETMNTENT_A, uintptr(unsafe.Pointer(buff)), uintptr(size), 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___W_GETMNTENT_A<<4, uintptr(unsafe.Pointer(buff)), uintptr(size)) + runtime.ExitSyscall() lastsys = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -577,24 +2055,28 @@ func mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen if err != nil { return } - _, _, e1 := syscall_syscall6(SYS___MOUNT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(mtm), uintptr(parmlen), uintptr(unsafe.Pointer(_p3))) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MOUNT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(mtm), uintptr(parmlen), uintptr(unsafe.Pointer(_p3))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func unmount(filesystem string, mtm int) (err error) { +func unmount_LE(filesystem string, mtm int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(filesystem) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UMOUNT_A, uintptr(unsafe.Pointer(_p0)), uintptr(mtm), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UMOUNT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mtm)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -607,9 +2089,24 @@ func Chroot(path string) (err error) { if 
err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHROOT_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHROOT_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SELECT<<4, uintptr(nmsgsfds), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout))) + runtime.ExitSyscall() + ret = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -617,15 +2114,47 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Uname(buf *Utsname) (err error) { - _, _, e1 := syscall_rawsyscall(SYS___UNAME_A, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_____OSNAME_A<<4, uintptr(unsafe.Pointer(buf))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func impl_Unshare(flags int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_UNSHARE<<4, uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_UnshareAddr() *(func(flags int) (err error)) + +var Unshare = enter_Unshare + +func enter_Unshare(flags int) (err error) { + funcref := get_UnshareAddr() + if funcptrtest(GetZosLibVec()+SYS_UNSHARE<<4, "") == 0 { + *funcref = impl_Unshare + } else { + *funcref = error_Unshare + } + return (*funcref)(flags) +} + +func error_Unshare(flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gethostname(buf []byte) (err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -633,33 +2162,19 @@ func Gethostname(buf []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(SYS___GETHOSTNAME_A, uintptr(_p0), uintptr(len(buf)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETHOSTNAME_A<<4, uintptr(_p0), uintptr(len(buf))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getegid() (egid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETEGID, 0, 0, 0) - egid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Geteuid() (uid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETEUID, 0, 0, 0) - uid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getgid() (gid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETGID, 0, 0, 0) + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETGID<<4) gid = int(r0) return } @@ -667,7 +2182,7 @@ func Getgid() (gid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETPID, 0, 0, 0) + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETPID<<4) pid = int(r0) return } @@ -675,10 +2190,10 @@ func Getpid() (pid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
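Note the split in the getters here: wrappers that return immediately (Getgid and Getpid above, plus Getpgid, Getppid, Getrlimit, getrusage, Getsid, Getuid, Kill and Uname nearby) call CallLeFuncWithErr without the EnterSyscall/ExitSyscall bracket, mirroring the syscall_rawsyscall path they replace, while Getegid and Geteuid reappear further down with the full bracket even though they were raw calls before. A minimal sketch of the raw form, with a hypothetical Getfoo standing in for any of these getters:

func Getfoo() (foo int) {
	// No runtime bracket: the call cannot block, so there is no point
	// handing the P to another goroutine while it runs.
	r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETFOO<<4)
	foo = int(r0)
	return
}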
func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_GETPGID, uintptr(pid), 0, 0) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETPGID<<4, uintptr(pid)) pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -686,7 +2201,7 @@ func Getpgid(pid int) (pgid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (pid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETPPID, 0, 0, 0) + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETPPID<<4) pid = int(r0) return } @@ -694,10 +2209,12 @@ func Getppid() (pid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := syscall_syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETPRIORITY<<4, uintptr(which), uintptr(who)) + runtime.ExitSyscall() prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -705,9 +2222,9 @@ func Getpriority(which int, who int) (prio int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETRLIMIT<<4, uintptr(resource), uintptr(unsafe.Pointer(rlim))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -715,20 +2232,40 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getrusage(who int, rusage *rusage_zos) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETRUSAGE<<4, uintptr(who), uintptr(unsafe.Pointer(rusage))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getegid() (egid int) { + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETEGID<<4) + runtime.ExitSyscall() + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETEUID<<4) + runtime.ExitSyscall() + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getsid(pid int) (sid int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_GETSID, uintptr(pid), 0, 0) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETSID<<4, uintptr(pid)) sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -736,7 +2273,7 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETUID, 0, 0, 0) + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETUID<<4) uid = int(r0) return } @@ -744,9 +2281,9 @@ func Getuid() (uid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kill(pid int, sig Signal) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_KILL, uintptr(pid), 
uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_KILL<<4, uintptr(pid), uintptr(sig)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -759,9 +2296,11 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___LCHOWN_A, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LCHOWN_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -779,19 +2318,65 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___LINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LINK_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func impl_Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldPath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newPath) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LINKAT_A<<4, uintptr(oldDirFd), uintptr(unsafe.Pointer(_p0)), uintptr(newDirFd), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LinkatAddr() *(func(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error)) + +var Linkat = enter_Linkat + +func enter_Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) { + funcref := get_LinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___LINKAT_A<<4, "") == 0 { + *funcref = impl_Linkat + } else { + *funcref = error_Linkat + } + return (*funcref)(oldDirFd, oldPath, newDirFd, newPath, flags) +} + +func error_Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Listen(s int, n int) (err error) { - _, _, e1 := syscall_syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_LISTEN<<4, uintptr(s), uintptr(n)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -804,9 +2389,11 @@ func lstat(path string, stat *Stat_LE_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___LSTAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LSTAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -819,24 +2406,65 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___MKDIR_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) + 
runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKDIR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func impl_Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKDIRAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MkdiratAddr() *(func(dirfd int, path string, mode uint32) (err error)) + +var Mkdirat = enter_Mkdirat + +func enter_Mkdirat(dirfd int, path string, mode uint32) (err error) { + funcref := get_MkdiratAddr() + if funcptrtest(GetZosLibVec()+SYS___MKDIRAT_A<<4, "") == 0 { + *funcref = impl_Mkdirat + } else { + *funcref = error_Mkdirat + } + return (*funcref)(dirfd, path, mode) +} + +func error_Mkdirat(dirfd int, path string, mode uint32) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkfifo(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___MKFIFO_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKFIFO_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -849,15 +2477,96 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___MKNOD_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKNOD_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func impl_Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKNODAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MknodatAddr() *(func(dirfd int, path string, mode uint32, dev int) (err error)) + +var Mknodat = enter_Mknodat + +func enter_Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + funcref := get_MknodatAddr() + if funcptrtest(GetZosLibVec()+SYS___MKNODAT_A<<4, "") == 0 { + *funcref = impl_Mknodat + } else { + *funcref = error_Mknodat + } + return (*funcref)(dirfd, path, mode, dev) +} + +func error_Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_PivotRoot(newroot string, oldroot string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(oldroot) + if err != nil { + return + } + 
runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___PIVOT_ROOT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_PivotRootAddr() *(func(newroot string, oldroot string) (err error)) + +var PivotRoot = enter_PivotRoot + +func enter_PivotRoot(newroot string, oldroot string) (err error) { + funcref := get_PivotRootAddr() + if funcptrtest(GetZosLibVec()+SYS___PIVOT_ROOT_A<<4, "") == 0 { + *funcref = impl_PivotRoot + } else { + *funcref = error_PivotRoot + } + return (*funcref)(newroot, oldroot) +} + +func error_PivotRoot(newroot string, oldroot string) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -865,10 +2574,12 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PREAD<<4, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset)) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -882,36 +2593,78 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PWRITE<<4, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset)) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Readlink(path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return +func impl_Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___PRCTL_A<<4, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) + return +} + +//go:nosplit +func get_PrctlAddr() *(func(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error)) + +var Prctl = enter_Prctl + +func enter_Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + funcref := get_PrctlAddr() + if funcptrtest(GetZosLibVec()+SYS___PRCTL_A<<4, "") == 0 { + *funcref = impl_Prctl } else { - _p1 = unsafe.Pointer(&_zero) + *funcref = error_Prctl } - r0, _, e1 := syscall_syscall(SYS___READLINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + return (*funcref)(option, arg2, arg3, arg4, arg5) +} + +func error_Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + r0, e2, e1 
:= CallLeFuncWithErr(GetZosLibVec()+SYS_PRLIMIT<<4, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } +//go:nosplit +func get_PrlimitAddr() *(func(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error)) + +var Prlimit = enter_Prlimit + +func enter_Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + funcref := get_PrlimitAddr() + if funcptrtest(GetZosLibVec()+SYS_PRLIMIT<<4, "") == 0 { + *funcref = impl_Prlimit + } else { + *funcref = error_Prlimit + } + return (*funcref)(pid, resource, newlimit, old) +} + +func error_Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + err = ENOSYS + return +} + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -925,24 +2678,112 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___RENAME_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RENAME_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func impl_Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RENAMEAT_A<<4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_RenameatAddr() *(func(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)) + +var Renameat = enter_Renameat + +func enter_Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + funcref := get_RenameatAddr() + if funcptrtest(GetZosLibVec()+SYS___RENAMEAT_A<<4, "") == 0 { + *funcref = impl_Renameat + } else { + *funcref = error_Renameat + } + return (*funcref)(olddirfd, oldpath, newdirfd, newpath) +} + +func error_Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RENAMEAT2_A<<4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_Renameat2Addr() *(func(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error)) + +var Renameat2 = enter_Renameat2 + +func enter_Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + funcref := get_Renameat2Addr() + if 
funcptrtest(GetZosLibVec()+SYS___RENAMEAT2_A<<4, "") == 0 { + *funcref = impl_Renameat2 + } else { + *funcref = error_Renameat2 + } + return (*funcref)(olddirfd, oldpath, newdirfd, newpath, flags) +} + +func error_Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Rmdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___RMDIR_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RMDIR_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -950,20 +2791,118 @@ func Rmdir(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (off int64, err error) { - r0, _, e1 := syscall_syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_LSEEK<<4, uintptr(fd), uintptr(offset), uintptr(whence)) + runtime.ExitSyscall() off = int64(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Setegid(egid int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETEGID<<4, uintptr(egid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETEUID<<4, uintptr(euid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Sethostname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SETHOSTNAME_A<<4, uintptr(_p0), uintptr(len(p))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_SethostnameAddr() *(func(p []byte) (err error)) + +var Sethostname = enter_Sethostname + +func enter_Sethostname(p []byte) (err error) { + funcref := get_SethostnameAddr() + if funcptrtest(GetZosLibVec()+SYS___SETHOSTNAME_A<<4, "") == 0 { + *funcref = impl_Sethostname + } else { + *funcref = error_Sethostname + } + return (*funcref)(p) +} + +func error_Sethostname(p []byte) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Setns(fd int, nstype int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETNS<<4, uintptr(fd), uintptr(nstype)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_SetnsAddr() *(func(fd int, nstype int) (err error)) + +var Setns = enter_Setns + +func enter_Setns(fd int, nstype int) (err error) { + funcref := get_SetnsAddr() + if funcptrtest(GetZosLibVec()+SYS_SETNS<<4, "") == 0 { + *funcref = impl_Setns + } else { + *funcref = error_Setns + } + return 
(*funcref)(fd, nstype) +} + +func error_Setns(fd int, nstype int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := syscall_syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETPRIORITY<<4, uintptr(which), uintptr(who), uintptr(prio)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -971,9 +2910,9 @@ func Setpriority(which int, who int, prio int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETPGID<<4, uintptr(pid), uintptr(pgid)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -981,9 +2920,9 @@ func Setpgid(pid int, pgid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(resource int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETRLIMIT<<4, uintptr(resource), uintptr(unsafe.Pointer(lim))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -991,9 +2930,9 @@ func Setrlimit(resource int, lim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETREGID<<4, uintptr(rgid), uintptr(egid)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1001,9 +2940,9 @@ func Setregid(rgid int, egid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETREUID<<4, uintptr(ruid), uintptr(euid)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1011,10 +2950,10 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_SETSID, 0, 0, 0) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec() + SYS_SETSID<<4) pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1022,9 +2961,11 @@ func Setsid() (pid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := syscall_syscall(SYS_SETUID, uintptr(uid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETUID<<4, uintptr(uid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1032,9 +2973,11 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(uid int) (err error) { - _, _, e1 := syscall_syscall(SYS_SETGID, uintptr(uid), 0, 0) - if e1 != 0 { - 
err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETGID<<4, uintptr(uid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1042,9 +2985,11 @@ func Setgid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(fd int, how int) (err error) { - _, _, e1 := syscall_syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHUTDOWN<<4, uintptr(fd), uintptr(how)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1057,9 +3002,11 @@ func stat(path string, statLE *Stat_LE_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___STAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(statLE)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___STAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(statLE))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1077,17 +3024,63 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___SYMLINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SYMLINK_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func impl_Symlinkat(oldPath string, dirfd int, newPath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldPath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newPath) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SYMLINKAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(dirfd), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_SymlinkatAddr() *(func(oldPath string, dirfd int, newPath string) (err error)) + +var Symlinkat = enter_Symlinkat + +func enter_Symlinkat(oldPath string, dirfd int, newPath string) (err error) { + funcref := get_SymlinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___SYMLINKAT_A<<4, "") == 0 { + *funcref = impl_Symlinkat + } else { + *funcref = error_Symlinkat + } + return (*funcref)(oldPath, dirfd, newPath) +} + +func error_Symlinkat(oldPath string, dirfd int, newPath string) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() { - syscall_syscall(SYS_SYNC, 0, 0, 0) + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec() + SYS_SYNC<<4) + runtime.ExitSyscall() return } @@ -1099,9 +3092,11 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___TRUNCATE_A, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___TRUNCATE_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(length)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1109,9 +3104,11 @@ func Truncate(path string, length int64) (err error) { 
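Aside on the hunks above and below: they are one mechanical rewrite applied to every z/OS wrapper. syscall_syscall(SYS_X, ...) becomes CallLeFuncWithErr(GetZosLibVec()+SYS_X<<4, ...) bracketed by runtime.EnterSyscall/ExitSyscall, and failure is now decided from r0 == -1 rather than e1 != 0, with both error words (errno in e1, a reason code in e2) handed to errnoErr2. A minimal stand-alone sketch of that shape; enterSyscall, exitSyscall, libVecBase, callLeFunc and this errnoErr2 are stand-ins invented here, since the real helpers live in the x/sys z/OS runtime support and are not shown in this diff:

package main

import "fmt"

// enterSyscall/exitSyscall stand in for runtime.EnterSyscall/ExitSyscall,
// which the z/OS Go toolchain provides but a stock runtime does not export.
func enterSyscall() {}
func exitSyscall()  {}

// libVecBase stands in for GetZosLibVec: the base address of the LE
// callable-service vector.
func libVecBase() uintptr { return 0 }

// callLeFunc stands in for CallLeFuncWithErr and fakes a failing callee:
// r0 == -1 (on a 64-bit platform), errno in e1, reason code in e2.
func callLeFunc(addr uintptr, args ...uintptr) (r0, e2, e1 uintptr) {
	_, _ = addr, args
	return ^uintptr(0), 0x0B0C0000, 9 // errno 9 (EBADF); reason code is made up
}

// errnoErr2 stands in for the real helper, which folds both error words
// into a single Go error.
func errnoErr2(e1, e2 uintptr) error {
	return fmt.Errorf("errno %d, reason 0x%08X", e1, e2)
}

const sysShutdown = 0x2A8 // illustrative slot number; real values come from zsysnum_zos_s390x.go

// Shutdown mirrors the rewritten wrapper shape: bracket the call with
// enter/exit syscall and test r0 == -1 instead of e1 != 0.
func Shutdown(fd, how int) (err error) {
	enterSyscall()
	r0, e2, e1 := callLeFunc(libVecBase()+sysShutdown<<4, uintptr(fd), uintptr(how))
	exitSyscall()
	if int64(r0) == -1 {
		err = errnoErr2(e1, e2)
	}
	return
}

func main() { fmt.Println(Shutdown(3, 2)) }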
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Tcgetattr(fildes int, termptr *Termios) (err error) { - _, _, e1 := syscall_syscall(SYS_TCGETATTR, uintptr(fildes), uintptr(unsafe.Pointer(termptr)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TCGETATTR<<4, uintptr(fildes), uintptr(unsafe.Pointer(termptr))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1119,9 +3116,11 @@ func Tcgetattr(fildes int, termptr *Termios) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Tcsetattr(fildes int, when int, termptr *Termios) (err error) { - _, _, e1 := syscall_syscall(SYS_TCSETATTR, uintptr(fildes), uintptr(when), uintptr(unsafe.Pointer(termptr))) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TCSETATTR<<4, uintptr(fildes), uintptr(when), uintptr(unsafe.Pointer(termptr))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1129,7 +3128,9 @@ func Tcsetattr(fildes int, when int, termptr *Termios) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(mask int) (oldmask int) { - r0, _, _ := syscall_syscall(SYS_UMASK, uintptr(mask), 0, 0) + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec()+SYS_UMASK<<4, uintptr(mask)) + runtime.ExitSyscall() oldmask = int(r0) return } @@ -1142,24 +3143,65 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UNLINK_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UNLINK_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func impl_Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UNLINKAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_UnlinkatAddr() *(func(dirfd int, path string, flags int) (err error)) + +var Unlinkat = enter_Unlinkat + +func enter_Unlinkat(dirfd int, path string, flags int) (err error) { + funcref := get_UnlinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___UNLINKAT_A<<4, "") == 0 { + *funcref = impl_Unlinkat + } else { + *funcref = error_Unlinkat + } + return (*funcref)(dirfd, path, flags) +} + +func error_Unlinkat(dirfd int, path string, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Utime(path string, utim *Utimbuf) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UTIME_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(utim)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UTIME_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(utim))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1172,25 +3214,119 @@ func open(path string, mode int, perm uint32) (fd int, err error) 
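Aside on the impl_/error_/enter_ trios (Unlinkat above; openat and openat2 just below): impl_X makes the real call, error_X permanently returns ENOSYS, and enter_X runs once, probes the LE vector with funcptrtest, and rebinds a package-level function variable so that later calls skip the probe. A toy model of that lazy binding; probe and the openat names here are stand-ins. One plausible reading of the assembly get_XAddr helpers (generated into zsymaddr_zos_s390x.s per the file header) is that enter_X cannot mention the variable directly without tripping Go's initializer-cycle check; this sketch dodges the same cycle with init() instead:

package main

import (
	"errors"
	"fmt"
)

var errNoSys = errors.New("ENOSYS") // stand-in for unix.ENOSYS

// probe stands in for funcptrtest: does the LE runtime export this entry?
func probe(name string) bool { return name == "__openat_a" }

func implOpenat(path string) (int, error)  { return 4, nil }       // the real call
func errorOpenat(path string) (int, error) { return -1, errNoSys } // permanent fallback

// openat starts out bound to the prober; the first call rebinds it to the
// real implementation or to the ENOSYS stub, so later calls skip the probe.
var openat func(path string) (int, error)

// init sidesteps the initializer cycle (openat -> enterOpenat -> openat)
// that `var openat = enterOpenat` would trip; the generated code instead
// reaches the variable through the assembly helper get_openatAddr.
func init() { openat = enterOpenat }

func enterOpenat(path string) (int, error) {
	if probe("__openat_a") {
		openat = implOpenat
	} else {
		openat = errorOpenat
	}
	return openat(path)
}

func main() {
	fmt.Println(openat("/tmp/a")) // first call: probe, rebind, call through
	fmt.Println(openat("/tmp/b")) // later calls go straight to implOpenat
}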
{ if err != nil { return } - r0, _, e1 := syscall_syscall(SYS___OPEN_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___OPEN_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func impl_openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___OPENAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_openatAddr() *(func(dirfd int, path string, flags int, mode uint32) (fd int, err error)) + +var openat = enter_openat + +func enter_openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + funcref := get_openatAddr() + if funcptrtest(GetZosLibVec()+SYS___OPENAT_A<<4, "") == 0 { + *funcref = impl_openat + } else { + *funcref = error_openat + } + return (*funcref)(dirfd, path, flags, mode) +} + +func error_openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + fd = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___OPENAT2_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(open_how)), uintptr(size)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_openat2Addr() *(func(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error)) + +var openat2 = enter_openat2 + +func enter_openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { + funcref := get_openat2Addr() + if funcptrtest(GetZosLibVec()+SYS___OPENAT2_A<<4, "") == 0 { + *funcref = impl_openat2 + } else { + *funcref = error_openat2 + } + return (*funcref)(dirfd, path, open_how, size) +} + +func error_openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { + fd = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func remove(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := syscall_syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_REMOVE<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func waitid(idType int, id int, info *Siginfo, options int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WAITID<<4, uintptr(idType), uintptr(id), uintptr(unsafe.Pointer(info)), uintptr(options)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1198,10 
+3334,12 @@ func remove(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func waitpid(pid int, wstatus *_C_int, options int) (wpid int, err error) { - r0, _, e1 := syscall_syscall(SYS_WAITPID, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WAITPID<<4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options)) + runtime.ExitSyscall() wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1209,9 +3347,9 @@ func waitpid(pid int, wstatus *_C_int, options int) (wpid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func gettimeofday(tv *timeval_zos) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETTIMEOFDAY<<4, uintptr(unsafe.Pointer(tv))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1219,9 +3357,9 @@ func gettimeofday(tv *timeval_zos) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe(p *[2]_C_int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PIPE<<4, uintptr(unsafe.Pointer(p))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1234,20 +3372,87 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UTIMES_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UTIMES_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) { - r0, _, e1 := syscall_syscall6(SYS_SELECT, uintptr(nmsgsfds), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func impl_utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UTIMENSAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(ts)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_utimensatAddr() *(func(dirfd int, path string, ts *[2]Timespec, flags int) (err error)) + +var utimensat = enter_utimensat + +func enter_utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) { + funcref := get_utimensatAddr() + if funcptrtest(GetZosLibVec()+SYS___UTIMENSAT_A<<4, "") == 0 { + *funcref = impl_utimensat + } else { + *funcref = error_utimensat + } + return (*funcref)(dirfd, path, ts, flags) +} + +func error_utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Posix_openpt(oflag int) (fd int, err 
error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_POSIX_OPENPT<<4, uintptr(oflag)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Grantpt(fildes int) (rc int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GRANTPT<<4, uintptr(fildes)) + runtime.ExitSyscall() + rc = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlockpt(fildes int) (rc int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_UNLOCKPT<<4, uintptr(fildes)) + runtime.ExitSyscall() + rc = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 0cc3ce49..53aef5dc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -452,4 +452,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 856d92d6..71d52476 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -374,4 +374,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 8d467094..c7477061 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -416,4 +416,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index edc17324..f96e214f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -319,4 +319,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 445eba20..28425346 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -313,4 +313,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index adba01bc..d0953018 100644 --- 
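Aside on the zsysnum_linux_* hunks: the five added numbers (SYS_STATMOUNT through SYS_LSM_LIST_MODULES) only define constants; whether the running kernel implements them is a separate question. A common probing idiom, sketched here under the assumption that a kernel lacking the call returns ENOSYS while a kernel that has it rejects junk arguments with something else (EINVAL or EFAULT). Linux-only; kernelHas is a name invented for this sketch:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// kernelHas reports whether the kernel recognizes the trap number at all.
// Zero arguments are deliberate: a kernel that implements the call rejects
// them with EFAULT/EINVAL, while one that does not returns ENOSYS.
func kernelHas(trap uintptr) bool {
	_, _, errno := unix.Syscall(trap, 0, 0, 0)
	return errno != unix.ENOSYS
}

func main() {
	fmt.Println("statmount supported:", kernelHas(unix.SYS_STATMOUNT))
}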
a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -436,4 +436,9 @@ const ( SYS_FUTEX_WAKE = 4454 SYS_FUTEX_WAIT = 4455 SYS_FUTEX_REQUEUE = 4456 + SYS_STATMOUNT = 4457 + SYS_LISTMOUNT = 4458 + SYS_LSM_GET_SELF_ATTR = 4459 + SYS_LSM_SET_SELF_ATTR = 4460 + SYS_LSM_LIST_MODULES = 4461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 014c4e9c..295c7f4b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -366,4 +366,9 @@ const ( SYS_FUTEX_WAKE = 5454 SYS_FUTEX_WAIT = 5455 SYS_FUTEX_REQUEUE = 5456 + SYS_STATMOUNT = 5457 + SYS_LISTMOUNT = 5458 + SYS_LSM_GET_SELF_ATTR = 5459 + SYS_LSM_SET_SELF_ATTR = 5460 + SYS_LSM_LIST_MODULES = 5461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index ccc97d74..d1a9eaca 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -366,4 +366,9 @@ const ( SYS_FUTEX_WAKE = 5454 SYS_FUTEX_WAIT = 5455 SYS_FUTEX_REQUEUE = 5456 + SYS_STATMOUNT = 5457 + SYS_LISTMOUNT = 5458 + SYS_LSM_GET_SELF_ATTR = 5459 + SYS_LSM_SET_SELF_ATTR = 5460 + SYS_LSM_LIST_MODULES = 5461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index ec2b64a9..bec157c3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -436,4 +436,9 @@ const ( SYS_FUTEX_WAKE = 4454 SYS_FUTEX_WAIT = 4455 SYS_FUTEX_REQUEUE = 4456 + SYS_STATMOUNT = 4457 + SYS_LISTMOUNT = 4458 + SYS_LSM_GET_SELF_ATTR = 4459 + SYS_LSM_SET_SELF_ATTR = 4460 + SYS_LSM_LIST_MODULES = 4461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 21a839e3..7ee7bdc4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -443,4 +443,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index c11121ec..fad1f25b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -415,4 +415,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 909b631f..7d3e1635 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -415,4 +415,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index e49bed16..0ed53ad9 100644 --- 
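Note the biased values in the MIPS files above: Linux offsets MIPS syscall numbers by ABI, O32 from a base of 4000 and N64 from 5000 (N32, not present in this diff, uses 6000), which is why the same five syscalls land at 4457..4461 on mips/mipsle and 5457..5461 on mips64/mips64le. The arithmetic, spelled out:

package main

import "fmt"

const (
	base    = 457  // asm-generic number for statmount, as on 386/amd64/arm64/riscv64
	o32Bias = 4000 // Linux MIPS O32 syscall base
	n64Bias = 5000 // Linux MIPS N64 syscall base
)

func main() {
	fmt.Println("mips/mipsle SYS_STATMOUNT:    ", base+o32Bias) // 4457, as in the hunk above
	fmt.Println("mips64/mips64le SYS_STATMOUNT:", base+n64Bias) // 5457
}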
a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -320,4 +320,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 66017d2d..2fba04ad 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -381,4 +381,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 47bab18d..621d00d7 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -394,4 +394,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go index b2e30858..5e8c263c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go @@ -1,2669 +1,2852 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// go run mksyscall_zos_s390x.go -o_sysnum zsysnum_zos_s390x.go -o_syscall zsyscall_zos_s390x.go -i_syscall syscall_zos_s390x.go -o_asm zsymaddr_zos_s390x.s +// Code generated by the command above; see README.md. DO NOT EDIT. //go:build zos && s390x package unix -// TODO: auto-generate. 
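Aside: zsysnum_zos_s390x.go switches here from a hand-maintained table (note the removed "// TODO: auto-generate") to output of mksyscall_zos_s390x.go. These SYS_ values are not kernel trap numbers but slot indexes into the LE callable-service vector; the wrappers earlier in the diff compute GetZosLibVec()+SYS_X<<4, which suggests each slot is 16 bytes wide. A toy illustration of that offset arithmetic; the slot width is inferred from the <<4, not stated anywhere in this diff:

package main

import "fmt"

const slotSize = 1 << 4 // 16 bytes per entry, inferred from SYS_X<<4 in the wrappers

// entryOffset is the byte offset of a service's slot from GetZosLibVec().
func entryOffset(sysnum uintptr) uintptr { return sysnum * slotSize }

func main() {
	// SYS_PREAD = 0x7C7 in the table; its slot would sit at offset 0x7C70.
	fmt.Printf("0x%X\n", entryOffset(0x7C7))
}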
- const ( - SYS_ACOSD128 = 0xB80 - SYS_ACOSD32 = 0xB7E - SYS_ACOSD64 = 0xB7F - SYS_ACOSHD128 = 0xB83 - SYS_ACOSHD32 = 0xB81 - SYS_ACOSHD64 = 0xB82 - SYS_AIO_FSYNC = 0xC69 - SYS_ASCTIME = 0x0AE - SYS_ASCTIME64 = 0xCD7 - SYS_ASCTIME64_R = 0xCD8 - SYS_ASIND128 = 0xB86 - SYS_ASIND32 = 0xB84 - SYS_ASIND64 = 0xB85 - SYS_ASINHD128 = 0xB89 - SYS_ASINHD32 = 0xB87 - SYS_ASINHD64 = 0xB88 - SYS_ATAN2D128 = 0xB8F - SYS_ATAN2D32 = 0xB8D - SYS_ATAN2D64 = 0xB8E - SYS_ATAND128 = 0xB8C - SYS_ATAND32 = 0xB8A - SYS_ATAND64 = 0xB8B - SYS_ATANHD128 = 0xB92 - SYS_ATANHD32 = 0xB90 - SYS_ATANHD64 = 0xB91 - SYS_BIND2ADDRSEL = 0xD59 - SYS_C16RTOMB = 0xD40 - SYS_C32RTOMB = 0xD41 - SYS_CBRTD128 = 0xB95 - SYS_CBRTD32 = 0xB93 - SYS_CBRTD64 = 0xB94 - SYS_CEILD128 = 0xB98 - SYS_CEILD32 = 0xB96 - SYS_CEILD64 = 0xB97 - SYS_CLEARENV = 0x0C9 - SYS_CLEARERR_UNLOCKED = 0xCA1 - SYS_CLOCK = 0x0AA - SYS_CLOGL = 0xA00 - SYS_CLRMEMF = 0x0BD - SYS_CONJ = 0xA03 - SYS_CONJF = 0xA06 - SYS_CONJL = 0xA09 - SYS_COPYSIGND128 = 0xB9E - SYS_COPYSIGND32 = 0xB9C - SYS_COPYSIGND64 = 0xB9D - SYS_COSD128 = 0xBA1 - SYS_COSD32 = 0xB9F - SYS_COSD64 = 0xBA0 - SYS_COSHD128 = 0xBA4 - SYS_COSHD32 = 0xBA2 - SYS_COSHD64 = 0xBA3 - SYS_CPOW = 0xA0C - SYS_CPOWF = 0xA0F - SYS_CPOWL = 0xA12 - SYS_CPROJ = 0xA15 - SYS_CPROJF = 0xA18 - SYS_CPROJL = 0xA1B - SYS_CREAL = 0xA1E - SYS_CREALF = 0xA21 - SYS_CREALL = 0xA24 - SYS_CSIN = 0xA27 - SYS_CSINF = 0xA2A - SYS_CSINH = 0xA30 - SYS_CSINHF = 0xA33 - SYS_CSINHL = 0xA36 - SYS_CSINL = 0xA2D - SYS_CSNAP = 0x0C5 - SYS_CSQRT = 0xA39 - SYS_CSQRTF = 0xA3C - SYS_CSQRTL = 0xA3F - SYS_CTAN = 0xA42 - SYS_CTANF = 0xA45 - SYS_CTANH = 0xA4B - SYS_CTANHF = 0xA4E - SYS_CTANHL = 0xA51 - SYS_CTANL = 0xA48 - SYS_CTIME = 0x0AB - SYS_CTIME64 = 0xCD9 - SYS_CTIME64_R = 0xCDA - SYS_CTRACE = 0x0C6 - SYS_DIFFTIME = 0x0A7 - SYS_DIFFTIME64 = 0xCDB - SYS_DLADDR = 0xC82 - SYS_DYNALLOC = 0x0C3 - SYS_DYNFREE = 0x0C2 - SYS_ERFCD128 = 0xBAA - SYS_ERFCD32 = 0xBA8 - SYS_ERFCD64 = 0xBA9 - SYS_ERFD128 = 0xBA7 - SYS_ERFD32 = 0xBA5 - SYS_ERFD64 = 0xBA6 - SYS_EXP2D128 = 0xBB0 - SYS_EXP2D32 = 0xBAE - SYS_EXP2D64 = 0xBAF - SYS_EXPD128 = 0xBAD - SYS_EXPD32 = 0xBAB - SYS_EXPD64 = 0xBAC - SYS_EXPM1D128 = 0xBB3 - SYS_EXPM1D32 = 0xBB1 - SYS_EXPM1D64 = 0xBB2 - SYS_FABSD128 = 0xBB6 - SYS_FABSD32 = 0xBB4 - SYS_FABSD64 = 0xBB5 - SYS_FDELREC_UNLOCKED = 0xCA2 - SYS_FDIMD128 = 0xBB9 - SYS_FDIMD32 = 0xBB7 - SYS_FDIMD64 = 0xBB8 - SYS_FDOPEN_UNLOCKED = 0xCFC - SYS_FECLEAREXCEPT = 0xAEA - SYS_FEGETENV = 0xAEB - SYS_FEGETEXCEPTFLAG = 0xAEC - SYS_FEGETROUND = 0xAED - SYS_FEHOLDEXCEPT = 0xAEE - SYS_FEOF_UNLOCKED = 0xCA3 - SYS_FERAISEEXCEPT = 0xAEF - SYS_FERROR_UNLOCKED = 0xCA4 - SYS_FESETENV = 0xAF0 - SYS_FESETEXCEPTFLAG = 0xAF1 - SYS_FESETROUND = 0xAF2 - SYS_FETCHEP = 0x0BF - SYS_FETESTEXCEPT = 0xAF3 - SYS_FEUPDATEENV = 0xAF4 - SYS_FE_DEC_GETROUND = 0xBBA - SYS_FE_DEC_SETROUND = 0xBBB - SYS_FFLUSH_UNLOCKED = 0xCA5 - SYS_FGETC_UNLOCKED = 0xC80 - SYS_FGETPOS64 = 0xCEE - SYS_FGETPOS64_UNLOCKED = 0xCF4 - SYS_FGETPOS_UNLOCKED = 0xCA6 - SYS_FGETS_UNLOCKED = 0xC7C - SYS_FGETWC_UNLOCKED = 0xCA7 - SYS_FGETWS_UNLOCKED = 0xCA8 - SYS_FILENO_UNLOCKED = 0xCA9 - SYS_FLDATA = 0x0C1 - SYS_FLDATA_UNLOCKED = 0xCAA - SYS_FLOCATE_UNLOCKED = 0xCAB - SYS_FLOORD128 = 0xBBE - SYS_FLOORD32 = 0xBBC - SYS_FLOORD64 = 0xBBD - SYS_FMA = 0xA63 - SYS_FMAD128 = 0xBC1 - SYS_FMAD32 = 0xBBF - SYS_FMAD64 = 0xBC0 - SYS_FMAF = 0xA66 - SYS_FMAL = 0xA69 - SYS_FMAX = 0xA6C - SYS_FMAXD128 = 0xBC4 - SYS_FMAXD32 = 0xBC2 - SYS_FMAXD64 = 0xBC3 - SYS_FMAXF = 0xA6F - SYS_FMAXL = 0xA72 - SYS_FMIN = 0xA75 - SYS_FMIND128 = 0xBC7 - 
SYS_FMIND32 = 0xBC5 - SYS_FMIND64 = 0xBC6 - SYS_FMINF = 0xA78 - SYS_FMINL = 0xA7B - SYS_FMODD128 = 0xBCA - SYS_FMODD32 = 0xBC8 - SYS_FMODD64 = 0xBC9 - SYS_FOPEN64 = 0xD49 - SYS_FOPEN64_UNLOCKED = 0xD4A - SYS_FOPEN_UNLOCKED = 0xCFA - SYS_FPRINTF_UNLOCKED = 0xCAC - SYS_FPUTC_UNLOCKED = 0xC81 - SYS_FPUTS_UNLOCKED = 0xC7E - SYS_FPUTWC_UNLOCKED = 0xCAD - SYS_FPUTWS_UNLOCKED = 0xCAE - SYS_FREAD_NOUPDATE = 0xCEC - SYS_FREAD_NOUPDATE_UNLOCKED = 0xCED - SYS_FREAD_UNLOCKED = 0xC7B - SYS_FREEIFADDRS = 0xCE6 - SYS_FREOPEN64 = 0xD4B - SYS_FREOPEN64_UNLOCKED = 0xD4C - SYS_FREOPEN_UNLOCKED = 0xCFB - SYS_FREXPD128 = 0xBCE - SYS_FREXPD32 = 0xBCC - SYS_FREXPD64 = 0xBCD - SYS_FSCANF_UNLOCKED = 0xCAF - SYS_FSEEK64 = 0xCEF - SYS_FSEEK64_UNLOCKED = 0xCF5 - SYS_FSEEKO64 = 0xCF0 - SYS_FSEEKO64_UNLOCKED = 0xCF6 - SYS_FSEEKO_UNLOCKED = 0xCB1 - SYS_FSEEK_UNLOCKED = 0xCB0 - SYS_FSETPOS64 = 0xCF1 - SYS_FSETPOS64_UNLOCKED = 0xCF7 - SYS_FSETPOS_UNLOCKED = 0xCB3 - SYS_FTELL64 = 0xCF2 - SYS_FTELL64_UNLOCKED = 0xCF8 - SYS_FTELLO64 = 0xCF3 - SYS_FTELLO64_UNLOCKED = 0xCF9 - SYS_FTELLO_UNLOCKED = 0xCB5 - SYS_FTELL_UNLOCKED = 0xCB4 - SYS_FUPDATE = 0x0B5 - SYS_FUPDATE_UNLOCKED = 0xCB7 - SYS_FWIDE_UNLOCKED = 0xCB8 - SYS_FWPRINTF_UNLOCKED = 0xCB9 - SYS_FWRITE_UNLOCKED = 0xC7A - SYS_FWSCANF_UNLOCKED = 0xCBA - SYS_GETDATE64 = 0xD4F - SYS_GETIFADDRS = 0xCE7 - SYS_GETIPV4SOURCEFILTER = 0xC77 - SYS_GETSOURCEFILTER = 0xC79 - SYS_GETSYNTX = 0x0FD - SYS_GETS_UNLOCKED = 0xC7D - SYS_GETTIMEOFDAY64 = 0xD50 - SYS_GETWCHAR_UNLOCKED = 0xCBC - SYS_GETWC_UNLOCKED = 0xCBB - SYS_GMTIME = 0x0B0 - SYS_GMTIME64 = 0xCDC - SYS_GMTIME64_R = 0xCDD - SYS_HYPOTD128 = 0xBD1 - SYS_HYPOTD32 = 0xBCF - SYS_HYPOTD64 = 0xBD0 - SYS_ILOGBD128 = 0xBD4 - SYS_ILOGBD32 = 0xBD2 - SYS_ILOGBD64 = 0xBD3 - SYS_ILOGBF = 0xA7E - SYS_ILOGBL = 0xA81 - SYS_INET6_IS_SRCADDR = 0xD5A - SYS_ISBLANK = 0x0FE - SYS_ISWALNUM = 0x0FF - SYS_LDEXPD128 = 0xBD7 - SYS_LDEXPD32 = 0xBD5 - SYS_LDEXPD64 = 0xBD6 - SYS_LGAMMAD128 = 0xBDA - SYS_LGAMMAD32 = 0xBD8 - SYS_LGAMMAD64 = 0xBD9 - SYS_LIO_LISTIO = 0xC6A - SYS_LLRINT = 0xA84 - SYS_LLRINTD128 = 0xBDD - SYS_LLRINTD32 = 0xBDB - SYS_LLRINTD64 = 0xBDC - SYS_LLRINTF = 0xA87 - SYS_LLRINTL = 0xA8A - SYS_LLROUND = 0xA8D - SYS_LLROUNDD128 = 0xBE0 - SYS_LLROUNDD32 = 0xBDE - SYS_LLROUNDD64 = 0xBDF - SYS_LLROUNDF = 0xA90 - SYS_LLROUNDL = 0xA93 - SYS_LOCALTIM = 0x0B1 - SYS_LOCALTIME = 0x0B1 - SYS_LOCALTIME64 = 0xCDE - SYS_LOCALTIME64_R = 0xCDF - SYS_LOG10D128 = 0xBE6 - SYS_LOG10D32 = 0xBE4 - SYS_LOG10D64 = 0xBE5 - SYS_LOG1PD128 = 0xBE9 - SYS_LOG1PD32 = 0xBE7 - SYS_LOG1PD64 = 0xBE8 - SYS_LOG2D128 = 0xBEC - SYS_LOG2D32 = 0xBEA - SYS_LOG2D64 = 0xBEB - SYS_LOGBD128 = 0xBEF - SYS_LOGBD32 = 0xBED - SYS_LOGBD64 = 0xBEE - SYS_LOGBF = 0xA96 - SYS_LOGBL = 0xA99 - SYS_LOGD128 = 0xBE3 - SYS_LOGD32 = 0xBE1 - SYS_LOGD64 = 0xBE2 - SYS_LRINT = 0xA9C - SYS_LRINTD128 = 0xBF2 - SYS_LRINTD32 = 0xBF0 - SYS_LRINTD64 = 0xBF1 - SYS_LRINTF = 0xA9F - SYS_LRINTL = 0xAA2 - SYS_LROUNDD128 = 0xBF5 - SYS_LROUNDD32 = 0xBF3 - SYS_LROUNDD64 = 0xBF4 - SYS_LROUNDL = 0xAA5 - SYS_MBLEN = 0x0AF - SYS_MBRTOC16 = 0xD42 - SYS_MBRTOC32 = 0xD43 - SYS_MEMSET = 0x0A3 - SYS_MKTIME = 0x0AC - SYS_MKTIME64 = 0xCE0 - SYS_MODFD128 = 0xBF8 - SYS_MODFD32 = 0xBF6 - SYS_MODFD64 = 0xBF7 - SYS_NAN = 0xAA8 - SYS_NAND128 = 0xBFB - SYS_NAND32 = 0xBF9 - SYS_NAND64 = 0xBFA - SYS_NANF = 0xAAA - SYS_NANL = 0xAAC - SYS_NEARBYINT = 0xAAE - SYS_NEARBYINTD128 = 0xBFE - SYS_NEARBYINTD32 = 0xBFC - SYS_NEARBYINTD64 = 0xBFD - SYS_NEARBYINTF = 0xAB1 - SYS_NEARBYINTL = 0xAB4 - SYS_NEXTAFTERD128 = 0xC01 - SYS_NEXTAFTERD32 = 0xBFF - 
SYS_NEXTAFTERD64 = 0xC00 - SYS_NEXTAFTERF = 0xAB7 - SYS_NEXTAFTERL = 0xABA - SYS_NEXTTOWARD = 0xABD - SYS_NEXTTOWARDD128 = 0xC04 - SYS_NEXTTOWARDD32 = 0xC02 - SYS_NEXTTOWARDD64 = 0xC03 - SYS_NEXTTOWARDF = 0xAC0 - SYS_NEXTTOWARDL = 0xAC3 - SYS_NL_LANGINFO = 0x0FC - SYS_PERROR_UNLOCKED = 0xCBD - SYS_POSIX_FALLOCATE = 0xCE8 - SYS_POSIX_MEMALIGN = 0xCE9 - SYS_POSIX_OPENPT = 0xC66 - SYS_POWD128 = 0xC07 - SYS_POWD32 = 0xC05 - SYS_POWD64 = 0xC06 - SYS_PRINTF_UNLOCKED = 0xCBE - SYS_PSELECT = 0xC67 - SYS_PTHREAD_ATTR_GETSTACK = 0xB3E - SYS_PTHREAD_ATTR_SETSTACK = 0xB3F - SYS_PTHREAD_SECURITY_APPLID_NP = 0xCE4 - SYS_PUTS_UNLOCKED = 0xC7F - SYS_PUTWCHAR_UNLOCKED = 0xCC0 - SYS_PUTWC_UNLOCKED = 0xCBF - SYS_QUANTEXPD128 = 0xD46 - SYS_QUANTEXPD32 = 0xD44 - SYS_QUANTEXPD64 = 0xD45 - SYS_QUANTIZED128 = 0xC0A - SYS_QUANTIZED32 = 0xC08 - SYS_QUANTIZED64 = 0xC09 - SYS_REMAINDERD128 = 0xC0D - SYS_REMAINDERD32 = 0xC0B - SYS_REMAINDERD64 = 0xC0C - SYS_RESIZE_ALLOC = 0xCEB - SYS_REWIND_UNLOCKED = 0xCC1 - SYS_RINTD128 = 0xC13 - SYS_RINTD32 = 0xC11 - SYS_RINTD64 = 0xC12 - SYS_RINTF = 0xACB - SYS_RINTL = 0xACD - SYS_ROUND = 0xACF - SYS_ROUNDD128 = 0xC16 - SYS_ROUNDD32 = 0xC14 - SYS_ROUNDD64 = 0xC15 - SYS_ROUNDF = 0xAD2 - SYS_ROUNDL = 0xAD5 - SYS_SAMEQUANTUMD128 = 0xC19 - SYS_SAMEQUANTUMD32 = 0xC17 - SYS_SAMEQUANTUMD64 = 0xC18 - SYS_SCALBLN = 0xAD8 - SYS_SCALBLND128 = 0xC1C - SYS_SCALBLND32 = 0xC1A - SYS_SCALBLND64 = 0xC1B - SYS_SCALBLNF = 0xADB - SYS_SCALBLNL = 0xADE - SYS_SCALBND128 = 0xC1F - SYS_SCALBND32 = 0xC1D - SYS_SCALBND64 = 0xC1E - SYS_SCALBNF = 0xAE3 - SYS_SCALBNL = 0xAE6 - SYS_SCANF_UNLOCKED = 0xCC2 - SYS_SCHED_YIELD = 0xB32 - SYS_SETENV = 0x0C8 - SYS_SETIPV4SOURCEFILTER = 0xC76 - SYS_SETSOURCEFILTER = 0xC78 - SYS_SHM_OPEN = 0xC8C - SYS_SHM_UNLINK = 0xC8D - SYS_SIND128 = 0xC22 - SYS_SIND32 = 0xC20 - SYS_SIND64 = 0xC21 - SYS_SINHD128 = 0xC25 - SYS_SINHD32 = 0xC23 - SYS_SINHD64 = 0xC24 - SYS_SIZEOF_ALLOC = 0xCEA - SYS_SOCKATMARK = 0xC68 - SYS_SQRTD128 = 0xC28 - SYS_SQRTD32 = 0xC26 - SYS_SQRTD64 = 0xC27 - SYS_STRCHR = 0x0A0 - SYS_STRCSPN = 0x0A1 - SYS_STRERROR = 0x0A8 - SYS_STRERROR_R = 0xB33 - SYS_STRFTIME = 0x0B2 - SYS_STRLEN = 0x0A9 - SYS_STRPBRK = 0x0A2 - SYS_STRSPN = 0x0A4 - SYS_STRSTR = 0x0A5 - SYS_STRTOD128 = 0xC2B - SYS_STRTOD32 = 0xC29 - SYS_STRTOD64 = 0xC2A - SYS_STRTOK = 0x0A6 - SYS_TAND128 = 0xC2E - SYS_TAND32 = 0xC2C - SYS_TAND64 = 0xC2D - SYS_TANHD128 = 0xC31 - SYS_TANHD32 = 0xC2F - SYS_TANHD64 = 0xC30 - SYS_TGAMMAD128 = 0xC34 - SYS_TGAMMAD32 = 0xC32 - SYS_TGAMMAD64 = 0xC33 - SYS_TIME = 0x0AD - SYS_TIME64 = 0xCE1 - SYS_TMPFILE64 = 0xD4D - SYS_TMPFILE64_UNLOCKED = 0xD4E - SYS_TMPFILE_UNLOCKED = 0xCFD - SYS_TRUNCD128 = 0xC40 - SYS_TRUNCD32 = 0xC3E - SYS_TRUNCD64 = 0xC3F - SYS_UNGETC_UNLOCKED = 0xCC3 - SYS_UNGETWC_UNLOCKED = 0xCC4 - SYS_UNSETENV = 0xB34 - SYS_VFPRINTF_UNLOCKED = 0xCC5 - SYS_VFSCANF_UNLOCKED = 0xCC7 - SYS_VFWPRINTF_UNLOCKED = 0xCC9 - SYS_VFWSCANF_UNLOCKED = 0xCCB - SYS_VPRINTF_UNLOCKED = 0xCCD - SYS_VSCANF_UNLOCKED = 0xCCF - SYS_VWPRINTF_UNLOCKED = 0xCD1 - SYS_VWSCANF_UNLOCKED = 0xCD3 - SYS_WCSTOD128 = 0xC43 - SYS_WCSTOD32 = 0xC41 - SYS_WCSTOD64 = 0xC42 - SYS_WPRINTF_UNLOCKED = 0xCD5 - SYS_WSCANF_UNLOCKED = 0xCD6 - SYS__FLUSHLBF = 0xD68 - SYS__FLUSHLBF_UNLOCKED = 0xD6F - SYS___ACOSHF_H = 0xA54 - SYS___ACOSHL_H = 0xA55 - SYS___ASINHF_H = 0xA56 - SYS___ASINHL_H = 0xA57 - SYS___ATANPID128 = 0xC6D - SYS___ATANPID32 = 0xC6B - SYS___ATANPID64 = 0xC6C - SYS___CBRTF_H = 0xA58 - SYS___CBRTL_H = 0xA59 - SYS___CDUMP = 0x0C4 - SYS___CLASS = 0xAFA - SYS___CLASS2 = 0xB99 - SYS___CLASS2D128 = 
0xC99 - SYS___CLASS2D32 = 0xC97 - SYS___CLASS2D64 = 0xC98 - SYS___CLASS2F = 0xC91 - SYS___CLASS2F_B = 0xC93 - SYS___CLASS2F_H = 0xC94 - SYS___CLASS2L = 0xC92 - SYS___CLASS2L_B = 0xC95 - SYS___CLASS2L_H = 0xC96 - SYS___CLASS2_B = 0xB9A - SYS___CLASS2_H = 0xB9B - SYS___CLASS_B = 0xAFB - SYS___CLASS_H = 0xAFC - SYS___CLOGL_B = 0xA01 - SYS___CLOGL_H = 0xA02 - SYS___CLRENV = 0x0C9 - SYS___CLRMF = 0x0BD - SYS___CODEPAGE_INFO = 0xC64 - SYS___CONJF_B = 0xA07 - SYS___CONJF_H = 0xA08 - SYS___CONJL_B = 0xA0A - SYS___CONJL_H = 0xA0B - SYS___CONJ_B = 0xA04 - SYS___CONJ_H = 0xA05 - SYS___COPYSIGN_B = 0xA5A - SYS___COPYSIGN_H = 0xAF5 - SYS___COSPID128 = 0xC70 - SYS___COSPID32 = 0xC6E - SYS___COSPID64 = 0xC6F - SYS___CPOWF_B = 0xA10 - SYS___CPOWF_H = 0xA11 - SYS___CPOWL_B = 0xA13 - SYS___CPOWL_H = 0xA14 - SYS___CPOW_B = 0xA0D - SYS___CPOW_H = 0xA0E - SYS___CPROJF_B = 0xA19 - SYS___CPROJF_H = 0xA1A - SYS___CPROJL_B = 0xA1C - SYS___CPROJL_H = 0xA1D - SYS___CPROJ_B = 0xA16 - SYS___CPROJ_H = 0xA17 - SYS___CREALF_B = 0xA22 - SYS___CREALF_H = 0xA23 - SYS___CREALL_B = 0xA25 - SYS___CREALL_H = 0xA26 - SYS___CREAL_B = 0xA1F - SYS___CREAL_H = 0xA20 - SYS___CSINF_B = 0xA2B - SYS___CSINF_H = 0xA2C - SYS___CSINHF_B = 0xA34 - SYS___CSINHF_H = 0xA35 - SYS___CSINHL_B = 0xA37 - SYS___CSINHL_H = 0xA38 - SYS___CSINH_B = 0xA31 - SYS___CSINH_H = 0xA32 - SYS___CSINL_B = 0xA2E - SYS___CSINL_H = 0xA2F - SYS___CSIN_B = 0xA28 - SYS___CSIN_H = 0xA29 - SYS___CSNAP = 0x0C5 - SYS___CSQRTF_B = 0xA3D - SYS___CSQRTF_H = 0xA3E - SYS___CSQRTL_B = 0xA40 - SYS___CSQRTL_H = 0xA41 - SYS___CSQRT_B = 0xA3A - SYS___CSQRT_H = 0xA3B - SYS___CTANF_B = 0xA46 - SYS___CTANF_H = 0xA47 - SYS___CTANHF_B = 0xA4F - SYS___CTANHF_H = 0xA50 - SYS___CTANHL_B = 0xA52 - SYS___CTANHL_H = 0xA53 - SYS___CTANH_B = 0xA4C - SYS___CTANH_H = 0xA4D - SYS___CTANL_B = 0xA49 - SYS___CTANL_H = 0xA4A - SYS___CTAN_B = 0xA43 - SYS___CTAN_H = 0xA44 - SYS___CTEST = 0x0C7 - SYS___CTRACE = 0x0C6 - SYS___D1TOP = 0xC9B - SYS___D2TOP = 0xC9C - SYS___D4TOP = 0xC9D - SYS___DYNALL = 0x0C3 - SYS___DYNFRE = 0x0C2 - SYS___EXP2F_H = 0xA5E - SYS___EXP2L_H = 0xA5F - SYS___EXP2_H = 0xA5D - SYS___EXPM1F_H = 0xA5B - SYS___EXPM1L_H = 0xA5C - SYS___FBUFSIZE = 0xD60 - SYS___FLBF = 0xD62 - SYS___FLDATA = 0x0C1 - SYS___FMAF_B = 0xA67 - SYS___FMAF_H = 0xA68 - SYS___FMAL_B = 0xA6A - SYS___FMAL_H = 0xA6B - SYS___FMAXF_B = 0xA70 - SYS___FMAXF_H = 0xA71 - SYS___FMAXL_B = 0xA73 - SYS___FMAXL_H = 0xA74 - SYS___FMAX_B = 0xA6D - SYS___FMAX_H = 0xA6E - SYS___FMA_B = 0xA64 - SYS___FMA_H = 0xA65 - SYS___FMINF_B = 0xA79 - SYS___FMINF_H = 0xA7A - SYS___FMINL_B = 0xA7C - SYS___FMINL_H = 0xA7D - SYS___FMIN_B = 0xA76 - SYS___FMIN_H = 0xA77 - SYS___FPENDING = 0xD61 - SYS___FPENDING_UNLOCKED = 0xD6C - SYS___FPURGE = 0xD69 - SYS___FPURGE_UNLOCKED = 0xD70 - SYS___FP_CAST_D = 0xBCB - SYS___FREADABLE = 0xD63 - SYS___FREADAHEAD = 0xD6A - SYS___FREADAHEAD_UNLOCKED = 0xD71 - SYS___FREADING = 0xD65 - SYS___FREADING_UNLOCKED = 0xD6D - SYS___FSEEK2 = 0xB3C - SYS___FSETERR = 0xD6B - SYS___FSETLOCKING = 0xD67 - SYS___FTCHEP = 0x0BF - SYS___FTELL2 = 0xB3B - SYS___FUPDT = 0x0B5 - SYS___FWRITABLE = 0xD64 - SYS___FWRITING = 0xD66 - SYS___FWRITING_UNLOCKED = 0xD6E - SYS___GETCB = 0x0B4 - SYS___GETGRGID1 = 0xD5B - SYS___GETGRNAM1 = 0xD5C - SYS___GETTHENT = 0xCE5 - SYS___GETTOD = 0xD3E - SYS___HYPOTF_H = 0xAF6 - SYS___HYPOTL_H = 0xAF7 - SYS___ILOGBF_B = 0xA7F - SYS___ILOGBF_H = 0xA80 - SYS___ILOGBL_B = 0xA82 - SYS___ILOGBL_H = 0xA83 - SYS___ISBLANK_A = 0xB2E - SYS___ISBLNK = 0x0FE - SYS___ISWBLANK_A = 0xB2F - SYS___LE_CEEGTJS = 0xD72 - 
SYS___LE_TRACEBACK = 0xB7A - SYS___LGAMMAL_H = 0xA62 - SYS___LGAMMA_B_C99 = 0xB39 - SYS___LGAMMA_H_C99 = 0xB38 - SYS___LGAMMA_R_C99 = 0xB3A - SYS___LLRINTF_B = 0xA88 - SYS___LLRINTF_H = 0xA89 - SYS___LLRINTL_B = 0xA8B - SYS___LLRINTL_H = 0xA8C - SYS___LLRINT_B = 0xA85 - SYS___LLRINT_H = 0xA86 - SYS___LLROUNDF_B = 0xA91 - SYS___LLROUNDF_H = 0xA92 - SYS___LLROUNDL_B = 0xA94 - SYS___LLROUNDL_H = 0xA95 - SYS___LLROUND_B = 0xA8E - SYS___LLROUND_H = 0xA8F - SYS___LOCALE_CTL = 0xD47 - SYS___LOG1PF_H = 0xA60 - SYS___LOG1PL_H = 0xA61 - SYS___LOGBF_B = 0xA97 - SYS___LOGBF_H = 0xA98 - SYS___LOGBL_B = 0xA9A - SYS___LOGBL_H = 0xA9B - SYS___LOGIN_APPLID = 0xCE2 - SYS___LRINTF_B = 0xAA0 - SYS___LRINTF_H = 0xAA1 - SYS___LRINTL_B = 0xAA3 - SYS___LRINTL_H = 0xAA4 - SYS___LRINT_B = 0xA9D - SYS___LRINT_H = 0xA9E - SYS___LROUNDF_FIXUP = 0xB31 - SYS___LROUNDL_B = 0xAA6 - SYS___LROUNDL_H = 0xAA7 - SYS___LROUND_FIXUP = 0xB30 - SYS___MOSERVICES = 0xD3D - SYS___MUST_STAY_CLEAN = 0xB7C - SYS___NANF_B = 0xAAB - SYS___NANL_B = 0xAAD - SYS___NAN_B = 0xAA9 - SYS___NEARBYINTF_B = 0xAB2 - SYS___NEARBYINTF_H = 0xAB3 - SYS___NEARBYINTL_B = 0xAB5 - SYS___NEARBYINTL_H = 0xAB6 - SYS___NEARBYINT_B = 0xAAF - SYS___NEARBYINT_H = 0xAB0 - SYS___NEXTAFTERF_B = 0xAB8 - SYS___NEXTAFTERF_H = 0xAB9 - SYS___NEXTAFTERL_B = 0xABB - SYS___NEXTAFTERL_H = 0xABC - SYS___NEXTTOWARDF_B = 0xAC1 - SYS___NEXTTOWARDF_H = 0xAC2 - SYS___NEXTTOWARDL_B = 0xAC4 - SYS___NEXTTOWARDL_H = 0xAC5 - SYS___NEXTTOWARD_B = 0xABE - SYS___NEXTTOWARD_H = 0xABF - SYS___O_ENV = 0xB7D - SYS___PASSWD_APPLID = 0xCE3 - SYS___PTOD1 = 0xC9E - SYS___PTOD2 = 0xC9F - SYS___PTOD4 = 0xCA0 - SYS___REGCOMP_STD = 0x0EA - SYS___REMAINDERF_H = 0xAC6 - SYS___REMAINDERL_H = 0xAC7 - SYS___REMQUOD128 = 0xC10 - SYS___REMQUOD32 = 0xC0E - SYS___REMQUOD64 = 0xC0F - SYS___REMQUOF_H = 0xAC9 - SYS___REMQUOL_H = 0xACA - SYS___REMQUO_H = 0xAC8 - SYS___RINTF_B = 0xACC - SYS___RINTL_B = 0xACE - SYS___ROUNDF_B = 0xAD3 - SYS___ROUNDF_H = 0xAD4 - SYS___ROUNDL_B = 0xAD6 - SYS___ROUNDL_H = 0xAD7 - SYS___ROUND_B = 0xAD0 - SYS___ROUND_H = 0xAD1 - SYS___SCALBLNF_B = 0xADC - SYS___SCALBLNF_H = 0xADD - SYS___SCALBLNL_B = 0xADF - SYS___SCALBLNL_H = 0xAE0 - SYS___SCALBLN_B = 0xAD9 - SYS___SCALBLN_H = 0xADA - SYS___SCALBNF_B = 0xAE4 - SYS___SCALBNF_H = 0xAE5 - SYS___SCALBNL_B = 0xAE7 - SYS___SCALBNL_H = 0xAE8 - SYS___SCALBN_B = 0xAE1 - SYS___SCALBN_H = 0xAE2 - SYS___SETENV = 0x0C8 - SYS___SINPID128 = 0xC73 - SYS___SINPID32 = 0xC71 - SYS___SINPID64 = 0xC72 - SYS___SMF_RECORD2 = 0xD48 - SYS___STATIC_REINIT = 0xB3D - SYS___TGAMMAF_H_C99 = 0xB79 - SYS___TGAMMAL_H = 0xAE9 - SYS___TGAMMA_H_C99 = 0xB78 - SYS___TOCSNAME2 = 0xC9A - SYS_CEIL = 0x01F - SYS_CHAUDIT = 0x1E0 - SYS_EXP = 0x01A - SYS_FCHAUDIT = 0x1E1 - SYS_FREXP = 0x01D - SYS_GETGROUPSBYNAME = 0x1E2 - SYS_GETPWUID = 0x1A0 - SYS_GETUID = 0x1A1 - SYS_ISATTY = 0x1A3 - SYS_KILL = 0x1A4 - SYS_LDEXP = 0x01E - SYS_LINK = 0x1A5 - SYS_LOG10 = 0x01C - SYS_LSEEK = 0x1A6 - SYS_LSTAT = 0x1A7 - SYS_MKDIR = 0x1A8 - SYS_MKFIFO = 0x1A9 - SYS_MKNOD = 0x1AA - SYS_MODF = 0x01B - SYS_MOUNT = 0x1AB - SYS_OPEN = 0x1AC - SYS_OPENDIR = 0x1AD - SYS_PATHCONF = 0x1AE - SYS_PAUSE = 0x1AF - SYS_PIPE = 0x1B0 - SYS_PTHREAD_ATTR_DESTROY = 0x1E7 - SYS_PTHREAD_ATTR_GETDETACHSTATE = 0x1EB - SYS_PTHREAD_ATTR_GETSTACKSIZE = 0x1E9 - SYS_PTHREAD_ATTR_GETWEIGHT_NP = 0x1ED - SYS_PTHREAD_ATTR_INIT = 0x1E6 - SYS_PTHREAD_ATTR_SETDETACHSTATE = 0x1EA - SYS_PTHREAD_ATTR_SETSTACKSIZE = 0x1E8 - SYS_PTHREAD_ATTR_SETWEIGHT_NP = 0x1EC - SYS_PTHREAD_CANCEL = 0x1EE - SYS_PTHREAD_CLEANUP_POP = 0x1F0 - 
SYS_PTHREAD_CLEANUP_PUSH = 0x1EF - SYS_PTHREAD_CONDATTR_DESTROY = 0x1F2 - SYS_PTHREAD_CONDATTR_INIT = 0x1F1 - SYS_PTHREAD_COND_BROADCAST = 0x1F6 - SYS_PTHREAD_COND_DESTROY = 0x1F4 - SYS_PTHREAD_COND_INIT = 0x1F3 - SYS_PTHREAD_COND_SIGNAL = 0x1F5 - SYS_PTHREAD_COND_TIMEDWAIT = 0x1F8 - SYS_PTHREAD_COND_WAIT = 0x1F7 - SYS_PTHREAD_CREATE = 0x1F9 - SYS_PTHREAD_DETACH = 0x1FA - SYS_PTHREAD_EQUAL = 0x1FB - SYS_PTHREAD_EXIT = 0x1E4 - SYS_PTHREAD_GETSPECIFIC = 0x1FC - SYS_PTHREAD_JOIN = 0x1FD - SYS_PTHREAD_KEY_CREATE = 0x1FE - SYS_PTHREAD_KILL = 0x1E5 - SYS_PTHREAD_MUTEXATTR_INIT = 0x1FF - SYS_READ = 0x1B2 - SYS_READDIR = 0x1B3 - SYS_READLINK = 0x1B4 - SYS_REWINDDIR = 0x1B5 - SYS_RMDIR = 0x1B6 - SYS_SETEGID = 0x1B7 - SYS_SETEUID = 0x1B8 - SYS_SETGID = 0x1B9 - SYS_SETPGID = 0x1BA - SYS_SETSID = 0x1BB - SYS_SETUID = 0x1BC - SYS_SIGACTION = 0x1BD - SYS_SIGADDSET = 0x1BE - SYS_SIGDELSET = 0x1BF - SYS_SIGEMPTYSET = 0x1C0 - SYS_SIGFILLSET = 0x1C1 - SYS_SIGISMEMBER = 0x1C2 - SYS_SIGLONGJMP = 0x1C3 - SYS_SIGPENDING = 0x1C4 - SYS_SIGPROCMASK = 0x1C5 - SYS_SIGSETJMP = 0x1C6 - SYS_SIGSUSPEND = 0x1C7 - SYS_SIGWAIT = 0x1E3 - SYS_SLEEP = 0x1C8 - SYS_STAT = 0x1C9 - SYS_SYMLINK = 0x1CB - SYS_SYSCONF = 0x1CC - SYS_TCDRAIN = 0x1CD - SYS_TCFLOW = 0x1CE - SYS_TCFLUSH = 0x1CF - SYS_TCGETATTR = 0x1D0 - SYS_TCGETPGRP = 0x1D1 - SYS_TCSENDBREAK = 0x1D2 - SYS_TCSETATTR = 0x1D3 - SYS_TCSETPGRP = 0x1D4 - SYS_TIMES = 0x1D5 - SYS_TTYNAME = 0x1D6 - SYS_TZSET = 0x1D7 - SYS_UMASK = 0x1D8 - SYS_UMOUNT = 0x1D9 - SYS_UNAME = 0x1DA - SYS_UNLINK = 0x1DB - SYS_UTIME = 0x1DC - SYS_WAIT = 0x1DD - SYS_WAITPID = 0x1DE - SYS_WRITE = 0x1DF - SYS_W_GETPSENT = 0x1B1 - SYS_W_IOCTL = 0x1A2 - SYS_W_STATFS = 0x1CA - SYS_A64L = 0x2EF - SYS_BCMP = 0x2B9 - SYS_BCOPY = 0x2BA - SYS_BZERO = 0x2BB - SYS_CATCLOSE = 0x2B6 - SYS_CATGETS = 0x2B7 - SYS_CATOPEN = 0x2B8 - SYS_CRYPT = 0x2AC - SYS_DBM_CLEARERR = 0x2F7 - SYS_DBM_CLOSE = 0x2F8 - SYS_DBM_DELETE = 0x2F9 - SYS_DBM_ERROR = 0x2FA - SYS_DBM_FETCH = 0x2FB - SYS_DBM_FIRSTKEY = 0x2FC - SYS_DBM_NEXTKEY = 0x2FD - SYS_DBM_OPEN = 0x2FE - SYS_DBM_STORE = 0x2FF - SYS_DRAND48 = 0x2B2 - SYS_ENCRYPT = 0x2AD - SYS_ENDUTXENT = 0x2E1 - SYS_ERAND48 = 0x2B3 - SYS_ERF = 0x02C - SYS_ERFC = 0x02D - SYS_FCHDIR = 0x2D9 - SYS_FFS = 0x2BC - SYS_FMTMSG = 0x2E5 - SYS_FSTATVFS = 0x2B4 - SYS_FTIME = 0x2F5 - SYS_GAMMA = 0x02E - SYS_GETDATE = 0x2A6 - SYS_GETPAGESIZE = 0x2D8 - SYS_GETTIMEOFDAY = 0x2F6 - SYS_GETUTXENT = 0x2E0 - SYS_GETUTXID = 0x2E2 - SYS_GETUTXLINE = 0x2E3 - SYS_HCREATE = 0x2C6 - SYS_HDESTROY = 0x2C7 - SYS_HSEARCH = 0x2C8 - SYS_HYPOT = 0x02B - SYS_INDEX = 0x2BD - SYS_INITSTATE = 0x2C2 - SYS_INSQUE = 0x2CF - SYS_ISASCII = 0x2ED - SYS_JRAND48 = 0x2E6 - SYS_L64A = 0x2F0 - SYS_LCONG48 = 0x2EA - SYS_LFIND = 0x2C9 - SYS_LRAND48 = 0x2E7 - SYS_LSEARCH = 0x2CA - SYS_MEMCCPY = 0x2D4 - SYS_MRAND48 = 0x2E8 - SYS_NRAND48 = 0x2E9 - SYS_PCLOSE = 0x2D2 - SYS_POPEN = 0x2D1 - SYS_PUTUTXLINE = 0x2E4 - SYS_RANDOM = 0x2C4 - SYS_REMQUE = 0x2D0 - SYS_RINDEX = 0x2BE - SYS_SEED48 = 0x2EC - SYS_SETKEY = 0x2AE - SYS_SETSTATE = 0x2C3 - SYS_SETUTXENT = 0x2DF - SYS_SRAND48 = 0x2EB - SYS_SRANDOM = 0x2C5 - SYS_STATVFS = 0x2B5 - SYS_STRCASECMP = 0x2BF - SYS_STRDUP = 0x2C0 - SYS_STRNCASECMP = 0x2C1 - SYS_SWAB = 0x2D3 - SYS_TDELETE = 0x2CB - SYS_TFIND = 0x2CC - SYS_TOASCII = 0x2EE - SYS_TSEARCH = 0x2CD - SYS_TWALK = 0x2CE - SYS_UALARM = 0x2F1 - SYS_USLEEP = 0x2F2 - SYS_WAIT3 = 0x2A7 - SYS_WAITID = 0x2A8 - SYS_Y1 = 0x02A - SYS___ATOE = 0x2DB - SYS___ATOE_L = 0x2DC - SYS___CATTRM = 0x2A9 - SYS___CNVBLK = 0x2AF - SYS___CRYTRM = 0x2B0 - SYS___DLGHT = 0x2A1 - 
SYS___ECRTRM = 0x2B1 - SYS___ETOA = 0x2DD - SYS___ETOA_L = 0x2DE - SYS___GDTRM = 0x2AA - SYS___OCLCK = 0x2DA - SYS___OPARGF = 0x2A2 - SYS___OPERRF = 0x2A5 - SYS___OPINDF = 0x2A4 - SYS___OPOPTF = 0x2A3 - SYS___RNDTRM = 0x2AB - SYS___SRCTRM = 0x2F4 - SYS___TZONE = 0x2A0 - SYS___UTXTRM = 0x2F3 - SYS_ASIN = 0x03E - SYS_ISXDIGIT = 0x03B - SYS_SETLOCAL = 0x03A - SYS_SETLOCALE = 0x03A - SYS_SIN = 0x03F - SYS_TOLOWER = 0x03C - SYS_TOUPPER = 0x03D - SYS_ACCEPT_AND_RECV = 0x4F7 - SYS_ATOL = 0x04E - SYS_CHECKSCH = 0x4BC - SYS_CHECKSCHENV = 0x4BC - SYS_CLEARERR = 0x04C - SYS_CONNECTS = 0x4B5 - SYS_CONNECTSERVER = 0x4B5 - SYS_CONNECTW = 0x4B4 - SYS_CONNECTWORKMGR = 0x4B4 - SYS_CONTINUE = 0x4B3 - SYS_CONTINUEWORKUNIT = 0x4B3 - SYS_COPYSIGN = 0x4C2 - SYS_CREATEWO = 0x4B2 - SYS_CREATEWORKUNIT = 0x4B2 - SYS_DELETEWO = 0x4B9 - SYS_DELETEWORKUNIT = 0x4B9 - SYS_DISCONNE = 0x4B6 - SYS_DISCONNECTSERVER = 0x4B6 - SYS_FEOF = 0x04D - SYS_FERROR = 0x04A - SYS_FINITE = 0x4C8 - SYS_GAMMA_R = 0x4E2 - SYS_JOINWORK = 0x4B7 - SYS_JOINWORKUNIT = 0x4B7 - SYS_LEAVEWOR = 0x4B8 - SYS_LEAVEWORKUNIT = 0x4B8 - SYS_LGAMMA_R = 0x4EB - SYS_MATHERR = 0x4D0 - SYS_PERROR = 0x04F - SYS_QUERYMET = 0x4BA - SYS_QUERYMETRICS = 0x4BA - SYS_QUERYSCH = 0x4BB - SYS_QUERYSCHENV = 0x4BB - SYS_REWIND = 0x04B - SYS_SCALBN = 0x4D4 - SYS_SIGNIFIC = 0x4D5 - SYS_SIGNIFICAND = 0x4D5 - SYS___ACOSH_B = 0x4DA - SYS___ACOS_B = 0x4D9 - SYS___ASINH_B = 0x4BE - SYS___ASIN_B = 0x4DB - SYS___ATAN2_B = 0x4DC - SYS___ATANH_B = 0x4DD - SYS___ATAN_B = 0x4BF - SYS___CBRT_B = 0x4C0 - SYS___CEIL_B = 0x4C1 - SYS___COSH_B = 0x4DE - SYS___COS_B = 0x4C3 - SYS___DGHT = 0x4A8 - SYS___ENVN = 0x4B0 - SYS___ERFC_B = 0x4C5 - SYS___ERF_B = 0x4C4 - SYS___EXPM1_B = 0x4C6 - SYS___EXP_B = 0x4DF - SYS___FABS_B = 0x4C7 - SYS___FLOOR_B = 0x4C9 - SYS___FMOD_B = 0x4E0 - SYS___FP_SETMODE = 0x4F8 - SYS___FREXP_B = 0x4CA - SYS___GAMMA_B = 0x4E1 - SYS___GDRR = 0x4A1 - SYS___HRRNO = 0x4A2 - SYS___HYPOT_B = 0x4E3 - SYS___ILOGB_B = 0x4CB - SYS___ISNAN_B = 0x4CC - SYS___J0_B = 0x4E4 - SYS___J1_B = 0x4E6 - SYS___JN_B = 0x4E8 - SYS___LDEXP_B = 0x4CD - SYS___LGAMMA_B = 0x4EA - SYS___LOG10_B = 0x4ED - SYS___LOG1P_B = 0x4CE - SYS___LOGB_B = 0x4CF - SYS___LOGIN = 0x4F5 - SYS___LOG_B = 0x4EC - SYS___MLOCKALL = 0x4B1 - SYS___MODF_B = 0x4D1 - SYS___NEXTAFTER_B = 0x4D2 - SYS___OPENDIR2 = 0x4F3 - SYS___OPEN_STAT = 0x4F6 - SYS___OPND = 0x4A5 - SYS___OPPT = 0x4A6 - SYS___OPRG = 0x4A3 - SYS___OPRR = 0x4A4 - SYS___PID_AFFINITY = 0x4BD - SYS___POW_B = 0x4EE - SYS___READDIR2 = 0x4F4 - SYS___REMAINDER_B = 0x4EF - SYS___RINT_B = 0x4D3 - SYS___SCALB_B = 0x4F0 - SYS___SIGACTIONSET = 0x4FB - SYS___SIGGM = 0x4A7 - SYS___SINH_B = 0x4F1 - SYS___SIN_B = 0x4D6 - SYS___SQRT_B = 0x4F2 - SYS___TANH_B = 0x4D8 - SYS___TAN_B = 0x4D7 - SYS___TRRNO = 0x4AF - SYS___TZNE = 0x4A9 - SYS___TZZN = 0x4AA - SYS___UCREATE = 0x4FC - SYS___UFREE = 0x4FE - SYS___UHEAPREPORT = 0x4FF - SYS___UMALLOC = 0x4FD - SYS___Y0_B = 0x4E5 - SYS___Y1_B = 0x4E7 - SYS___YN_B = 0x4E9 - SYS_ABORT = 0x05C - SYS_ASCTIME_R = 0x5E0 - SYS_ATEXIT = 0x05D - SYS_CONNECTE = 0x5AE - SYS_CONNECTEXPORTIMPORT = 0x5AE - SYS_CTIME_R = 0x5E1 - SYS_DN_COMP = 0x5DF - SYS_DN_EXPAND = 0x5DD - SYS_DN_SKIPNAME = 0x5DE - SYS_EXIT = 0x05A - SYS_EXPORTWO = 0x5A1 - SYS_EXPORTWORKUNIT = 0x5A1 - SYS_EXTRACTW = 0x5A5 - SYS_EXTRACTWORKUNIT = 0x5A5 - SYS_FSEEKO = 0x5C9 - SYS_FTELLO = 0x5C8 - SYS_GETGRGID_R = 0x5E7 - SYS_GETGRNAM_R = 0x5E8 - SYS_GETLOGIN_R = 0x5E9 - SYS_GETPWNAM_R = 0x5EA - SYS_GETPWUID_R = 0x5EB - SYS_GMTIME_R = 0x5E2 - SYS_IMPORTWO = 0x5A3 - SYS_IMPORTWORKUNIT = 0x5A3 - 
SYS_INET_NTOP = 0x5D3 - SYS_INET_PTON = 0x5D4 - SYS_LLABS = 0x5CE - SYS_LLDIV = 0x5CB - SYS_LOCALTIME_R = 0x5E3 - SYS_PTHREAD_ATFORK = 0x5ED - SYS_PTHREAD_ATTR_GETDETACHSTATE_U98 = 0x5FB - SYS_PTHREAD_ATTR_GETGUARDSIZE = 0x5EE - SYS_PTHREAD_ATTR_GETSCHEDPARAM = 0x5F9 - SYS_PTHREAD_ATTR_GETSTACKADDR = 0x5EF - SYS_PTHREAD_ATTR_SETDETACHSTATE_U98 = 0x5FC - SYS_PTHREAD_ATTR_SETGUARDSIZE = 0x5F0 - SYS_PTHREAD_ATTR_SETSCHEDPARAM = 0x5FA - SYS_PTHREAD_ATTR_SETSTACKADDR = 0x5F1 - SYS_PTHREAD_CONDATTR_GETPSHARED = 0x5F2 - SYS_PTHREAD_CONDATTR_SETPSHARED = 0x5F3 - SYS_PTHREAD_DETACH_U98 = 0x5FD - SYS_PTHREAD_GETCONCURRENCY = 0x5F4 - SYS_PTHREAD_GETSPECIFIC_U98 = 0x5FE - SYS_PTHREAD_KEY_DELETE = 0x5F5 - SYS_PTHREAD_SETCANCELSTATE = 0x5FF - SYS_PTHREAD_SETCONCURRENCY = 0x5F6 - SYS_PTHREAD_SIGMASK = 0x5F7 - SYS_QUERYENC = 0x5AD - SYS_QUERYWORKUNITCLASSIFICATION = 0x5AD - SYS_RAISE = 0x05E - SYS_RAND_R = 0x5E4 - SYS_READDIR_R = 0x5E6 - SYS_REALLOC = 0x05B - SYS_RES_INIT = 0x5D8 - SYS_RES_MKQUERY = 0x5D7 - SYS_RES_QUERY = 0x5D9 - SYS_RES_QUERYDOMAIN = 0x5DC - SYS_RES_SEARCH = 0x5DA - SYS_RES_SEND = 0x5DB - SYS_SETJMP = 0x05F - SYS_SIGQUEUE = 0x5A9 - SYS_STRTOK_R = 0x5E5 - SYS_STRTOLL = 0x5B0 - SYS_STRTOULL = 0x5B1 - SYS_TTYNAME_R = 0x5EC - SYS_UNDOEXPO = 0x5A2 - SYS_UNDOEXPORTWORKUNIT = 0x5A2 - SYS_UNDOIMPO = 0x5A4 - SYS_UNDOIMPORTWORKUNIT = 0x5A4 - SYS_WCSTOLL = 0x5CC - SYS_WCSTOULL = 0x5CD - SYS___ABORT = 0x05C - SYS___CONSOLE2 = 0x5D2 - SYS___CPL = 0x5A6 - SYS___DISCARDDATA = 0x5F8 - SYS___DSA_PREV = 0x5B2 - SYS___EP_FIND = 0x5B3 - SYS___FP_SWAPMODE = 0x5AF - SYS___GETUSERID = 0x5AB - SYS___GET_CPUID = 0x5B9 - SYS___GET_SYSTEM_SETTINGS = 0x5BA - SYS___IPDOMAINNAME = 0x5AC - SYS___MAP_INIT = 0x5A7 - SYS___MAP_SERVICE = 0x5A8 - SYS___MOUNT = 0x5AA - SYS___MSGRCV_TIMED = 0x5B7 - SYS___RES = 0x5D6 - SYS___SEMOP_TIMED = 0x5B8 - SYS___SERVER_THREADS_QUERY = 0x5B4 - SYS_FPRINTF = 0x06D - SYS_FSCANF = 0x06A - SYS_PRINTF = 0x06F - SYS_SETBUF = 0x06B - SYS_SETVBUF = 0x06C - SYS_SSCANF = 0x06E - SYS___CATGETS_A = 0x6C0 - SYS___CHAUDIT_A = 0x6F4 - SYS___CHMOD_A = 0x6E8 - SYS___COLLATE_INIT_A = 0x6AC - SYS___CREAT_A = 0x6F6 - SYS___CTYPE_INIT_A = 0x6AF - SYS___DLLLOAD_A = 0x6DF - SYS___DLLQUERYFN_A = 0x6E0 - SYS___DLLQUERYVAR_A = 0x6E1 - SYS___E2A_L = 0x6E3 - SYS___EXECLE_A = 0x6A0 - SYS___EXECLP_A = 0x6A4 - SYS___EXECVE_A = 0x6C1 - SYS___EXECVP_A = 0x6C2 - SYS___EXECV_A = 0x6B1 - SYS___FPRINTF_A = 0x6FA - SYS___GETADDRINFO_A = 0x6BF - SYS___GETNAMEINFO_A = 0x6C4 - SYS___GET_WCTYPE_STD_A = 0x6AE - SYS___ICONV_OPEN_A = 0x6DE - SYS___IF_INDEXTONAME_A = 0x6DC - SYS___IF_NAMETOINDEX_A = 0x6DB - SYS___ISWCTYPE_A = 0x6B0 - SYS___IS_WCTYPE_STD_A = 0x6B2 - SYS___LOCALECONV_A = 0x6B8 - SYS___LOCALECONV_STD_A = 0x6B9 - SYS___LOCALE_INIT_A = 0x6B7 - SYS___LSTAT_A = 0x6EE - SYS___LSTAT_O_A = 0x6EF - SYS___MKDIR_A = 0x6E9 - SYS___MKFIFO_A = 0x6EC - SYS___MKNOD_A = 0x6F0 - SYS___MONETARY_INIT_A = 0x6BC - SYS___MOUNT_A = 0x6F1 - SYS___NL_CSINFO_A = 0x6D6 - SYS___NL_LANGINFO_A = 0x6BA - SYS___NL_LNAGINFO_STD_A = 0x6BB - SYS___NL_MONINFO_A = 0x6D7 - SYS___NL_NUMINFO_A = 0x6D8 - SYS___NL_RESPINFO_A = 0x6D9 - SYS___NL_TIMINFO_A = 0x6DA - SYS___NUMERIC_INIT_A = 0x6C6 - SYS___OPEN_A = 0x6F7 - SYS___PRINTF_A = 0x6DD - SYS___RESP_INIT_A = 0x6C7 - SYS___RPMATCH_A = 0x6C8 - SYS___RPMATCH_C_A = 0x6C9 - SYS___RPMATCH_STD_A = 0x6CA - SYS___SETLOCALE_A = 0x6F9 - SYS___SPAWNP_A = 0x6C5 - SYS___SPAWN_A = 0x6C3 - SYS___SPRINTF_A = 0x6FB - SYS___STAT_A = 0x6EA - SYS___STAT_O_A = 0x6EB - SYS___STRCOLL_STD_A = 0x6A1 - SYS___STRFMON_A = 0x6BD - 
SYS___STRFMON_STD_A = 0x6BE - SYS___STRFTIME_A = 0x6CC - SYS___STRFTIME_STD_A = 0x6CD - SYS___STRPTIME_A = 0x6CE - SYS___STRPTIME_STD_A = 0x6CF - SYS___STRXFRM_A = 0x6A2 - SYS___STRXFRM_C_A = 0x6A3 - SYS___STRXFRM_STD_A = 0x6A5 - SYS___SYNTAX_INIT_A = 0x6D4 - SYS___TIME_INIT_A = 0x6CB - SYS___TOD_INIT_A = 0x6D5 - SYS___TOWLOWER_A = 0x6B3 - SYS___TOWLOWER_STD_A = 0x6B4 - SYS___TOWUPPER_A = 0x6B5 - SYS___TOWUPPER_STD_A = 0x6B6 - SYS___UMOUNT_A = 0x6F2 - SYS___VFPRINTF_A = 0x6FC - SYS___VPRINTF_A = 0x6FD - SYS___VSPRINTF_A = 0x6FE - SYS___VSWPRINTF_A = 0x6FF - SYS___WCSCOLL_A = 0x6A6 - SYS___WCSCOLL_C_A = 0x6A7 - SYS___WCSCOLL_STD_A = 0x6A8 - SYS___WCSFTIME_A = 0x6D0 - SYS___WCSFTIME_STD_A = 0x6D1 - SYS___WCSXFRM_A = 0x6A9 - SYS___WCSXFRM_C_A = 0x6AA - SYS___WCSXFRM_STD_A = 0x6AB - SYS___WCTYPE_A = 0x6AD - SYS___W_GETMNTENT_A = 0x6F5 - SYS_____CCSIDTYPE_A = 0x6E6 - SYS_____CHATTR_A = 0x6E2 - SYS_____CSNAMETYPE_A = 0x6E7 - SYS_____OPEN_STAT_A = 0x6ED - SYS_____SPAWN2_A = 0x6D2 - SYS_____SPAWNP2_A = 0x6D3 - SYS_____TOCCSID_A = 0x6E4 - SYS_____TOCSNAME_A = 0x6E5 - SYS_ACL_FREE = 0x7FF - SYS_ACL_INIT = 0x7FE - SYS_FWIDE = 0x7DF - SYS_FWPRINTF = 0x7D1 - SYS_FWRITE = 0x07E - SYS_FWSCANF = 0x7D5 - SYS_GETCHAR = 0x07B - SYS_GETS = 0x07C - SYS_M_CREATE_LAYOUT = 0x7C9 - SYS_M_DESTROY_LAYOUT = 0x7CA - SYS_M_GETVALUES_LAYOUT = 0x7CB - SYS_M_SETVALUES_LAYOUT = 0x7CC - SYS_M_TRANSFORM_LAYOUT = 0x7CD - SYS_M_WTRANSFORM_LAYOUT = 0x7CE - SYS_PREAD = 0x7C7 - SYS_PUTC = 0x07D - SYS_PUTCHAR = 0x07A - SYS_PUTS = 0x07F - SYS_PWRITE = 0x7C8 - SYS_TOWCTRAN = 0x7D8 - SYS_TOWCTRANS = 0x7D8 - SYS_UNATEXIT = 0x7B5 - SYS_VFWPRINT = 0x7D3 - SYS_VFWPRINTF = 0x7D3 - SYS_VWPRINTF = 0x7D4 - SYS_WCTRANS = 0x7D7 - SYS_WPRINTF = 0x7D2 - SYS_WSCANF = 0x7D6 - SYS___ASCTIME_R_A = 0x7A1 - SYS___BASENAME_A = 0x7DC - SYS___BTOWC_A = 0x7E4 - SYS___CDUMP_A = 0x7B7 - SYS___CEE3DMP_A = 0x7B6 - SYS___CEILF_H = 0x7F4 - SYS___CEILL_H = 0x7F5 - SYS___CEIL_H = 0x7EA - SYS___CRYPT_A = 0x7BE - SYS___CSNAP_A = 0x7B8 - SYS___CTEST_A = 0x7B9 - SYS___CTIME_R_A = 0x7A2 - SYS___CTRACE_A = 0x7BA - SYS___DBM_OPEN_A = 0x7E6 - SYS___DIRNAME_A = 0x7DD - SYS___FABSF_H = 0x7FA - SYS___FABSL_H = 0x7FB - SYS___FABS_H = 0x7ED - SYS___FGETWC_A = 0x7AA - SYS___FGETWS_A = 0x7AD - SYS___FLOORF_H = 0x7F6 - SYS___FLOORL_H = 0x7F7 - SYS___FLOOR_H = 0x7EB - SYS___FPUTWC_A = 0x7A5 - SYS___FPUTWS_A = 0x7A8 - SYS___GETTIMEOFDAY_A = 0x7AE - SYS___GETWCHAR_A = 0x7AC - SYS___GETWC_A = 0x7AB - SYS___GLOB_A = 0x7DE - SYS___GMTIME_A = 0x7AF - SYS___GMTIME_R_A = 0x7B0 - SYS___INET_PTON_A = 0x7BC - SYS___J0_H = 0x7EE - SYS___J1_H = 0x7EF - SYS___JN_H = 0x7F0 - SYS___LOCALTIME_A = 0x7B1 - SYS___LOCALTIME_R_A = 0x7B2 - SYS___MALLOC24 = 0x7FC - SYS___MALLOC31 = 0x7FD - SYS___MKTIME_A = 0x7B3 - SYS___MODFF_H = 0x7F8 - SYS___MODFL_H = 0x7F9 - SYS___MODF_H = 0x7EC - SYS___OPENDIR_A = 0x7C2 - SYS___OSNAME = 0x7E0 - SYS___PUTWCHAR_A = 0x7A7 - SYS___PUTWC_A = 0x7A6 - SYS___READDIR_A = 0x7C3 - SYS___STRTOLL_A = 0x7A3 - SYS___STRTOULL_A = 0x7A4 - SYS___SYSLOG_A = 0x7BD - SYS___TZZNA = 0x7B4 - SYS___UNGETWC_A = 0x7A9 - SYS___UTIME_A = 0x7A0 - SYS___VFPRINTF2_A = 0x7E7 - SYS___VPRINTF2_A = 0x7E8 - SYS___VSPRINTF2_A = 0x7E9 - SYS___VSWPRNTF2_A = 0x7BB - SYS___WCSTOD_A = 0x7D9 - SYS___WCSTOL_A = 0x7DA - SYS___WCSTOUL_A = 0x7DB - SYS___WCTOB_A = 0x7E5 - SYS___Y0_H = 0x7F1 - SYS___Y1_H = 0x7F2 - SYS___YN_H = 0x7F3 - SYS_____OPENDIR2_A = 0x7BF - SYS_____OSNAME_A = 0x7E1 - SYS_____READDIR2_A = 0x7C0 - SYS_DLCLOSE = 0x8DF - SYS_DLERROR = 0x8E0 - SYS_DLOPEN = 0x8DD - SYS_DLSYM = 0x8DE - SYS_FLOCKFILE 
= 0x8D3 - SYS_FTRYLOCKFILE = 0x8D4 - SYS_FUNLOCKFILE = 0x8D5 - SYS_GETCHAR_UNLOCKED = 0x8D7 - SYS_GETC_UNLOCKED = 0x8D6 - SYS_PUTCHAR_UNLOCKED = 0x8D9 - SYS_PUTC_UNLOCKED = 0x8D8 - SYS_SNPRINTF = 0x8DA - SYS_VSNPRINTF = 0x8DB - SYS_WCSCSPN = 0x08B - SYS_WCSLEN = 0x08C - SYS_WCSNCAT = 0x08D - SYS_WCSNCMP = 0x08A - SYS_WCSNCPY = 0x08F - SYS_WCSSPN = 0x08E - SYS___ABSF_H = 0x8E7 - SYS___ABSL_H = 0x8E8 - SYS___ABS_H = 0x8E6 - SYS___ACOSF_H = 0x8EA - SYS___ACOSH_H = 0x8EC - SYS___ACOSL_H = 0x8EB - SYS___ACOS_H = 0x8E9 - SYS___ASINF_H = 0x8EE - SYS___ASINH_H = 0x8F0 - SYS___ASINL_H = 0x8EF - SYS___ASIN_H = 0x8ED - SYS___ATAN2F_H = 0x8F8 - SYS___ATAN2L_H = 0x8F9 - SYS___ATAN2_H = 0x8F7 - SYS___ATANF_H = 0x8F2 - SYS___ATANHF_H = 0x8F5 - SYS___ATANHL_H = 0x8F6 - SYS___ATANH_H = 0x8F4 - SYS___ATANL_H = 0x8F3 - SYS___ATAN_H = 0x8F1 - SYS___CBRT_H = 0x8FA - SYS___COPYSIGNF_H = 0x8FB - SYS___COPYSIGNL_H = 0x8FC - SYS___COSF_H = 0x8FE - SYS___COSL_H = 0x8FF - SYS___COS_H = 0x8FD - SYS___DLERROR_A = 0x8D2 - SYS___DLOPEN_A = 0x8D0 - SYS___DLSYM_A = 0x8D1 - SYS___GETUTXENT_A = 0x8C6 - SYS___GETUTXID_A = 0x8C7 - SYS___GETUTXLINE_A = 0x8C8 - SYS___ITOA = 0x8AA - SYS___ITOA_A = 0x8B0 - SYS___LE_CONDITION_TOKEN_BUILD = 0x8A5 - SYS___LE_MSG_ADD_INSERT = 0x8A6 - SYS___LE_MSG_GET = 0x8A7 - SYS___LE_MSG_GET_AND_WRITE = 0x8A8 - SYS___LE_MSG_WRITE = 0x8A9 - SYS___LLTOA = 0x8AE - SYS___LLTOA_A = 0x8B4 - SYS___LTOA = 0x8AC - SYS___LTOA_A = 0x8B2 - SYS___PUTCHAR_UNLOCKED_A = 0x8CC - SYS___PUTC_UNLOCKED_A = 0x8CB - SYS___PUTUTXLINE_A = 0x8C9 - SYS___RESET_EXCEPTION_HANDLER = 0x8E3 - SYS___REXEC_A = 0x8C4 - SYS___REXEC_AF_A = 0x8C5 - SYS___SET_EXCEPTION_HANDLER = 0x8E2 - SYS___SNPRINTF_A = 0x8CD - SYS___SUPERKILL = 0x8A4 - SYS___TCGETATTR_A = 0x8A1 - SYS___TCSETATTR_A = 0x8A2 - SYS___ULLTOA = 0x8AF - SYS___ULLTOA_A = 0x8B5 - SYS___ULTOA = 0x8AD - SYS___ULTOA_A = 0x8B3 - SYS___UTOA = 0x8AB - SYS___UTOA_A = 0x8B1 - SYS___VHM_EVENT = 0x8E4 - SYS___VSNPRINTF_A = 0x8CE - SYS_____GETENV_A = 0x8C3 - SYS_____UTMPXNAME_A = 0x8CA - SYS_CACOSH = 0x9A0 - SYS_CACOSHF = 0x9A3 - SYS_CACOSHL = 0x9A6 - SYS_CARG = 0x9A9 - SYS_CARGF = 0x9AC - SYS_CARGL = 0x9AF - SYS_CASIN = 0x9B2 - SYS_CASINF = 0x9B5 - SYS_CASINH = 0x9BB - SYS_CASINHF = 0x9BE - SYS_CASINHL = 0x9C1 - SYS_CASINL = 0x9B8 - SYS_CATAN = 0x9C4 - SYS_CATANF = 0x9C7 - SYS_CATANH = 0x9CD - SYS_CATANHF = 0x9D0 - SYS_CATANHL = 0x9D3 - SYS_CATANL = 0x9CA - SYS_CCOS = 0x9D6 - SYS_CCOSF = 0x9D9 - SYS_CCOSH = 0x9DF - SYS_CCOSHF = 0x9E2 - SYS_CCOSHL = 0x9E5 - SYS_CCOSL = 0x9DC - SYS_CEXP = 0x9E8 - SYS_CEXPF = 0x9EB - SYS_CEXPL = 0x9EE - SYS_CIMAG = 0x9F1 - SYS_CIMAGF = 0x9F4 - SYS_CIMAGL = 0x9F7 - SYS_CLOGF = 0x9FD - SYS_MEMCHR = 0x09B - SYS_MEMCMP = 0x09A - SYS_STRCOLL = 0x09C - SYS_STRNCMP = 0x09D - SYS_STRRCHR = 0x09F - SYS_STRXFRM = 0x09E - SYS___CACOSHF_B = 0x9A4 - SYS___CACOSHF_H = 0x9A5 - SYS___CACOSHL_B = 0x9A7 - SYS___CACOSHL_H = 0x9A8 - SYS___CACOSH_B = 0x9A1 - SYS___CACOSH_H = 0x9A2 - SYS___CARGF_B = 0x9AD - SYS___CARGF_H = 0x9AE - SYS___CARGL_B = 0x9B0 - SYS___CARGL_H = 0x9B1 - SYS___CARG_B = 0x9AA - SYS___CARG_H = 0x9AB - SYS___CASINF_B = 0x9B6 - SYS___CASINF_H = 0x9B7 - SYS___CASINHF_B = 0x9BF - SYS___CASINHF_H = 0x9C0 - SYS___CASINHL_B = 0x9C2 - SYS___CASINHL_H = 0x9C3 - SYS___CASINH_B = 0x9BC - SYS___CASINH_H = 0x9BD - SYS___CASINL_B = 0x9B9 - SYS___CASINL_H = 0x9BA - SYS___CASIN_B = 0x9B3 - SYS___CASIN_H = 0x9B4 - SYS___CATANF_B = 0x9C8 - SYS___CATANF_H = 0x9C9 - SYS___CATANHF_B = 0x9D1 - SYS___CATANHF_H = 0x9D2 - SYS___CATANHL_B = 0x9D4 - SYS___CATANHL_H = 0x9D5 - 
SYS___CATANH_B = 0x9CE - SYS___CATANH_H = 0x9CF - SYS___CATANL_B = 0x9CB - SYS___CATANL_H = 0x9CC - SYS___CATAN_B = 0x9C5 - SYS___CATAN_H = 0x9C6 - SYS___CCOSF_B = 0x9DA - SYS___CCOSF_H = 0x9DB - SYS___CCOSHF_B = 0x9E3 - SYS___CCOSHF_H = 0x9E4 - SYS___CCOSHL_B = 0x9E6 - SYS___CCOSHL_H = 0x9E7 - SYS___CCOSH_B = 0x9E0 - SYS___CCOSH_H = 0x9E1 - SYS___CCOSL_B = 0x9DD - SYS___CCOSL_H = 0x9DE - SYS___CCOS_B = 0x9D7 - SYS___CCOS_H = 0x9D8 - SYS___CEXPF_B = 0x9EC - SYS___CEXPF_H = 0x9ED - SYS___CEXPL_B = 0x9EF - SYS___CEXPL_H = 0x9F0 - SYS___CEXP_B = 0x9E9 - SYS___CEXP_H = 0x9EA - SYS___CIMAGF_B = 0x9F5 - SYS___CIMAGF_H = 0x9F6 - SYS___CIMAGL_B = 0x9F8 - SYS___CIMAGL_H = 0x9F9 - SYS___CIMAG_B = 0x9F2 - SYS___CIMAG_H = 0x9F3 - SYS___CLOG = 0x9FA - SYS___CLOGF_B = 0x9FE - SYS___CLOGF_H = 0x9FF - SYS___CLOG_B = 0x9FB - SYS___CLOG_H = 0x9FC - SYS_ISWCTYPE = 0x10C - SYS_ISWXDIGI = 0x10A - SYS_ISWXDIGIT = 0x10A - SYS_MBSINIT = 0x10F - SYS_TOWLOWER = 0x10D - SYS_TOWUPPER = 0x10E - SYS_WCTYPE = 0x10B - SYS_WCSSTR = 0x11B - SYS___RPMTCH = 0x11A - SYS_WCSTOD = 0x12E - SYS_WCSTOK = 0x12C - SYS_WCSTOL = 0x12D - SYS_WCSTOUL = 0x12F - SYS_FGETWC = 0x13C - SYS_FGETWS = 0x13D - SYS_FPUTWC = 0x13E - SYS_FPUTWS = 0x13F - SYS_REGERROR = 0x13B - SYS_REGFREE = 0x13A - SYS_COLLEQUIV = 0x14F - SYS_COLLTOSTR = 0x14E - SYS_ISMCCOLLEL = 0x14C - SYS_STRTOCOLL = 0x14D - SYS_DLLFREE = 0x16F - SYS_DLLQUERYFN = 0x16D - SYS_DLLQUERYVAR = 0x16E - SYS_GETMCCOLL = 0x16A - SYS_GETWMCCOLL = 0x16B - SYS___ERR2AD = 0x16C - SYS_CFSETOSPEED = 0x17A - SYS_CHDIR = 0x17B - SYS_CHMOD = 0x17C - SYS_CHOWN = 0x17D - SYS_CLOSE = 0x17E - SYS_CLOSEDIR = 0x17F - SYS_LOG = 0x017 - SYS_COSH = 0x018 - SYS_FCHMOD = 0x18A - SYS_FCHOWN = 0x18B - SYS_FCNTL = 0x18C - SYS_FILENO = 0x18D - SYS_FORK = 0x18E - SYS_FPATHCONF = 0x18F - SYS_GETLOGIN = 0x19A - SYS_GETPGRP = 0x19C - SYS_GETPID = 0x19D - SYS_GETPPID = 0x19E - SYS_GETPWNAM = 0x19F - SYS_TANH = 0x019 - SYS_W_GETMNTENT = 0x19B - SYS_POW = 0x020 - SYS_PTHREAD_SELF = 0x20A - SYS_PTHREAD_SETINTR = 0x20B - SYS_PTHREAD_SETINTRTYPE = 0x20C - SYS_PTHREAD_SETSPECIFIC = 0x20D - SYS_PTHREAD_TESTINTR = 0x20E - SYS_PTHREAD_YIELD = 0x20F - SYS_SQRT = 0x021 - SYS_FLOOR = 0x022 - SYS_J1 = 0x023 - SYS_WCSPBRK = 0x23F - SYS_BSEARCH = 0x24C - SYS_FABS = 0x024 - SYS_GETENV = 0x24A - SYS_LDIV = 0x24D - SYS_SYSTEM = 0x24B - SYS_FMOD = 0x025 - SYS___RETHROW = 0x25F - SYS___THROW = 0x25E - SYS_J0 = 0x026 - SYS_PUTENV = 0x26A - SYS___GETENV = 0x26F - SYS_SEMCTL = 0x27A - SYS_SEMGET = 0x27B - SYS_SEMOP = 0x27C - SYS_SHMAT = 0x27D - SYS_SHMCTL = 0x27E - SYS_SHMDT = 0x27F - SYS_YN = 0x027 - SYS_JN = 0x028 - SYS_SIGALTSTACK = 0x28A - SYS_SIGHOLD = 0x28B - SYS_SIGIGNORE = 0x28C - SYS_SIGINTERRUPT = 0x28D - SYS_SIGPAUSE = 0x28E - SYS_SIGRELSE = 0x28F - SYS_GETOPT = 0x29A - SYS_GETSUBOPT = 0x29D - SYS_LCHOWN = 0x29B - SYS_SETPGRP = 0x29E - SYS_TRUNCATE = 0x29C - SYS_Y0 = 0x029 - SYS___GDERR = 0x29F - SYS_ISALPHA = 0x030 - SYS_VFORK = 0x30F - SYS__LONGJMP = 0x30D - SYS__SETJMP = 0x30E - SYS_GLOB = 0x31A - SYS_GLOBFREE = 0x31B - SYS_ISALNUM = 0x031 - SYS_PUTW = 0x31C - SYS_SEEKDIR = 0x31D - SYS_TELLDIR = 0x31E - SYS_TEMPNAM = 0x31F - SYS_GETTIMEOFDAY_R = 0x32E - SYS_ISLOWER = 0x032 - SYS_LGAMMA = 0x32C - SYS_REMAINDER = 0x32A - SYS_SCALB = 0x32B - SYS_SYNC = 0x32F - SYS_TTYSLOT = 0x32D - SYS_ENDPROTOENT = 0x33A - SYS_ENDSERVENT = 0x33B - SYS_GETHOSTBYADDR = 0x33D - SYS_GETHOSTBYADDR_R = 0x33C - SYS_GETHOSTBYNAME = 0x33F - SYS_GETHOSTBYNAME_R = 0x33E - SYS_ISCNTRL = 0x033 - SYS_GETSERVBYNAME = 0x34A - SYS_GETSERVBYPORT = 0x34B - 
SYS_GETSERVENT = 0x34C - SYS_GETSOCKNAME = 0x34D - SYS_GETSOCKOPT = 0x34E - SYS_INET_ADDR = 0x34F - SYS_ISDIGIT = 0x034 - SYS_ISGRAPH = 0x035 - SYS_SELECT = 0x35B - SYS_SELECTEX = 0x35C - SYS_SEND = 0x35D - SYS_SENDTO = 0x35F - SYS_CHROOT = 0x36A - SYS_ISNAN = 0x36D - SYS_ISUPPER = 0x036 - SYS_ULIMIT = 0x36C - SYS_UTIMES = 0x36E - SYS_W_STATVFS = 0x36B - SYS___H_ERRNO = 0x36F - SYS_GRANTPT = 0x37A - SYS_ISPRINT = 0x037 - SYS_TCGETSID = 0x37C - SYS_UNLOCKPT = 0x37B - SYS___TCGETCP = 0x37D - SYS___TCSETCP = 0x37E - SYS___TCSETTABLES = 0x37F - SYS_ISPUNCT = 0x038 - SYS_NLIST = 0x38C - SYS___IPDBCS = 0x38D - SYS___IPDSPX = 0x38E - SYS___IPMSGC = 0x38F - SYS___STHOSTENT = 0x38B - SYS___STSERVENT = 0x38A - SYS_ISSPACE = 0x039 - SYS_COS = 0x040 - SYS_T_ALLOC = 0x40A - SYS_T_BIND = 0x40B - SYS_T_CLOSE = 0x40C - SYS_T_CONNECT = 0x40D - SYS_T_ERROR = 0x40E - SYS_T_FREE = 0x40F - SYS_TAN = 0x041 - SYS_T_RCVREL = 0x41A - SYS_T_RCVUDATA = 0x41B - SYS_T_RCVUDERR = 0x41C - SYS_T_SND = 0x41D - SYS_T_SNDDIS = 0x41E - SYS_T_SNDREL = 0x41F - SYS_GETPMSG = 0x42A - SYS_ISASTREAM = 0x42B - SYS_PUTMSG = 0x42C - SYS_PUTPMSG = 0x42D - SYS_SINH = 0x042 - SYS___ISPOSIXON = 0x42E - SYS___OPENMVSREL = 0x42F - SYS_ACOS = 0x043 - SYS_ATAN = 0x044 - SYS_ATAN2 = 0x045 - SYS_FTELL = 0x046 - SYS_FGETPOS = 0x047 - SYS_SOCK_DEBUG = 0x47A - SYS_SOCK_DO_TESTSTOR = 0x47D - SYS_TAKESOCKET = 0x47E - SYS___SERVER_INIT = 0x47F - SYS_FSEEK = 0x048 - SYS___IPHOST = 0x48B - SYS___IPNODE = 0x48C - SYS___SERVER_CLASSIFY_CREATE = 0x48D - SYS___SERVER_CLASSIFY_DESTROY = 0x48E - SYS___SERVER_CLASSIFY_RESET = 0x48F - SYS___SMF_RECORD = 0x48A - SYS_FSETPOS = 0x049 - SYS___FNWSA = 0x49B - SYS___SPAWN2 = 0x49D - SYS___SPAWNP2 = 0x49E - SYS_ATOF = 0x050 - SYS_PTHREAD_MUTEXATTR_GETPSHARED = 0x50A - SYS_PTHREAD_MUTEXATTR_SETPSHARED = 0x50B - SYS_PTHREAD_RWLOCK_DESTROY = 0x50C - SYS_PTHREAD_RWLOCK_INIT = 0x50D - SYS_PTHREAD_RWLOCK_RDLOCK = 0x50E - SYS_PTHREAD_RWLOCK_TRYRDLOCK = 0x50F - SYS_ATOI = 0x051 - SYS___FP_CLASS = 0x51D - SYS___FP_CLR_FLAG = 0x51A - SYS___FP_FINITE = 0x51E - SYS___FP_ISNAN = 0x51F - SYS___FP_RAISE_XCP = 0x51C - SYS___FP_READ_FLAG = 0x51B - SYS_RAND = 0x052 - SYS_SIGTIMEDWAIT = 0x52D - SYS_SIGWAITINFO = 0x52E - SYS___CHKBFP = 0x52F - SYS___FPC_RS = 0x52C - SYS___FPC_RW = 0x52A - SYS___FPC_SM = 0x52B - SYS_STRTOD = 0x053 - SYS_STRTOL = 0x054 - SYS_STRTOUL = 0x055 - SYS_MALLOC = 0x056 - SYS_SRAND = 0x057 - SYS_CALLOC = 0x058 - SYS_FREE = 0x059 - SYS___OSENV = 0x59F - SYS___W_PIOCTL = 0x59E - SYS_LONGJMP = 0x060 - SYS___FLOORF_B = 0x60A - SYS___FLOORL_B = 0x60B - SYS___FREXPF_B = 0x60C - SYS___FREXPL_B = 0x60D - SYS___LDEXPF_B = 0x60E - SYS___LDEXPL_B = 0x60F - SYS_SIGNAL = 0x061 - SYS___ATAN2F_B = 0x61A - SYS___ATAN2L_B = 0x61B - SYS___COSHF_B = 0x61C - SYS___COSHL_B = 0x61D - SYS___EXPF_B = 0x61E - SYS___EXPL_B = 0x61F - SYS_TMPNAM = 0x062 - SYS___ABSF_B = 0x62A - SYS___ABSL_B = 0x62C - SYS___ABS_B = 0x62B - SYS___FMODF_B = 0x62D - SYS___FMODL_B = 0x62E - SYS___MODFF_B = 0x62F - SYS_ATANL = 0x63A - SYS_CEILF = 0x63B - SYS_CEILL = 0x63C - SYS_COSF = 0x63D - SYS_COSHF = 0x63F - SYS_COSL = 0x63E - SYS_REMOVE = 0x063 - SYS_POWL = 0x64A - SYS_RENAME = 0x064 - SYS_SINF = 0x64B - SYS_SINHF = 0x64F - SYS_SINL = 0x64C - SYS_SQRTF = 0x64D - SYS_SQRTL = 0x64E - SYS_BTOWC = 0x65F - SYS_FREXPL = 0x65A - SYS_LDEXPF = 0x65B - SYS_LDEXPL = 0x65C - SYS_MODFF = 0x65D - SYS_MODFL = 0x65E - SYS_TMPFILE = 0x065 - SYS_FREOPEN = 0x066 - SYS___CHARMAP_INIT_A = 0x66E - SYS___GETHOSTBYADDR_R_A = 0x66C - SYS___GETHOSTBYNAME_A = 0x66A - 
SYS___GETHOSTBYNAME_R_A = 0x66D - SYS___MBLEN_A = 0x66F - SYS___RES_INIT_A = 0x66B - SYS_FCLOSE = 0x067 - SYS___GETGRGID_R_A = 0x67D - SYS___WCSTOMBS_A = 0x67A - SYS___WCSTOMBS_STD_A = 0x67B - SYS___WCSWIDTH_A = 0x67C - SYS___WCSWIDTH_ASIA = 0x67F - SYS___WCSWIDTH_STD_A = 0x67E - SYS_FFLUSH = 0x068 - SYS___GETLOGIN_R_A = 0x68E - SYS___GETPWNAM_R_A = 0x68C - SYS___GETPWUID_R_A = 0x68D - SYS___TTYNAME_R_A = 0x68F - SYS___WCWIDTH_ASIA = 0x68B - SYS___WCWIDTH_STD_A = 0x68A - SYS_FOPEN = 0x069 - SYS___REGEXEC_A = 0x69A - SYS___REGEXEC_STD_A = 0x69B - SYS___REGFREE_A = 0x69C - SYS___REGFREE_STD_A = 0x69D - SYS___STRCOLL_A = 0x69E - SYS___STRCOLL_C_A = 0x69F - SYS_SCANF = 0x070 - SYS___A64L_A = 0x70C - SYS___ECVT_A = 0x70D - SYS___FCVT_A = 0x70E - SYS___GCVT_A = 0x70F - SYS___STRTOUL_A = 0x70A - SYS_____AE_CORRESTBL_QUERY_A = 0x70B - SYS_SPRINTF = 0x071 - SYS___ACCESS_A = 0x71F - SYS___CATOPEN_A = 0x71E - SYS___GETOPT_A = 0x71D - SYS___REALPATH_A = 0x71A - SYS___SETENV_A = 0x71B - SYS___SYSTEM_A = 0x71C - SYS_FGETC = 0x072 - SYS___GAI_STRERROR_A = 0x72F - SYS___RMDIR_A = 0x72A - SYS___STATVFS_A = 0x72B - SYS___SYMLINK_A = 0x72C - SYS___TRUNCATE_A = 0x72D - SYS___UNLINK_A = 0x72E - SYS_VFPRINTF = 0x073 - SYS___ISSPACE_A = 0x73A - SYS___ISUPPER_A = 0x73B - SYS___ISWALNUM_A = 0x73F - SYS___ISXDIGIT_A = 0x73C - SYS___TOLOWER_A = 0x73D - SYS___TOUPPER_A = 0x73E - SYS_VPRINTF = 0x074 - SYS___CONFSTR_A = 0x74B - SYS___FDOPEN_A = 0x74E - SYS___FLDATA_A = 0x74F - SYS___FTOK_A = 0x74C - SYS___ISWXDIGIT_A = 0x74A - SYS___MKTEMP_A = 0x74D - SYS_VSPRINTF = 0x075 - SYS___GETGRGID_A = 0x75A - SYS___GETGRNAM_A = 0x75B - SYS___GETGROUPSBYNAME_A = 0x75C - SYS___GETHOSTENT_A = 0x75D - SYS___GETHOSTNAME_A = 0x75E - SYS___GETLOGIN_A = 0x75F - SYS_GETC = 0x076 - SYS___CREATEWORKUNIT_A = 0x76A - SYS___CTERMID_A = 0x76B - SYS___FMTMSG_A = 0x76C - SYS___INITGROUPS_A = 0x76D - SYS___MSGRCV_A = 0x76F - SYS_____LOGIN_A = 0x76E - SYS_FGETS = 0x077 - SYS___STRCASECMP_A = 0x77B - SYS___STRNCASECMP_A = 0x77C - SYS___TTYNAME_A = 0x77D - SYS___UNAME_A = 0x77E - SYS___UTIMES_A = 0x77F - SYS_____SERVER_PWU_A = 0x77A - SYS_FPUTC = 0x078 - SYS___CREAT_O_A = 0x78E - SYS___ENVNA = 0x78F - SYS___FREAD_A = 0x78A - SYS___FWRITE_A = 0x78B - SYS___ISASCII = 0x78D - SYS___OPEN_O_A = 0x78C - SYS_FPUTS = 0x079 - SYS___ASCTIME_A = 0x79C - SYS___CTIME_A = 0x79D - SYS___GETDATE_A = 0x79E - SYS___GETSERVBYPORT_A = 0x79A - SYS___GETSERVENT_A = 0x79B - SYS___TZSET_A = 0x79F - SYS_ACL_FROM_TEXT = 0x80C - SYS_ACL_SET_FD = 0x80A - SYS_ACL_SET_FILE = 0x80B - SYS_ACL_SORT = 0x80E - SYS_ACL_TO_TEXT = 0x80D - SYS_UNGETC = 0x080 - SYS___SHUTDOWN_REGISTRATION = 0x80F - SYS_FREAD = 0x081 - SYS_FREEADDRINFO = 0x81A - SYS_GAI_STRERROR = 0x81B - SYS_REXEC_AF = 0x81C - SYS___DYNALLOC_A = 0x81F - SYS___POE = 0x81D - SYS_WCSTOMBS = 0x082 - SYS___INET_ADDR_A = 0x82F - SYS___NLIST_A = 0x82A - SYS_____TCGETCP_A = 0x82B - SYS_____TCSETCP_A = 0x82C - SYS_____W_PIOCTL_A = 0x82E - SYS_MBTOWC = 0x083 - SYS___CABEND = 0x83D - SYS___LE_CIB_GET = 0x83E - SYS___RECVMSG_A = 0x83B - SYS___SENDMSG_A = 0x83A - SYS___SET_LAA_FOR_JIT = 0x83F - SYS_____LCHATTR_A = 0x83C - SYS_WCTOMB = 0x084 - SYS___CBRTL_B = 0x84A - SYS___COPYSIGNF_B = 0x84B - SYS___COPYSIGNL_B = 0x84C - SYS___COTANF_B = 0x84D - SYS___COTANL_B = 0x84F - SYS___COTAN_B = 0x84E - SYS_MBSTOWCS = 0x085 - SYS___LOG1PL_B = 0x85A - SYS___LOG2F_B = 0x85B - SYS___LOG2L_B = 0x85D - SYS___LOG2_B = 0x85C - SYS___REMAINDERF_B = 0x85E - SYS___REMAINDERL_B = 0x85F - SYS_ACOSHF = 0x86E - SYS_ACOSHL = 0x86F - SYS_WCSCPY = 0x086 - 
SYS___ERFCF_B = 0x86D - SYS___ERFF_B = 0x86C - SYS___LROUNDF_B = 0x86A - SYS___LROUND_B = 0x86B - SYS_COTANL = 0x87A - SYS_EXP2F = 0x87B - SYS_EXP2L = 0x87C - SYS_EXPM1F = 0x87D - SYS_EXPM1L = 0x87E - SYS_FDIMF = 0x87F - SYS_WCSCAT = 0x087 - SYS___COTANL = 0x87A - SYS_REMAINDERF = 0x88A - SYS_REMAINDERL = 0x88B - SYS_REMAINDF = 0x88A - SYS_REMAINDL = 0x88B - SYS_REMQUO = 0x88D - SYS_REMQUOF = 0x88C - SYS_REMQUOL = 0x88E - SYS_TGAMMAF = 0x88F - SYS_WCSCHR = 0x088 - SYS_ERFCF = 0x89B - SYS_ERFCL = 0x89C - SYS_ERFL = 0x89A - SYS_EXP2 = 0x89E - SYS_WCSCMP = 0x089 - SYS___EXP2_B = 0x89D - SYS___FAR_JUMP = 0x89F - SYS_ABS = 0x090 - SYS___ERFCL_H = 0x90A - SYS___EXPF_H = 0x90C - SYS___EXPL_H = 0x90D - SYS___EXPM1_H = 0x90E - SYS___EXP_H = 0x90B - SYS___FDIM_H = 0x90F - SYS_DIV = 0x091 - SYS___LOG2F_H = 0x91F - SYS___LOG2_H = 0x91E - SYS___LOGB_H = 0x91D - SYS___LOGF_H = 0x91B - SYS___LOGL_H = 0x91C - SYS___LOG_H = 0x91A - SYS_LABS = 0x092 - SYS___POWL_H = 0x92A - SYS___REMAINDER_H = 0x92B - SYS___RINT_H = 0x92C - SYS___SCALB_H = 0x92D - SYS___SINF_H = 0x92F - SYS___SIN_H = 0x92E - SYS_STRNCPY = 0x093 - SYS___TANHF_H = 0x93B - SYS___TANHL_H = 0x93C - SYS___TANH_H = 0x93A - SYS___TGAMMAF_H = 0x93E - SYS___TGAMMA_H = 0x93D - SYS___TRUNC_H = 0x93F - SYS_MEMCPY = 0x094 - SYS_VFWSCANF = 0x94A - SYS_VSWSCANF = 0x94E - SYS_VWSCANF = 0x94C - SYS_INET6_RTH_ADD = 0x95D - SYS_INET6_RTH_INIT = 0x95C - SYS_INET6_RTH_REVERSE = 0x95E - SYS_INET6_RTH_SEGMENTS = 0x95F - SYS_INET6_RTH_SPACE = 0x95B - SYS_MEMMOVE = 0x095 - SYS_WCSTOLD = 0x95A - SYS_STRCPY = 0x096 - SYS_STRCMP = 0x097 - SYS_CABS = 0x98E - SYS_STRCAT = 0x098 - SYS___CABS_B = 0x98F - SYS___POW_II = 0x98A - SYS___POW_II_B = 0x98B - SYS___POW_II_H = 0x98C - SYS_CACOSF = 0x99A - SYS_CACOSL = 0x99D - SYS_STRNCAT = 0x099 - SYS___CACOSF_B = 0x99B - SYS___CACOSF_H = 0x99C - SYS___CACOSL_B = 0x99E - SYS___CACOSL_H = 0x99F - SYS_ISWALPHA = 0x100 - SYS_ISWBLANK = 0x101 - SYS___ISWBLK = 0x101 - SYS_ISWCNTRL = 0x102 - SYS_ISWDIGIT = 0x103 - SYS_ISWGRAPH = 0x104 - SYS_ISWLOWER = 0x105 - SYS_ISWPRINT = 0x106 - SYS_ISWPUNCT = 0x107 - SYS_ISWSPACE = 0x108 - SYS_ISWUPPER = 0x109 - SYS_WCTOB = 0x110 - SYS_MBRLEN = 0x111 - SYS_MBRTOWC = 0x112 - SYS_MBSRTOWC = 0x113 - SYS_MBSRTOWCS = 0x113 - SYS_WCRTOMB = 0x114 - SYS_WCSRTOMB = 0x115 - SYS_WCSRTOMBS = 0x115 - SYS___CSID = 0x116 - SYS___WCSID = 0x117 - SYS_STRPTIME = 0x118 - SYS___STRPTM = 0x118 - SYS_STRFMON = 0x119 - SYS_WCSCOLL = 0x130 - SYS_WCSXFRM = 0x131 - SYS_WCSWIDTH = 0x132 - SYS_WCWIDTH = 0x133 - SYS_WCSFTIME = 0x134 - SYS_SWPRINTF = 0x135 - SYS_VSWPRINT = 0x136 - SYS_VSWPRINTF = 0x136 - SYS_SWSCANF = 0x137 - SYS_REGCOMP = 0x138 - SYS_REGEXEC = 0x139 - SYS_GETWC = 0x140 - SYS_GETWCHAR = 0x141 - SYS_PUTWC = 0x142 - SYS_PUTWCHAR = 0x143 - SYS_UNGETWC = 0x144 - SYS_ICONV_OPEN = 0x145 - SYS_ICONV = 0x146 - SYS_ICONV_CLOSE = 0x147 - SYS_COLLRANGE = 0x150 - SYS_CCLASS = 0x151 - SYS_COLLORDER = 0x152 - SYS___DEMANGLE = 0x154 - SYS_FDOPEN = 0x155 - SYS___ERRNO = 0x156 - SYS___ERRNO2 = 0x157 - SYS___TERROR = 0x158 - SYS_MAXCOLL = 0x169 - SYS_DLLLOAD = 0x170 - SYS__EXIT = 0x174 - SYS_ACCESS = 0x175 - SYS_ALARM = 0x176 - SYS_CFGETISPEED = 0x177 - SYS_CFGETOSPEED = 0x178 - SYS_CFSETISPEED = 0x179 - SYS_CREAT = 0x180 - SYS_CTERMID = 0x181 - SYS_DUP = 0x182 - SYS_DUP2 = 0x183 - SYS_EXECL = 0x184 - SYS_EXECLE = 0x185 - SYS_EXECLP = 0x186 - SYS_EXECV = 0x187 - SYS_EXECVE = 0x188 - SYS_EXECVP = 0x189 - SYS_FSTAT = 0x190 - SYS_FSYNC = 0x191 - SYS_FTRUNCATE = 0x192 - SYS_GETCWD = 0x193 - SYS_GETEGID = 0x194 - SYS_GETEUID = 0x195 - 
SYS_GETGID = 0x196 - SYS_GETGRGID = 0x197 - SYS_GETGRNAM = 0x198 - SYS_GETGROUPS = 0x199 - SYS_PTHREAD_MUTEXATTR_DESTROY = 0x200 - SYS_PTHREAD_MUTEXATTR_SETKIND_NP = 0x201 - SYS_PTHREAD_MUTEXATTR_GETKIND_NP = 0x202 - SYS_PTHREAD_MUTEX_INIT = 0x203 - SYS_PTHREAD_MUTEX_DESTROY = 0x204 - SYS_PTHREAD_MUTEX_LOCK = 0x205 - SYS_PTHREAD_MUTEX_TRYLOCK = 0x206 - SYS_PTHREAD_MUTEX_UNLOCK = 0x207 - SYS_PTHREAD_ONCE = 0x209 - SYS_TW_OPEN = 0x210 - SYS_TW_FCNTL = 0x211 - SYS_PTHREAD_JOIN_D4_NP = 0x212 - SYS_PTHREAD_CONDATTR_SETKIND_NP = 0x213 - SYS_PTHREAD_CONDATTR_GETKIND_NP = 0x214 - SYS_EXTLINK_NP = 0x215 - SYS___PASSWD = 0x216 - SYS_SETGROUPS = 0x217 - SYS_INITGROUPS = 0x218 - SYS_WCSRCHR = 0x240 - SYS_SVC99 = 0x241 - SYS___SVC99 = 0x241 - SYS_WCSWCS = 0x242 - SYS_LOCALECO = 0x243 - SYS_LOCALECONV = 0x243 - SYS___LIBREL = 0x244 - SYS_RELEASE = 0x245 - SYS___RLSE = 0x245 - SYS_FLOCATE = 0x246 - SYS___FLOCT = 0x246 - SYS_FDELREC = 0x247 - SYS___FDLREC = 0x247 - SYS_FETCH = 0x248 - SYS___FETCH = 0x248 - SYS_QSORT = 0x249 - SYS___CLEANUPCATCH = 0x260 - SYS___CATCHMATCH = 0x261 - SYS___CLEAN2UPCATCH = 0x262 - SYS_GETPRIORITY = 0x270 - SYS_NICE = 0x271 - SYS_SETPRIORITY = 0x272 - SYS_GETITIMER = 0x273 - SYS_SETITIMER = 0x274 - SYS_MSGCTL = 0x275 - SYS_MSGGET = 0x276 - SYS_MSGRCV = 0x277 - SYS_MSGSND = 0x278 - SYS_MSGXRCV = 0x279 - SYS___MSGXR = 0x279 - SYS_SHMGET = 0x280 - SYS___GETIPC = 0x281 - SYS_SETGRENT = 0x282 - SYS_GETGRENT = 0x283 - SYS_ENDGRENT = 0x284 - SYS_SETPWENT = 0x285 - SYS_GETPWENT = 0x286 - SYS_ENDPWENT = 0x287 - SYS_BSD_SIGNAL = 0x288 - SYS_KILLPG = 0x289 - SYS_SIGSET = 0x290 - SYS_SIGSTACK = 0x291 - SYS_GETRLIMIT = 0x292 - SYS_SETRLIMIT = 0x293 - SYS_GETRUSAGE = 0x294 - SYS_MMAP = 0x295 - SYS_MPROTECT = 0x296 - SYS_MSYNC = 0x297 - SYS_MUNMAP = 0x298 - SYS_CONFSTR = 0x299 - SYS___NDMTRM = 0x300 - SYS_FTOK = 0x301 - SYS_BASENAME = 0x302 - SYS_DIRNAME = 0x303 - SYS_GETDTABLESIZE = 0x304 - SYS_MKSTEMP = 0x305 - SYS_MKTEMP = 0x306 - SYS_NFTW = 0x307 - SYS_GETWD = 0x308 - SYS_LOCKF = 0x309 - SYS_WORDEXP = 0x310 - SYS_WORDFREE = 0x311 - SYS_GETPGID = 0x312 - SYS_GETSID = 0x313 - SYS___UTMPXNAME = 0x314 - SYS_CUSERID = 0x315 - SYS_GETPASS = 0x316 - SYS_FNMATCH = 0x317 - SYS_FTW = 0x318 - SYS_GETW = 0x319 - SYS_ACOSH = 0x320 - SYS_ASINH = 0x321 - SYS_ATANH = 0x322 - SYS_CBRT = 0x323 - SYS_EXPM1 = 0x324 - SYS_ILOGB = 0x325 - SYS_LOGB = 0x326 - SYS_LOG1P = 0x327 - SYS_NEXTAFTER = 0x328 - SYS_RINT = 0x329 - SYS_SPAWN = 0x330 - SYS_SPAWNP = 0x331 - SYS_GETLOGIN_UU = 0x332 - SYS_ECVT = 0x333 - SYS_FCVT = 0x334 - SYS_GCVT = 0x335 - SYS_ACCEPT = 0x336 - SYS_BIND = 0x337 - SYS_CONNECT = 0x338 - SYS_ENDHOSTENT = 0x339 - SYS_GETHOSTENT = 0x340 - SYS_GETHOSTID = 0x341 - SYS_GETHOSTNAME = 0x342 - SYS_GETNETBYADDR = 0x343 - SYS_GETNETBYNAME = 0x344 - SYS_GETNETENT = 0x345 - SYS_GETPEERNAME = 0x346 - SYS_GETPROTOBYNAME = 0x347 - SYS_GETPROTOBYNUMBER = 0x348 - SYS_GETPROTOENT = 0x349 - SYS_INET_LNAOF = 0x350 - SYS_INET_MAKEADDR = 0x351 - SYS_INET_NETOF = 0x352 - SYS_INET_NETWORK = 0x353 - SYS_INET_NTOA = 0x354 - SYS_IOCTL = 0x355 - SYS_LISTEN = 0x356 - SYS_READV = 0x357 - SYS_RECV = 0x358 - SYS_RECVFROM = 0x359 - SYS_SETHOSTENT = 0x360 - SYS_SETNETENT = 0x361 - SYS_SETPEER = 0x362 - SYS_SETPROTOENT = 0x363 - SYS_SETSERVENT = 0x364 - SYS_SETSOCKOPT = 0x365 - SYS_SHUTDOWN = 0x366 - SYS_SOCKET = 0x367 - SYS_SOCKETPAIR = 0x368 - SYS_WRITEV = 0x369 - SYS_ENDNETENT = 0x370 - SYS_CLOSELOG = 0x371 - SYS_OPENLOG = 0x372 - SYS_SETLOGMASK = 0x373 - SYS_SYSLOG = 0x374 - SYS_PTSNAME = 0x375 - SYS_SETREUID = 0x376 - 
SYS_SETREGID = 0x377 - SYS_REALPATH = 0x378 - SYS___SIGNGAM = 0x379 - SYS_POLL = 0x380 - SYS_REXEC = 0x381 - SYS___ISASCII2 = 0x382 - SYS___TOASCII2 = 0x383 - SYS_CHPRIORITY = 0x384 - SYS_PTHREAD_ATTR_SETSYNCTYPE_NP = 0x385 - SYS_PTHREAD_ATTR_GETSYNCTYPE_NP = 0x386 - SYS_PTHREAD_SET_LIMIT_NP = 0x387 - SYS___STNETENT = 0x388 - SYS___STPROTOENT = 0x389 - SYS___SELECT1 = 0x390 - SYS_PTHREAD_SECURITY_NP = 0x391 - SYS___CHECK_RESOURCE_AUTH_NP = 0x392 - SYS___CONVERT_ID_NP = 0x393 - SYS___OPENVMREL = 0x394 - SYS_WMEMCHR = 0x395 - SYS_WMEMCMP = 0x396 - SYS_WMEMCPY = 0x397 - SYS_WMEMMOVE = 0x398 - SYS_WMEMSET = 0x399 - SYS___FPUTWC = 0x400 - SYS___PUTWC = 0x401 - SYS___PWCHAR = 0x402 - SYS___WCSFTM = 0x403 - SYS___WCSTOK = 0x404 - SYS___WCWDTH = 0x405 - SYS_T_ACCEPT = 0x409 - SYS_T_GETINFO = 0x410 - SYS_T_GETPROTADDR = 0x411 - SYS_T_GETSTATE = 0x412 - SYS_T_LISTEN = 0x413 - SYS_T_LOOK = 0x414 - SYS_T_OPEN = 0x415 - SYS_T_OPTMGMT = 0x416 - SYS_T_RCV = 0x417 - SYS_T_RCVCONNECT = 0x418 - SYS_T_RCVDIS = 0x419 - SYS_T_SNDUDATA = 0x420 - SYS_T_STRERROR = 0x421 - SYS_T_SYNC = 0x422 - SYS_T_UNBIND = 0x423 - SYS___T_ERRNO = 0x424 - SYS___RECVMSG2 = 0x425 - SYS___SENDMSG2 = 0x426 - SYS_FATTACH = 0x427 - SYS_FDETACH = 0x428 - SYS_GETMSG = 0x429 - SYS_GETCONTEXT = 0x430 - SYS_SETCONTEXT = 0x431 - SYS_MAKECONTEXT = 0x432 - SYS_SWAPCONTEXT = 0x433 - SYS_PTHREAD_GETSPECIFIC_D8_NP = 0x434 - SYS_GETCLIENTID = 0x470 - SYS___GETCLIENTID = 0x471 - SYS_GETSTABLESIZE = 0x472 - SYS_GETIBMOPT = 0x473 - SYS_GETIBMSOCKOPT = 0x474 - SYS_GIVESOCKET = 0x475 - SYS_IBMSFLUSH = 0x476 - SYS_MAXDESC = 0x477 - SYS_SETIBMOPT = 0x478 - SYS_SETIBMSOCKOPT = 0x479 - SYS___SERVER_PWU = 0x480 - SYS_PTHREAD_TAG_NP = 0x481 - SYS___CONSOLE = 0x482 - SYS___WSINIT = 0x483 - SYS___IPTCPN = 0x489 - SYS___SERVER_CLASSIFY = 0x490 - SYS___HEAPRPT = 0x496 - SYS___ISBFP = 0x500 - SYS___FP_CAST = 0x501 - SYS___CERTIFICATE = 0x502 - SYS_SEND_FILE = 0x503 - SYS_AIO_CANCEL = 0x504 - SYS_AIO_ERROR = 0x505 - SYS_AIO_READ = 0x506 - SYS_AIO_RETURN = 0x507 - SYS_AIO_SUSPEND = 0x508 - SYS_AIO_WRITE = 0x509 - SYS_PTHREAD_RWLOCK_TRYWRLOCK = 0x510 - SYS_PTHREAD_RWLOCK_UNLOCK = 0x511 - SYS_PTHREAD_RWLOCK_WRLOCK = 0x512 - SYS_PTHREAD_RWLOCKATTR_GETPSHARED = 0x513 - SYS_PTHREAD_RWLOCKATTR_SETPSHARED = 0x514 - SYS_PTHREAD_RWLOCKATTR_INIT = 0x515 - SYS_PTHREAD_RWLOCKATTR_DESTROY = 0x516 - SYS___CTTBL = 0x517 - SYS_PTHREAD_MUTEXATTR_SETTYPE = 0x518 - SYS_PTHREAD_MUTEXATTR_GETTYPE = 0x519 - SYS___FP_UNORDERED = 0x520 - SYS___FP_READ_RND = 0x521 - SYS___FP_READ_RND_B = 0x522 - SYS___FP_SWAP_RND = 0x523 - SYS___FP_SWAP_RND_B = 0x524 - SYS___FP_LEVEL = 0x525 - SYS___FP_BTOH = 0x526 - SYS___FP_HTOB = 0x527 - SYS___FPC_RD = 0x528 - SYS___FPC_WR = 0x529 - SYS_PTHREAD_SETCANCELTYPE = 0x600 - SYS_PTHREAD_TESTCANCEL = 0x601 - SYS___ATANF_B = 0x602 - SYS___ATANL_B = 0x603 - SYS___CEILF_B = 0x604 - SYS___CEILL_B = 0x605 - SYS___COSF_B = 0x606 - SYS___COSL_B = 0x607 - SYS___FABSF_B = 0x608 - SYS___FABSL_B = 0x609 - SYS___SINF_B = 0x610 - SYS___SINL_B = 0x611 - SYS___TANF_B = 0x612 - SYS___TANL_B = 0x613 - SYS___TANHF_B = 0x614 - SYS___TANHL_B = 0x615 - SYS___ACOSF_B = 0x616 - SYS___ACOSL_B = 0x617 - SYS___ASINF_B = 0x618 - SYS___ASINL_B = 0x619 - SYS___LOGF_B = 0x620 - SYS___LOGL_B = 0x621 - SYS___LOG10F_B = 0x622 - SYS___LOG10L_B = 0x623 - SYS___POWF_B = 0x624 - SYS___POWL_B = 0x625 - SYS___SINHF_B = 0x626 - SYS___SINHL_B = 0x627 - SYS___SQRTF_B = 0x628 - SYS___SQRTL_B = 0x629 - SYS___MODFL_B = 0x630 - SYS_ABSF = 0x631 - SYS_ABSL = 0x632 - SYS_ACOSF = 0x633 - SYS_ACOSL = 0x634 - 
SYS_ASINF = 0x635 - SYS_ASINL = 0x636 - SYS_ATAN2F = 0x637 - SYS_ATAN2L = 0x638 - SYS_ATANF = 0x639 - SYS_COSHL = 0x640 - SYS_EXPF = 0x641 - SYS_EXPL = 0x642 - SYS_TANHF = 0x643 - SYS_TANHL = 0x644 - SYS_LOG10F = 0x645 - SYS_LOG10L = 0x646 - SYS_LOGF = 0x647 - SYS_LOGL = 0x648 - SYS_POWF = 0x649 - SYS_SINHL = 0x650 - SYS_TANF = 0x651 - SYS_TANL = 0x652 - SYS_FABSF = 0x653 - SYS_FABSL = 0x654 - SYS_FLOORF = 0x655 - SYS_FLOORL = 0x656 - SYS_FMODF = 0x657 - SYS_FMODL = 0x658 - SYS_FREXPF = 0x659 - SYS___CHATTR = 0x660 - SYS___FCHATTR = 0x661 - SYS___TOCCSID = 0x662 - SYS___CSNAMETYPE = 0x663 - SYS___TOCSNAME = 0x664 - SYS___CCSIDTYPE = 0x665 - SYS___AE_CORRESTBL_QUERY = 0x666 - SYS___AE_AUTOCONVERT_STATE = 0x667 - SYS_DN_FIND = 0x668 - SYS___GETHOSTBYADDR_A = 0x669 - SYS___MBLEN_SB_A = 0x670 - SYS___MBLEN_STD_A = 0x671 - SYS___MBLEN_UTF = 0x672 - SYS___MBSTOWCS_A = 0x673 - SYS___MBSTOWCS_STD_A = 0x674 - SYS___MBTOWC_A = 0x675 - SYS___MBTOWC_ISO1 = 0x676 - SYS___MBTOWC_SBCS = 0x677 - SYS___MBTOWC_MBCS = 0x678 - SYS___MBTOWC_UTF = 0x679 - SYS___CSID_A = 0x680 - SYS___CSID_STD_A = 0x681 - SYS___WCSID_A = 0x682 - SYS___WCSID_STD_A = 0x683 - SYS___WCTOMB_A = 0x684 - SYS___WCTOMB_ISO1 = 0x685 - SYS___WCTOMB_STD_A = 0x686 - SYS___WCTOMB_UTF = 0x687 - SYS___WCWIDTH_A = 0x688 - SYS___GETGRNAM_R_A = 0x689 - SYS___READDIR_R_A = 0x690 - SYS___E2A_S = 0x691 - SYS___FNMATCH_A = 0x692 - SYS___FNMATCH_C_A = 0x693 - SYS___EXECL_A = 0x694 - SYS___FNMATCH_STD_A = 0x695 - SYS___REGCOMP_A = 0x696 - SYS___REGCOMP_STD_A = 0x697 - SYS___REGERROR_A = 0x698 - SYS___REGERROR_STD_A = 0x699 - SYS___SWPRINTF_A = 0x700 - SYS___FSCANF_A = 0x701 - SYS___SCANF_A = 0x702 - SYS___SSCANF_A = 0x703 - SYS___SWSCANF_A = 0x704 - SYS___ATOF_A = 0x705 - SYS___ATOI_A = 0x706 - SYS___ATOL_A = 0x707 - SYS___STRTOD_A = 0x708 - SYS___STRTOL_A = 0x709 - SYS___L64A_A = 0x710 - SYS___STRERROR_A = 0x711 - SYS___PERROR_A = 0x712 - SYS___FETCH_A = 0x713 - SYS___GETENV_A = 0x714 - SYS___MKSTEMP_A = 0x717 - SYS___PTSNAME_A = 0x718 - SYS___PUTENV_A = 0x719 - SYS___CHDIR_A = 0x720 - SYS___CHOWN_A = 0x721 - SYS___CHROOT_A = 0x722 - SYS___GETCWD_A = 0x723 - SYS___GETWD_A = 0x724 - SYS___LCHOWN_A = 0x725 - SYS___LINK_A = 0x726 - SYS___PATHCONF_A = 0x727 - SYS___IF_NAMEINDEX_A = 0x728 - SYS___READLINK_A = 0x729 - SYS___EXTLINK_NP_A = 0x730 - SYS___ISALNUM_A = 0x731 - SYS___ISALPHA_A = 0x732 - SYS___A2E_S = 0x733 - SYS___ISCNTRL_A = 0x734 - SYS___ISDIGIT_A = 0x735 - SYS___ISGRAPH_A = 0x736 - SYS___ISLOWER_A = 0x737 - SYS___ISPRINT_A = 0x738 - SYS___ISPUNCT_A = 0x739 - SYS___ISWALPHA_A = 0x740 - SYS___A2E_L = 0x741 - SYS___ISWCNTRL_A = 0x742 - SYS___ISWDIGIT_A = 0x743 - SYS___ISWGRAPH_A = 0x744 - SYS___ISWLOWER_A = 0x745 - SYS___ISWPRINT_A = 0x746 - SYS___ISWPUNCT_A = 0x747 - SYS___ISWSPACE_A = 0x748 - SYS___ISWUPPER_A = 0x749 - SYS___REMOVE_A = 0x750 - SYS___RENAME_A = 0x751 - SYS___TMPNAM_A = 0x752 - SYS___FOPEN_A = 0x753 - SYS___FREOPEN_A = 0x754 - SYS___CUSERID_A = 0x755 - SYS___POPEN_A = 0x756 - SYS___TEMPNAM_A = 0x757 - SYS___FTW_A = 0x758 - SYS___GETGRENT_A = 0x759 - SYS___INET_NTOP_A = 0x760 - SYS___GETPASS_A = 0x761 - SYS___GETPWENT_A = 0x762 - SYS___GETPWNAM_A = 0x763 - SYS___GETPWUID_A = 0x764 - SYS_____CHECK_RESOURCE_AUTH_NP_A = 0x765 - SYS___CHECKSCHENV_A = 0x766 - SYS___CONNECTSERVER_A = 0x767 - SYS___CONNECTWORKMGR_A = 0x768 - SYS_____CONSOLE_A = 0x769 - SYS___MSGSND_A = 0x770 - SYS___MSGXRCV_A = 0x771 - SYS___NFTW_A = 0x772 - SYS_____PASSWD_A = 0x773 - SYS___PTHREAD_SECURITY_NP_A = 0x774 - SYS___QUERYMETRICS_A = 0x775 - SYS___QUERYSCHENV = 
0x776 - SYS___READV_A = 0x777 - SYS_____SERVER_CLASSIFY_A = 0x778 - SYS_____SERVER_INIT_A = 0x779 - SYS___W_GETPSENT_A = 0x780 - SYS___WRITEV_A = 0x781 - SYS___W_STATFS_A = 0x782 - SYS___W_STATVFS_A = 0x783 - SYS___FPUTC_A = 0x784 - SYS___PUTCHAR_A = 0x785 - SYS___PUTS_A = 0x786 - SYS___FGETS_A = 0x787 - SYS___GETS_A = 0x788 - SYS___FPUTS_A = 0x789 - SYS___PUTC_A = 0x790 - SYS___AE_THREAD_SETMODE = 0x791 - SYS___AE_THREAD_SWAPMODE = 0x792 - SYS___GETNETBYADDR_A = 0x793 - SYS___GETNETBYNAME_A = 0x794 - SYS___GETNETENT_A = 0x795 - SYS___GETPROTOBYNAME_A = 0x796 - SYS___GETPROTOBYNUMBER_A = 0x797 - SYS___GETPROTOENT_A = 0x798 - SYS___GETSERVBYNAME_A = 0x799 - SYS_ACL_FIRST_ENTRY = 0x800 - SYS_ACL_GET_ENTRY = 0x801 - SYS_ACL_VALID = 0x802 - SYS_ACL_CREATE_ENTRY = 0x803 - SYS_ACL_DELETE_ENTRY = 0x804 - SYS_ACL_UPDATE_ENTRY = 0x805 - SYS_ACL_DELETE_FD = 0x806 - SYS_ACL_DELETE_FILE = 0x807 - SYS_ACL_GET_FD = 0x808 - SYS_ACL_GET_FILE = 0x809 - SYS___ERFL_B = 0x810 - SYS___ERFCL_B = 0x811 - SYS___LGAMMAL_B = 0x812 - SYS___SETHOOKEVENTS = 0x813 - SYS_IF_NAMETOINDEX = 0x814 - SYS_IF_INDEXTONAME = 0x815 - SYS_IF_NAMEINDEX = 0x816 - SYS_IF_FREENAMEINDEX = 0x817 - SYS_GETADDRINFO = 0x818 - SYS_GETNAMEINFO = 0x819 - SYS___DYNFREE_A = 0x820 - SYS___RES_QUERY_A = 0x821 - SYS___RES_SEARCH_A = 0x822 - SYS___RES_QUERYDOMAIN_A = 0x823 - SYS___RES_MKQUERY_A = 0x824 - SYS___RES_SEND_A = 0x825 - SYS___DN_EXPAND_A = 0x826 - SYS___DN_SKIPNAME_A = 0x827 - SYS___DN_COMP_A = 0x828 - SYS___DN_FIND_A = 0x829 - SYS___INET_NTOA_A = 0x830 - SYS___INET_NETWORK_A = 0x831 - SYS___ACCEPT_A = 0x832 - SYS___ACCEPT_AND_RECV_A = 0x833 - SYS___BIND_A = 0x834 - SYS___CONNECT_A = 0x835 - SYS___GETPEERNAME_A = 0x836 - SYS___GETSOCKNAME_A = 0x837 - SYS___RECVFROM_A = 0x838 - SYS___SENDTO_A = 0x839 - SYS___LCHATTR = 0x840 - SYS___WRITEDOWN = 0x841 - SYS_PTHREAD_MUTEX_INIT2 = 0x842 - SYS___ACOSHF_B = 0x843 - SYS___ACOSHL_B = 0x844 - SYS___ASINHF_B = 0x845 - SYS___ASINHL_B = 0x846 - SYS___ATANHF_B = 0x847 - SYS___ATANHL_B = 0x848 - SYS___CBRTF_B = 0x849 - SYS___EXP2F_B = 0x850 - SYS___EXP2L_B = 0x851 - SYS___EXPM1F_B = 0x852 - SYS___EXPM1L_B = 0x853 - SYS___FDIMF_B = 0x854 - SYS___FDIM_B = 0x855 - SYS___FDIML_B = 0x856 - SYS___HYPOTF_B = 0x857 - SYS___HYPOTL_B = 0x858 - SYS___LOG1PF_B = 0x859 - SYS___REMQUOF_B = 0x860 - SYS___REMQUO_B = 0x861 - SYS___REMQUOL_B = 0x862 - SYS___TGAMMAF_B = 0x863 - SYS___TGAMMA_B = 0x864 - SYS___TGAMMAL_B = 0x865 - SYS___TRUNCF_B = 0x866 - SYS___TRUNC_B = 0x867 - SYS___TRUNCL_B = 0x868 - SYS___LGAMMAF_B = 0x869 - SYS_ASINHF = 0x870 - SYS_ASINHL = 0x871 - SYS_ATANHF = 0x872 - SYS_ATANHL = 0x873 - SYS_CBRTF = 0x874 - SYS_CBRTL = 0x875 - SYS_COPYSIGNF = 0x876 - SYS_CPYSIGNF = 0x876 - SYS_COPYSIGNL = 0x877 - SYS_CPYSIGNL = 0x877 - SYS_COTANF = 0x878 - SYS___COTANF = 0x878 - SYS_COTAN = 0x879 - SYS___COTAN = 0x879 - SYS_FDIM = 0x881 - SYS_FDIML = 0x882 - SYS_HYPOTF = 0x883 - SYS_HYPOTL = 0x884 - SYS_LOG1PF = 0x885 - SYS_LOG1PL = 0x886 - SYS_LOG2F = 0x887 - SYS_LOG2 = 0x888 - SYS_LOG2L = 0x889 - SYS_TGAMMA = 0x890 - SYS_TGAMMAL = 0x891 - SYS_TRUNCF = 0x892 - SYS_TRUNC = 0x893 - SYS_TRUNCL = 0x894 - SYS_LGAMMAF = 0x895 - SYS_LGAMMAL = 0x896 - SYS_LROUNDF = 0x897 - SYS_LROUND = 0x898 - SYS_ERFF = 0x899 - SYS___COSHF_H = 0x900 - SYS___COSHL_H = 0x901 - SYS___COTAN_H = 0x902 - SYS___COTANF_H = 0x903 - SYS___COTANL_H = 0x904 - SYS___ERF_H = 0x905 - SYS___ERFF_H = 0x906 - SYS___ERFL_H = 0x907 - SYS___ERFC_H = 0x908 - SYS___ERFCF_H = 0x909 - SYS___FDIMF_H = 0x910 - SYS___FDIML_H = 0x911 - SYS___FMOD_H = 0x912 - 
SYS___FMODF_H = 0x913 - SYS___FMODL_H = 0x914 - SYS___GAMMA_H = 0x915 - SYS___HYPOT_H = 0x916 - SYS___ILOGB_H = 0x917 - SYS___LGAMMA_H = 0x918 - SYS___LGAMMAF_H = 0x919 - SYS___LOG2L_H = 0x920 - SYS___LOG1P_H = 0x921 - SYS___LOG10_H = 0x922 - SYS___LOG10F_H = 0x923 - SYS___LOG10L_H = 0x924 - SYS___LROUND_H = 0x925 - SYS___LROUNDF_H = 0x926 - SYS___NEXTAFTER_H = 0x927 - SYS___POW_H = 0x928 - SYS___POWF_H = 0x929 - SYS___SINL_H = 0x930 - SYS___SINH_H = 0x931 - SYS___SINHF_H = 0x932 - SYS___SINHL_H = 0x933 - SYS___SQRT_H = 0x934 - SYS___SQRTF_H = 0x935 - SYS___SQRTL_H = 0x936 - SYS___TAN_H = 0x937 - SYS___TANF_H = 0x938 - SYS___TANL_H = 0x939 - SYS___TRUNCF_H = 0x940 - SYS___TRUNCL_H = 0x941 - SYS___COSH_H = 0x942 - SYS___LE_DEBUG_SET_RESUME_MCH = 0x943 - SYS_VFSCANF = 0x944 - SYS_VSCANF = 0x946 - SYS_VSSCANF = 0x948 - SYS_IMAXABS = 0x950 - SYS_IMAXDIV = 0x951 - SYS_STRTOIMAX = 0x952 - SYS_STRTOUMAX = 0x953 - SYS_WCSTOIMAX = 0x954 - SYS_WCSTOUMAX = 0x955 - SYS_ATOLL = 0x956 - SYS_STRTOF = 0x957 - SYS_STRTOLD = 0x958 - SYS_WCSTOF = 0x959 - SYS_INET6_RTH_GETADDR = 0x960 - SYS_INET6_OPT_INIT = 0x961 - SYS_INET6_OPT_APPEND = 0x962 - SYS_INET6_OPT_FINISH = 0x963 - SYS_INET6_OPT_SET_VAL = 0x964 - SYS_INET6_OPT_NEXT = 0x965 - SYS_INET6_OPT_FIND = 0x966 - SYS_INET6_OPT_GET_VAL = 0x967 - SYS___POW_I = 0x987 - SYS___POW_I_B = 0x988 - SYS___POW_I_H = 0x989 - SYS___CABS_H = 0x990 - SYS_CABSF = 0x991 - SYS___CABSF_B = 0x992 - SYS___CABSF_H = 0x993 - SYS_CABSL = 0x994 - SYS___CABSL_B = 0x995 - SYS___CABSL_H = 0x996 - SYS_CACOS = 0x997 - SYS___CACOS_B = 0x998 - SYS___CACOS_H = 0x999 + SYS_LOG = 0x17 // 23 + SYS_COSH = 0x18 // 24 + SYS_TANH = 0x19 // 25 + SYS_EXP = 0x1A // 26 + SYS_MODF = 0x1B // 27 + SYS_LOG10 = 0x1C // 28 + SYS_FREXP = 0x1D // 29 + SYS_LDEXP = 0x1E // 30 + SYS_CEIL = 0x1F // 31 + SYS_POW = 0x20 // 32 + SYS_SQRT = 0x21 // 33 + SYS_FLOOR = 0x22 // 34 + SYS_J1 = 0x23 // 35 + SYS_FABS = 0x24 // 36 + SYS_FMOD = 0x25 // 37 + SYS_J0 = 0x26 // 38 + SYS_YN = 0x27 // 39 + SYS_JN = 0x28 // 40 + SYS_Y0 = 0x29 // 41 + SYS_Y1 = 0x2A // 42 + SYS_HYPOT = 0x2B // 43 + SYS_ERF = 0x2C // 44 + SYS_ERFC = 0x2D // 45 + SYS_GAMMA = 0x2E // 46 + SYS_ISALPHA = 0x30 // 48 + SYS_ISALNUM = 0x31 // 49 + SYS_ISLOWER = 0x32 // 50 + SYS_ISCNTRL = 0x33 // 51 + SYS_ISDIGIT = 0x34 // 52 + SYS_ISGRAPH = 0x35 // 53 + SYS_ISUPPER = 0x36 // 54 + SYS_ISPRINT = 0x37 // 55 + SYS_ISPUNCT = 0x38 // 56 + SYS_ISSPACE = 0x39 // 57 + SYS_SETLOCAL = 0x3A // 58 + SYS_SETLOCALE = 0x3A // 58 + SYS_ISXDIGIT = 0x3B // 59 + SYS_TOLOWER = 0x3C // 60 + SYS_TOUPPER = 0x3D // 61 + SYS_ASIN = 0x3E // 62 + SYS_SIN = 0x3F // 63 + SYS_COS = 0x40 // 64 + SYS_TAN = 0x41 // 65 + SYS_SINH = 0x42 // 66 + SYS_ACOS = 0x43 // 67 + SYS_ATAN = 0x44 // 68 + SYS_ATAN2 = 0x45 // 69 + SYS_FTELL = 0x46 // 70 + SYS_FGETPOS = 0x47 // 71 + SYS_FSEEK = 0x48 // 72 + SYS_FSETPOS = 0x49 // 73 + SYS_FERROR = 0x4A // 74 + SYS_REWIND = 0x4B // 75 + SYS_CLEARERR = 0x4C // 76 + SYS_FEOF = 0x4D // 77 + SYS_ATOL = 0x4E // 78 + SYS_PERROR = 0x4F // 79 + SYS_ATOF = 0x50 // 80 + SYS_ATOI = 0x51 // 81 + SYS_RAND = 0x52 // 82 + SYS_STRTOD = 0x53 // 83 + SYS_STRTOL = 0x54 // 84 + SYS_STRTOUL = 0x55 // 85 + SYS_MALLOC = 0x56 // 86 + SYS_SRAND = 0x57 // 87 + SYS_CALLOC = 0x58 // 88 + SYS_FREE = 0x59 // 89 + SYS_EXIT = 0x5A // 90 + SYS_REALLOC = 0x5B // 91 + SYS_ABORT = 0x5C // 92 + SYS___ABORT = 0x5C // 92 + SYS_ATEXIT = 0x5D // 93 + SYS_RAISE = 0x5E // 94 + SYS_SETJMP = 0x5F // 95 + SYS_LONGJMP = 0x60 // 96 + SYS_SIGNAL = 0x61 // 97 + SYS_TMPNAM = 0x62 // 98 + SYS_REMOVE = 0x63 // 99 + 
SYS_RENAME = 0x64 // 100 + SYS_TMPFILE = 0x65 // 101 + SYS_FREOPEN = 0x66 // 102 + SYS_FCLOSE = 0x67 // 103 + SYS_FFLUSH = 0x68 // 104 + SYS_FOPEN = 0x69 // 105 + SYS_FSCANF = 0x6A // 106 + SYS_SETBUF = 0x6B // 107 + SYS_SETVBUF = 0x6C // 108 + SYS_FPRINTF = 0x6D // 109 + SYS_SSCANF = 0x6E // 110 + SYS_PRINTF = 0x6F // 111 + SYS_SCANF = 0x70 // 112 + SYS_SPRINTF = 0x71 // 113 + SYS_FGETC = 0x72 // 114 + SYS_VFPRINTF = 0x73 // 115 + SYS_VPRINTF = 0x74 // 116 + SYS_VSPRINTF = 0x75 // 117 + SYS_GETC = 0x76 // 118 + SYS_FGETS = 0x77 // 119 + SYS_FPUTC = 0x78 // 120 + SYS_FPUTS = 0x79 // 121 + SYS_PUTCHAR = 0x7A // 122 + SYS_GETCHAR = 0x7B // 123 + SYS_GETS = 0x7C // 124 + SYS_PUTC = 0x7D // 125 + SYS_FWRITE = 0x7E // 126 + SYS_PUTS = 0x7F // 127 + SYS_UNGETC = 0x80 // 128 + SYS_FREAD = 0x81 // 129 + SYS_WCSTOMBS = 0x82 // 130 + SYS_MBTOWC = 0x83 // 131 + SYS_WCTOMB = 0x84 // 132 + SYS_MBSTOWCS = 0x85 // 133 + SYS_WCSCPY = 0x86 // 134 + SYS_WCSCAT = 0x87 // 135 + SYS_WCSCHR = 0x88 // 136 + SYS_WCSCMP = 0x89 // 137 + SYS_WCSNCMP = 0x8A // 138 + SYS_WCSCSPN = 0x8B // 139 + SYS_WCSLEN = 0x8C // 140 + SYS_WCSNCAT = 0x8D // 141 + SYS_WCSSPN = 0x8E // 142 + SYS_WCSNCPY = 0x8F // 143 + SYS_ABS = 0x90 // 144 + SYS_DIV = 0x91 // 145 + SYS_LABS = 0x92 // 146 + SYS_STRNCPY = 0x93 // 147 + SYS_MEMCPY = 0x94 // 148 + SYS_MEMMOVE = 0x95 // 149 + SYS_STRCPY = 0x96 // 150 + SYS_STRCMP = 0x97 // 151 + SYS_STRCAT = 0x98 // 152 + SYS_STRNCAT = 0x99 // 153 + SYS_MEMCMP = 0x9A // 154 + SYS_MEMCHR = 0x9B // 155 + SYS_STRCOLL = 0x9C // 156 + SYS_STRNCMP = 0x9D // 157 + SYS_STRXFRM = 0x9E // 158 + SYS_STRRCHR = 0x9F // 159 + SYS_STRCHR = 0xA0 // 160 + SYS_STRCSPN = 0xA1 // 161 + SYS_STRPBRK = 0xA2 // 162 + SYS_MEMSET = 0xA3 // 163 + SYS_STRSPN = 0xA4 // 164 + SYS_STRSTR = 0xA5 // 165 + SYS_STRTOK = 0xA6 // 166 + SYS_DIFFTIME = 0xA7 // 167 + SYS_STRERROR = 0xA8 // 168 + SYS_STRLEN = 0xA9 // 169 + SYS_CLOCK = 0xAA // 170 + SYS_CTIME = 0xAB // 171 + SYS_MKTIME = 0xAC // 172 + SYS_TIME = 0xAD // 173 + SYS_ASCTIME = 0xAE // 174 + SYS_MBLEN = 0xAF // 175 + SYS_GMTIME = 0xB0 // 176 + SYS_LOCALTIM = 0xB1 // 177 + SYS_LOCALTIME = 0xB1 // 177 + SYS_STRFTIME = 0xB2 // 178 + SYS___GETCB = 0xB4 // 180 + SYS_FUPDATE = 0xB5 // 181 + SYS___FUPDT = 0xB5 // 181 + SYS_CLRMEMF = 0xBD // 189 + SYS___CLRMF = 0xBD // 189 + SYS_FETCHEP = 0xBF // 191 + SYS___FTCHEP = 0xBF // 191 + SYS_FLDATA = 0xC1 // 193 + SYS___FLDATA = 0xC1 // 193 + SYS_DYNFREE = 0xC2 // 194 + SYS___DYNFRE = 0xC2 // 194 + SYS_DYNALLOC = 0xC3 // 195 + SYS___DYNALL = 0xC3 // 195 + SYS___CDUMP = 0xC4 // 196 + SYS_CSNAP = 0xC5 // 197 + SYS___CSNAP = 0xC5 // 197 + SYS_CTRACE = 0xC6 // 198 + SYS___CTRACE = 0xC6 // 198 + SYS___CTEST = 0xC7 // 199 + SYS_SETENV = 0xC8 // 200 + SYS___SETENV = 0xC8 // 200 + SYS_CLEARENV = 0xC9 // 201 + SYS___CLRENV = 0xC9 // 201 + SYS___REGCOMP_STD = 0xEA // 234 + SYS_NL_LANGINFO = 0xFC // 252 + SYS_GETSYNTX = 0xFD // 253 + SYS_ISBLANK = 0xFE // 254 + SYS___ISBLNK = 0xFE // 254 + SYS_ISWALNUM = 0xFF // 255 + SYS_ISWALPHA = 0x100 // 256 + SYS_ISWBLANK = 0x101 // 257 + SYS___ISWBLK = 0x101 // 257 + SYS_ISWCNTRL = 0x102 // 258 + SYS_ISWDIGIT = 0x103 // 259 + SYS_ISWGRAPH = 0x104 // 260 + SYS_ISWLOWER = 0x105 // 261 + SYS_ISWPRINT = 0x106 // 262 + SYS_ISWPUNCT = 0x107 // 263 + SYS_ISWSPACE = 0x108 // 264 + SYS_ISWUPPER = 0x109 // 265 + SYS_ISWXDIGI = 0x10A // 266 + SYS_ISWXDIGIT = 0x10A // 266 + SYS_WCTYPE = 0x10B // 267 + SYS_ISWCTYPE = 0x10C // 268 + SYS_TOWLOWER = 0x10D // 269 + SYS_TOWUPPER = 0x10E // 270 + SYS_MBSINIT = 0x10F // 271 + SYS_WCTOB = 
0x110 // 272 + SYS_MBRLEN = 0x111 // 273 + SYS_MBRTOWC = 0x112 // 274 + SYS_MBSRTOWC = 0x113 // 275 + SYS_MBSRTOWCS = 0x113 // 275 + SYS_WCRTOMB = 0x114 // 276 + SYS_WCSRTOMB = 0x115 // 277 + SYS_WCSRTOMBS = 0x115 // 277 + SYS___CSID = 0x116 // 278 + SYS___WCSID = 0x117 // 279 + SYS_STRPTIME = 0x118 // 280 + SYS___STRPTM = 0x118 // 280 + SYS_STRFMON = 0x119 // 281 + SYS___RPMTCH = 0x11A // 282 + SYS_WCSSTR = 0x11B // 283 + SYS_WCSTOK = 0x12C // 300 + SYS_WCSTOL = 0x12D // 301 + SYS_WCSTOD = 0x12E // 302 + SYS_WCSTOUL = 0x12F // 303 + SYS_WCSCOLL = 0x130 // 304 + SYS_WCSXFRM = 0x131 // 305 + SYS_WCSWIDTH = 0x132 // 306 + SYS_WCWIDTH = 0x133 // 307 + SYS_WCSFTIME = 0x134 // 308 + SYS_SWPRINTF = 0x135 // 309 + SYS_VSWPRINT = 0x136 // 310 + SYS_VSWPRINTF = 0x136 // 310 + SYS_SWSCANF = 0x137 // 311 + SYS_REGCOMP = 0x138 // 312 + SYS_REGEXEC = 0x139 // 313 + SYS_REGFREE = 0x13A // 314 + SYS_REGERROR = 0x13B // 315 + SYS_FGETWC = 0x13C // 316 + SYS_FGETWS = 0x13D // 317 + SYS_FPUTWC = 0x13E // 318 + SYS_FPUTWS = 0x13F // 319 + SYS_GETWC = 0x140 // 320 + SYS_GETWCHAR = 0x141 // 321 + SYS_PUTWC = 0x142 // 322 + SYS_PUTWCHAR = 0x143 // 323 + SYS_UNGETWC = 0x144 // 324 + SYS_ICONV_OPEN = 0x145 // 325 + SYS_ICONV = 0x146 // 326 + SYS_ICONV_CLOSE = 0x147 // 327 + SYS_ISMCCOLLEL = 0x14C // 332 + SYS_STRTOCOLL = 0x14D // 333 + SYS_COLLTOSTR = 0x14E // 334 + SYS_COLLEQUIV = 0x14F // 335 + SYS_COLLRANGE = 0x150 // 336 + SYS_CCLASS = 0x151 // 337 + SYS_COLLORDER = 0x152 // 338 + SYS___DEMANGLE = 0x154 // 340 + SYS_FDOPEN = 0x155 // 341 + SYS___ERRNO = 0x156 // 342 + SYS___ERRNO2 = 0x157 // 343 + SYS___TERROR = 0x158 // 344 + SYS_MAXCOLL = 0x169 // 361 + SYS_GETMCCOLL = 0x16A // 362 + SYS_GETWMCCOLL = 0x16B // 363 + SYS___ERR2AD = 0x16C // 364 + SYS_DLLQUERYFN = 0x16D // 365 + SYS_DLLQUERYVAR = 0x16E // 366 + SYS_DLLFREE = 0x16F // 367 + SYS_DLLLOAD = 0x170 // 368 + SYS__EXIT = 0x174 // 372 + SYS_ACCESS = 0x175 // 373 + SYS_ALARM = 0x176 // 374 + SYS_CFGETISPEED = 0x177 // 375 + SYS_CFGETOSPEED = 0x178 // 376 + SYS_CFSETISPEED = 0x179 // 377 + SYS_CFSETOSPEED = 0x17A // 378 + SYS_CHDIR = 0x17B // 379 + SYS_CHMOD = 0x17C // 380 + SYS_CHOWN = 0x17D // 381 + SYS_CLOSE = 0x17E // 382 + SYS_CLOSEDIR = 0x17F // 383 + SYS_CREAT = 0x180 // 384 + SYS_CTERMID = 0x181 // 385 + SYS_DUP = 0x182 // 386 + SYS_DUP2 = 0x183 // 387 + SYS_EXECL = 0x184 // 388 + SYS_EXECLE = 0x185 // 389 + SYS_EXECLP = 0x186 // 390 + SYS_EXECV = 0x187 // 391 + SYS_EXECVE = 0x188 // 392 + SYS_EXECVP = 0x189 // 393 + SYS_FCHMOD = 0x18A // 394 + SYS_FCHOWN = 0x18B // 395 + SYS_FCNTL = 0x18C // 396 + SYS_FILENO = 0x18D // 397 + SYS_FORK = 0x18E // 398 + SYS_FPATHCONF = 0x18F // 399 + SYS_FSTAT = 0x190 // 400 + SYS_FSYNC = 0x191 // 401 + SYS_FTRUNCATE = 0x192 // 402 + SYS_GETCWD = 0x193 // 403 + SYS_GETEGID = 0x194 // 404 + SYS_GETEUID = 0x195 // 405 + SYS_GETGID = 0x196 // 406 + SYS_GETGRGID = 0x197 // 407 + SYS_GETGRNAM = 0x198 // 408 + SYS_GETGROUPS = 0x199 // 409 + SYS_GETLOGIN = 0x19A // 410 + SYS_W_GETMNTENT = 0x19B // 411 + SYS_GETPGRP = 0x19C // 412 + SYS_GETPID = 0x19D // 413 + SYS_GETPPID = 0x19E // 414 + SYS_GETPWNAM = 0x19F // 415 + SYS_GETPWUID = 0x1A0 // 416 + SYS_GETUID = 0x1A1 // 417 + SYS_W_IOCTL = 0x1A2 // 418 + SYS_ISATTY = 0x1A3 // 419 + SYS_KILL = 0x1A4 // 420 + SYS_LINK = 0x1A5 // 421 + SYS_LSEEK = 0x1A6 // 422 + SYS_LSTAT = 0x1A7 // 423 + SYS_MKDIR = 0x1A8 // 424 + SYS_MKFIFO = 0x1A9 // 425 + SYS_MKNOD = 0x1AA // 426 + SYS_MOUNT = 0x1AB // 427 + SYS_OPEN = 0x1AC // 428 + SYS_OPENDIR = 0x1AD // 429 + SYS_PATHCONF = 0x1AE // 430 
+ SYS_PAUSE = 0x1AF // 431 + SYS_PIPE = 0x1B0 // 432 + SYS_W_GETPSENT = 0x1B1 // 433 + SYS_READ = 0x1B2 // 434 + SYS_READDIR = 0x1B3 // 435 + SYS_READLINK = 0x1B4 // 436 + SYS_REWINDDIR = 0x1B5 // 437 + SYS_RMDIR = 0x1B6 // 438 + SYS_SETEGID = 0x1B7 // 439 + SYS_SETEUID = 0x1B8 // 440 + SYS_SETGID = 0x1B9 // 441 + SYS_SETPGID = 0x1BA // 442 + SYS_SETSID = 0x1BB // 443 + SYS_SETUID = 0x1BC // 444 + SYS_SIGACTION = 0x1BD // 445 + SYS_SIGADDSET = 0x1BE // 446 + SYS_SIGDELSET = 0x1BF // 447 + SYS_SIGEMPTYSET = 0x1C0 // 448 + SYS_SIGFILLSET = 0x1C1 // 449 + SYS_SIGISMEMBER = 0x1C2 // 450 + SYS_SIGLONGJMP = 0x1C3 // 451 + SYS_SIGPENDING = 0x1C4 // 452 + SYS_SIGPROCMASK = 0x1C5 // 453 + SYS_SIGSETJMP = 0x1C6 // 454 + SYS_SIGSUSPEND = 0x1C7 // 455 + SYS_SLEEP = 0x1C8 // 456 + SYS_STAT = 0x1C9 // 457 + SYS_W_STATFS = 0x1CA // 458 + SYS_SYMLINK = 0x1CB // 459 + SYS_SYSCONF = 0x1CC // 460 + SYS_TCDRAIN = 0x1CD // 461 + SYS_TCFLOW = 0x1CE // 462 + SYS_TCFLUSH = 0x1CF // 463 + SYS_TCGETATTR = 0x1D0 // 464 + SYS_TCGETPGRP = 0x1D1 // 465 + SYS_TCSENDBREAK = 0x1D2 // 466 + SYS_TCSETATTR = 0x1D3 // 467 + SYS_TCSETPGRP = 0x1D4 // 468 + SYS_TIMES = 0x1D5 // 469 + SYS_TTYNAME = 0x1D6 // 470 + SYS_TZSET = 0x1D7 // 471 + SYS_UMASK = 0x1D8 // 472 + SYS_UMOUNT = 0x1D9 // 473 + SYS_UNAME = 0x1DA // 474 + SYS_UNLINK = 0x1DB // 475 + SYS_UTIME = 0x1DC // 476 + SYS_WAIT = 0x1DD // 477 + SYS_WAITPID = 0x1DE // 478 + SYS_WRITE = 0x1DF // 479 + SYS_CHAUDIT = 0x1E0 // 480 + SYS_FCHAUDIT = 0x1E1 // 481 + SYS_GETGROUPSBYNAME = 0x1E2 // 482 + SYS_SIGWAIT = 0x1E3 // 483 + SYS_PTHREAD_EXIT = 0x1E4 // 484 + SYS_PTHREAD_KILL = 0x1E5 // 485 + SYS_PTHREAD_ATTR_INIT = 0x1E6 // 486 + SYS_PTHREAD_ATTR_DESTROY = 0x1E7 // 487 + SYS_PTHREAD_ATTR_SETSTACKSIZE = 0x1E8 // 488 + SYS_PTHREAD_ATTR_GETSTACKSIZE = 0x1E9 // 489 + SYS_PTHREAD_ATTR_SETDETACHSTATE = 0x1EA // 490 + SYS_PTHREAD_ATTR_GETDETACHSTATE = 0x1EB // 491 + SYS_PTHREAD_ATTR_SETWEIGHT_NP = 0x1EC // 492 + SYS_PTHREAD_ATTR_GETWEIGHT_NP = 0x1ED // 493 + SYS_PTHREAD_CANCEL = 0x1EE // 494 + SYS_PTHREAD_CLEANUP_PUSH = 0x1EF // 495 + SYS_PTHREAD_CLEANUP_POP = 0x1F0 // 496 + SYS_PTHREAD_CONDATTR_INIT = 0x1F1 // 497 + SYS_PTHREAD_CONDATTR_DESTROY = 0x1F2 // 498 + SYS_PTHREAD_COND_INIT = 0x1F3 // 499 + SYS_PTHREAD_COND_DESTROY = 0x1F4 // 500 + SYS_PTHREAD_COND_SIGNAL = 0x1F5 // 501 + SYS_PTHREAD_COND_BROADCAST = 0x1F6 // 502 + SYS_PTHREAD_COND_WAIT = 0x1F7 // 503 + SYS_PTHREAD_COND_TIMEDWAIT = 0x1F8 // 504 + SYS_PTHREAD_CREATE = 0x1F9 // 505 + SYS_PTHREAD_DETACH = 0x1FA // 506 + SYS_PTHREAD_EQUAL = 0x1FB // 507 + SYS_PTHREAD_GETSPECIFIC = 0x1FC // 508 + SYS_PTHREAD_JOIN = 0x1FD // 509 + SYS_PTHREAD_KEY_CREATE = 0x1FE // 510 + SYS_PTHREAD_MUTEXATTR_INIT = 0x1FF // 511 + SYS_PTHREAD_MUTEXATTR_DESTROY = 0x200 // 512 + SYS_PTHREAD_MUTEXATTR_SETKIND_NP = 0x201 // 513 + SYS_PTHREAD_MUTEXATTR_GETKIND_NP = 0x202 // 514 + SYS_PTHREAD_MUTEX_INIT = 0x203 // 515 + SYS_PTHREAD_MUTEX_DESTROY = 0x204 // 516 + SYS_PTHREAD_MUTEX_LOCK = 0x205 // 517 + SYS_PTHREAD_MUTEX_TRYLOCK = 0x206 // 518 + SYS_PTHREAD_MUTEX_UNLOCK = 0x207 // 519 + SYS_PTHREAD_ONCE = 0x209 // 521 + SYS_PTHREAD_SELF = 0x20A // 522 + SYS_PTHREAD_SETINTR = 0x20B // 523 + SYS_PTHREAD_SETINTRTYPE = 0x20C // 524 + SYS_PTHREAD_SETSPECIFIC = 0x20D // 525 + SYS_PTHREAD_TESTINTR = 0x20E // 526 + SYS_PTHREAD_YIELD = 0x20F // 527 + SYS_TW_OPEN = 0x210 // 528 + SYS_TW_FCNTL = 0x211 // 529 + SYS_PTHREAD_JOIN_D4_NP = 0x212 // 530 + SYS_PTHREAD_CONDATTR_SETKIND_NP = 0x213 // 531 + SYS_PTHREAD_CONDATTR_GETKIND_NP = 0x214 // 532 + SYS_EXTLINK_NP = 
0x215 // 533 + SYS___PASSWD = 0x216 // 534 + SYS_SETGROUPS = 0x217 // 535 + SYS_INITGROUPS = 0x218 // 536 + SYS_WCSPBRK = 0x23F // 575 + SYS_WCSRCHR = 0x240 // 576 + SYS_SVC99 = 0x241 // 577 + SYS___SVC99 = 0x241 // 577 + SYS_WCSWCS = 0x242 // 578 + SYS_LOCALECO = 0x243 // 579 + SYS_LOCALECONV = 0x243 // 579 + SYS___LIBREL = 0x244 // 580 + SYS_RELEASE = 0x245 // 581 + SYS___RLSE = 0x245 // 581 + SYS_FLOCATE = 0x246 // 582 + SYS___FLOCT = 0x246 // 582 + SYS_FDELREC = 0x247 // 583 + SYS___FDLREC = 0x247 // 583 + SYS_FETCH = 0x248 // 584 + SYS___FETCH = 0x248 // 584 + SYS_QSORT = 0x249 // 585 + SYS_GETENV = 0x24A // 586 + SYS_SYSTEM = 0x24B // 587 + SYS_BSEARCH = 0x24C // 588 + SYS_LDIV = 0x24D // 589 + SYS___THROW = 0x25E // 606 + SYS___RETHROW = 0x25F // 607 + SYS___CLEANUPCATCH = 0x260 // 608 + SYS___CATCHMATCH = 0x261 // 609 + SYS___CLEAN2UPCATCH = 0x262 // 610 + SYS_PUTENV = 0x26A // 618 + SYS___GETENV = 0x26F // 623 + SYS_GETPRIORITY = 0x270 // 624 + SYS_NICE = 0x271 // 625 + SYS_SETPRIORITY = 0x272 // 626 + SYS_GETITIMER = 0x273 // 627 + SYS_SETITIMER = 0x274 // 628 + SYS_MSGCTL = 0x275 // 629 + SYS_MSGGET = 0x276 // 630 + SYS_MSGRCV = 0x277 // 631 + SYS_MSGSND = 0x278 // 632 + SYS_MSGXRCV = 0x279 // 633 + SYS___MSGXR = 0x279 // 633 + SYS_SEMCTL = 0x27A // 634 + SYS_SEMGET = 0x27B // 635 + SYS_SEMOP = 0x27C // 636 + SYS_SHMAT = 0x27D // 637 + SYS_SHMCTL = 0x27E // 638 + SYS_SHMDT = 0x27F // 639 + SYS_SHMGET = 0x280 // 640 + SYS___GETIPC = 0x281 // 641 + SYS_SETGRENT = 0x282 // 642 + SYS_GETGRENT = 0x283 // 643 + SYS_ENDGRENT = 0x284 // 644 + SYS_SETPWENT = 0x285 // 645 + SYS_GETPWENT = 0x286 // 646 + SYS_ENDPWENT = 0x287 // 647 + SYS_BSD_SIGNAL = 0x288 // 648 + SYS_KILLPG = 0x289 // 649 + SYS_SIGALTSTACK = 0x28A // 650 + SYS_SIGHOLD = 0x28B // 651 + SYS_SIGIGNORE = 0x28C // 652 + SYS_SIGINTERRUPT = 0x28D // 653 + SYS_SIGPAUSE = 0x28E // 654 + SYS_SIGRELSE = 0x28F // 655 + SYS_SIGSET = 0x290 // 656 + SYS_SIGSTACK = 0x291 // 657 + SYS_GETRLIMIT = 0x292 // 658 + SYS_SETRLIMIT = 0x293 // 659 + SYS_GETRUSAGE = 0x294 // 660 + SYS_MMAP = 0x295 // 661 + SYS_MPROTECT = 0x296 // 662 + SYS_MSYNC = 0x297 // 663 + SYS_MUNMAP = 0x298 // 664 + SYS_CONFSTR = 0x299 // 665 + SYS_GETOPT = 0x29A // 666 + SYS_LCHOWN = 0x29B // 667 + SYS_TRUNCATE = 0x29C // 668 + SYS_GETSUBOPT = 0x29D // 669 + SYS_SETPGRP = 0x29E // 670 + SYS___GDERR = 0x29F // 671 + SYS___TZONE = 0x2A0 // 672 + SYS___DLGHT = 0x2A1 // 673 + SYS___OPARGF = 0x2A2 // 674 + SYS___OPOPTF = 0x2A3 // 675 + SYS___OPINDF = 0x2A4 // 676 + SYS___OPERRF = 0x2A5 // 677 + SYS_GETDATE = 0x2A6 // 678 + SYS_WAIT3 = 0x2A7 // 679 + SYS_WAITID = 0x2A8 // 680 + SYS___CATTRM = 0x2A9 // 681 + SYS___GDTRM = 0x2AA // 682 + SYS___RNDTRM = 0x2AB // 683 + SYS_CRYPT = 0x2AC // 684 + SYS_ENCRYPT = 0x2AD // 685 + SYS_SETKEY = 0x2AE // 686 + SYS___CNVBLK = 0x2AF // 687 + SYS___CRYTRM = 0x2B0 // 688 + SYS___ECRTRM = 0x2B1 // 689 + SYS_DRAND48 = 0x2B2 // 690 + SYS_ERAND48 = 0x2B3 // 691 + SYS_FSTATVFS = 0x2B4 // 692 + SYS_STATVFS = 0x2B5 // 693 + SYS_CATCLOSE = 0x2B6 // 694 + SYS_CATGETS = 0x2B7 // 695 + SYS_CATOPEN = 0x2B8 // 696 + SYS_BCMP = 0x2B9 // 697 + SYS_BCOPY = 0x2BA // 698 + SYS_BZERO = 0x2BB // 699 + SYS_FFS = 0x2BC // 700 + SYS_INDEX = 0x2BD // 701 + SYS_RINDEX = 0x2BE // 702 + SYS_STRCASECMP = 0x2BF // 703 + SYS_STRDUP = 0x2C0 // 704 + SYS_STRNCASECMP = 0x2C1 // 705 + SYS_INITSTATE = 0x2C2 // 706 + SYS_SETSTATE = 0x2C3 // 707 + SYS_RANDOM = 0x2C4 // 708 + SYS_SRANDOM = 0x2C5 // 709 + SYS_HCREATE = 0x2C6 // 710 + SYS_HDESTROY = 0x2C7 // 711 + SYS_HSEARCH = 0x2C8 
// 712
+ [flattened hunk: the remaining added z/OS SYS_* syscall-number constants, one generated constant per added line, from SYS_LFIND = 0x2C9 // 713 through SYS_DIRFD = 0xDAC // 3500]
+ SYS_CLOCK_GETTIME =
0xDAD // 3501 + SYS_DUP3 = 0xDAE // 3502 + SYS_EPOLL_CREATE = 0xDAF // 3503 + SYS_EPOLL_CREATE1 = 0xDB0 // 3504 + SYS_EPOLL_CTL = 0xDB1 // 3505 + SYS_EPOLL_WAIT = 0xDB2 // 3506 + SYS_EPOLL_PWAIT = 0xDB3 // 3507 + SYS_EVENTFD = 0xDB4 // 3508 + SYS_STATFS = 0xDB5 // 3509 + SYS___STATFS_A = 0xDB6 // 3510 + SYS_FSTATFS = 0xDB7 // 3511 + SYS_INOTIFY_INIT = 0xDB8 // 3512 + SYS_INOTIFY_INIT1 = 0xDB9 // 3513 + SYS_INOTIFY_ADD_WATCH = 0xDBA // 3514 + SYS___INOTIFY_ADD_WATCH_A = 0xDBB // 3515 + SYS_INOTIFY_RM_WATCH = 0xDBC // 3516 + SYS_PIPE2 = 0xDBD // 3517 + SYS_PIVOT_ROOT = 0xDBE // 3518 + SYS___PIVOT_ROOT_A = 0xDBF // 3519 + SYS_PRCTL = 0xDC0 // 3520 + SYS_PRLIMIT = 0xDC1 // 3521 + SYS_SETHOSTNAME = 0xDC2 // 3522 + SYS___SETHOSTNAME_A = 0xDC3 // 3523 + SYS_SETRESUID = 0xDC4 // 3524 + SYS_SETRESGID = 0xDC5 // 3525 + SYS_PTHREAD_CONDATTR_GETCLOCK = 0xDC6 // 3526 + SYS_FLOCK = 0xDC7 // 3527 + SYS_FGETXATTR = 0xDC8 // 3528 + SYS___FGETXATTR_A = 0xDC9 // 3529 + SYS_FLISTXATTR = 0xDCA // 3530 + SYS___FLISTXATTR_A = 0xDCB // 3531 + SYS_FREMOVEXATTR = 0xDCC // 3532 + SYS___FREMOVEXATTR_A = 0xDCD // 3533 + SYS_FSETXATTR = 0xDCE // 3534 + SYS___FSETXATTR_A = 0xDCF // 3535 + SYS_GETXATTR = 0xDD0 // 3536 + SYS___GETXATTR_A = 0xDD1 // 3537 + SYS_LGETXATTR = 0xDD2 // 3538 + SYS___LGETXATTR_A = 0xDD3 // 3539 + SYS_LISTXATTR = 0xDD4 // 3540 + SYS___LISTXATTR_A = 0xDD5 // 3541 + SYS_LLISTXATTR = 0xDD6 // 3542 + SYS___LLISTXATTR_A = 0xDD7 // 3543 + SYS_LREMOVEXATTR = 0xDD8 // 3544 + SYS___LREMOVEXATTR_A = 0xDD9 // 3545 + SYS_LSETXATTR = 0xDDA // 3546 + SYS___LSETXATTR_A = 0xDDB // 3547 + SYS_REMOVEXATTR = 0xDDC // 3548 + SYS___REMOVEXATTR_A = 0xDDD // 3549 + SYS_SETXATTR = 0xDDE // 3550 + SYS___SETXATTR_A = 0xDDF // 3551 + SYS_FDATASYNC = 0xDE0 // 3552 + SYS_SYNCFS = 0xDE1 // 3553 + SYS_FUTIMES = 0xDE2 // 3554 + SYS_FUTIMESAT = 0xDE3 // 3555 + SYS___FUTIMESAT_A = 0xDE4 // 3556 + SYS_LUTIMES = 0xDE5 // 3557 + SYS___LUTIMES_A = 0xDE6 // 3558 + SYS_INET_ATON = 0xDE7 // 3559 + SYS_GETRANDOM = 0xDE8 // 3560 + SYS_GETTID = 0xDE9 // 3561 + SYS_MEMFD_CREATE = 0xDEA // 3562 + SYS___MEMFD_CREATE_A = 0xDEB // 3563 + SYS_FACCESSAT = 0xDEC // 3564 + SYS___FACCESSAT_A = 0xDED // 3565 + SYS_FCHMODAT = 0xDEE // 3566 + SYS___FCHMODAT_A = 0xDEF // 3567 + SYS_FCHOWNAT = 0xDF0 // 3568 + SYS___FCHOWNAT_A = 0xDF1 // 3569 + SYS_FSTATAT = 0xDF2 // 3570 + SYS___FSTATAT_A = 0xDF3 // 3571 + SYS_LINKAT = 0xDF4 // 3572 + SYS___LINKAT_A = 0xDF5 // 3573 + SYS_MKDIRAT = 0xDF6 // 3574 + SYS___MKDIRAT_A = 0xDF7 // 3575 + SYS_MKFIFOAT = 0xDF8 // 3576 + SYS___MKFIFOAT_A = 0xDF9 // 3577 + SYS_MKNODAT = 0xDFA // 3578 + SYS___MKNODAT_A = 0xDFB // 3579 + SYS_OPENAT = 0xDFC // 3580 + SYS___OPENAT_A = 0xDFD // 3581 + SYS_READLINKAT = 0xDFE // 3582 + SYS___READLINKAT_A = 0xDFF // 3583 + SYS_RENAMEAT = 0xE00 // 3584 + SYS___RENAMEAT_A = 0xE01 // 3585 + SYS_RENAMEAT2 = 0xE02 // 3586 + SYS___RENAMEAT2_A = 0xE03 // 3587 + SYS_SYMLINKAT = 0xE04 // 3588 + SYS___SYMLINKAT_A = 0xE05 // 3589 + SYS_UNLINKAT = 0xE06 // 3590 + SYS___UNLINKAT_A = 0xE07 // 3591 + SYS_SYSINFO = 0xE08 // 3592 + SYS_WAIT4 = 0xE0A // 3594 + SYS_CLONE = 0xE0B // 3595 + SYS_UNSHARE = 0xE0C // 3596 + SYS_SETNS = 0xE0D // 3597 + SYS_CAPGET = 0xE0E // 3598 + SYS_CAPSET = 0xE0F // 3599 + SYS_STRCHRNUL = 0xE10 // 3600 + SYS_PTHREAD_CONDATTR_SETCLOCK = 0xE12 // 3602 + SYS_OPEN_BY_HANDLE_AT = 0xE13 // 3603 + SYS___OPEN_BY_HANDLE_AT_A = 0xE14 // 3604 + SYS___INET_ATON_A = 0xE15 // 3605 + SYS_MOUNT1 = 0xE16 // 3606 + SYS___MOUNT1_A = 0xE17 // 3607 + SYS_UMOUNT1 = 0xE18 // 3608 + SYS___UMOUNT1_A = 
0xE19 // 3609 + SYS_UMOUNT2 = 0xE1A // 3610 + SYS___UMOUNT2_A = 0xE1B // 3611 + SYS___PRCTL_A = 0xE1C // 3612 + SYS_LOCALTIME_R2 = 0xE1D // 3613 + SYS___LOCALTIME_R2_A = 0xE1E // 3614 + SYS_OPENAT2 = 0xE1F // 3615 + SYS___OPENAT2_A = 0xE20 // 3616 + SYS___LE_CEEMICT = 0xE21 // 3617 + SYS_GETENTROPY = 0xE22 // 3618 + SYS_NANOSLEEP = 0xE23 // 3619 + SYS_UTIMENSAT = 0xE24 // 3620 + SYS___UTIMENSAT_A = 0xE25 // 3621 + SYS_ASPRINTF = 0xE26 // 3622 + SYS___ASPRINTF_A = 0xE27 // 3623 + SYS_VASPRINTF = 0xE28 // 3624 + SYS___VASPRINTF_A = 0xE29 // 3625 + SYS_DPRINTF = 0xE2A // 3626 + SYS___DPRINTF_A = 0xE2B // 3627 + SYS_GETOPT_LONG = 0xE2C // 3628 + SYS___GETOPT_LONG_A = 0xE2D // 3629 + SYS_PSIGNAL = 0xE2E // 3630 + SYS___PSIGNAL_A = 0xE2F // 3631 + SYS_PSIGNAL_UNLOCKED = 0xE30 // 3632 + SYS___PSIGNAL_UNLOCKED_A = 0xE31 // 3633 + SYS_FSTATAT_O = 0xE32 // 3634 + SYS___FSTATAT_O_A = 0xE33 // 3635 + SYS_FSTATAT64 = 0xE34 // 3636 + SYS___FSTATAT64_A = 0xE35 // 3637 + SYS___CHATTRAT = 0xE36 // 3638 + SYS_____CHATTRAT_A = 0xE37 // 3639 + SYS___CHATTRAT64 = 0xE38 // 3640 + SYS_____CHATTRAT64_A = 0xE39 // 3641 + SYS_MADVISE = 0xE3A // 3642 + SYS___AUTHENTICATE = 0xE3B // 3643 + ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index dc0c955e..4740b834 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -836,6 +836,15 @@ const ( FSPICK_EMPTY_PATH = 0x8 FSMOUNT_CLOEXEC = 0x1 + + FSCONFIG_SET_FLAG = 0x0 + FSCONFIG_SET_STRING = 0x1 + FSCONFIG_SET_BINARY = 0x2 + FSCONFIG_SET_PATH = 0x3 + FSCONFIG_SET_PATH_EMPTY = 0x4 + FSCONFIG_SET_FD = 0x5 + FSCONFIG_CMD_CREATE = 0x6 + FSCONFIG_CMD_RECONFIGURE = 0x7 ) type OpenHow struct { @@ -1169,7 +1178,8 @@ const ( PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 0x10 PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 0x11 PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 0x12 - PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x13 + PERF_SAMPLE_BRANCH_COUNTERS = 0x80000 + PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x14 PERF_SAMPLE_BRANCH_USER = 0x1 PERF_SAMPLE_BRANCH_KERNEL = 0x2 PERF_SAMPLE_BRANCH_HV = 0x4 @@ -1189,7 +1199,7 @@ const ( PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 PERF_SAMPLE_BRANCH_HW_INDEX = 0x20000 PERF_SAMPLE_BRANCH_PRIV_SAVE = 0x40000 - PERF_SAMPLE_BRANCH_MAX = 0x80000 + PERF_SAMPLE_BRANCH_MAX = 0x100000 PERF_BR_UNKNOWN = 0x0 PERF_BR_COND = 0x1 PERF_BR_UNCOND = 0x2 @@ -1550,6 +1560,7 @@ const ( IFLA_DEVLINK_PORT = 0x3e IFLA_GSO_IPV4_MAX_SIZE = 0x3f IFLA_GRO_IPV4_MAX_SIZE = 0x40 + IFLA_DPLL_PIN = 0x41 IFLA_PROTO_DOWN_REASON_UNSPEC = 0x0 IFLA_PROTO_DOWN_REASON_MASK = 0x1 IFLA_PROTO_DOWN_REASON_VALUE = 0x2 @@ -1565,6 +1576,7 @@ const ( IFLA_INET6_ICMP6STATS = 0x6 IFLA_INET6_TOKEN = 0x7 IFLA_INET6_ADDR_GEN_MODE = 0x8 + IFLA_INET6_RA_MTU = 0x9 IFLA_BR_UNSPEC = 0x0 IFLA_BR_FORWARD_DELAY = 0x1 IFLA_BR_HELLO_TIME = 0x2 @@ -1612,6 +1624,9 @@ const ( IFLA_BR_MCAST_MLD_VERSION = 0x2c IFLA_BR_VLAN_STATS_PER_PORT = 0x2d IFLA_BR_MULTI_BOOLOPT = 0x2e + IFLA_BR_MCAST_QUERIER_STATE = 0x2f + IFLA_BR_FDB_N_LEARNED = 0x30 + IFLA_BR_FDB_MAX_LEARNED = 0x31 IFLA_BRPORT_UNSPEC = 0x0 IFLA_BRPORT_STATE = 0x1 IFLA_BRPORT_PRIORITY = 0x2 @@ -1649,6 +1664,14 @@ const ( IFLA_BRPORT_BACKUP_PORT = 0x22 IFLA_BRPORT_MRP_RING_OPEN = 0x23 IFLA_BRPORT_MRP_IN_OPEN = 0x24 + IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT = 0x25 + IFLA_BRPORT_MCAST_EHT_HOSTS_CNT = 0x26 + IFLA_BRPORT_LOCKED = 0x27 + IFLA_BRPORT_MAB = 0x28 + IFLA_BRPORT_MCAST_N_GROUPS = 0x29 + IFLA_BRPORT_MCAST_MAX_GROUPS = 0x2a + IFLA_BRPORT_NEIGH_VLAN_SUPPRESS = 0x2b + IFLA_BRPORT_BACKUP_NHID = 
0x2c IFLA_INFO_UNSPEC = 0x0 IFLA_INFO_KIND = 0x1 IFLA_INFO_DATA = 0x2 @@ -1670,6 +1693,9 @@ const ( IFLA_MACVLAN_MACADDR = 0x4 IFLA_MACVLAN_MACADDR_DATA = 0x5 IFLA_MACVLAN_MACADDR_COUNT = 0x6 + IFLA_MACVLAN_BC_QUEUE_LEN = 0x7 + IFLA_MACVLAN_BC_QUEUE_LEN_USED = 0x8 + IFLA_MACVLAN_BC_CUTOFF = 0x9 IFLA_VRF_UNSPEC = 0x0 IFLA_VRF_TABLE = 0x1 IFLA_VRF_PORT_UNSPEC = 0x0 @@ -1693,9 +1719,22 @@ const ( IFLA_XFRM_UNSPEC = 0x0 IFLA_XFRM_LINK = 0x1 IFLA_XFRM_IF_ID = 0x2 + IFLA_XFRM_COLLECT_METADATA = 0x3 IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 + IFLA_NETKIT_UNSPEC = 0x0 + IFLA_NETKIT_PEER_INFO = 0x1 + IFLA_NETKIT_PRIMARY = 0x2 + IFLA_NETKIT_POLICY = 0x3 + IFLA_NETKIT_PEER_POLICY = 0x4 + IFLA_NETKIT_MODE = 0x5 IFLA_VXLAN_UNSPEC = 0x0 IFLA_VXLAN_ID = 0x1 IFLA_VXLAN_GROUP = 0x2 @@ -1726,6 +1765,8 @@ const ( IFLA_VXLAN_GPE = 0x1b IFLA_VXLAN_TTL_INHERIT = 0x1c IFLA_VXLAN_DF = 0x1d + IFLA_VXLAN_VNIFILTER = 0x1e + IFLA_VXLAN_LOCALBYPASS = 0x1f IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1740,6 +1781,7 @@ const ( IFLA_GENEVE_LABEL = 0xb IFLA_GENEVE_TTL_INHERIT = 0xc IFLA_GENEVE_DF = 0xd + IFLA_GENEVE_INNER_PROTO_INHERIT = 0xe IFLA_BAREUDP_UNSPEC = 0x0 IFLA_BAREUDP_PORT = 0x1 IFLA_BAREUDP_ETHERTYPE = 0x2 @@ -1752,6 +1794,8 @@ const ( IFLA_GTP_FD1 = 0x2 IFLA_GTP_PDP_HASHSIZE = 0x3 IFLA_GTP_ROLE = 0x4 + IFLA_GTP_CREATE_SOCKETS = 0x5 + IFLA_GTP_RESTART_COUNT = 0x6 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1781,6 +1825,9 @@ const ( IFLA_BOND_AD_ACTOR_SYSTEM = 0x1a IFLA_BOND_TLB_DYNAMIC_LB = 0x1b IFLA_BOND_PEER_NOTIF_DELAY = 0x1c + IFLA_BOND_AD_LACP_ACTIVE = 0x1d + IFLA_BOND_MISSED_MAX = 0x1e + IFLA_BOND_NS_IP6_TARGET = 0x1f IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1796,6 +1843,7 @@ const ( IFLA_BOND_SLAVE_AD_AGGREGATOR_ID = 0x6 IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE = 0x7 IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE = 0x8 + IFLA_BOND_SLAVE_PRIO = 0x9 IFLA_VF_INFO_UNSPEC = 0x0 IFLA_VF_INFO = 0x1 IFLA_VF_UNSPEC = 0x0 @@ -1854,8 +1902,16 @@ const ( IFLA_STATS_LINK_XSTATS_SLAVE = 0x3 IFLA_STATS_LINK_OFFLOAD_XSTATS = 0x4 IFLA_STATS_AF_SPEC = 0x5 + IFLA_STATS_GETSET_UNSPEC = 0x0 + IFLA_STATS_GET_FILTERS = 0x1 + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS = 0x2 IFLA_OFFLOAD_XSTATS_UNSPEC = 0x0 IFLA_OFFLOAD_XSTATS_CPU_HIT = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO = 0x2 + IFLA_OFFLOAD_XSTATS_L3_STATS = 0x3 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC = 0x0 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED = 0x2 IFLA_XDP_UNSPEC = 0x0 IFLA_XDP_FD = 0x1 IFLA_XDP_ATTACHED = 0x2 @@ -1885,6 +1941,11 @@ const ( IFLA_RMNET_UNSPEC = 0x0 IFLA_RMNET_MUX_ID = 0x1 IFLA_RMNET_FLAGS = 0x2 + IFLA_MCTP_UNSPEC = 0x0 + IFLA_MCTP_NET = 0x1 + IFLA_DSA_UNSPEC = 0x0 + IFLA_DSA_CONDUIT = 0x1 + IFLA_DSA_MASTER = 0x1 ) const ( @@ -2421,6 +2482,15 @@ type XDPMmapOffsets struct { Cr XDPRingOffset } +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Chunk_size uint32 + Headroom uint32 + Flags uint32 + Tx_metadata_len uint32 +} + type XDPStatistics struct { Rx_dropped uint64 Rx_invalid_descs uint64 @@ -2875,7 +2945,7 @@ const ( BPF_TCP_LISTEN = 0xa BPF_TCP_CLOSING = 0xb BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd + BPF_TCP_MAX_STATES = 0xe TCP_BPF_IW = 0x3e9 TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_BPF_DELACK_MAX = 0x3eb @@ -3151,7 +3221,7 @@ const ( 
DEVLINK_CMD_LINECARD_NEW = 0x50 DEVLINK_CMD_LINECARD_DEL = 0x51 DEVLINK_CMD_SELFTESTS_GET = 0x52 - DEVLINK_CMD_MAX = 0x53 + DEVLINK_CMD_MAX = 0x54 DEVLINK_PORT_TYPE_NOTSET = 0x0 DEVLINK_PORT_TYPE_AUTO = 0x1 DEVLINK_PORT_TYPE_ETH = 0x2 @@ -4535,7 +4605,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x146 + NL80211_ATTR_MAX = 0x14a NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -4801,7 +4871,7 @@ const ( NL80211_BSS_FREQUENCY_OFFSET = 0x14 NL80211_BSS_INFORMATION_ELEMENTS = 0x6 NL80211_BSS_LAST_SEEN_BOOTTIME = 0xf - NL80211_BSS_MAX = 0x16 + NL80211_BSS_MAX = 0x18 NL80211_BSS_MLD_ADDR = 0x16 NL80211_BSS_MLO_LINK_ID = 0x15 NL80211_BSS_PAD = 0x10 @@ -4905,7 +4975,7 @@ const ( NL80211_CMD_LEAVE_IBSS = 0x2c NL80211_CMD_LEAVE_MESH = 0x45 NL80211_CMD_LEAVE_OCB = 0x6d - NL80211_CMD_MAX = 0x9a + NL80211_CMD_MAX = 0x9b NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 NL80211_CMD_MODIFY_LINK_STA = 0x97 NL80211_CMD_NAN_MATCH = 0x78 @@ -5139,7 +5209,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x1c + NL80211_FREQUENCY_ATTR_MAX = 0x20 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5633,7 +5703,7 @@ const ( NL80211_STA_FLAG_ASSOCIATED = 0x7 NL80211_STA_FLAG_AUTHENTICATED = 0x5 NL80211_STA_FLAG_AUTHORIZED = 0x1 - NL80211_STA_FLAG_MAX = 0x7 + NL80211_STA_FLAG_MAX = 0x8 NL80211_STA_FLAG_MAX_OLD_API = 0x6 NL80211_STA_FLAG_MFP = 0x4 NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2 @@ -5931,3 +6001,34 @@ type CachestatRange struct { Off uint64 Len uint64 } + +const ( + SK_MEMINFO_RMEM_ALLOC = 0x0 + SK_MEMINFO_RCVBUF = 0x1 + SK_MEMINFO_WMEM_ALLOC = 0x2 + SK_MEMINFO_SNDBUF = 0x3 + SK_MEMINFO_FWD_ALLOC = 0x4 + SK_MEMINFO_WMEM_QUEUED = 0x5 + SK_MEMINFO_OPTMEM = 0x6 + SK_MEMINFO_BACKLOG = 0x7 + SK_MEMINFO_DROPS = 0x8 + SK_MEMINFO_VARS = 0x9 + SKNLGRP_NONE = 0x0 + SKNLGRP_INET_TCP_DESTROY = 0x1 + SKNLGRP_INET_UDP_DESTROY = 0x2 + SKNLGRP_INET6_TCP_DESTROY = 0x3 + SKNLGRP_INET6_UDP_DESTROY = 0x4 + SK_DIAG_BPF_STORAGE_REQ_NONE = 0x0 + SK_DIAG_BPF_STORAGE_REQ_MAP_FD = 0x1 + SK_DIAG_BPF_STORAGE_REP_NONE = 0x0 + SK_DIAG_BPF_STORAGE = 0x1 + SK_DIAG_BPF_STORAGE_NONE = 0x0 + SK_DIAG_BPF_STORAGE_PAD = 0x1 + SK_DIAG_BPF_STORAGE_MAP_ID = 0x2 + SK_DIAG_BPF_STORAGE_MAP_VALUE = 0x3 +) + +type SockDiagReq struct { + Family uint8 + Protocol uint8 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 438a30af..fd402da4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -477,14 +477,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index adceca35..eb7a5e18 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -492,15 +492,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git 
a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index eeaa00a3..d78ac108 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -470,15 +470,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 6739aa91..cd06d47f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -471,15 +471,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index 9920ef63..2f28fe26 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -472,15 +472,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 2923b799..71d6cac2 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -476,15 +476,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index ce2750ee..8596d453 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -474,15 +474,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 3038811d..cd60ea18 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -474,15 +474,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index efc6fed1..b0ae420c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -476,15 +476,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go 
b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 9a654b75..83597287 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -482,15 +482,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 40d358e3..69eb6a5c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -481,15 +481,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 148c6ceb..5f583cb6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -481,15 +481,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 72ba8154..15adc041 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -499,15 +499,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 71e76550..cf3ce900 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -495,15 +495,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 4abbdb9d..590b5673 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -476,15 +476,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index 54f31be6..d9a13af4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -25,10 +25,13 @@ const ( SizeofIPv6Mreq = 20 SizeofICMPv6Filter = 32 SizeofIPv6MTUInfo = 32 + SizeofInet4Pktinfo = 8 + SizeofInet6Pktinfo = 20 SizeofLinger = 8 SizeofSockaddrInet4 = 16 SizeofSockaddrInet6 = 28 SizeofTCPInfo = 0x68 + SizeofUcred = 12 ) type ( @@ -69,12 +72,17 @@ type Utimbuf struct { } type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - 
Version [65]byte - Machine [65]byte - Domainname [65]byte + Sysname [16]byte + Nodename [32]byte + Release [8]byte + Version [8]byte + Machine [16]byte +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 } type RawSockaddrInet4 struct { @@ -325,7 +333,7 @@ type Statvfs_t struct { } type Statfs_t struct { - Type uint32 + Type uint64 Bsize uint64 Blocks uint64 Bfree uint64 @@ -336,6 +344,7 @@ type Statfs_t struct { Namelen uint64 Frsize uint64 Flags uint64 + _ [4]uint64 } type direntLE struct { @@ -412,3 +421,126 @@ type W_Mntent struct { Quiesceowner [8]byte _ [38]byte } + +type EpollEvent struct { + Events uint32 + _ int32 + Fd int32 + Pad int32 +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 + Name string +} + +const ( + SizeofInotifyEvent = 0x10 +) + +type ConsMsg2 struct { + Cm2Format uint16 + Cm2R1 uint16 + Cm2Msglength uint32 + Cm2Msg *byte + Cm2R2 [4]byte + Cm2R3 [4]byte + Cm2Routcde *uint32 + Cm2Descr *uint32 + Cm2Msgflag uint32 + Cm2Token uint32 + Cm2Msgid *uint32 + Cm2R4 [4]byte + Cm2DomToken uint32 + Cm2DomMsgid *uint32 + Cm2ModCartptr *byte + Cm2ModConsidptr *byte + Cm2MsgCart [8]byte + Cm2MsgConsid [4]byte + Cm2R5 [12]byte +} + +const ( + CC_modify = 1 + CC_stop = 2 + CONSOLE_FORMAT_2 = 2 + CONSOLE_FORMAT_3 = 3 + CONSOLE_HRDCPY = 0x80000000 +) + +type OpenHow struct { + Flags uint64 + Mode uint64 + Resolve uint64 +} + +const SizeofOpenHow = 0x18 + +const ( + RESOLVE_CACHED = 0x20 + RESOLVE_BENEATH = 0x8 + RESOLVE_IN_ROOT = 0x10 + RESOLVE_NO_MAGICLINKS = 0x2 + RESOLVE_NO_SYMLINKS = 0x4 + RESOLVE_NO_XDEV = 0x1 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + _ [44]byte +} + +type SysvIpcPerm struct { + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode int32 +} + +type SysvShmDesc struct { + Perm SysvIpcPerm + _ [4]byte + Lpid int32 + Cpid int32 + Nattch uint32 + _ [4]byte + _ [4]byte + _ [4]byte + _ int32 + _ uint8 + _ uint8 + _ uint16 + _ *byte + Segsz uint64 + Atime Time_t + Dtime Time_t + Ctime Time_t +} + +type SysvShmDesc64 struct { + Perm SysvIpcPerm + _ [4]byte + Lpid int32 + Cpid int32 + Nattch uint32 + _ [4]byte + _ [4]byte + _ [4]byte + _ int32 + _ byte + _ uint8 + _ uint16 + _ *byte + Segsz uint64 + Atime int64 + Dtime int64 + Ctime int64 +} diff --git a/vendor/golang.org/x/sys/windows/BUILD.bazel b/vendor/golang.org/x/sys/windows/BUILD.bazel index 5bfc4253..8b268157 100644 --- a/vendor/golang.org/x/sys/windows/BUILD.bazel +++ b/vendor/golang.org/x/sys/windows/BUILD.bazel @@ -5,7 +5,6 @@ go_library( srcs = [ "aliases.go", "dll_windows.go", - "empty.s", "env_windows.go", "eventlog.go", "exec_windows.go", diff --git a/vendor/golang.org/x/sys/windows/aliases.go b/vendor/golang.org/x/sys/windows/aliases.go index ce2d713d..16f90560 100644 --- a/vendor/golang.org/x/sys/windows/aliases.go +++ b/vendor/golang.org/x/sys/windows/aliases.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build windows && go1.9 +//go:build windows package windows diff --git a/vendor/golang.org/x/sys/windows/empty.s b/vendor/golang.org/x/sys/windows/empty.s deleted file mode 100644 index ba64caca..00000000 --- a/vendor/golang.org/x/sys/windows/empty.s +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.12 - -// This file is here to allow bodyless functions with go:linkname for Go 1.11 -// and earlier (see https://golang.org/issue/23311). diff --git a/vendor/golang.org/x/sys/windows/registry/BUILD.bazel b/vendor/golang.org/x/sys/windows/registry/BUILD.bazel new file mode 100644 index 00000000..8422822b --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/BUILD.bazel @@ -0,0 +1,20 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "registry", + srcs = [ + "key.go", + "syscall.go", + "value.go", + "zsyscall_windows.go", + ], + importmap = "peridot.resf.org/vendor/golang.org/x/sys/windows/registry", + importpath = "golang.org/x/sys/windows/registry", + visibility = ["//visibility:public"], + deps = select({ + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/golang.org/x/sys/windows", + ], + "//conditions:default": [], + }), +) diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go new file mode 100644 index 00000000..fd863244 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/key.go @@ -0,0 +1,205 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +// Package registry provides access to the Windows registry. +// +// Here is a simple example, opening a registry key and reading a string value from it. +// +// k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) +// if err != nil { +// log.Fatal(err) +// } +// defer k.Close() +// +// s, _, err := k.GetStringValue("SystemRoot") +// if err != nil { +// log.Fatal(err) +// } +// fmt.Printf("Windows system root is %q\n", s) +package registry + +import ( + "io" + "runtime" + "syscall" + "time" +) + +const ( + // Registry key security and access rights. + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx + // for details. + ALL_ACCESS = 0xf003f + CREATE_LINK = 0x00020 + CREATE_SUB_KEY = 0x00004 + ENUMERATE_SUB_KEYS = 0x00008 + EXECUTE = 0x20019 + NOTIFY = 0x00010 + QUERY_VALUE = 0x00001 + READ = 0x20019 + SET_VALUE = 0x00002 + WOW64_32KEY = 0x00200 + WOW64_64KEY = 0x00100 + WRITE = 0x20006 +) + +// Key is a handle to an open Windows registry key. +// Keys can be obtained by calling OpenKey; there are +// also some predefined root keys such as CURRENT_USER. +// Keys can be used directly in the Windows API. +type Key syscall.Handle + +const ( + // Windows defines some predefined root keys that are always open. + // An application can use these keys as entry points to the registry. + // Normally these keys are used in OpenKey to open new keys, + // but they can also be used anywhere a Key is required. + CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT) + CURRENT_USER = Key(syscall.HKEY_CURRENT_USER) + LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE) + USERS = Key(syscall.HKEY_USERS) + CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG) + PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA) +) + +// Close closes open key k. +func (k Key) Close() error { + return syscall.RegCloseKey(syscall.Handle(k)) +} + +// OpenKey opens a new key with path name relative to key k. +// It accepts any open key, including CURRENT_USER and others, +// and returns the new key and an error. +// The access parameter specifies desired access rights to the +// key to be opened. 
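For illustration alongside the new key.go above: a minimal sketch of the open/close flow, combining access-rights flags from the constant block with the ReadSubKeyNames helper defined further down in the file. The key path is the same example used in the package doc; everything else is hypothetical caller code, not part of the vendored file.

//go:build windows

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows/registry"
)

func main() {
	// Request only the rights actually needed; the flags OR together.
	k, err := registry.OpenKey(registry.LOCAL_MACHINE,
		`SOFTWARE\Microsoft\Windows NT\CurrentVersion`,
		registry.QUERY_VALUE|registry.ENUMERATE_SUB_KEYS)
	if err != nil {
		log.Fatal(err)
	}
	defer k.Close()

	names, err := k.ReadSubKeyNames(-1) // n <= 0 means read all names
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(names)
}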
+func OpenKey(k Key, path string, access uint32) (Key, error) { + p, err := syscall.UTF16PtrFromString(path) + if err != nil { + return 0, err + } + var subkey syscall.Handle + err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey) + if err != nil { + return 0, err + } + return Key(subkey), nil +} + +// OpenRemoteKey opens a predefined registry key on another +// computer pcname. The key to be opened is specified by k, but +// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS. +// If pcname is "", OpenRemoteKey returns local computer key. +func OpenRemoteKey(pcname string, k Key) (Key, error) { + var err error + var p *uint16 + if pcname != "" { + p, err = syscall.UTF16PtrFromString(`\\` + pcname) + if err != nil { + return 0, err + } + } + var remoteKey syscall.Handle + err = regConnectRegistry(p, syscall.Handle(k), &remoteKey) + if err != nil { + return 0, err + } + return Key(remoteKey), nil +} + +// ReadSubKeyNames returns the names of subkeys of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadSubKeyNames(n int) ([]string, error) { + // RegEnumKeyEx must be called repeatedly and to completion. + // During this time, this goroutine cannot migrate away from + // its current thread. See https://golang.org/issue/49320 and + // https://golang.org/issue/49466. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + names := make([]string, 0) + // Registry key size limit is 255 bytes and described there: + // https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx + buf := make([]uint16, 256) //plus extra room for terminating zero byte +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. + l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} + +// CreateKey creates a key named path under open key k. +// CreateKey returns the new key and a boolean flag that reports +// whether the key already existed. +// The access parameter specifies the access rights for the key +// to be created. +func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) { + var h syscall.Handle + var d uint32 + err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path), + 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d) + if err != nil { + return 0, false, err + } + return Key(h), d == _REG_OPENED_EXISTING_KEY, nil +} + +// DeleteKey deletes the subkey path of key k and its values. +func DeleteKey(k Key, path string) error { + return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path)) +} + +// A KeyInfo describes the statistics of a key. It is returned by Stat. 
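The CreateKey/DeleteKey pair above supports a simple create-use-delete round trip; a sketch with a hypothetical subkey path (existed is the boolean flag CreateKey reports):

//go:build windows

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows/registry"
)

func main() {
	// Hypothetical subkey; CreateKey opens it if it already exists.
	k, existed, err := registry.CreateKey(registry.CURRENT_USER,
		`SOFTWARE\example-app`, registry.ALL_ACCESS)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("key already existed:", existed)

	// ... read and write values on k here ...

	k.Close()
	if err := registry.DeleteKey(registry.CURRENT_USER, `SOFTWARE\example-app`); err != nil {
		log.Fatal(err)
	}
}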
+type KeyInfo struct { + SubKeyCount uint32 + MaxSubKeyLen uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte + ValueCount uint32 + MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte + MaxValueLen uint32 // longest data component among the key's values, in bytes + lastWriteTime syscall.Filetime +} + +// ModTime returns the key's last write time. +func (ki *KeyInfo) ModTime() time.Time { + return time.Unix(0, ki.lastWriteTime.Nanoseconds()) +} + +// Stat retrieves information about the open key k. +func (k Key) Stat() (*KeyInfo, error) { + var ki KeyInfo + err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil, + &ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount, + &ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime) + if err != nil { + return nil, err + } + return &ki, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go new file mode 100644 index 00000000..bbf86ccf --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go @@ -0,0 +1,9 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build generate + +package registry + +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go syscall.go diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go new file mode 100644 index 00000000..f533091c --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/syscall.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build windows + +package registry + +import "syscall" + +const ( + _REG_OPTION_NON_VOLATILE = 0 + + _REG_CREATED_NEW_KEY = 1 + _REG_OPENED_EXISTING_KEY = 2 + + _ERROR_NO_MORE_ITEMS syscall.Errno = 259 +) + +func LoadRegLoadMUIString() error { + return procRegLoadMUIStringW.Find() +} + +//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW +//sys regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW +//sys regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW +//sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW +//sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW +//sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW +//sys regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW + +//sys expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go new file mode 100644 index 00000000..74db26b9 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/value.go @@ -0,0 +1,386 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +package registry + +import ( + "errors" + "io" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + // Registry value types. + NONE = 0 + SZ = 1 + EXPAND_SZ = 2 + BINARY = 3 + DWORD = 4 + DWORD_BIG_ENDIAN = 5 + LINK = 6 + MULTI_SZ = 7 + RESOURCE_LIST = 8 + FULL_RESOURCE_DESCRIPTOR = 9 + RESOURCE_REQUIREMENTS_LIST = 10 + QWORD = 11 +) + +var ( + // ErrShortBuffer is returned when the buffer was too short for the operation. + ErrShortBuffer = syscall.ERROR_MORE_DATA + + // ErrNotExist is returned when a registry key or value does not exist. + ErrNotExist = syscall.ERROR_FILE_NOT_FOUND + + // ErrUnexpectedType is returned by Get*Value when the value's type was unexpected. + ErrUnexpectedType = errors.New("unexpected key value type") +) + +// GetValue retrieves the type and data for the specified value associated +// with an open key k. It fills up buffer buf and returns the retrieved +// byte count n. If buf is too small to fit the stored value it returns +// ErrShortBuffer error along with the required buffer size n. +// If no buffer is provided, GetValue returns the value's type only. +// If the value does not exist, the error returned is ErrNotExist. +// +// GetValue is a low level function. If value's type is known, use the appropriate +// Get*Value function instead. 
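The doc comment above implies the usual two-step pattern for this low-level call: probe with a small buffer, then retry with the size reported alongside ErrShortBuffer. A sketch of hypothetical caller code, assuming k was opened with QUERY_VALUE access:

//go:build windows

package example

import "golang.org/x/sys/windows/registry"

// readRaw fetches a value's raw bytes via GetValue, growing the buffer
// once when ErrShortBuffer reports the required size in n.
func readRaw(k registry.Key, name string) ([]byte, uint32, error) {
	buf := make([]byte, 8)
	n, valtype, err := k.GetValue(name, buf)
	if err == registry.ErrShortBuffer {
		buf = make([]byte, n)
		n, valtype, err = k.GetValue(name, buf)
	}
	if err != nil {
		return nil, 0, err
	}
	return buf[:n], valtype, nil
}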
+func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return 0, 0, err + } + var pbuf *byte + if len(buf) > 0 { + pbuf = (*byte)(unsafe.Pointer(&buf[0])) + } + l := uint32(len(buf)) + err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l) + if err != nil { + return int(l), valtype, err + } + return int(l), valtype, nil +} + +func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return nil, 0, err + } + var t uint32 + n := uint32(len(buf)) + for { + err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n) + if err == nil { + return buf[:n], t, nil + } + if err != syscall.ERROR_MORE_DATA { + return nil, 0, err + } + if n <= uint32(len(buf)) { + return nil, 0, err + } + buf = make([]byte, n) + } +} + +// GetStringValue retrieves the string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringValue returns ErrNotExist. +// If value is not SZ or EXPAND_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return "", typ, err2 + } + switch typ { + case SZ, EXPAND_SZ: + default: + return "", typ, ErrUnexpectedType + } + if len(data) == 0 { + return "", typ, nil + } + u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + return syscall.UTF16ToString(u), typ, nil +} + +// GetMUIStringValue retrieves the localized string value for +// the specified value name associated with an open key k. +// If the value name doesn't exist or the localized string value +// can't be resolved, GetMUIStringValue returns ErrNotExist. +// GetMUIStringValue panics if the system doesn't support +// regLoadMUIString; use LoadRegLoadMUIString to check if +// regLoadMUIString is supported before calling this function. +func (k Key) GetMUIStringValue(name string) (string, error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return "", err + } + + buf := make([]uint16, 1024) + var buflen uint32 + var pdir *uint16 + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path + + // Try to resolve the string value using the system directory as + // a DLL search path; this assumes the string value is of the form + // @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320. + + // This approach works with tzres.dll but may have to be revised + // in the future to allow callers to provide custom search paths. 
+ + var s string + s, err = ExpandString("%SystemRoot%\\system32\\") + if err != nil { + return "", err + } + pdir, err = syscall.UTF16PtrFromString(s) + if err != nil { + return "", err + } + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed + if buflen <= uint32(len(buf)) { + break // Buffer not growing, assume race; break + } + buf = make([]uint16, buflen) + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + if err != nil { + return "", err + } + + return syscall.UTF16ToString(buf), nil +} + +// ExpandString expands environment-variable strings and replaces +// them with the values defined for the current user. +// Use ExpandString to expand EXPAND_SZ strings. +func ExpandString(value string) (string, error) { + if value == "" { + return "", nil + } + p, err := syscall.UTF16PtrFromString(value) + if err != nil { + return "", err + } + r := make([]uint16, 100) + for { + n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r))) + if err != nil { + return "", err + } + if n <= uint32(len(r)) { + return syscall.UTF16ToString(r[:n]), nil + } + r = make([]uint16, n) + } +} + +// GetStringsValue retrieves the []string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringsValue returns ErrNotExist. +// If value is not MULTI_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != MULTI_SZ { + return nil, typ, ErrUnexpectedType + } + if len(data) == 0 { + return nil, typ, nil + } + p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + if len(p) == 0 { + return nil, typ, nil + } + if p[len(p)-1] == 0 { + p = p[:len(p)-1] // remove terminating null + } + val = make([]string, 0, 5) + from := 0 + for i, c := range p { + if c == 0 { + val = append(val, string(utf16.Decode(p[from:i]))) + from = i + 1 + } + } + return val, typ, nil +} + +// GetIntegerValue retrieves the integer value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetIntegerValue returns ErrNotExist. +// If value is not DWORD or QWORD, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 8)) + if err2 != nil { + return 0, typ, err2 + } + switch typ { + case DWORD: + if len(data) != 4 { + return 0, typ, errors.New("DWORD value is not 4 bytes long") + } + var val32 uint32 + copy((*[4]byte)(unsafe.Pointer(&val32))[:], data) + return uint64(val32), DWORD, nil + case QWORD: + if len(data) != 8 { + return 0, typ, errors.New("QWORD value is not 8 bytes long") + } + copy((*[8]byte)(unsafe.Pointer(&val))[:], data) + return val, QWORD, nil + default: + return 0, typ, ErrUnexpectedType + } +} + +// GetBinaryValue retrieves the binary value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetBinaryValue returns ErrNotExist. +// If value is not BINARY, it will return the correct value +// type and ErrUnexpectedType. 
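A sketch of how the typed getters above compose (the value name is hypothetical): per the ExpandString doc, EXPAND_SZ strings come back unexpanded, so callers pair GetStringValue with ExpandString, while ErrNotExist and ErrUnexpectedType pass through unchanged.

//go:build windows

package example

import "golang.org/x/sys/windows/registry"

// expandedString reads a string value and, when it is stored as
// EXPAND_SZ, substitutes environment variables via ExpandString.
func expandedString(k registry.Key, name string) (string, error) {
	s, valtype, err := k.GetStringValue(name)
	if err != nil {
		return "", err // ErrNotExist or ErrUnexpectedType pass through
	}
	if valtype == registry.EXPAND_SZ {
		return registry.ExpandString(s)
	}
	return s, nil
}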
+func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != BINARY { + return nil, typ, ErrUnexpectedType + } + return data, typ, nil +} + +func (k Key) setValue(name string, valtype uint32, data []byte) error { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return err + } + if len(data) == 0 { + return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0) + } + return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data))) +} + +// SetDWordValue sets the data and type of a name value +// under key k to value and DWORD. +func (k Key) SetDWordValue(name string, value uint32) error { + return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:]) +} + +// SetQWordValue sets the data and type of a name value +// under key k to value and QWORD. +func (k Key) SetQWordValue(name string, value uint64) error { + return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:]) +} + +func (k Key) setStringValue(name string, valtype uint32, value string) error { + v, err := syscall.UTF16FromString(value) + if err != nil { + return err + } + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, valtype, buf) +} + +// SetStringValue sets the data and type of a name value +// under key k to value and SZ. The value must not contain a zero byte. +func (k Key) SetStringValue(name, value string) error { + return k.setStringValue(name, SZ, value) +} + +// SetExpandStringValue sets the data and type of a name value +// under key k to value and EXPAND_SZ. The value must not contain a zero byte. +func (k Key) SetExpandStringValue(name, value string) error { + return k.setStringValue(name, EXPAND_SZ, value) +} + +// SetStringsValue sets the data and type of a name value +// under key k to value and MULTI_SZ. The value strings +// must not contain a zero byte. +func (k Key) SetStringsValue(name string, value []string) error { + ss := "" + for _, s := range value { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return errors.New("string cannot have 0 inside") + } + } + ss += s + "\x00" + } + v := utf16.Encode([]rune(ss + "\x00")) + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, MULTI_SZ, buf) +} + +// SetBinaryValue sets the data and type of a name value +// under key k to value and BINARY. +func (k Key) SetBinaryValue(name string, value []byte) error { + return k.setValue(name, BINARY, value) +} + +// DeleteValue removes a named value from the key k. +func (k Key) DeleteValue(name string) error { + return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name)) +} + +// ReadValueNames returns the value names of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadValueNames(n int) ([]string, error) { + ki, err := k.Stat() + if err != nil { + return nil, err + } + names := make([]string, 0, ki.ValueCount) + buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. 
+ l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go new file mode 100644 index 00000000..fc1835d8 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go @@ -0,0 +1,117 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package registry + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW") + procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") + procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW") + procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW") + procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW") + procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW") + procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW") + procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") +) + +func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno 
error) { + r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 26be94a8..97651b5b 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -68,6 +68,7 @@ type UserInfo10 struct { //sys NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) = netapi32.NetUserGetInfo //sys NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) = netapi32.NetGetJoinInformation //sys NetApiBufferFree(buf *byte) (neterr error) = netapi32.NetApiBufferFree +//sys NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) = netapi32.NetUserEnum const ( // do not reorder @@ -893,7 +894,7 @@ type ACL struct { aclRevision byte sbz1 byte aclSize uint16 - aceCount uint16 + AceCount uint16 sbz2 uint16 } @@ -1086,6 +1087,27 @@ type EXPLICIT_ACCESS struct { Trustee TRUSTEE } +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header +type ACE_HEADER struct { + AceType uint8 + AceFlags uint8 + AceSize uint16 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-access_allowed_ace +type ACCESS_ALLOWED_ACE struct { + Header ACE_HEADER + Mask ACCESS_MASK + SidStart uint32 +} + +const ( + // Constants for AceType + // https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header + ACCESS_ALLOWED_ACE_TYPE = 0 + ACCESS_DENIED_ACE_TYPE = 1 +) + // This type is the union inside of TRUSTEE and must be created using one of the TrusteeValueFrom* functions. 
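
As a usage note: the ACE_HEADER and ACCESS_ALLOWED_ACE types above, together with the newly exported ACL.AceCount field and the GetAce wrapper this patch adds below, make it possible to walk a DACL directly from Go. A minimal sketch, not part of the vendored code — the file path is a placeholder, and error handling is abbreviated:

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"
)

func main() {
	// Read the DACL of an arbitrary file; the path is a placeholder.
	sd, err := windows.GetNamedSecurityInfo(
		`C:\Windows\notepad.exe`,
		windows.SE_FILE_OBJECT,
		windows.DACL_SECURITY_INFORMATION,
	)
	if err != nil {
		panic(err)
	}
	dacl, _, err := sd.DACL()
	if err != nil || dacl == nil {
		panic("object has no DACL")
	}
	// AceCount was previously unexported (aceCount); this patch exports it.
	for i := uint32(0); i < uint32(dacl.AceCount); i++ {
		var ace *windows.ACCESS_ALLOWED_ACE
		if err := windows.GetAce(dacl, i, &ace); err != nil {
			continue
		}
		// ACCESS_DENIED_ACE shares this layout, so the same struct works for
		// both ACE types added above. SidStart holds the first 32 bits of the
		// trustee SID embedded in the ACE.
		sid := (*windows.SID)(unsafe.Pointer(&ace.SidStart))
		fmt.Printf("aceType=%d mask=%#x sid=%s\n", ace.Header.AceType, ace.Mask, sid)
	}
}
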
type TrusteeValue uintptr @@ -1157,6 +1179,7 @@ type OBJECTS_AND_NAME struct { //sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD //sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW +//sys GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) = advapi32.GetAce // Control returns the security descriptor control bits. func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) { diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 6395a031..6525c62f 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -165,6 +165,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW //sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) +//sys DisconnectNamedPipe(pipe Handle) (err error) //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState @@ -348,8 +349,19 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost //sys GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) //sys SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) +//sys ClearCommBreak(handle Handle) (err error) +//sys ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) +//sys EscapeCommFunction(handle Handle, dwFunc uint32) (err error) +//sys GetCommState(handle Handle, lpDCB *DCB) (err error) +//sys GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) //sys GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys PurgeComm(handle Handle, dwFlags uint32) (err error) +//sys SetCommBreak(handle Handle) (err error) +//sys SetCommMask(handle Handle, dwEvtMask uint32) (err error) +//sys SetCommState(handle Handle, lpDCB *DCB) (err error) //sys SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) +//sys WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) //sys GetActiveProcessorCount(groupNumber uint16) (ret uint32) //sys GetMaximumProcessorCount(groupNumber uint16) (ret uint32) //sys EnumWindows(enumFunc uintptr, param 
unsafe.Pointer) (err error) = user32.EnumWindows @@ -1834,3 +1846,73 @@ func ResizePseudoConsole(pconsole Handle, size Coord) error { // accept arguments that can be casted to uintptr, and Coord can't. return resizePseudoConsole(pconsole, *((*uint32)(unsafe.Pointer(&size)))) } + +// DCB constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-dcb. +const ( + CBR_110 = 110 + CBR_300 = 300 + CBR_600 = 600 + CBR_1200 = 1200 + CBR_2400 = 2400 + CBR_4800 = 4800 + CBR_9600 = 9600 + CBR_14400 = 14400 + CBR_19200 = 19200 + CBR_38400 = 38400 + CBR_57600 = 57600 + CBR_115200 = 115200 + CBR_128000 = 128000 + CBR_256000 = 256000 + + DTR_CONTROL_DISABLE = 0x00000000 + DTR_CONTROL_ENABLE = 0x00000010 + DTR_CONTROL_HANDSHAKE = 0x00000020 + + RTS_CONTROL_DISABLE = 0x00000000 + RTS_CONTROL_ENABLE = 0x00001000 + RTS_CONTROL_HANDSHAKE = 0x00002000 + RTS_CONTROL_TOGGLE = 0x00003000 + + NOPARITY = 0 + ODDPARITY = 1 + EVENPARITY = 2 + MARKPARITY = 3 + SPACEPARITY = 4 + + ONESTOPBIT = 0 + ONE5STOPBITS = 1 + TWOSTOPBITS = 2 +) + +// EscapeCommFunction constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-escapecommfunction. +const ( + SETXOFF = 1 + SETXON = 2 + SETRTS = 3 + CLRRTS = 4 + SETDTR = 5 + CLRDTR = 6 + SETBREAK = 8 + CLRBREAK = 9 +) + +// PurgeComm constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-purgecomm. +const ( + PURGE_TXABORT = 0x0001 + PURGE_RXABORT = 0x0002 + PURGE_TXCLEAR = 0x0004 + PURGE_RXCLEAR = 0x0008 +) + +// SetCommMask constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-setcommmask. +const ( + EV_RXCHAR = 0x0001 + EV_RXFLAG = 0x0002 + EV_TXEMPTY = 0x0004 + EV_CTS = 0x0008 + EV_DSR = 0x0010 + EV_RLSD = 0x0020 + EV_BREAK = 0x0040 + EV_ERR = 0x0080 + EV_RING = 0x0100 +) diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 359780f6..d8cb71db 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -3380,3 +3380,27 @@ type BLOB struct { Size uint32 BlobData *byte } + +type ComStat struct { + Flags uint32 + CBInQue uint32 + CBOutQue uint32 +} + +type DCB struct { + DCBlength uint32 + BaudRate uint32 + Flags uint32 + wReserved uint16 + XonLim uint16 + XoffLim uint16 + ByteSize uint8 + Parity uint8 + StopBits uint8 + XonChar byte + XoffChar byte + ErrorChar byte + EofChar byte + EvtChar byte + wReserved1 uint16 +} diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index e8791c82..eba76101 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -91,6 +91,7 @@ var ( procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") procEqualSid = modadvapi32.NewProc("EqualSid") procFreeSid = modadvapi32.NewProc("FreeSid") + procGetAce = modadvapi32.NewProc("GetAce") procGetLengthSid = modadvapi32.NewProc("GetLengthSid") procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl") @@ -188,6 +189,8 @@ var ( procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procClearCommBreak = modkernel32.NewProc("ClearCommBreak") + procClearCommError = modkernel32.NewProc("ClearCommError") 
procCloseHandle = modkernel32.NewProc("CloseHandle") procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") @@ -212,7 +215,9 @@ var ( procDeleteProcThreadAttributeList = modkernel32.NewProc("DeleteProcThreadAttributeList") procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") + procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") + procEscapeCommFunction = modkernel32.NewProc("EscapeCommFunction") procExitProcess = modkernel32.NewProc("ExitProcess") procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") procFindClose = modkernel32.NewProc("FindClose") @@ -236,6 +241,8 @@ var ( procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") procGetACP = modkernel32.NewProc("GetACP") procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") + procGetCommModemStatus = modkernel32.NewProc("GetCommModemStatus") + procGetCommState = modkernel32.NewProc("GetCommState") procGetCommTimeouts = modkernel32.NewProc("GetCommTimeouts") procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") @@ -322,6 +329,7 @@ var ( procProcess32NextW = modkernel32.NewProc("Process32NextW") procProcessIdToSessionId = modkernel32.NewProc("ProcessIdToSessionId") procPulseEvent = modkernel32.NewProc("PulseEvent") + procPurgeComm = modkernel32.NewProc("PurgeComm") procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") procQueryFullProcessImageNameW = modkernel32.NewProc("QueryFullProcessImageNameW") procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") @@ -335,6 +343,9 @@ var ( procResetEvent = modkernel32.NewProc("ResetEvent") procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") procResumeThread = modkernel32.NewProc("ResumeThread") + procSetCommBreak = modkernel32.NewProc("SetCommBreak") + procSetCommMask = modkernel32.NewProc("SetCommMask") + procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") @@ -342,7 +353,6 @@ var ( procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") - procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") procSetErrorMode = modkernel32.NewProc("SetErrorMode") procSetEvent = modkernel32.NewProc("SetEvent") @@ -351,6 +361,7 @@ var ( procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") procSetFilePointer = modkernel32.NewProc("SetFilePointer") procSetFileTime = modkernel32.NewProc("SetFileTime") + procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") procSetNamedPipeHandleState = modkernel32.NewProc("SetNamedPipeHandleState") @@ -361,6 +372,7 @@ var ( procSetStdHandle = modkernel32.NewProc("SetStdHandle") procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") 
procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") + procSetupComm = modkernel32.NewProc("SetupComm") procSizeofResource = modkernel32.NewProc("SizeofResource") procSleepEx = modkernel32.NewProc("SleepEx") procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") @@ -379,6 +391,7 @@ var ( procVirtualQueryEx = modkernel32.NewProc("VirtualQueryEx") procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") procWTSGetActiveConsoleSessionId = modkernel32.NewProc("WTSGetActiveConsoleSessionId") + procWaitCommEvent = modkernel32.NewProc("WaitCommEvent") procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") @@ -389,6 +402,7 @@ var ( procTransmitFile = modmswsock.NewProc("TransmitFile") procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") + procNetUserEnum = modnetapi32.NewProc("NetUserEnum") procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") procNtCreateFile = modntdll.NewProc("NtCreateFile") procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") @@ -1211,6 +1225,14 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE return } +func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) { + r0, _, _ := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) + if r0 == 0 { + ret = GetLastError() + } + return +} + func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) { r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) if r1 == 0 { @@ -1641,6 +1663,22 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) { return } +func ClearCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func CloseHandle(handle Handle) (err error) { r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { @@ -1845,6 +1883,14 @@ func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff return } +func DisconnectNamedPipe(pipe Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { var _p0 uint32 if bInheritHandle { @@ -1857,6 +1903,14 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP return } +func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { + r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func ExitProcess(exitcode uint32) { 
syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) return @@ -2058,6 +2112,22 @@ func GetActiveProcessorCount(groupNumber uint16) (ret uint32) { return } +func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2810,6 +2880,14 @@ func PulseEvent(event Handle) (err error) { return } +func PurgeComm(handle Handle, dwFlags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) n = uint32(r0) @@ -2924,6 +3002,30 @@ func ResumeThread(thread Handle) (ret uint32, err error) { return } +func SetCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2989,14 +3091,6 @@ func SetEndOfFile(handle Handle) (err error) { return } -func SetFileValidData(handle Handle, validDataLength int64) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) if r1 == 0 { @@ -3060,6 +3154,14 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim return } +func SetFileValidData(handle Handle, validDataLength int64) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) if r1 == 0 { @@ -3145,6 +3247,14 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro return } +func SetupComm(handle Handle, 
dwInQueue uint32, dwOutQueue uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) size = uint32(r0) @@ -3291,6 +3401,14 @@ func WTSGetActiveConsoleSessionId() (sessionID uint32) { return } +func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { var _p0 uint32 if waitAll { @@ -3378,6 +3496,14 @@ func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (nete return } +func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) { + r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) if r0 != 0 { diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json index 0c82b86a..633d334d 100644 --- a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json +++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json @@ -12,7 +12,7 @@ "baseUrl": "https://iamcredentials.googleapis.com/", "batchPath": "batch", "canonicalName": "IAM Credentials", - "description": "Creates short-lived credentials for impersonating IAM service accounts. To enable this API, you must enable the IAM API (iam.googleapis.com). ", + "description": "Creates short-lived credentials for impersonating IAM service accounts. Disabling this API also disables the IAM API (iam.googleapis.com). However, enabling this API doesn't enable the IAM API. 
", "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials", "fullyEncodeReservedExpansion": true, @@ -226,7 +226,7 @@ } } }, - "revision": "20211203", + "revision": "20240227", "rootUrl": "https://iamcredentials.googleapis.com/", "schemas": { "GenerateAccessTokenRequest": { diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go index 947e83a5..f0c69484 100644 --- a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go +++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go @@ -92,12 +92,11 @@ const apiVersion = "v1" const basePath = "https://iamcredentials.googleapis.com/" const basePathTemplate = "https://iamcredentials.UNIVERSE_DOMAIN/" const mtlsBasePath = "https://iamcredentials.mtls.googleapis.com/" -const defaultUniverseDomain = "googleapis.com" // OAuth2 scopes used by this API. const ( - // See, edit, configure, and delete your Google Cloud data and see the - // email address for your Google Account. + // See, edit, configure, and delete your Google Cloud data and see the email + // address for your Google Account. CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" ) @@ -111,7 +110,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) opts = append(opts, internaloption.WithDefaultEndpointTemplate(basePathTemplate)) opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) - opts = append(opts, internaloption.WithDefaultUniverseDomain(defaultUniverseDomain)) + opts = append(opts, internaloption.EnableNewAuthLibrary()) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -177,354 +176,276 @@ type ProjectsServiceAccountsService struct { } type GenerateAccessTokenRequest struct { - // Delegates: The sequence of service accounts in a delegation chain. - // This field is required for delegated requests - // (https://cloud.google.com/iam/help/credentials/delegated-request). - // For direct requests - // (https://cloud.google.com/iam/help/credentials/direct-request), which - // are more common, do not specify this field. Each service account must - // be granted the `roles/iam.serviceAccountTokenCreator` role on its - // next service account in the chain. The last service account in the - // chain must be granted the `roles/iam.serviceAccountTokenCreator` role - // on the service account that is specified in the `name` field of the - // request. The delegates must have the following format: - // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` - // wildcard character is required; replacing it with a project ID is + // Delegates: The sequence of service accounts in a delegation chain. This + // field is required for delegated requests + // (https://cloud.google.com/iam/help/credentials/delegated-request). For + // direct requests + // (https://cloud.google.com/iam/help/credentials/direct-request), which are + // more common, do not specify this field. Each service account must be granted + // the `roles/iam.serviceAccountTokenCreator` role on its next service account + // in the chain. The last service account in the chain must be granted the + // `roles/iam.serviceAccountTokenCreator` role on the service account that is + // specified in the `name` field of the request. 
The delegates must have the + // following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. + // The `-` wildcard character is required; replacing it with a project ID is // invalid. Delegates []string `json:"delegates,omitempty"` - - // Lifetime: The desired lifetime duration of the access token in - // seconds. By default, the maximum allowed value is 1 hour. To set a - // lifetime of up to 12 hours, you can add the service account as an - // allowed value in an Organization Policy that enforces the - // `constraints/iam.allowServiceAccountCredentialLifetimeExtension` - // constraint. See detailed instructions at - // https://cloud.google.com/iam/help/credentials/lifetime If a value is - // not specified, the token's lifetime will be set to a default value of - // 1 hour. + // Lifetime: The desired lifetime duration of the access token in seconds. By + // default, the maximum allowed value is 1 hour. To set a lifetime of up to 12 + // hours, you can add the service account as an allowed value in an + // Organization Policy that enforces the + // `constraints/iam.allowServiceAccountCredentialLifetimeExtension` constraint. + // See detailed instructions at + // https://cloud.google.com/iam/help/credentials/lifetime If a value is not + // specified, the token's lifetime will be set to a default value of 1 hour. Lifetime string `json:"lifetime,omitempty"` - - // Scope: Required. Code to identify the scopes to be included in the - // OAuth 2.0 access token. See - // https://developers.google.com/identity/protocols/googlescopes for - // more information. At least one value required. + // Scope: Required. Code to identify the scopes to be included in the OAuth 2.0 + // access token. See + // https://developers.google.com/identity/protocols/googlescopes for more + // information. At least one value required. Scope []string `json:"scope,omitempty"` - // ForceSendFields is a list of field names (e.g. "Delegates") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Delegates") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Delegates") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
NullFields []string `json:"-"` } -func (s *GenerateAccessTokenRequest) MarshalJSON() ([]byte, error) { +func (s GenerateAccessTokenRequest) MarshalJSON() ([]byte, error) { type NoMethod GenerateAccessTokenRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type GenerateAccessTokenResponse struct { // AccessToken: The OAuth 2.0 access token. AccessToken string `json:"accessToken,omitempty"` - // ExpireTime: Token expiration time. The expiration time is always set. ExpireTime string `json:"expireTime,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "AccessToken") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AccessToken") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AccessToken") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *GenerateAccessTokenResponse) MarshalJSON() ([]byte, error) { +func (s GenerateAccessTokenResponse) MarshalJSON() ([]byte, error) { type NoMethod GenerateAccessTokenResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type GenerateIdTokenRequest struct { - // Audience: Required. The audience for the token, such as the API or - // account that this token grants access to. + // Audience: Required. The audience for the token, such as the API or account + // that this token grants access to. Audience string `json:"audience,omitempty"` - - // Delegates: The sequence of service accounts in a delegation chain. - // Each service account must be granted the - // `roles/iam.serviceAccountTokenCreator` role on its next service - // account in the chain. The last service account in the chain must be - // granted the `roles/iam.serviceAccountTokenCreator` role on the - // service account that is specified in the `name` field of the request. + // Delegates: The sequence of service accounts in a delegation chain. 
Each + // service account must be granted the `roles/iam.serviceAccountTokenCreator` + // role on its next service account in the chain. The last service account in + // the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on + // the service account that is specified in the `name` field of the request. // The delegates must have the following format: - // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` - // wildcard character is required; replacing it with a project ID is - // invalid. + // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard + // character is required; replacing it with a project ID is invalid. Delegates []string `json:"delegates,omitempty"` - - // IncludeEmail: Include the service account email in the token. If set - // to `true`, the token will contain `email` and `email_verified` - // claims. + // IncludeEmail: Include the service account email in the token. If set to + // `true`, the token will contain `email` and `email_verified` claims. IncludeEmail bool `json:"includeEmail,omitempty"` - // ForceSendFields is a list of field names (e.g. "Audience") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Audience") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Audience") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *GenerateIdTokenRequest) MarshalJSON() ([]byte, error) { +func (s GenerateIdTokenRequest) MarshalJSON() ([]byte, error) { type NoMethod GenerateIdTokenRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type GenerateIdTokenResponse struct { // Token: The OpenId Connect ID token. Token string `json:"token,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Token") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. 
- // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Token") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Token") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *GenerateIdTokenResponse) MarshalJSON() ([]byte, error) { +func (s GenerateIdTokenResponse) MarshalJSON() ([]byte, error) { type NoMethod GenerateIdTokenResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SignBlobRequest struct { - // Delegates: The sequence of service accounts in a delegation chain. - // Each service account must be granted the - // `roles/iam.serviceAccountTokenCreator` role on its next service - // account in the chain. The last service account in the chain must be - // granted the `roles/iam.serviceAccountTokenCreator` role on the - // service account that is specified in the `name` field of the request. + // Delegates: The sequence of service accounts in a delegation chain. Each + // service account must be granted the `roles/iam.serviceAccountTokenCreator` + // role on its next service account in the chain. The last service account in + // the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on + // the service account that is specified in the `name` field of the request. // The delegates must have the following format: - // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` - // wildcard character is required; replacing it with a project ID is - // invalid. + // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard + // character is required; replacing it with a project ID is invalid. Delegates []string `json:"delegates,omitempty"` - // Payload: Required. The bytes to sign. Payload string `json:"payload,omitempty"` - // ForceSendFields is a list of field names (e.g. "Delegates") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Delegates") to include in - // API requests with the JSON null value. 
By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Delegates") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *SignBlobRequest) MarshalJSON() ([]byte, error) { +func (s SignBlobRequest) MarshalJSON() ([]byte, error) { type NoMethod SignBlobRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SignBlobResponse struct { - // KeyId: The ID of the key used to sign the blob. The key used for - // signing will remain valid for at least 12 hours after the blob is - // signed. To verify the signature, you can retrieve the public key in - // several formats from the following endpoints: - RSA public key - // wrapped in an X.509 v3 certificate: - // `https://www.googleapis.com/service_accounts/v1/metadata/x509/{ACCOUNT - // _EMAIL}` - Raw key in JSON format: - // `https://www.googleapis.com/service_accounts/v1/metadata/raw/{ACCOUNT_ - // EMAIL}` - JSON Web Key (JWK): - // `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACCOUNT_ - // EMAIL}` + // KeyId: The ID of the key used to sign the blob. The key used for signing + // will remain valid for at least 12 hours after the blob is signed. To verify + // the signature, you can retrieve the public key in several formats from the + // following endpoints: - RSA public key wrapped in an X.509 v3 certificate: + // `https://www.googleapis.com/service_accounts/v1/metadata/x509/{ACCOUNT_EMAIL} + // ` - Raw key in JSON format: + // `https://www.googleapis.com/service_accounts/v1/metadata/raw/{ACCOUNT_EMAIL}` + // - JSON Web Key (JWK): + // `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACCOUNT_EMAIL}` KeyId string `json:"keyId,omitempty"` - - // SignedBlob: The signature for the blob. Does not include the original - // blob. After the key pair referenced by the `key_id` response field - // expires, Google no longer exposes the public key that can be used to - // verify the blob. As a result, the receiver can no longer verify the - // signature. + // SignedBlob: The signature for the blob. Does not include the original blob. + // After the key pair referenced by the `key_id` response field expires, Google + // no longer exposes the public key that can be used to verify the blob. As a + // result, the receiver can no longer verify the signature. SignedBlob string `json:"signedBlob,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "KeyId") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. 
- // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "KeyId") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "KeyId") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *SignBlobResponse) MarshalJSON() ([]byte, error) { +func (s SignBlobResponse) MarshalJSON() ([]byte, error) { type NoMethod SignBlobResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SignJwtRequest struct { - // Delegates: The sequence of service accounts in a delegation chain. - // Each service account must be granted the - // `roles/iam.serviceAccountTokenCreator` role on its next service - // account in the chain. The last service account in the chain must be - // granted the `roles/iam.serviceAccountTokenCreator` role on the - // service account that is specified in the `name` field of the request. + // Delegates: The sequence of service accounts in a delegation chain. Each + // service account must be granted the `roles/iam.serviceAccountTokenCreator` + // role on its next service account in the chain. The last service account in + // the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on + // the service account that is specified in the `name` field of the request. // The delegates must have the following format: - // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` - // wildcard character is required; replacing it with a project ID is - // invalid. + // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard + // character is required; replacing it with a project ID is invalid. Delegates []string `json:"delegates,omitempty"` - - // Payload: Required. The JWT payload to sign. Must be a serialized JSON - // object that contains a JWT Claims Set. For example: `{"sub": - // "user@example.com", "iat": 313435}` If the JWT Claims Set contains an - // expiration time (`exp`) claim, it must be an integer timestamp that - // is not in the past and no more than 12 hours in the future. + // Payload: Required. The JWT payload to sign. Must be a serialized JSON object + // that contains a JWT Claims Set. For example: `{"sub": "user@example.com", + // "iat": 313435}` If the JWT Claims Set contains an expiration time (`exp`) + // claim, it must be an integer timestamp that is not in the past and no more + // than 12 hours in the future. Payload string `json:"payload,omitempty"` - // ForceSendFields is a list of field names (e.g. "Delegates") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. 
However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Delegates") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Delegates") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *SignJwtRequest) MarshalJSON() ([]byte, error) { +func (s SignJwtRequest) MarshalJSON() ([]byte, error) { type NoMethod SignJwtRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type SignJwtResponse struct { - // KeyId: The ID of the key used to sign the JWT. The key used for - // signing will remain valid for at least 12 hours after the JWT is - // signed. To verify the signature, you can retrieve the public key in - // several formats from the following endpoints: - RSA public key - // wrapped in an X.509 v3 certificate: - // `https://www.googleapis.com/service_accounts/v1/metadata/x509/{ACCOUNT - // _EMAIL}` - Raw key in JSON format: - // `https://www.googleapis.com/service_accounts/v1/metadata/raw/{ACCOUNT_ - // EMAIL}` - JSON Web Key (JWK): - // `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACCOUNT_ - // EMAIL}` + // KeyId: The ID of the key used to sign the JWT. The key used for signing will + // remain valid for at least 12 hours after the JWT is signed. To verify the + // signature, you can retrieve the public key in several formats from the + // following endpoints: - RSA public key wrapped in an X.509 v3 certificate: + // `https://www.googleapis.com/service_accounts/v1/metadata/x509/{ACCOUNT_EMAIL} + // ` - Raw key in JSON format: + // `https://www.googleapis.com/service_accounts/v1/metadata/raw/{ACCOUNT_EMAIL}` + // - JSON Web Key (JWK): + // `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACCOUNT_EMAIL}` KeyId string `json:"keyId,omitempty"` - - // SignedJwt: The signed JWT. Contains the automatically generated - // header; the client-supplied payload; and the signature, which is - // generated using the key referenced by the `kid` field in the header. - // After the key pair referenced by the `key_id` response field expires, - // Google no longer exposes the public key that can be used to verify - // the JWT. As a result, the receiver can no longer verify the - // signature. + // SignedJwt: The signed JWT. Contains the automatically generated header; the + // client-supplied payload; and the signature, which is generated using the key + // referenced by the `kid` field in the header. 
After the key pair referenced + // by the `key_id` response field expires, Google no longer exposes the public + // key that can be used to verify the JWT. As a result, the receiver can no + // longer verify the signature. SignedJwt string `json:"signedJwt,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "KeyId") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "KeyId") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "KeyId") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *SignJwtResponse) MarshalJSON() ([]byte, error) { +func (s SignJwtResponse) MarshalJSON() ([]byte, error) { type NoMethod SignJwtResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// method id "iamcredentials.projects.serviceAccounts.generateAccessToken": - type ProjectsServiceAccountsGenerateAccessTokenCall struct { s *Service name string @@ -534,14 +455,13 @@ type ProjectsServiceAccountsGenerateAccessTokenCall struct { header_ http.Header } -// GenerateAccessToken: Generates an OAuth 2.0 access token for a -// service account. +// GenerateAccessToken: Generates an OAuth 2.0 access token for a service +// account. // -// - name: The resource name of the service account for which the -// credentials are requested, in the following format: -// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` -// wildcard character is required; replacing it with a project ID is -// invalid. +// - name: The resource name of the service account for which the credentials +// are requested, in the following format: +// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard +// character is required; replacing it with a project ID is invalid. 
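
For context, callers use the regenerated surface exactly as before; this revision changes doc comments, marshaling receivers, and header plumbing, not the call shape. A minimal sketch of a GenerateAccessToken call, assuming credentials come from the environment — the service-account email is a placeholder:

package main

import (
	"context"
	"fmt"

	iamcredentials "google.golang.org/api/iamcredentials/v1"
)

func main() {
	ctx := context.Background()
	// NewService picks up Application Default Credentials.
	svc, err := iamcredentials.NewService(ctx)
	if err != nil {
		panic(err)
	}
	// The `-` wildcard project is required, per the doc comment above.
	name := "projects/-/serviceAccounts/example-sa@example-project.iam.gserviceaccount.com"
	req := &iamcredentials.GenerateAccessTokenRequest{
		Scope: []string{"https://www.googleapis.com/auth/cloud-platform"},
	}
	resp, err := svc.Projects.ServiceAccounts.GenerateAccessToken(name, req).Do()
	if err != nil {
		panic(err)
	}
	fmt.Println("access token expires at:", resp.ExpireTime)
}
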
func (r *ProjectsServiceAccountsService) GenerateAccessToken(name string, generateaccesstokenrequest *GenerateAccessTokenRequest) *ProjectsServiceAccountsGenerateAccessTokenCall { c := &ProjectsServiceAccountsGenerateAccessTokenCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -550,23 +470,21 @@ func (r *ProjectsServiceAccountsService) GenerateAccessToken(name string, genera } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsGenerateAccessTokenCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Context(ctx context.Context) *ProjectsServiceAccountsGenerateAccessTokenCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -575,18 +493,12 @@ func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Header() http.Header { } func (c *ProjectsServiceAccountsGenerateAccessTokenCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.generateaccesstokenrequest) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:generateAccessToken") @@ -603,12 +515,11 @@ func (c *ProjectsServiceAccountsGenerateAccessTokenCall) doRequest(alt string) ( } // Do executes the "iamcredentials.projects.serviceAccounts.generateAccessToken" call. -// Exactly one of *GenerateAccessTokenResponse or error will be non-nil. // Any non-2xx status code is an error. Response headers are in either -// *GenerateAccessTokenResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// *GenerateAccessTokenResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Do(opts ...googleapi.CallOption) (*GenerateAccessTokenResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") @@ -639,39 +550,8 @@ func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Do(opts ...googleapi.Ca return nil, err } return ret, nil - // { - // "description": "Generates an OAuth 2.0 access token for a service account.", - // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:generateAccessToken", - // "httpMethod": "POST", - // "id": "iamcredentials.projects.serviceAccounts.generateAccessToken", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", - // "location": "path", - // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}:generateAccessToken", - // "request": { - // "$ref": "GenerateAccessTokenRequest" - // }, - // "response": { - // "$ref": "GenerateAccessTokenResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - } -// method id "iamcredentials.projects.serviceAccounts.generateIdToken": - type ProjectsServiceAccountsGenerateIdTokenCall struct { s *Service name string @@ -681,14 +561,12 @@ type ProjectsServiceAccountsGenerateIdTokenCall struct { header_ http.Header } -// GenerateIdToken: Generates an OpenID Connect ID token for a service -// account. +// GenerateIdToken: Generates an OpenID Connect ID token for a service account. // -// - name: The resource name of the service account for which the -// credentials are requested, in the following format: -// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` -// wildcard character is required; replacing it with a project ID is -// invalid. +// - name: The resource name of the service account for which the credentials +// are requested, in the following format: +// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard +// character is required; replacing it with a project ID is invalid. func (r *ProjectsServiceAccountsService) GenerateIdToken(name string, generateidtokenrequest *GenerateIdTokenRequest) *ProjectsServiceAccountsGenerateIdTokenCall { c := &ProjectsServiceAccountsGenerateIdTokenCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -697,23 +575,21 @@ func (r *ProjectsServiceAccountsService) GenerateIdToken(name string, generateid } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ProjectsServiceAccountsGenerateIdTokenCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsGenerateIdTokenCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ProjectsServiceAccountsGenerateIdTokenCall) Context(ctx context.Context) *ProjectsServiceAccountsGenerateIdTokenCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
+// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ProjectsServiceAccountsGenerateIdTokenCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -722,18 +598,12 @@ func (c *ProjectsServiceAccountsGenerateIdTokenCall) Header() http.Header { } func (c *ProjectsServiceAccountsGenerateIdTokenCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.generateidtokenrequest) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:generateIdToken") @@ -750,12 +620,11 @@ func (c *ProjectsServiceAccountsGenerateIdTokenCall) doRequest(alt string) (*htt } // Do executes the "iamcredentials.projects.serviceAccounts.generateIdToken" call. -// Exactly one of *GenerateIdTokenResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either +// Any non-2xx status code is an error. Response headers are in either // *GenerateIdTokenResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. func (c *ProjectsServiceAccountsGenerateIdTokenCall) Do(opts ...googleapi.CallOption) (*GenerateIdTokenResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -786,39 +655,8 @@ func (c *ProjectsServiceAccountsGenerateIdTokenCall) Do(opts ...googleapi.CallOp return nil, err } return ret, nil - // { - // "description": "Generates an OpenID Connect ID token for a service account.", - // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:generateIdToken", - // "httpMethod": "POST", - // "id": "iamcredentials.projects.serviceAccounts.generateIdToken", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. 
The `-` wildcard character is required; replacing it with a project ID is invalid.", - // "location": "path", - // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}:generateIdToken", - // "request": { - // "$ref": "GenerateIdTokenRequest" - // }, - // "response": { - // "$ref": "GenerateIdTokenResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - } -// method id "iamcredentials.projects.serviceAccounts.signBlob": - type ProjectsServiceAccountsSignBlobCall struct { s *Service name string @@ -828,14 +666,12 @@ type ProjectsServiceAccountsSignBlobCall struct { header_ http.Header } -// SignBlob: Signs a blob using a service account's system-managed -// private key. +// SignBlob: Signs a blob using a service account's system-managed private key. // -// - name: The resource name of the service account for which the -// credentials are requested, in the following format: -// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` -// wildcard character is required; replacing it with a project ID is -// invalid. +// - name: The resource name of the service account for which the credentials +// are requested, in the following format: +// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard +// character is required; replacing it with a project ID is invalid. func (r *ProjectsServiceAccountsService) SignBlob(name string, signblobrequest *SignBlobRequest) *ProjectsServiceAccountsSignBlobCall { c := &ProjectsServiceAccountsSignBlobCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -844,23 +680,21 @@ func (r *ProjectsServiceAccountsService) SignBlob(name string, signblobrequest * } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ProjectsServiceAccountsSignBlobCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsSignBlobCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ProjectsServiceAccountsSignBlobCall) Context(ctx context.Context) *ProjectsServiceAccountsSignBlobCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
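The SignBlob surface that follows uses the same builder pattern; a hedged usage sketch, with a placeholder service-account name and a payload that the API expects base64-encoded:

package main

import (
	"context"
	"encoding/base64"
	"fmt"
	"log"

	"google.golang.org/api/iamcredentials/v1"
)

func main() {
	ctx := context.Background()
	svc, err := iamcredentials.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	name := "projects/-/serviceAccounts/example-sa@example-project.iam.gserviceaccount.com"
	payload := base64.StdEncoding.EncodeToString([]byte("data to sign"))
	resp, err := svc.Projects.ServiceAccounts.SignBlob(name,
		&iamcredentials.SignBlobRequest{Payload: payload}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("key used:", resp.KeyId)
}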
func (c *ProjectsServiceAccountsSignBlobCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -869,18 +703,12 @@ func (c *ProjectsServiceAccountsSignBlobCall) Header() http.Header { } func (c *ProjectsServiceAccountsSignBlobCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.signblobrequest) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:signBlob") @@ -897,12 +725,11 @@ func (c *ProjectsServiceAccountsSignBlobCall) doRequest(alt string) (*http.Respo } // Do executes the "iamcredentials.projects.serviceAccounts.signBlob" call. -// Exactly one of *SignBlobResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *SignBlobResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *SignBlobResponse.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. func (c *ProjectsServiceAccountsSignBlobCall) Do(opts ...googleapi.CallOption) (*SignBlobResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -933,39 +760,8 @@ func (c *ProjectsServiceAccountsSignBlobCall) Do(opts ...googleapi.CallOption) ( return nil, err } return ret, nil - // { - // "description": "Signs a blob using a service account's system-managed private key.", - // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signBlob", - // "httpMethod": "POST", - // "id": "iamcredentials.projects.serviceAccounts.signBlob", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", - // "location": "path", - // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}:signBlob", - // "request": { - // "$ref": "SignBlobRequest" - // }, - // "response": { - // "$ref": "SignBlobResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - } -// method id "iamcredentials.projects.serviceAccounts.signJwt": - type ProjectsServiceAccountsSignJwtCall struct { s *Service name string @@ -975,14 +771,12 @@ type ProjectsServiceAccountsSignJwtCall struct { header_ http.Header } -// SignJwt: Signs a JWT using a service account's system-managed private -// key. 
+// SignJwt: Signs a JWT using a service account's system-managed private key. // -// - name: The resource name of the service account for which the -// credentials are requested, in the following format: -// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` -// wildcard character is required; replacing it with a project ID is -// invalid. +// - name: The resource name of the service account for which the credentials +// are requested, in the following format: +// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard +// character is required; replacing it with a project ID is invalid. func (r *ProjectsServiceAccountsService) SignJwt(name string, signjwtrequest *SignJwtRequest) *ProjectsServiceAccountsSignJwtCall { c := &ProjectsServiceAccountsSignJwtCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -991,23 +785,21 @@ func (r *ProjectsServiceAccountsService) SignJwt(name string, signjwtrequest *Si } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ProjectsServiceAccountsSignJwtCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsSignJwtCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ProjectsServiceAccountsSignJwtCall) Context(ctx context.Context) *ProjectsServiceAccountsSignJwtCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ProjectsServiceAccountsSignJwtCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -1016,18 +808,12 @@ func (c *ProjectsServiceAccountsSignJwtCall) Header() http.Header { } func (c *ProjectsServiceAccountsSignJwtCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.signjwtrequest) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:signJwt") @@ -1044,12 +830,11 @@ func (c *ProjectsServiceAccountsSignJwtCall) doRequest(alt string) (*http.Respon } // Do executes the "iamcredentials.projects.serviceAccounts.signJwt" call. -// Exactly one of *SignJwtResponse or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *SignJwtResponse.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. 
Response headers are in either +// *SignJwtResponse.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. func (c *ProjectsServiceAccountsSignJwtCall) Do(opts ...googleapi.CallOption) (*SignJwtResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -1080,33 +865,4 @@ func (c *ProjectsServiceAccountsSignJwtCall) Do(opts ...googleapi.CallOption) (* return nil, err } return ret, nil - // { - // "description": "Signs a JWT using a service account's system-managed private key.", - // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signJwt", - // "httpMethod": "POST", - // "id": "iamcredentials.projects.serviceAccounts.signJwt", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", - // "location": "path", - // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}:signJwt", - // "request": { - // "$ref": "SignJwtRequest" - // }, - // "response": { - // "$ref": "SignJwtResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - } diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index b6489309..5ea555ed 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -15,6 +15,8 @@ import ( "os" "time" + "cloud.google.com/go/auth/credentials" + "cloud.google.com/go/auth/oauth2adapt" "golang.org/x/oauth2" "google.golang.org/api/internal/cert" "google.golang.org/api/internal/impersonate" @@ -27,6 +29,9 @@ const quotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT" // Creds returns credential information obtained from DialSettings, or if none, then // it returns default credential information. func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { + if ds.IsNewAuthLibraryEnabled() { + return credsNewAuth(ctx, ds) + } creds, err := baseCreds(ctx, ds) if err != nil { return nil, err @@ -37,6 +42,84 @@ func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { return creds, nil } +// GetOAuth2Configuration determines configurations for the OAuth2 transport, which is separate from the API transport. +// The OAuth2 transport and endpoint will be configured for mTLS if applicable. 
+func GetOAuth2Configuration(ctx context.Context, settings *DialSettings) (string, *http.Client, error) { + clientCertSource, err := getClientCertificateSource(settings) + if err != nil { + return "", nil, err + } + tokenURL := oAuth2Endpoint(clientCertSource) + var oauth2Client *http.Client + if clientCertSource != nil { + tlsConfig := &tls.Config{ + GetClientCertificate: clientCertSource, + } + oauth2Client = customHTTPClient(tlsConfig) + } else { + oauth2Client = oauth2.NewClient(ctx, nil) + } + return tokenURL, oauth2Client, nil +} + +func credsNewAuth(ctx context.Context, settings *DialSettings) (*google.Credentials, error) { + // Preserve old options behavior + if settings.InternalCredentials != nil { + return settings.InternalCredentials, nil + } else if settings.Credentials != nil { + return settings.Credentials, nil + } else if settings.TokenSource != nil { + return &google.Credentials{TokenSource: settings.TokenSource}, nil + } + + if settings.AuthCredentials != nil { + return oauth2adapt.Oauth2CredentialsFromAuthCredentials(settings.AuthCredentials), nil + } + + var useSelfSignedJWT bool + var aud string + var scopes []string + // If scoped JWTs are enabled or the user provided an aud, allow self-signed JWT. + if settings.EnableJwtWithScope || len(settings.Audiences) > 0 { + useSelfSignedJWT = true + } + + if len(settings.Scopes) > 0 { + scopes = make([]string, len(settings.Scopes)) + copy(scopes, settings.Scopes) + } + if len(settings.Audiences) > 0 { + aud = settings.Audiences[0] + } + // Only default scopes if user did not also set an audience. + if len(settings.Scopes) == 0 && aud == "" && len(settings.DefaultScopes) > 0 { + scopes = make([]string, len(settings.DefaultScopes)) + copy(scopes, settings.DefaultScopes) + } + if len(scopes) == 0 && aud == "" { + aud = settings.DefaultAudience + } + + tokenURL, oauth2Client, err := GetOAuth2Configuration(ctx, settings) + if err != nil { + return nil, err + } + creds, err := credentials.DetectDefault(&credentials.DetectOptions{ + Scopes: scopes, + Audience: aud, + CredentialsFile: settings.CredentialsFile, + CredentialsJSON: settings.CredentialsJSON, + UseSelfSignedJWT: useSelfSignedJWT, + TokenURL: tokenURL, + Client: oauth2Client, + }) + if err != nil { + return nil, err + } + + return oauth2adapt.Oauth2CredentialsFromAuthCredentials(creds), nil +} + func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { if ds.InternalCredentials != nil { return ds.InternalCredentials, nil @@ -44,7 +127,7 @@ func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, erro if ds.Credentials != nil { return ds.Credentials, nil } - if ds.CredentialsJSON != nil { + if len(ds.CredentialsJSON) > 0 { return credentialsFromJSON(ctx, ds.CredentialsJSON, ds) } if ds.CredentialsFile != "" { @@ -89,19 +172,12 @@ func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*g var params google.CredentialsParams params.Scopes = ds.GetScopes() - // Determine configurations for the OAuth2 transport, which is separate from the API transport. - // The OAuth2 transport and endpoint will be configured for mTLS if applicable.
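The scope/audience resolution in credsNewAuth above follows a small decision table. The sketch below restates those rules as a hypothetical standalone helper, not library API:

// resolveAuthParams condenses the rules used by credsNewAuth: explicit scopes
// win, an explicit audience suppresses the default scopes, and with neither
// set the default audience is used. Self-signed JWTs are allowed when scoped
// JWTs are enabled or an audience was supplied.
func resolveAuthParams(scopes, defaultScopes, audiences []string,
	enableJwtWithScope bool, defaultAudience string) (outScopes []string, aud string, selfSigned bool) {
	selfSigned = enableJwtWithScope || len(audiences) > 0
	if len(scopes) > 0 {
		outScopes = append(outScopes, scopes...)
	}
	if len(audiences) > 0 {
		aud = audiences[0]
	}
	if len(outScopes) == 0 && aud == "" && len(defaultScopes) > 0 {
		outScopes = append(outScopes, defaultScopes...)
	}
	if len(outScopes) == 0 && aud == "" {
		aud = defaultAudience
	}
	return outScopes, aud, selfSigned
}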
- clientCertSource, err := getClientCertificateSource(ds) + tokenURL, oauth2Client, err := GetOAuth2Configuration(ctx, ds) if err != nil { return nil, err } - params.TokenURL = oAuth2Endpoint(clientCertSource) - if clientCertSource != nil { - tlsConfig := &tls.Config{ - GetClientCertificate: clientCertSource, - } - ctx = context.WithValue(ctx, oauth2.HTTPClient, customHTTPClient(tlsConfig)) - } + params.TokenURL = tokenURL + ctx = context.WithValue(ctx, oauth2.HTTPClient, oauth2Client) // By default, a standard OAuth 2.0 token source is created cred, err := google.CredentialsFromJSONWithParams(ctx, data, params) diff --git a/vendor/google.golang.org/api/internal/gensupport/params.go b/vendor/google.golang.org/api/internal/gensupport/params.go index 1a30d2ca..6a5326c0 100644 --- a/vendor/google.golang.org/api/internal/gensupport/params.go +++ b/vendor/google.golang.org/api/internal/gensupport/params.go @@ -5,9 +5,11 @@ package gensupport import ( + "net/http" "net/url" "google.golang.org/api/googleapi" + "google.golang.org/api/internal" ) // URLParams is a simplified replacement for url.Values @@ -55,3 +57,22 @@ func SetOptions(u URLParams, opts ...googleapi.CallOption) { u.Set(o.Get()) } } + +// SetHeaders sets common headers for all requests. The keyvals header pairs +// should have a corresponding value for every key provided. If there is an odd +// number of keyvals this method will panic. +func SetHeaders(userAgent, contentType string, userHeaders http.Header, keyvals ...string) http.Header { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+GoVersion()+" gdcl/"+internal.Version) + for i := 0; i < len(keyvals); i = i + 2 { + reqHeaders.Set(keyvals[i], keyvals[i+1]) + } + reqHeaders.Set("User-Agent", userAgent) + if contentType != "" { + reqHeaders.Set("Content-Type", contentType) + } + for k, v := range userHeaders { + reqHeaders[k] = v + } + return reqHeaders +} diff --git a/vendor/google.golang.org/api/internal/gensupport/resumable.go b/vendor/google.golang.org/api/internal/gensupport/resumable.go index 08e7aace..f828ddb6 100644 --- a/vendor/google.golang.org/api/internal/gensupport/resumable.go +++ b/vendor/google.golang.org/api/internal/gensupport/resumable.go @@ -171,6 +171,10 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err if resp != nil && resp.Body != nil { resp.Body.Close() } + // If there were retries, indicate this in the error message and wrap the final error. 
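One behavioral nuance of the new gensupport.SetHeaders helper shown above: caller-supplied headers are copied last, so they now take precedence over the library defaults (the old inline code set User-Agent after copying the caller's headers, silently overriding it). A small sketch of that ordering with plain net/http; the version strings are illustrative:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Caller-supplied headers, e.g. from call.Header().
	user := http.Header{}
	user.Set("User-Agent", "my-app/1.0")

	// Mirrors the SetHeaders ordering: library defaults first, user last.
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/1.23.2 gdcl/0.188.0") // illustrative values
	reqHeaders.Set("User-Agent", "google-api-go-client/0.188.0")     // illustrative default
	reqHeaders.Set("Content-Type", "application/json")
	for k, v := range user {
		reqHeaders[k] = v // copied last, so the caller's value wins
	}
	fmt.Println(reqHeaders.Get("User-Agent")) // my-app/1.0
}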
+ if rx.attempts > 1 { + return nil, fmt.Errorf("chunk upload failed after %d attempts; final error: %w", rx.attempts, err) + } return nil, err } // This case is very unlikely but possible only if rx.ChunkRetryDeadline is diff --git a/vendor/google.golang.org/api/internal/gensupport/retry.go b/vendor/google.golang.org/api/internal/gensupport/retry.go index 20b57d92..089ee318 100644 --- a/vendor/google.golang.org/api/internal/gensupport/retry.go +++ b/vendor/google.golang.org/api/internal/gensupport/retry.go @@ -8,6 +8,7 @@ import ( "errors" "io" "net" + "net/url" "strings" "time" @@ -29,8 +30,6 @@ var ( backoff = func() Backoff { return &gax.Backoff{Initial: 100 * time.Millisecond} } - // syscallRetryable is a platform-specific hook, specified in retryable_linux.go - syscallRetryable func(error) bool = func(err error) bool { return false } ) const ( @@ -56,30 +55,33 @@ func shouldRetry(status int, err error) bool { if status == statusTooManyRequests || status == statusRequestTimeout { return true } - if err == io.ErrUnexpectedEOF { + if errors.Is(err, io.ErrUnexpectedEOF) { return true } - // Transient network errors should be retried. - if syscallRetryable(err) { + if errors.Is(err, net.ErrClosed) { return true } - if err, ok := err.(interface{ Temporary() bool }); ok { - if err.Temporary() { - return true + switch e := err.(type) { + case *net.OpError, *url.Error: + // Retry socket-level errors ECONNREFUSED and ECONNRESET (from syscall). + // Unfortunately the error type is unexported, so we resort to string + // matching. + retriable := []string{"connection refused", "connection reset", "broken pipe"} + for _, s := range retriable { + if strings.Contains(e.Error(), s) { + return true + } } - } - var opErr *net.OpError - if errors.As(err, &opErr) { - if strings.Contains(opErr.Error(), "use of closed network connection") { - // TODO: check against net.ErrClosed (go 1.16+) instead of string + case interface{ Temporary() bool }: + if e.Temporary() { return true } } - // If Go 1.13 error unwrapping is available, use this to examine wrapped + // If error unwrapping is available, use this to examine wrapped // errors. - if err, ok := err.(interface{ Unwrap() error }); ok { - return shouldRetry(status, err.Unwrap()) + if e, ok := err.(interface{ Unwrap() error }); ok { + return shouldRetry(status, e.Unwrap()) } return false } diff --git a/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go b/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go deleted file mode 100644 index a916c3da..00000000 --- a/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package gensupport - -import "syscall" - -func init() { - // Initialize syscallRetryable to return true on transient socket-level - // errors. These errors are specific to Linux.
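The rewritten shouldRetry above drops the Linux-only syscallRetryable hook in favor of errors.Is checks plus string matching on *net.OpError and *url.Error. The standalone sketch below mirrors, rather than imports, that internal classification:

package main

import (
	"errors"
	"fmt"
	"io"
	"net"
	"net/url"
	"strings"
	"syscall"
)

// retryable mirrors the diff's classification: unexpected EOF, closed
// connections, and refused/reset/broken-pipe socket errors are retried.
func retryable(err error) bool {
	if errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, net.ErrClosed) {
		return true
	}
	switch e := err.(type) {
	case *net.OpError, *url.Error:
		for _, s := range []string{"connection refused", "connection reset", "broken pipe"} {
			if strings.Contains(e.Error(), s) {
				return true
			}
		}
	}
	return false
}

func main() {
	refused := &url.Error{Op: "Post", URL: "https://storage.googleapis.com", Err: syscall.ECONNREFUSED}
	fmt.Println(retryable(refused))             // true
	fmt.Println(retryable(io.ErrUnexpectedEOF)) // true
	fmt.Println(retryable(errors.New("boom")))  // false
}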
- syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } -} diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go index f39dd00d..f6716134 100644 --- a/vendor/google.golang.org/api/internal/gensupport/send.go +++ b/vendor/google.golang.org/api/internal/gensupport/send.go @@ -48,8 +48,24 @@ func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (* if ctx != nil { headers := callctx.HeadersFromContext(ctx) for k, vals := range headers { - for _, v := range vals { - req.Header.Add(k, v) + if k == "x-goog-api-client" { + // Merge all values into a single "x-goog-api-client" header. + var mergedVal strings.Builder + baseXGoogHeader := req.Header.Get("X-Goog-Api-Client") + if baseXGoogHeader != "" { + mergedVal.WriteString(baseXGoogHeader) + mergedVal.WriteRune(' ') + } + for _, v := range vals { + mergedVal.WriteString(v) + mergedVal.WriteRune(' ') + } + // Remove the last space and replace the header on the request. + req.Header.Set(k, mergedVal.String()[:mergedVal.Len()-1]) + } else { + for _, v := range vals { + req.Header.Add(k, v) + } } } } @@ -118,7 +134,9 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r var err error attempts := 1 invocationID := uuid.New().String() - baseXGoogHeader := req.Header.Get("X-Goog-Api-Client") + + xGoogHeaderVals := req.Header.Values("X-Goog-Api-Client") + baseXGoogHeader := strings.Join(xGoogHeaderVals, " ") // Loop to retry the request, up to the context deadline. var pause time.Duration diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index e17141a6..edba49af 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -13,6 +13,7 @@ import ( "strconv" "time" + "cloud.google.com/go/auth" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "google.golang.org/api/internal/impersonate" @@ -20,8 +21,10 @@ import ( ) const ( - newAuthLibEnVar = "GOOGLE_API_GO_EXPERIMENTAL_USE_NEW_AUTH_LIB" - universeDomainDefault = "googleapis.com" + newAuthLibEnvVar = "GOOGLE_API_GO_EXPERIMENTAL_ENABLE_NEW_AUTH_LIB" + newAuthLibDisabledEnVar = "GOOGLE_API_GO_EXPERIMENTAL_DISABLE_NEW_AUTH_LIB" + universeDomainEnvVar = "GOOGLE_CLOUD_UNIVERSE_DOMAIN" + defaultUniverseDomain = "googleapis.com" ) // DialSettings holds information needed to establish a connection with a @@ -56,15 +59,17 @@ type DialSettings struct { ImpersonationConfig *impersonate.Config EnableDirectPath bool EnableDirectPathXds bool - EnableNewAuthLibrary bool AllowNonDefaultServiceAccount bool - UniverseDomain string DefaultUniverseDomain string - + UniverseDomain string // Google API system parameters. For more information please read: // https://cloud.google.com/apis/docs/system-parameters QuotaProject string RequestReason string + + // New Auth library Options + AuthCredentials *auth.Credentials + EnableNewAuthLibrary bool } // GetScopes returns the user-provided scopes, if set, or else falls back to the @@ -89,11 +94,17 @@ func (ds *DialSettings) HasCustomAudience() bool { return len(ds.Audiences) > 0 } +// IsNewAuthLibraryEnabled returns true if the new auth library should be used. func (ds *DialSettings) IsNewAuthLibraryEnabled() bool { + // Disabled env is for future rollouts to make sure there is a way to easily + // disable this behaviour once we switch it on by default.
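The x-goog-api-client merging in SendRequest above applies to headers injected through the request context. Those usually arrive via gax's callctx package; a brief usage sketch, with an illustrative metrics token:

package main

import (
	"context"
	"fmt"

	"github.com/googleapis/gax-go/v2/callctx"
)

func main() {
	ctx := context.Background()
	// Attach a per-call header via the context. SendRequest folds any
	// "x-goog-api-client" values into the one existing header
	// (space-separated) instead of adding a duplicate header line.
	ctx = callctx.SetHeaders(ctx, "x-goog-api-client", "gccl/1.0.0")
	for k, v := range callctx.HeadersFromContext(ctx) {
		fmt.Println(k, v)
	}
}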
+ if b, err := strconv.ParseBool(os.Getenv(newAuthLibDisabledEnVar)); err == nil && b { + return false + } if ds.EnableNewAuthLibrary { return true } - if b, err := strconv.ParseBool(os.Getenv(newAuthLibEnVar)); err == nil { + if b, err := strconv.ParseBool(os.Getenv(newAuthLibEnvVar)); err == nil { return b } return false @@ -115,7 +126,7 @@ func (ds *DialSettings) Validate() error { if ds.Credentials != nil { nCreds++ } - if ds.CredentialsJSON != nil { + if len(ds.CredentialsJSON) > 0 { nCreds++ } if ds.CredentialsFile != "" { @@ -164,35 +175,38 @@ func (ds *DialSettings) Validate() error { return nil } -// GetDefaultUniverseDomain returns the default service domain for a given Cloud -// universe, as configured with internaloption.WithDefaultUniverseDomain. -// The default value is "googleapis.com". +// GetDefaultUniverseDomain returns the Google default universe domain +// ("googleapis.com"). func (ds *DialSettings) GetDefaultUniverseDomain() string { - if ds.DefaultUniverseDomain == "" { - return universeDomainDefault - } - return ds.DefaultUniverseDomain + return defaultUniverseDomain } // GetUniverseDomain returns the default service domain for a given Cloud -// universe, as configured with option.WithUniverseDomain. -// The default value is the value of GetDefaultUniverseDomain, as configured -// with internaloption.WithDefaultUniverseDomain. +// universe, with the following precedence: +// +// 1. A non-empty option.WithUniverseDomain. +// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN. +// 3. The default value "googleapis.com". func (ds *DialSettings) GetUniverseDomain() string { - if ds.UniverseDomain == "" { - return ds.GetDefaultUniverseDomain() + if ds.UniverseDomain != "" { + return ds.UniverseDomain } - return ds.UniverseDomain + if envUD := os.Getenv(universeDomainEnvVar); envUD != "" { + return envUD + } + return defaultUniverseDomain } +// IsUniverseDomainGDU returns true if the universe domain is the default Google +// universe ("googleapis.com"). func (ds *DialSettings) IsUniverseDomainGDU() bool { - return ds.GetUniverseDomain() == ds.GetDefaultUniverseDomain() + return ds.GetUniverseDomain() == defaultUniverseDomain } // GetUniverseDomain returns the default service domain for a given Cloud // universe, from google.Credentials, for comparison with the value returned by // (*DialSettings).GetUniverseDomain. This wrapper function should be removed -// to close [TODO(chrisdsmith): issue link here]. See details below. +// to close https://github.com/googleapis/google-api-go-client/issues/2399. func GetUniverseDomain(creds *google.Credentials) (string, error) { timer := time.NewTimer(time.Second) defer timer.Stop() @@ -209,9 +223,10 @@ func GetUniverseDomain(creds *google.Credentials) (string, error) { }() select { - case err := <-errors: - // An error that is returned before the timer expires is legitimate. - return "", err + case <-errors: + // An error that is returned before the timer expires is likely to be + // connection refused. Temporarily (2024-03-21) return the GDU domain. + return defaultUniverseDomain, nil case res := <-results: return res, nil case <-timer.C: // Timer is expired. @@ -223,6 +238,6 @@ func GetUniverseDomain(creds *google.Credentials) (string, error) { // calls to creds.GetUniverseDomain() in grpc/dial.go and http/dial.go // and remove this method to close // https://github.com/googleapis/google-api-go-client/issues/2399. 
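GetUniverseDomain above resolves the serving domain with a fixed precedence. A toy restatement of that lookup, as a hypothetical helper built on the env var introduced in this diff:

package main

import (
	"fmt"
	"os"
)

func universeDomain(optionValue string) string {
	if optionValue != "" {
		return optionValue // 1. explicit option.WithUniverseDomain
	}
	if env := os.Getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN"); env != "" {
		return env // 2. environment override
	}
	return "googleapis.com" // 3. Google default universe
}

func main() {
	os.Setenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN", "example.goog")
	fmt.Println(universeDomain(""))             // example.goog
	fmt.Println(universeDomain("corp.example")) // corp.example
}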
- return universeDomainDefault, nil + return defaultUniverseDomain, nil } } diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 5e418fd8..e2edf688 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.166.0" +const Version = "0.188.0" diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go index c882c1eb..23aba01b 100644 --- a/vendor/google.golang.org/api/option/option.go +++ b/vendor/google.golang.org/api/option/option.go @@ -9,6 +9,7 @@ import ( "crypto/tls" "net/http" + "cloud.google.com/go/auth" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "google.golang.org/api/internal" @@ -344,6 +345,19 @@ func WithCredentials(creds *google.Credentials) ClientOption { return (*withCreds)(creds) } +// WithAuthCredentials returns a ClientOption that specifies an +// [cloud.google.com/go/auth.Credentials] to be used as the basis for +// authentication. +func WithAuthCredentials(creds *auth.Credentials) ClientOption { + return withAuthCredentials{creds} +} + +type withAuthCredentials struct{ creds *auth.Credentials } + +func (w withAuthCredentials) Apply(o *internal.DialSettings) { + o.AuthCredentials = w.creds +} + // WithUniverseDomain returns a ClientOption that sets the universe domain. // // This is an EXPERIMENTAL API and may be changed or removed in the future. diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index cb34638d..9c06c5ed 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -27,13 +27,23 @@ "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", "endpoints": [ + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.europe-west3.rep.googleapis.com/", + "location": "europe-west3" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.europe-west9.rep.googleapis.com/", + "location": "europe-west9" + }, { "description": "Regional Endpoint", "endpointUrl": "https://storage.me-central2.rep.googleapis.com/", "location": "me-central2" } ], - "etag": "\"3135323132313733303039343736303631393739\"", + "etag": "\"39393931363036383932333134343736343437\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -681,6 +691,38 @@ "https://www.googleapis.com/auth/devstorage.full_control" ] }, + "getStorageLayout": { + "description": "Returns the storage layout configuration for the specified bucket. Note that this operation requires storage.objects.list permission.", + "httpMethod": "GET", + "id": "storage.buckets.getStorageLayout", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "prefix": { + "description": "An optional prefix used for permission check. 
It is useful when the caller only has storage.objects.list permission under a specific prefix.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/storageLayout", + "response": { + "$ref": "BucketStorageLayout" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, "insert": { "description": "Creates a new bucket.", "httpMethod": "POST", @@ -1632,6 +1674,11 @@ "managedFolder" ], "parameters": { + "allowNonEmpty": { + "description": "Allows the deletion of a managed folder even if it is not empty. A managed folder is empty if there are no objects or managed folders that it applies to. Callers must have storage.managedFolders.setIamPolicy permission.", + "location": "query", + "type": "boolean" + }, "bucket": { "description": "Name of the bucket containing the managed folder.", "location": "path", @@ -3141,7 +3188,8 @@ "id": "storage.objects.restore", "parameterOrder": [ "bucket", - "object" + "object", + "generation" ], "parameters": { "bucket": { @@ -3212,9 +3260,6 @@ } }, "path": "b/{bucket}/o/{object}/restore", - "request": { - "$ref": "Object" - }, "response": { "$ref": "Object" }, @@ -4040,7 +4085,7 @@ } } }, - "revision": "20240209", + "revision": "20240625", "rootUrl": "https://storage.googleapis.com/", "schemas": { "AnywhereCache": { @@ -4659,6 +4704,53 @@ }, "type": "object" }, + "BucketStorageLayout": { + "description": "The storage layout configuration of a bucket.", + "id": "BucketStorageLayout", + "properties": { + "bucket": { + "description": "The name of the bucket.", + "type": "string" + }, + "customPlacementConfig": { + "description": "The bucket's custom placement configuration for Custom Dual Regions.", + "properties": { + "dataLocations": { + "description": "The list of regional locations in which data is placed.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "hierarchicalNamespace": { + "description": "The bucket's hierarchical namespace configuration.", + "properties": { + "enabled": { + "description": "When set to true, hierarchical namespace is enabled for this bucket.", + "type": "boolean" + } + }, + "type": "object" + }, + "kind": { + "default": "storage#storageLayout", + "description": "The kind of item this is. For storage layout, this is always storage#storageLayout.", + "type": "string" + }, + "location": { + "description": "The location of the bucket.", + "type": "string" + }, + "locationType": { + "description": "The type of the bucket location.", + "type": "string" + } + }, + "type": "object" + }, "Buckets": { "description": "A list of buckets.", "id": "Buckets", @@ -4925,6 +5017,11 @@ "description": "The response message for storage.buckets.operations.list.", "id": "GoogleLongrunningListOperationsResponse", "properties": { + "kind": { + "default": "storage#operations", + "description": "The kind of item this is. For lists of operations, this is always storage#operations.", + "type": "string" + }, "nextPageToken": { "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", "type": "string" @@ -4951,6 +5048,11 @@ "$ref": "GoogleRpcStatus", "description": "The error result of the operation in case of failure or cancellation." 
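The new storage.buckets.getStorageLayout method added to the discovery document above surfaces in the regenerated Go client; a hedged sketch of how it would be called, with placeholder bucket name and prefix:

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Requires storage.objects.list; Prefix narrows the permission check
	// to a subtree of the bucket.
	layout, err := svc.Buckets.GetStorageLayout("example-bucket").
		Prefix("logs/").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(layout.Location, layout.LocationType)
}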
}, + "kind": { + "default": "storage#operation", + "description": "The kind of item this is. For operations, this is always storage#operation.", + "type": "string" + }, "metadata": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -4970,6 +5072,10 @@ }, "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as \"Delete\", the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type \"XxxResponse\", where \"Xxx\" is the original method name. For example, if the original method name is \"TakeSnapshot()\", the inferred response type is \"TakeSnapshotResponse\".", "type": "object" + }, + "selfLink": { + "description": "The link to this long running operation.", + "type": "string" } }, "type": "object" diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 6f01795f..cfa73e2e 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -100,7 +100,6 @@ const apiVersion = "v1" const basePath = "https://storage.googleapis.com/storage/v1/" const basePathTemplate = "https://storage.UNIVERSE_DOMAIN/storage/v1/" const mtlsBasePath = "https://storage.mtls.googleapis.com/storage/v1/" -const defaultUniverseDomain = "googleapis.com" // OAuth2 scopes used by this API. const ( @@ -134,7 +133,6 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) opts = append(opts, internaloption.WithDefaultEndpointTemplate(basePathTemplate)) opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) - opts = append(opts, internaloption.WithDefaultUniverseDomain(defaultUniverseDomain)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -347,462 +345,344 @@ type ProjectsServiceAccountService struct { type AnywhereCache struct { // AdmissionPolicy: The cache-level entry admission policy. AdmissionPolicy string `json:"admissionPolicy,omitempty"` - // AnywhereCacheId: The ID of the Anywhere cache instance. AnywhereCacheId string `json:"anywhereCacheId,omitempty"` - // Bucket: The name of the bucket containing this cache instance. Bucket string `json:"bucket,omitempty"` - - // CreateTime: The creation time of the cache instance in RFC 3339 - // format. + // CreateTime: The creation time of the cache instance in RFC 3339 format. CreateTime string `json:"createTime,omitempty"` - - // Id: The ID of the resource, including the project number, bucket name - // and anywhere cache ID. + // Id: The ID of the resource, including the project number, bucket name and + // anywhere cache ID. Id string `json:"id,omitempty"` - // Kind: The kind of item this is. For Anywhere Cache, this is always // storage#anywhereCache. Kind string `json:"kind,omitempty"` - - // PendingUpdate: True if the cache instance has an active Update - // long-running operation. + // PendingUpdate: True if the cache instance has an active Update long-running + // operation. PendingUpdate bool `json:"pendingUpdate,omitempty"` - // SelfLink: The link to this cache instance. SelfLink string `json:"selfLink,omitempty"` - // State: The current state of the cache instance. 
State string `json:"state,omitempty"` - // Ttl: The TTL of all cache entries in whole seconds. e.g., "7200s". Ttl string `json:"ttl,omitempty"` - - // UpdateTime: The modification time of the cache instance metadata in - // RFC 3339 format. + // UpdateTime: The modification time of the cache instance metadata in RFC 3339 + // format. UpdateTime string `json:"updateTime,omitempty"` - // Zone: The zone in which the cache instance is running. For example, // us-central1-a. Zone string `json:"zone,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "AdmissionPolicy") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AdmissionPolicy") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "AdmissionPolicy") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *AnywhereCache) MarshalJSON() ([]byte, error) { +func (s AnywhereCache) MarshalJSON() ([]byte, error) { type NoMethod AnywhereCache - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // AnywhereCaches: A list of Anywhere Caches. type AnywhereCaches struct { // Items: The list of items. Items []*AnywhereCache `json:"items,omitempty"` - - // Kind: The kind of item this is. For lists of Anywhere Caches, this is - // always storage#anywhereCaches. + // Kind: The kind of item this is. For lists of Anywhere Caches, this is always + // storage#anywhereCaches. Kind string `json:"kind,omitempty"` - - // NextPageToken: The continuation token, used to page through large - // result sets. Provide this value in a subsequent request to return the - // next page of results. + // NextPageToken: The continuation token, used to page through large result + // sets. Provide this value in a subsequent request to return the next page of + // results. NextPageToken string `json:"nextPageToken,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. 
"Items") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Items") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Items") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *AnywhereCaches) MarshalJSON() ([]byte, error) { +func (s AnywhereCaches) MarshalJSON() ([]byte, error) { type NoMethod AnywhereCaches - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Bucket: A bucket. type Bucket struct { // Acl: Access controls on the bucket. Acl []*BucketAccessControl `json:"acl,omitempty"` - // Autoclass: The bucket's Autoclass configuration. Autoclass *BucketAutoclass `json:"autoclass,omitempty"` - // Billing: The bucket's billing configuration. Billing *BucketBilling `json:"billing,omitempty"` - - // Cors: The bucket's Cross-Origin Resource Sharing (CORS) - // configuration. + // Cors: The bucket's Cross-Origin Resource Sharing (CORS) configuration. Cors []*BucketCors `json:"cors,omitempty"` - - // CustomPlacementConfig: The bucket's custom placement configuration - // for Custom Dual Regions. + // CustomPlacementConfig: The bucket's custom placement configuration for + // Custom Dual Regions. CustomPlacementConfig *BucketCustomPlacementConfig `json:"customPlacementConfig,omitempty"` - - // DefaultEventBasedHold: The default value for event-based hold on - // newly created objects in this bucket. Event-based hold is a way to - // retain objects indefinitely until an event occurs, signified by the - // hold's release. After being released, such objects will be subject to - // bucket-level retention (if any). One sample use case of this flag is - // for banks to hold loan documents for at least 3 years after loan is - // paid in full. Here, bucket-level retention is 3 years and the event - // is loan being paid in full. In this example, these objects will be - // held intact for any number of years until the event has occurred - // (event-based hold on the object is released) and then 3 more years - // after that. That means retention duration of the objects begins from - // the moment event-based hold transitioned from true to false. Objects - // under event-based hold cannot be deleted, overwritten or archived - // until the hold is removed. 
+ // DefaultEventBasedHold: The default value for event-based hold on newly + // created objects in this bucket. Event-based hold is a way to retain objects + // indefinitely until an event occurs, signified by the hold's release. After + // being released, such objects will be subject to bucket-level retention (if + // any). One sample use case of this flag is for banks to hold loan documents + // for at least 3 years after loan is paid in full. Here, bucket-level + // retention is 3 years and the event is loan being paid in full. In this + // example, these objects will be held intact for any number of years until the + // event has occurred (event-based hold on the object is released) and then 3 + // more years after that. That means retention duration of the objects begins + // from the moment event-based hold transitioned from true to false. Objects + // under event-based hold cannot be deleted, overwritten or archived until the + // hold is removed. DefaultEventBasedHold bool `json:"defaultEventBasedHold,omitempty"` - - // DefaultObjectAcl: Default access controls to apply to new objects - // when no ACL is provided. + // DefaultObjectAcl: Default access controls to apply to new objects when no + // ACL is provided. DefaultObjectAcl []*ObjectAccessControl `json:"defaultObjectAcl,omitempty"` - // Encryption: Encryption configuration for a bucket. Encryption *BucketEncryption `json:"encryption,omitempty"` - // Etag: HTTP 1.1 Entity tag for the bucket. Etag string `json:"etag,omitempty"` - - // HierarchicalNamespace: The bucket's hierarchical namespace - // configuration. + // HierarchicalNamespace: The bucket's hierarchical namespace configuration. HierarchicalNamespace *BucketHierarchicalNamespace `json:"hierarchicalNamespace,omitempty"` - // IamConfiguration: The bucket's IAM configuration. IamConfiguration *BucketIamConfiguration `json:"iamConfiguration,omitempty"` - - // Id: The ID of the bucket. For buckets, the id and name properties are - // the same. + // Id: The ID of the bucket. For buckets, the id and name properties are the + // same. Id string `json:"id,omitempty"` - - // Kind: The kind of item this is. For buckets, this is always - // storage#bucket. + // Kind: The kind of item this is. For buckets, this is always storage#bucket. Kind string `json:"kind,omitempty"` - // Labels: User-provided labels, in key/value pairs. Labels map[string]string `json:"labels,omitempty"` - - // Lifecycle: The bucket's lifecycle configuration. See lifecycle - // management for more information. + // Lifecycle: The bucket's lifecycle configuration. See lifecycle management + // for more information. Lifecycle *BucketLifecycle `json:"lifecycle,omitempty"` - - // Location: The location of the bucket. Object data for objects in the - // bucket resides in physical storage within this region. Defaults to - // US. See the developer's guide for the authoritative list. + // Location: The location of the bucket. Object data for objects in the bucket + // resides in physical storage within this region. Defaults to US. See the + // developer's guide for the authoritative list. Location string `json:"location,omitempty"` - // LocationType: The type of the bucket location. LocationType string `json:"locationType,omitempty"` - - // Logging: The bucket's logging configuration, which defines the - // destination bucket and optional name prefix for the current bucket's - // logs. 
+ // Logging: The bucket's logging configuration, which defines the destination + // bucket and optional name prefix for the current bucket's logs. Logging *BucketLogging `json:"logging,omitempty"` - // Metageneration: The metadata generation of this bucket. Metageneration int64 `json:"metageneration,omitempty,string"` - // Name: The name of the bucket. Name string `json:"name,omitempty"` - // ObjectRetention: The bucket's object retention config. ObjectRetention *BucketObjectRetention `json:"objectRetention,omitempty"` - - // Owner: The owner of the bucket. This is always the project team's - // owner group. + // Owner: The owner of the bucket. This is always the project team's owner + // group. Owner *BucketOwner `json:"owner,omitempty"` - - // ProjectNumber: The project number of the project the bucket belongs - // to. + // ProjectNumber: The project number of the project the bucket belongs to. ProjectNumber uint64 `json:"projectNumber,omitempty,string"` - // RetentionPolicy: The bucket's retention policy. The retention policy - // enforces a minimum retention time for all objects contained in the - // bucket, based on their creation time. Any attempt to overwrite or - // delete objects younger than the retention period will result in a - // PERMISSION_DENIED error. An unlocked retention policy can be modified - // or removed from the bucket via a storage.buckets.update operation. A - // locked retention policy cannot be removed or shortened in duration - // for the lifetime of the bucket. Attempting to remove or decrease - // period of a locked retention policy will result in a + // enforces a minimum retention time for all objects contained in the bucket, + // based on their creation time. Any attempt to overwrite or delete objects + // younger than the retention period will result in a PERMISSION_DENIED error. + // An unlocked retention policy can be modified or removed from the bucket via + // a storage.buckets.update operation. A locked retention policy cannot be + // removed or shortened in duration for the lifetime of the bucket. Attempting + // to remove or decrease period of a locked retention policy will result in a // PERMISSION_DENIED error. RetentionPolicy *BucketRetentionPolicy `json:"retentionPolicy,omitempty"` - - // Rpo: The Recovery Point Objective (RPO) of this bucket. Set to - // ASYNC_TURBO to turn on Turbo Replication on a bucket. + // Rpo: The Recovery Point Objective (RPO) of this bucket. Set to ASYNC_TURBO + // to turn on Turbo Replication on a bucket. Rpo string `json:"rpo,omitempty"` - // SatisfiesPZS: Reserved for future use. SatisfiesPZS bool `json:"satisfiesPZS,omitempty"` - // SelfLink: The URI of this bucket. SelfLink string `json:"selfLink,omitempty"` - - // SoftDeletePolicy: The bucket's soft delete policy, which defines the - // period of time that soft-deleted objects will be retained, and cannot - // be permanently deleted. + // SoftDeletePolicy: The bucket's soft delete policy, which defines the period + // of time that soft-deleted objects will be retained, and cannot be + // permanently deleted. SoftDeletePolicy *BucketSoftDeletePolicy `json:"softDeletePolicy,omitempty"` - // StorageClass: The bucket's default storage class, used whenever no - // storageClass is specified for a newly-created object. This defines - // how objects in the bucket are stored and determines the SLA and the - // cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, - // NEARLINE, COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. 
If - // this value is not specified when the bucket is created, it will - // default to STANDARD. For more information, see storage classes. + // storageClass is specified for a newly-created object. This defines how + // objects in the bucket are stored and determines the SLA and the cost of + // storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, + // COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If this value is not + // specified when the bucket is created, it will default to STANDARD. For more + // information, see storage classes. StorageClass string `json:"storageClass,omitempty"` - // TimeCreated: The creation time of the bucket in RFC 3339 format. TimeCreated string `json:"timeCreated,omitempty"` - // Updated: The modification time of the bucket in RFC 3339 format. Updated string `json:"updated,omitempty"` - // Versioning: The bucket's versioning configuration. Versioning *BucketVersioning `json:"versioning,omitempty"` - - // Website: The bucket's website configuration, controlling how the - // service behaves when accessing bucket contents as a web site. See the - // Static Website Examples for more information. + // Website: The bucket's website configuration, controlling how the service + // behaves when accessing bucket contents as a web site. See the Static Website + // Examples for more information. Website *BucketWebsite `json:"website,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Acl") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Acl") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Acl") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Acl") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *Bucket) MarshalJSON() ([]byte, error) { +func (s Bucket) MarshalJSON() ([]byte, error) { type NoMethod Bucket - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketAutoclass: The bucket's Autoclass configuration. 
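// Note the receiver change in MarshalJSON above: the generator now uses a
// value receiver and drops the temporary. The NoMethod alias is what makes
// this safe; it copies the fields but not the methods, so marshaling it
// cannot recurse back into Bucket.MarshalJSON. A sketch of the pattern with
// a hypothetical type, using encoding/json in place of gensupport:
//
//	type Settings struct {
//		Mode string `json:"mode,omitempty"`
//	}
//
//	func (s Settings) MarshalJSON() ([]byte, error) {
//		type NoMethod Settings           // same fields, no MarshalJSON method
//		return json.Marshal(NoMethod(s)) // ordinary encoding, no recursion
//	}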
type BucketAutoclass struct { // Enabled: Whether or not Autoclass is enabled on this bucket Enabled bool `json:"enabled,omitempty"` - // TerminalStorageClass: The storage class that objects in the bucket - // eventually transition to if they are not read for a certain length of - // time. Valid values are NEARLINE and ARCHIVE. + // eventually transition to if they are not read for a certain length of time. + // Valid values are NEARLINE and ARCHIVE. TerminalStorageClass string `json:"terminalStorageClass,omitempty"` - // TerminalStorageClassUpdateTime: A date and time in RFC 3339 format - // representing the time of the most recent update to - // "terminalStorageClass". + // representing the time of the most recent update to "terminalStorageClass". TerminalStorageClassUpdateTime string `json:"terminalStorageClassUpdateTime,omitempty"` - - // ToggleTime: A date and time in RFC 3339 format representing the - // instant at which "enabled" was last toggled. + // ToggleTime: A date and time in RFC 3339 format representing the instant at + // which "enabled" was last toggled. ToggleTime string `json:"toggleTime,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Enabled") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Enabled") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Enabled") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Enabled") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *BucketAutoclass) MarshalJSON() ([]byte, error) { +func (s BucketAutoclass) MarshalJSON() ([]byte, error) { type NoMethod BucketAutoclass - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketBilling: The bucket's billing configuration. type BucketBilling struct { - // RequesterPays: When set to true, Requester Pays is enabled for this - // bucket. + // RequesterPays: When set to true, Requester Pays is enabled for this bucket. RequesterPays bool `json:"requesterPays,omitempty"` - // ForceSendFields is a list of field names (e.g. "RequesterPays") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. 
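// An illustrative Autoclass literal matching the fields above (values are
// examples only; storage refers to this vendored package):
//
//	ac := &storage.BucketAutoclass{
//		Enabled:              true,
//		TerminalStorageClass: "ARCHIVE", // NEARLINE and ARCHIVE are the valid values
//	}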
However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "RequesterPays") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "RequesterPays") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *BucketBilling) MarshalJSON() ([]byte, error) { +func (s BucketBilling) MarshalJSON() ([]byte, error) { type NoMethod BucketBilling - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type BucketCors struct { // MaxAgeSeconds: The value, in seconds, to return in the // Access-Control-Max-Age header used in preflight responses. MaxAgeSeconds int64 `json:"maxAgeSeconds,omitempty"` - - // Method: The list of HTTP methods on which to include CORS response - // headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list - // of methods, and means "any method". + // Method: The list of HTTP methods on which to include CORS response headers, + // (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list of methods, and + // means "any method". Method []string `json:"method,omitempty"` - - // Origin: The list of Origins eligible to receive CORS response - // headers. Note: "*" is permitted in the list of origins, and means - // "any Origin". + // Origin: The list of Origins eligible to receive CORS response headers. Note: + // "*" is permitted in the list of origins, and means "any Origin". Origin []string `json:"origin,omitempty"` - - // ResponseHeader: The list of HTTP headers other than the simple - // response headers to give permission for the user-agent to share - // across domains. + // ResponseHeader: The list of HTTP headers other than the simple response + // headers to give permission for the user-agent to share across domains. ResponseHeader []string `json:"responseHeader,omitempty"` - // ForceSendFields is a list of field names (e.g. "MaxAgeSeconds") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. 
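// A sketch of one CORS entry built from the fields documented above
// (origins, methods, response headers, preflight max age; the values are
// placeholders, not recommendations):
//
//	cors := &storage.BucketCors{
//		MaxAgeSeconds:  3600,
//		Method:         []string{"GET", "HEAD"}, // "*" would permit any method
//		Origin:         []string{"https://example.com"},
//		ResponseHeader: []string{"Content-Type"},
//	}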
ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "MaxAgeSeconds") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "MaxAgeSeconds") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *BucketCors) MarshalJSON() ([]byte, error) { +func (s BucketCors) MarshalJSON() ([]byte, error) { type NoMethod BucketCors - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// BucketCustomPlacementConfig: The bucket's custom placement -// configuration for Custom Dual Regions. +// BucketCustomPlacementConfig: The bucket's custom placement configuration for +// Custom Dual Regions. type BucketCustomPlacementConfig struct { - // DataLocations: The list of regional locations in which data is - // placed. + // DataLocations: The list of regional locations in which data is placed. DataLocations []string `json:"dataLocations,omitempty"` - // ForceSendFields is a list of field names (e.g. "DataLocations") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "DataLocations") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "DataLocations") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *BucketCustomPlacementConfig) MarshalJSON() ([]byte, error) { +func (s BucketCustomPlacementConfig) MarshalJSON() ([]byte, error) { type NoMethod BucketCustomPlacementConfig - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketEncryption: Encryption configuration for a bucket. 
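// For custom dual regions, DataLocations pairs two regional locations.
// Illustrative only; the region names here are assumptions:
//
//	placement := &storage.BucketCustomPlacementConfig{
//		DataLocations: []string{"US-EAST1", "US-WEST1"},
//	}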
type BucketEncryption struct { - // DefaultKmsKeyName: A Cloud KMS key that will be used to encrypt - // objects inserted into this bucket, if no encryption method is - // specified. + // DefaultKmsKeyName: A Cloud KMS key that will be used to encrypt objects + // inserted into this bucket, if no encryption method is specified. DefaultKmsKeyName string `json:"defaultKmsKeyName,omitempty"` - - // ForceSendFields is a list of field names (e.g. "DefaultKmsKeyName") - // to unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "DefaultKmsKeyName") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "DefaultKmsKeyName") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "DefaultKmsKeyName") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *BucketEncryption) MarshalJSON() ([]byte, error) { +func (s BucketEncryption) MarshalJSON() ([]byte, error) { type NoMethod BucketEncryption - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketHierarchicalNamespace: The bucket's hierarchical namespace @@ -811,600 +691,469 @@ type BucketHierarchicalNamespace struct { // Enabled: When set to true, hierarchical namespace is enabled for this // bucket. Enabled bool `json:"enabled,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Enabled") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Enabled") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Enabled") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. 
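// With DefaultKmsKeyName set, objects written without an explicit encryption
// method are encrypted under that key. Sketch; the resource name follows the
// Cloud KMS naming scheme and every path component here is a placeholder:
//
//	enc := &storage.BucketEncryption{
//		DefaultKmsKeyName: "projects/my-proj/locations/us/keyRings/my-ring/cryptoKeys/my-key",
//	}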
It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Enabled") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *BucketHierarchicalNamespace) MarshalJSON() ([]byte, error) { +func (s BucketHierarchicalNamespace) MarshalJSON() ([]byte, error) { type NoMethod BucketHierarchicalNamespace - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketIamConfiguration: The bucket's IAM configuration. type BucketIamConfiguration struct { - // BucketPolicyOnly: The bucket's uniform bucket-level access - // configuration. The feature was formerly known as Bucket Policy Only. - // For backward compatibility, this field will be populated with - // identical information as the uniformBucketLevelAccess field. We - // recommend using the uniformBucketLevelAccess field to enable and - // disable the feature. + // BucketPolicyOnly: The bucket's uniform bucket-level access configuration. + // The feature was formerly known as Bucket Policy Only. For backward + // compatibility, this field will be populated with identical information as + // the uniformBucketLevelAccess field. We recommend using the + // uniformBucketLevelAccess field to enable and disable the feature. BucketPolicyOnly *BucketIamConfigurationBucketPolicyOnly `json:"bucketPolicyOnly,omitempty"` - - // PublicAccessPrevention: The bucket's Public Access Prevention - // configuration. Currently, 'inherited' and 'enforced' are supported. + // PublicAccessPrevention: The bucket's Public Access Prevention configuration. + // Currently, 'inherited' and 'enforced' are supported. PublicAccessPrevention string `json:"publicAccessPrevention,omitempty"` - // UniformBucketLevelAccess: The bucket's uniform bucket-level access // configuration. UniformBucketLevelAccess *BucketIamConfigurationUniformBucketLevelAccess `json:"uniformBucketLevelAccess,omitempty"` - // ForceSendFields is a list of field names (e.g. "BucketPolicyOnly") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "BucketPolicyOnly") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "BucketPolicyOnly") to include in + // API requests with the JSON null value. 
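// As the comment above says, uniformBucketLevelAccess is the preferred field
// and bucketPolicyOnly merely mirrors it for backward compatibility. An
// illustrative configuration enforcing uniform access plus public-access
// prevention:
//
//	iam := &storage.BucketIamConfiguration{
//		UniformBucketLevelAccess: &storage.BucketIamConfigurationUniformBucketLevelAccess{
//			Enabled: true,
//		},
//		PublicAccessPrevention: "enforced", // 'inherited' and 'enforced' are supported
//	}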
By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *BucketIamConfiguration) MarshalJSON() ([]byte, error) { +func (s BucketIamConfiguration) MarshalJSON() ([]byte, error) { type NoMethod BucketIamConfiguration - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// BucketIamConfigurationBucketPolicyOnly: The bucket's uniform -// bucket-level access configuration. The feature was formerly known as -// Bucket Policy Only. For backward compatibility, this field will be -// populated with identical information as the uniformBucketLevelAccess -// field. We recommend using the uniformBucketLevelAccess field to -// enable and disable the feature. +// BucketIamConfigurationBucketPolicyOnly: The bucket's uniform bucket-level +// access configuration. The feature was formerly known as Bucket Policy Only. +// For backward compatibility, this field will be populated with identical +// information as the uniformBucketLevelAccess field. We recommend using the +// uniformBucketLevelAccess field to enable and disable the feature. type BucketIamConfigurationBucketPolicyOnly struct { - // Enabled: If set, access is controlled only by bucket-level or above - // IAM policies. + // Enabled: If set, access is controlled only by bucket-level or above IAM + // policies. Enabled bool `json:"enabled,omitempty"` - // LockedTime: The deadline for changing - // iamConfiguration.bucketPolicyOnly.enabled from true to false in RFC - // 3339 format. iamConfiguration.bucketPolicyOnly.enabled may be changed - // from true to false until the locked time, after which the field is - // immutable. + // iamConfiguration.bucketPolicyOnly.enabled from true to false in RFC 3339 + // format. iamConfiguration.bucketPolicyOnly.enabled may be changed from true + // to false until the locked time, after which the field is immutable. LockedTime string `json:"lockedTime,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Enabled") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Enabled") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Enabled") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Enabled") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. 
See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *BucketIamConfigurationBucketPolicyOnly) MarshalJSON() ([]byte, error) { +func (s BucketIamConfigurationBucketPolicyOnly) MarshalJSON() ([]byte, error) { type NoMethod BucketIamConfigurationBucketPolicyOnly - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketIamConfigurationUniformBucketLevelAccess: The bucket's uniform // bucket-level access configuration. type BucketIamConfigurationUniformBucketLevelAccess struct { - // Enabled: If set, access is controlled only by bucket-level or above - // IAM policies. + // Enabled: If set, access is controlled only by bucket-level or above IAM + // policies. Enabled bool `json:"enabled,omitempty"` - // LockedTime: The deadline for changing - // iamConfiguration.uniformBucketLevelAccess.enabled from true to false - // in RFC 3339 format. - // iamConfiguration.uniformBucketLevelAccess.enabled may be changed from - // true to false until the locked time, after which the field is + // iamConfiguration.uniformBucketLevelAccess.enabled from true to false in RFC + // 3339 format. iamConfiguration.uniformBucketLevelAccess.enabled may be + // changed from true to false until the locked time, after which the field is // immutable. LockedTime string `json:"lockedTime,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Enabled") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Enabled") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Enabled") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Enabled") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *BucketIamConfigurationUniformBucketLevelAccess) MarshalJSON() ([]byte, error) { +func (s BucketIamConfigurationUniformBucketLevelAccess) MarshalJSON() ([]byte, error) { type NoMethod BucketIamConfigurationUniformBucketLevelAccess - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketLifecycle: The bucket's lifecycle configuration. See lifecycle // management for more information. 
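// The ForceSendFields/NullFields pair documented on every struct in this
// file is how callers distinguish "unset" from "explicitly zero" in Patch
// requests. A hedged usage sketch (the field choices are illustrative):
//
//	b := &storage.Bucket{DefaultEventBasedHold: false}
//	// false is a zero value and would normally be omitted; force-send it.
//	b.ForceSendFields = []string{"DefaultEventBasedHold"}
//	// To clear a field server-side instead, send it as JSON null.
//	b.NullFields = []string{"Labels"}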
type BucketLifecycle struct { - // Rule: A lifecycle management rule, which is made of an action to take - // and the condition(s) under which the action will be taken. + // Rule: A lifecycle management rule, which is made of an action to take and + // the condition(s) under which the action will be taken. Rule []*BucketLifecycleRule `json:"rule,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Rule") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Rule") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Rule") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Rule") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *BucketLifecycle) MarshalJSON() ([]byte, error) { +func (s BucketLifecycle) MarshalJSON() ([]byte, error) { type NoMethod BucketLifecycle - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type BucketLifecycleRule struct { // Action: The action to take. Action *BucketLifecycleRuleAction `json:"action,omitempty"` - // Condition: The condition(s) under which the action will be taken. Condition *BucketLifecycleRuleCondition `json:"condition,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Action") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Action") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Action") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. 
- // This may be used to include null fields in Patch requests. + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *BucketLifecycleRule) MarshalJSON() ([]byte, error) { +func (s BucketLifecycleRule) MarshalJSON() ([]byte, error) { type NoMethod BucketLifecycleRule - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketLifecycleRuleAction: The action to take. type BucketLifecycleRuleAction struct { - // StorageClass: Target storage class. Required iff the type of the - // action is SetStorageClass. + // StorageClass: Target storage class. Required iff the type of the action is + // SetStorageClass. StorageClass string `json:"storageClass,omitempty"` - - // Type: Type of the action. Currently, only Delete, SetStorageClass, - // and AbortIncompleteMultipartUpload are supported. + // Type: Type of the action. Currently, only Delete, SetStorageClass, and + // AbortIncompleteMultipartUpload are supported. Type string `json:"type,omitempty"` - // ForceSendFields is a list of field names (e.g. "StorageClass") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "StorageClass") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "StorageClass") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) { +func (s BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) { type NoMethod BucketLifecycleRuleAction - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// BucketLifecycleRuleCondition: The condition(s) under which the action -// will be taken. +// BucketLifecycleRuleCondition: The condition(s) under which the action will +// be taken. type BucketLifecycleRuleCondition struct { - // Age: Age of an object (in days). This condition is satisfied when an - // object reaches the specified age. + // Age: Age of an object (in days). This condition is satisfied when an object + // reaches the specified age. 
Age *int64 `json:"age,omitempty"` - // CreatedBefore: A date in RFC 3339 format with only the date part (for - // instance, "2013-01-15"). This condition is satisfied when an object - // is created before midnight of the specified date in UTC. + // instance, "2013-01-15"). This condition is satisfied when an object is + // created before midnight of the specified date in UTC. CreatedBefore string `json:"createdBefore,omitempty"` - - // CustomTimeBefore: A date in RFC 3339 format with only the date part - // (for instance, "2013-01-15"). This condition is satisfied when the - // custom time on an object is before this date in UTC. + // CustomTimeBefore: A date in RFC 3339 format with only the date part (for + // instance, "2013-01-15"). This condition is satisfied when the custom time on + // an object is before this date in UTC. CustomTimeBefore string `json:"customTimeBefore,omitempty"` - // DaysSinceCustomTime: Number of days elapsed since the user-specified - // timestamp set on an object. The condition is satisfied if the days - // elapsed is at least this number. If no custom timestamp is specified - // on an object, the condition does not apply. + // timestamp set on an object. The condition is satisfied if the days elapsed + // is at least this number. If no custom timestamp is specified on an object, + // the condition does not apply. DaysSinceCustomTime int64 `json:"daysSinceCustomTime,omitempty"` - // DaysSinceNoncurrentTime: Number of days elapsed since the noncurrent - // timestamp of an object. The condition is satisfied if the days - // elapsed is at least this number. This condition is relevant only for - // versioned objects. The value of the field must be a nonnegative - // integer. If it's zero, the object version will become eligible for - // Lifecycle action as soon as it becomes noncurrent. + // timestamp of an object. The condition is satisfied if the days elapsed is at + // least this number. This condition is relevant only for versioned objects. + // The value of the field must be a nonnegative integer. If it's zero, the + // object version will become eligible for Lifecycle action as soon as it + // becomes noncurrent. DaysSinceNoncurrentTime int64 `json:"daysSinceNoncurrentTime,omitempty"` - - // IsLive: Relevant only for versioned objects. If the value is true, - // this condition matches live objects; if the value is false, it - // matches archived objects. + // IsLive: Relevant only for versioned objects. If the value is true, this + // condition matches live objects; if the value is false, it matches archived + // objects. IsLive *bool `json:"isLive,omitempty"` - - // MatchesPattern: A regular expression that satisfies the RE2 syntax. - // This condition is satisfied when the name of the object matches the - // RE2 pattern. Note: This feature is currently in the "Early Access" - // launch stage and is only available to a whitelisted set of users; - // that means that this feature may be changed in backward-incompatible - // ways and that it is not guaranteed to be released. + // MatchesPattern: A regular expression that satisfies the RE2 syntax. This + // condition is satisfied when the name of the object matches the RE2 pattern. + // Note: This feature is currently in the "Early Access" launch stage and is + // only available to a whitelisted set of users; that means that this feature + // may be changed in backward-incompatible ways and that it is not guaranteed + // to be released. 
MatchesPattern string `json:"matchesPattern,omitempty"` - // MatchesPrefix: List of object name prefixes. This condition will be - // satisfied when at least one of the prefixes exactly matches the - // beginning of the object name. + // satisfied when at least one of the prefixes exactly matches the beginning of + // the object name. MatchesPrefix []string `json:"matchesPrefix,omitempty"` - - // MatchesStorageClass: Objects having any of the storage classes - // specified by this condition will be matched. Values include - // MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE, STANDARD, and - // DURABLE_REDUCED_AVAILABILITY. + // MatchesStorageClass: Objects having any of the storage classes specified by + // this condition will be matched. Values include MULTI_REGIONAL, REGIONAL, + // NEARLINE, COLDLINE, ARCHIVE, STANDARD, and DURABLE_REDUCED_AVAILABILITY. MatchesStorageClass []string `json:"matchesStorageClass,omitempty"` - // MatchesSuffix: List of object name suffixes. This condition will be - // satisfied when at least one of the suffixes exactly matches the end - // of the object name. + // satisfied when at least one of the suffixes exactly matches the end of the + // object name. MatchesSuffix []string `json:"matchesSuffix,omitempty"` - - // NoncurrentTimeBefore: A date in RFC 3339 format with only the date - // part (for instance, "2013-01-15"). This condition is satisfied when - // the noncurrent time on an object is before this date in UTC. This - // condition is relevant only for versioned objects. + // NoncurrentTimeBefore: A date in RFC 3339 format with only the date part (for + // instance, "2013-01-15"). This condition is satisfied when the noncurrent + // time on an object is before this date in UTC. This condition is relevant + // only for versioned objects. NoncurrentTimeBefore string `json:"noncurrentTimeBefore,omitempty"` - - // NumNewerVersions: Relevant only for versioned objects. If the value - // is N, this condition is satisfied when there are at least N versions - // (including the live version) newer than this version of the object. + // NumNewerVersions: Relevant only for versioned objects. If the value is N, + // this condition is satisfied when there are at least N versions (including + // the live version) newer than this version of the object. NumNewerVersions int64 `json:"numNewerVersions,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Age") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Age") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Age") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
+ // NullFields is a list of field names (e.g. "Age") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *BucketLifecycleRuleCondition) MarshalJSON() ([]byte, error) { +func (s BucketLifecycleRuleCondition) MarshalJSON() ([]byte, error) { type NoMethod BucketLifecycleRuleCondition - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketLogging: The bucket's logging configuration, which defines the -// destination bucket and optional name prefix for the current bucket's -// logs. +// destination bucket and optional name prefix for the current bucket's logs. type BucketLogging struct { - // LogBucket: The destination bucket where the current bucket's logs - // should be placed. + // LogBucket: The destination bucket where the current bucket's logs should be + // placed. LogBucket string `json:"logBucket,omitempty"` - // LogObjectPrefix: A prefix for log object names. LogObjectPrefix string `json:"logObjectPrefix,omitempty"` - // ForceSendFields is a list of field names (e.g. "LogBucket") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "LogBucket") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "LogBucket") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *BucketLogging) MarshalJSON() ([]byte, error) { +func (s BucketLogging) MarshalJSON() ([]byte, error) { type NoMethod BucketLogging - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketObjectRetention: The bucket's object retention config. type BucketObjectRetention struct { // Mode: The bucket's object retention mode. Can be Enabled. Mode string `json:"mode,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Mode") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. 
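// Tying the lifecycle pieces together: each rule couples one action with the
// conditions above. A sketch that deletes objects after 30 days and demotes
// year-old objects (values illustrative; note Age is *int64):
//
//	age30, age365 := int64(30), int64(365)
//	lc := &storage.BucketLifecycle{
//		Rule: []*storage.BucketLifecycleRule{
//			{
//				Action:    &storage.BucketLifecycleRuleAction{Type: "Delete"},
//				Condition: &storage.BucketLifecycleRuleCondition{Age: &age30},
//			},
//			{
//				// StorageClass is required iff Type is SetStorageClass.
//				Action:    &storage.BucketLifecycleRuleAction{Type: "SetStorageClass", StorageClass: "NEARLINE"},
//				Condition: &storage.BucketLifecycleRuleCondition{Age: &age365},
//			},
//		},
//	}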
- // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Mode") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Mode") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Mode") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *BucketObjectRetention) MarshalJSON() ([]byte, error) { +func (s BucketObjectRetention) MarshalJSON() ([]byte, error) { type NoMethod BucketObjectRetention - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// BucketOwner: The owner of the bucket. This is always the project -// team's owner group. +// BucketOwner: The owner of the bucket. This is always the project team's +// owner group. type BucketOwner struct { // Entity: The entity, in the form project-owner-projectId. Entity string `json:"entity,omitempty"` - // EntityId: The ID for the entity. EntityId string `json:"entityId,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Entity") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Entity") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Entity") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
NullFields []string `json:"-"` } -func (s *BucketOwner) MarshalJSON() ([]byte, error) { +func (s BucketOwner) MarshalJSON() ([]byte, error) { type NoMethod BucketOwner - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// BucketRetentionPolicy: The bucket's retention policy. The retention -// policy enforces a minimum retention time for all objects contained in -// the bucket, based on their creation time. Any attempt to overwrite or -// delete objects younger than the retention period will result in a -// PERMISSION_DENIED error. An unlocked retention policy can be modified -// or removed from the bucket via a storage.buckets.update operation. A -// locked retention policy cannot be removed or shortened in duration -// for the lifetime of the bucket. Attempting to remove or decrease -// period of a locked retention policy will result in a +// BucketRetentionPolicy: The bucket's retention policy. The retention policy +// enforces a minimum retention time for all objects contained in the bucket, +// based on their creation time. Any attempt to overwrite or delete objects +// younger than the retention period will result in a PERMISSION_DENIED error. +// An unlocked retention policy can be modified or removed from the bucket via +// a storage.buckets.update operation. A locked retention policy cannot be +// removed or shortened in duration for the lifetime of the bucket. Attempting +// to remove or decrease period of a locked retention policy will result in a // PERMISSION_DENIED error. type BucketRetentionPolicy struct { - // EffectiveTime: Server-determined value that indicates the time from - // which policy was enforced and effective. This value is in RFC 3339 - // format. + // EffectiveTime: Server-determined value that indicates the time from which + // policy was enforced and effective. This value is in RFC 3339 format. EffectiveTime string `json:"effectiveTime,omitempty"` - // IsLocked: Once locked, an object retention policy cannot be modified. IsLocked bool `json:"isLocked,omitempty"` - - // RetentionPeriod: The duration in seconds that objects need to be - // retained. Retention duration must be greater than zero and less than - // 100 years. Note that enforcement of retention periods less than a day - // is not guaranteed. Such periods should only be used for testing - // purposes. + // RetentionPeriod: The duration in seconds that objects need to be retained. + // Retention duration must be greater than zero and less than 100 years. Note + // that enforcement of retention periods less than a day is not guaranteed. + // Such periods should only be used for testing purposes. RetentionPeriod int64 `json:"retentionPeriod,omitempty,string"` - // ForceSendFields is a list of field names (e.g. "EffectiveTime") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. 
"EffectiveTime") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "EffectiveTime") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *BucketRetentionPolicy) MarshalJSON() ([]byte, error) { +func (s BucketRetentionPolicy) MarshalJSON() ([]byte, error) { type NoMethod BucketRetentionPolicy - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// BucketSoftDeletePolicy: The bucket's soft delete policy, which -// defines the period of time that soft-deleted objects will be -// retained, and cannot be permanently deleted. +// BucketSoftDeletePolicy: The bucket's soft delete policy, which defines the +// period of time that soft-deleted objects will be retained, and cannot be +// permanently deleted. type BucketSoftDeletePolicy struct { - // EffectiveTime: Server-determined value that indicates the time from - // which the policy, or one with a greater retention, was effective. - // This value is in RFC 3339 format. + // EffectiveTime: Server-determined value that indicates the time from which + // the policy, or one with a greater retention, was effective. This value is in + // RFC 3339 format. EffectiveTime string `json:"effectiveTime,omitempty"` - - // RetentionDurationSeconds: The duration in seconds that soft-deleted - // objects in the bucket will be retained and cannot be permanently - // deleted. + // RetentionDurationSeconds: The duration in seconds that soft-deleted objects + // in the bucket will be retained and cannot be permanently deleted. RetentionDurationSeconds int64 `json:"retentionDurationSeconds,omitempty,string"` - // ForceSendFields is a list of field names (e.g. "EffectiveTime") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "EffectiveTime") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "EffectiveTime") to include in API + // requests with the JSON null value. 
By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *BucketSoftDeletePolicy) MarshalJSON() ([]byte, error) { +func (s BucketSoftDeletePolicy) MarshalJSON() ([]byte, error) { type NoMethod BucketSoftDeletePolicy - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketVersioning: The bucket's versioning configuration. type BucketVersioning struct { - // Enabled: While set to true, versioning is fully enabled for this - // bucket. + // Enabled: While set to true, versioning is fully enabled for this bucket. Enabled bool `json:"enabled,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Enabled") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Enabled") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Enabled") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Enabled") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *BucketVersioning) MarshalJSON() ([]byte, error) { +func (s BucketVersioning) MarshalJSON() ([]byte, error) { type NoMethod BucketVersioning - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// BucketWebsite: The bucket's website configuration, controlling how -// the service behaves when accessing bucket contents as a web site. See -// the Static Website Examples for more information. +// BucketWebsite: The bucket's website configuration, controlling how the +// service behaves when accessing bucket contents as a web site. See the Static +// Website Examples for more information. type BucketWebsite struct { - // MainPageSuffix: If the requested object path is missing, the service - // will ensure the path has a trailing '/', append this suffix, and - // attempt to retrieve the resulting object. This allows the creation of - // index.html objects to represent directory pages. + // MainPageSuffix: If the requested object path is missing, the service will + // ensure the path has a trailing '/', append this suffix, and attempt to + // retrieve the resulting object. 
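// Soft delete, unlike a retention policy, is a recovery window: soft-deleted
// objects survive for the configured duration and cannot be permanently
// removed within it. Sketch with an illustrative duration:
//
//	sd := &storage.BucketSoftDeletePolicy{
//		RetentionDurationSeconds: 7 * 86400, // keep soft-deleted objects 7 days
//	}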
This allows the creation of index.html + // objects to represent directory pages. MainPageSuffix string `json:"mainPageSuffix,omitempty"` - // NotFoundPage: If the requested object path is missing, and any - // mainPageSuffix object is missing, if applicable, the service will - // return the named object from this bucket as the content for a 404 Not - // Found result. + // mainPageSuffix object is missing, if applicable, the service will return the + // named object from this bucket as the content for a 404 Not Found result. NotFoundPage string `json:"notFoundPage,omitempty"` - // ForceSendFields is a list of field names (e.g. "MainPageSuffix") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "MainPageSuffix") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "MainPageSuffix") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *BucketWebsite) MarshalJSON() ([]byte, error) { +func (s BucketWebsite) MarshalJSON() ([]byte, error) { type NoMethod BucketWebsite - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketAccessControl: An access-control entry. type BucketAccessControl struct { // Bucket: The name of the bucket. Bucket string `json:"bucket,omitempty"` - // Domain: The domain associated with the entity, if any. Domain string `json:"domain,omitempty"` - // Email: The email address associated with the entity, if any. Email string `json:"email,omitempty"` - - // Entity: The entity holding the permission, in one of the following - // forms: + // Entity: The entity holding the permission, in one of the following forms: // - user-userId // - user-email // - group-groupId @@ -1419,1363 +1168,1123 @@ type BucketAccessControl struct { // - To refer to all members of the Google Apps for Business domain // example.com, the entity would be domain-example.com. Entity string `json:"entity,omitempty"` - // EntityId: The ID for the entity, if any. EntityId string `json:"entityId,omitempty"` - // Etag: HTTP 1.1 Entity tag for the access-control entry. Etag string `json:"etag,omitempty"` - // Id: The ID of the access-control entry. Id string `json:"id,omitempty"` - - // Kind: The kind of item this is. For bucket access control entries, - // this is always storage#bucketAccessControl. 
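Every MarshalJSON in this file swaps a pointer receiver for a value receiver and drops the intermediate raw variable, but the NoMethod alias it keeps is the load-bearing piece: converting to a method-less copy of the type keeps the inner marshal from re-entering MarshalJSON forever. A toy reproduction of the pattern, with an invented Example type and plain json.Marshal standing in for gensupport.MarshalJSON:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Example struct {
	Name string `json:"name,omitempty"`
}

// MarshalJSON customizes encoding. The local NoMethod type has the same
// fields but none of the methods, so the inner json.Marshal call falls back
// to the default struct encoder instead of recursing into this method.
func (s Example) MarshalJSON() ([]byte, error) {
	type NoMethod Example
	return json.Marshal(NoMethod(s))
}

func main() {
	out, _ := json.Marshal(Example{Name: "demo"})
	fmt.Println(string(out)) // {"name":"demo"}
}
```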
+ // Kind: The kind of item this is. For bucket access control entries, this is + // always storage#bucketAccessControl. Kind string `json:"kind,omitempty"` - // ProjectTeam: The project team associated with the entity, if any. ProjectTeam *BucketAccessControlProjectTeam `json:"projectTeam,omitempty"` - // Role: The access permission for the entity. Role string `json:"role,omitempty"` - // SelfLink: The link to this access-control entry. SelfLink string `json:"selfLink,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Bucket") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Bucket") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Bucket") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *BucketAccessControl) MarshalJSON() ([]byte, error) { +func (s BucketAccessControl) MarshalJSON() ([]byte, error) { type NoMethod BucketAccessControl - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// BucketAccessControlProjectTeam: The project team associated with the -// entity, if any. +// BucketAccessControlProjectTeam: The project team associated with the entity, +// if any. type BucketAccessControlProjectTeam struct { // ProjectNumber: The project number. ProjectNumber string `json:"projectNumber,omitempty"` - // Team: The team. Team string `json:"team,omitempty"` - // ForceSendFields is a list of field names (e.g. "ProjectNumber") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. 
"ProjectNumber") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "ProjectNumber") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *BucketAccessControlProjectTeam) MarshalJSON() ([]byte, error) { +func (s BucketAccessControlProjectTeam) MarshalJSON() ([]byte, error) { type NoMethod BucketAccessControlProjectTeam - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BucketAccessControls: An access-control list. type BucketAccessControls struct { // Items: The list of items. Items []*BucketAccessControl `json:"items,omitempty"` - - // Kind: The kind of item this is. For lists of bucket access control - // entries, this is always storage#bucketAccessControls. + // Kind: The kind of item this is. For lists of bucket access control entries, + // this is always storage#bucketAccessControls. Kind string `json:"kind,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Items") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Items") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Items") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *BucketAccessControls) MarshalJSON() ([]byte, error) { +func (s BucketAccessControls) MarshalJSON() ([]byte, error) { type NoMethod BucketAccessControls - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketStorageLayout: The storage layout configuration of a bucket. 
+type BucketStorageLayout struct { + // Bucket: The name of the bucket. + Bucket string `json:"bucket,omitempty"` + // CustomPlacementConfig: The bucket's custom placement configuration for + // Custom Dual Regions. + CustomPlacementConfig *BucketStorageLayoutCustomPlacementConfig `json:"customPlacementConfig,omitempty"` + // HierarchicalNamespace: The bucket's hierarchical namespace configuration. + HierarchicalNamespace *BucketStorageLayoutHierarchicalNamespace `json:"hierarchicalNamespace,omitempty"` + // Kind: The kind of item this is. For storage layout, this is always + // storage#storageLayout. + Kind string `json:"kind,omitempty"` + // Location: The location of the bucket. + Location string `json:"location,omitempty"` + // LocationType: The type of the bucket location. + LocationType string `json:"locationType,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Bucket") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Bucket") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketStorageLayout) MarshalJSON() ([]byte, error) { + type NoMethod BucketStorageLayout + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketStorageLayoutCustomPlacementConfig: The bucket's custom placement +// configuration for Custom Dual Regions. +type BucketStorageLayoutCustomPlacementConfig struct { + // DataLocations: The list of regional locations in which data is placed. + DataLocations []string `json:"dataLocations,omitempty"` + // ForceSendFields is a list of field names (e.g. "DataLocations") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "DataLocations") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketStorageLayoutCustomPlacementConfig) MarshalJSON() ([]byte, error) { + type NoMethod BucketStorageLayoutCustomPlacementConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketStorageLayoutHierarchicalNamespace: The bucket's hierarchical +// namespace configuration. +type BucketStorageLayoutHierarchicalNamespace struct { + // Enabled: When set to true, hierarchical namespace is enabled for this + // bucket. + Enabled bool `json:"enabled,omitempty"` + // ForceSendFields is a list of field names (e.g. "Enabled") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. 
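BucketStorageLayout and its two sub-messages are new in this revision and are read-only: they describe where a bucket's data lives rather than accepting configuration. A hedged sketch of inspecting them, assuming the Buckets.GetStorageLayout call that accompanies these types ("my-bucket" is a placeholder):

```go
package main

import (
	"context"
	"fmt"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	layout, err := svc.Buckets.GetStorageLayout("my-bucket").Do()
	if err != nil {
		log.Fatal(err)
	}
	// HierarchicalNamespace is a pointer and may be absent entirely.
	hns := layout.HierarchicalNamespace != nil && layout.HierarchicalNamespace.Enabled
	fmt.Printf("location=%s (%s), hierarchical namespace: %v\n",
		layout.Location, layout.LocationType, hns)
}
```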
+ ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Enabled") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketStorageLayoutHierarchicalNamespace) MarshalJSON() ([]byte, error) { + type NoMethod BucketStorageLayoutHierarchicalNamespace + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Buckets: A list of buckets. type Buckets struct { // Items: The list of items. Items []*Bucket `json:"items,omitempty"` - // Kind: The kind of item this is. For lists of buckets, this is always // storage#buckets. Kind string `json:"kind,omitempty"` - - // NextPageToken: The continuation token, used to page through large - // result sets. Provide this value in a subsequent request to return the - // next page of results. + // NextPageToken: The continuation token, used to page through large result + // sets. Provide this value in a subsequent request to return the next page of + // results. NextPageToken string `json:"nextPageToken,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Items") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Items") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Items") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *Buckets) MarshalJSON() ([]byte, error) { +func (s Buckets) MarshalJSON() ([]byte, error) { type NoMethod Buckets - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // BulkRestoreObjectsRequest: A bulk restore objects request. type BulkRestoreObjectsRequest struct { - // AllowOverwrite: If false (default), the restore will not overwrite - // live objects with the same name at the destination. This means some - // deleted objects may be skipped. If true, live objects will be - // overwritten resulting in a noncurrent object (if versioning is - // enabled). 
If versioning is not enabled, overwriting the object will - // result in a soft-deleted object. In either case, if a noncurrent - // object already exists with the same name, a live version can be - // written without issue. + // AllowOverwrite: If false (default), the restore will not overwrite live + // objects with the same name at the destination. This means some deleted + // objects may be skipped. If true, live objects will be overwritten resulting + // in a noncurrent object (if versioning is enabled). If versioning is not + // enabled, overwriting the object will result in a soft-deleted object. In + // either case, if a noncurrent object already exists with the same name, a + // live version can be written without issue. AllowOverwrite bool `json:"allowOverwrite,omitempty"` - - // CopySourceAcl: If true, copies the source object's ACL; otherwise, - // uses the bucket's default object ACL. The default is false. + // CopySourceAcl: If true, copies the source object's ACL; otherwise, uses the + // bucket's default object ACL. The default is false. CopySourceAcl bool `json:"copySourceAcl,omitempty"` - - // MatchGlobs: Restores only the objects matching any of the specified - // glob(s). If this parameter is not specified, all objects will be - // restored within the specified time range. + // MatchGlobs: Restores only the objects matching any of the specified glob(s). + // If this parameter is not specified, all objects will be restored within the + // specified time range. MatchGlobs []string `json:"matchGlobs,omitempty"` - - // SoftDeletedAfterTime: Restores only the objects that were - // soft-deleted after this time. + // SoftDeletedAfterTime: Restores only the objects that were soft-deleted after + // this time. SoftDeletedAfterTime string `json:"softDeletedAfterTime,omitempty"` - - // SoftDeletedBeforeTime: Restores only the objects that were - // soft-deleted before this time. + // SoftDeletedBeforeTime: Restores only the objects that were soft-deleted + // before this time. SoftDeletedBeforeTime string `json:"softDeletedBeforeTime,omitempty"` - // ForceSendFields is a list of field names (e.g. "AllowOverwrite") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AllowOverwrite") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "AllowOverwrite") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
NullFields []string `json:"-"` } -func (s *BulkRestoreObjectsRequest) MarshalJSON() ([]byte, error) { +func (s BulkRestoreObjectsRequest) MarshalJSON() ([]byte, error) { type NoMethod BulkRestoreObjectsRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Channel: A notification channel used to watch for resource changes. type Channel struct { - // Address: The address where notifications are delivered for this - // channel. + // Address: The address where notifications are delivered for this channel. Address string `json:"address,omitempty"` - - // Expiration: Date and time of notification channel expiration, - // expressed as a Unix timestamp, in milliseconds. Optional. + // Expiration: Date and time of notification channel expiration, expressed as a + // Unix timestamp, in milliseconds. Optional. Expiration int64 `json:"expiration,omitempty,string"` - // Id: A UUID or similar unique string that identifies this channel. Id string `json:"id,omitempty"` - - // Kind: Identifies this as a notification channel used to watch for - // changes to a resource, which is "api#channel". + // Kind: Identifies this as a notification channel used to watch for changes to + // a resource, which is "api#channel". Kind string `json:"kind,omitempty"` - // Params: Additional parameters controlling delivery channel behavior. // Optional. Params map[string]string `json:"params,omitempty"` - - // Payload: A Boolean value to indicate whether payload is wanted. - // Optional. + // Payload: A Boolean value to indicate whether payload is wanted. Optional. Payload bool `json:"payload,omitempty"` - - // ResourceId: An opaque ID that identifies the resource being watched - // on this channel. Stable across different API versions. + // ResourceId: An opaque ID that identifies the resource being watched on this + // channel. Stable across different API versions. ResourceId string `json:"resourceId,omitempty"` - // ResourceUri: A version-specific identifier for the watched resource. ResourceUri string `json:"resourceUri,omitempty"` - // Token: An arbitrary string delivered to the target address with each // notification delivered over this channel. Optional. Token string `json:"token,omitempty"` - // Type: The type of delivery mechanism used for this channel. Type string `json:"type,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Address") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Address") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Address") to include in - // API requests with the JSON null value.
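BulkRestoreObjectsRequest, closed out just above, pairs a soft-delete time window with optional glob filters. A sketch of issuing one, assuming the Objects.BulkRestore method that takes this message; the bucket name, globs, and timestamps are placeholders:

```go
package main

import (
	"context"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	req := &storage.BulkRestoreObjectsRequest{
		MatchGlobs:            []string{"logs/**"},
		SoftDeletedAfterTime:  "2024-07-01T00:00:00Z", // RFC 3339
		SoftDeletedBeforeTime: "2024-07-08T00:00:00Z",
		CopySourceAcl:         true,
	}
	op, err := svc.Objects.BulkRestore("my-bucket", req).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("bulk restore running as %s", op.Name)
}
```

The call hands back a GoogleLongrunningOperation (defined later in this file) rather than a synchronous result.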
By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Address") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *Channel) MarshalJSON() ([]byte, error) { +func (s Channel) MarshalJSON() ([]byte, error) { type NoMethod Channel - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ComposeRequest: A Compose request. type ComposeRequest struct { // Destination: Properties of the resulting object. Destination *Object `json:"destination,omitempty"` - // Kind: The kind of item this is. Kind string `json:"kind,omitempty"` - - // SourceObjects: The list of source objects that will be concatenated - // into a single object. + // SourceObjects: The list of source objects that will be concatenated into a + // single object. SourceObjects []*ComposeRequestSourceObjects `json:"sourceObjects,omitempty"` - // ForceSendFields is a list of field names (e.g. "Destination") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Destination") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Destination") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ComposeRequest) MarshalJSON() ([]byte, error) { +func (s ComposeRequest) MarshalJSON() ([]byte, error) { type NoMethod ComposeRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type ComposeRequestSourceObjects struct { // Generation: The generation of this object to use as the source. Generation int64 `json:"generation,omitempty,string"` - - // Name: The source object's name. All source objects must reside in the - // same bucket. + // Name: The source object's name. All source objects must reside in the same + // bucket. 
Name string `json:"name,omitempty"` - - // ObjectPreconditions: Conditions that must be met for this operation - // to execute. + // ObjectPreconditions: Conditions that must be met for this operation to + // execute. ObjectPreconditions *ComposeRequestSourceObjectsObjectPreconditions `json:"objectPreconditions,omitempty"` - // ForceSendFields is a list of field names (e.g. "Generation") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Generation") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Generation") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ComposeRequestSourceObjects) MarshalJSON() ([]byte, error) { +func (s ComposeRequestSourceObjects) MarshalJSON() ([]byte, error) { type NoMethod ComposeRequestSourceObjects - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ComposeRequestSourceObjectsObjectPreconditions: Conditions that must -// be met for this operation to execute. +// ComposeRequestSourceObjectsObjectPreconditions: Conditions that must be met +// for this operation to execute. type ComposeRequestSourceObjectsObjectPreconditions struct { - // IfGenerationMatch: Only perform the composition if the generation of - // the source object that would be used matches this value. If this - // value and a generation are both specified, they must be the same - // value or the call will fail. + // IfGenerationMatch: Only perform the composition if the generation of the + // source object that would be used matches this value. If this value and a + // generation are both specified, they must be the same value or the call will + // fail. IfGenerationMatch int64 `json:"ifGenerationMatch,omitempty,string"` - - // ForceSendFields is a list of field names (e.g. "IfGenerationMatch") - // to unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "IfGenerationMatch") to + // unconditionally include in API requests. 
By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "IfGenerationMatch") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "IfGenerationMatch") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ComposeRequestSourceObjectsObjectPreconditions) MarshalJSON() ([]byte, error) { +func (s ComposeRequestSourceObjectsObjectPreconditions) MarshalJSON() ([]byte, error) { type NoMethod ComposeRequestSourceObjectsObjectPreconditions - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// Expr: Represents an expression text. Example: title: "User account -// presence" description: "Determines whether the request has a user -// account" expression: "size(request.user) > 0" +// Expr: Represents an expression text. Example: title: "User account presence" +// description: "Determines whether the request has a user account" expression: +// "size(request.user) > 0" type Expr struct { - // Description: An optional description of the expression. This is a - // longer text which describes the expression, e.g. when hovered over it - // in a UI. + // Description: An optional description of the expression. This is a longer + // text which describes the expression, e.g. when hovered over it in a UI. Description string `json:"description,omitempty"` - - // Expression: Textual representation of an expression in Common - // Expression Language syntax. The application context of the containing - // message determines which well-known feature set of CEL is supported. + // Expression: Textual representation of an expression in Common Expression + // Language syntax. The application context of the containing message + // determines which well-known feature set of CEL is supported. Expression string `json:"expression,omitempty"` - - // Location: An optional string indicating the location of the - // expression for error reporting, e.g. a file name and a position in - // the file. + // Location: An optional string indicating the location of the expression for + // error reporting, e.g. a file name and a position in the file. Location string `json:"location,omitempty"` - - // Title: An optional title for the expression, i.e. a short string - // describing its purpose. This can be used e.g. in UIs which allow to - // enter the expression. + // Title: An optional title for the expression, i.e. a short string describing + // its purpose. This can be used e.g. in UIs which allow to enter the + // expression. Title string `json:"title,omitempty"` - // ForceSendFields is a list of field names (e.g. "Description") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. 
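The compose-request trio above stitches multiple source objects into one destination object, with IfGenerationMatch guarding each source against concurrent rewrites. A sketch under assumed placeholder bucket, object names, and generation value:

```go
package main

import (
	"context"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	req := &storage.ComposeRequest{
		Destination: &storage.Object{ContentType: "text/plain"},
		SourceObjects: []*storage.ComposeRequestSourceObjects{
			{
				Name: "part-1",
				// Abort if part-1 was rewritten since we read generation 42.
				ObjectPreconditions: &storage.ComposeRequestSourceObjectsObjectPreconditions{
					IfGenerationMatch: 42,
				},
			},
			{Name: "part-2"},
		},
	}
	obj, err := svc.Objects.Compose("my-bucket", "merged.txt", req).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("composed %s (generation %d)", obj.Name, obj.Generation)
}
```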
However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Description") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *Expr) MarshalJSON() ([]byte, error) { +func (s Expr) MarshalJSON() ([]byte, error) { type NoMethod Expr - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// Folder: A folder. Only available in buckets with hierarchical -// namespace enabled. +// Folder: A folder. Only available in buckets with hierarchical namespace +// enabled. type Folder struct { // Bucket: The name of the bucket containing this folder. Bucket string `json:"bucket,omitempty"` - // CreateTime: The creation time of the folder in RFC 3339 format. CreateTime string `json:"createTime,omitempty"` - // Id: The ID of the folder, including the bucket name, folder name. Id string `json:"id,omitempty"` - - // Kind: The kind of item this is. For folders, this is always - // storage#folder. + // Kind: The kind of item this is. For folders, this is always storage#folder. Kind string `json:"kind,omitempty"` - // Metageneration: The version of the metadata for this folder. Used for // preconditions and for detecting changes in metadata. Metageneration int64 `json:"metageneration,omitempty,string"` - - // Name: The name of the folder. Required if not specified by URL - // parameter. + // Name: The name of the folder. Required if not specified by URL parameter. Name string `json:"name,omitempty"` - - // PendingRenameInfo: Only present if the folder is part of an ongoing - // rename folder operation. Contains information which can be used to - // query the operation status. + // PendingRenameInfo: Only present if the folder is part of an ongoing rename + // folder operation. Contains information which can be used to query the + // operation status. PendingRenameInfo *FolderPendingRenameInfo `json:"pendingRenameInfo,omitempty"` - // SelfLink: The link to this folder. SelfLink string `json:"selfLink,omitempty"` - - // UpdateTime: The modification time of the folder metadata in RFC 3339 - // format. + // UpdateTime: The modification time of the folder metadata in RFC 3339 format. UpdateTime string `json:"updateTime,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. 
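Expr is a generic CEL condition; in this package it surfaces on IAM policy bindings. A sketch of a time-bounded binding, assuming the PolicyBindings message defined elsewhere in this file; the role, member, and expression are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	binding := &storage.PolicyBindings{
		Role:    "roles/storage.objectViewer",
		Members: []string{"user:dev@example.com"},
		Condition: &storage.Expr{
			Title:       "expires-2025",
			Description: "Temporary read access for the migration.",
			Expression:  `request.time < timestamp("2025-01-01T00:00:00Z")`,
		},
	}
	out, err := json.MarshalIndent(binding, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}
```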
googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Bucket") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Bucket") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Bucket") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *Folder) MarshalJSON() ([]byte, error) { +func (s Folder) MarshalJSON() ([]byte, error) { type NoMethod Folder - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// FolderPendingRenameInfo: Only present if the folder is part of an -// ongoing rename folder operation. Contains information which can be -// used to query the operation status. +// FolderPendingRenameInfo: Only present if the folder is part of an ongoing +// rename folder operation. Contains information which can be used to query the +// operation status. type FolderPendingRenameInfo struct { // OperationId: The ID of the rename folder operation. OperationId string `json:"operationId,omitempty"` - // ForceSendFields is a list of field names (e.g. "OperationId") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "OperationId") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "OperationId") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. 
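PendingRenameInfo is populated only while a rename is in flight, so callers should treat it as an optional progress handle. A small illustrative helper (renameInProgress is an invented name) using only the types in this hunk:

```go
package main

import (
	"fmt"

	storage "google.golang.org/api/storage/v1"
)

// renameInProgress reports whether f is mid-rename and, if so, returns the
// operation ID usable with the buckets.operations calls later in this file.
func renameInProgress(f *storage.Folder) (string, bool) {
	if f.PendingRenameInfo == nil {
		return "", false
	}
	return f.PendingRenameInfo.OperationId, true
}

func main() {
	f := &storage.Folder{
		Name:              "archive/",
		PendingRenameInfo: &storage.FolderPendingRenameInfo{OperationId: "op-123"},
	}
	if id, ok := renameInProgress(f); ok {
		fmt.Println("rename pending, operation:", id)
	}
}
```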
See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *FolderPendingRenameInfo) MarshalJSON() ([]byte, error) { +func (s FolderPendingRenameInfo) MarshalJSON() ([]byte, error) { type NoMethod FolderPendingRenameInfo - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Folders: A list of folders. type Folders struct { // Items: The list of items. Items []*Folder `json:"items,omitempty"` - // Kind: The kind of item this is. For lists of folders, this is always // storage#folders. Kind string `json:"kind,omitempty"` - - // NextPageToken: The continuation token, used to page through large - // result sets. Provide this value in a subsequent request to return the - // next page of results. + // NextPageToken: The continuation token, used to page through large result + // sets. Provide this value in a subsequent request to return the next page of + // results. NextPageToken string `json:"nextPageToken,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Items") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Items") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Items") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *Folders) MarshalJSON() ([]byte, error) { +func (s Folders) MarshalJSON() ([]byte, error) { type NoMethod Folders - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleLongrunningListOperationsResponse: The response message for // storage.buckets.operations.list. type GoogleLongrunningListOperationsResponse struct { - // NextPageToken: The continuation token, used to page through large - // result sets. Provide this value in a subsequent request to return the - // next page of results. + // Kind: The kind of item this is. For lists of operations, this is always + // storage#operations. 
+ Kind string `json:"kind,omitempty"` + // NextPageToken: The continuation token, used to page through large result + // sets. Provide this value in a subsequent request to return the next page of + // results. NextPageToken string `json:"nextPageToken,omitempty"` - - // Operations: A list of operations that matches the specified filter in - // the request. + // Operations: A list of operations that matches the specified filter in the + // request. Operations []*GoogleLongrunningOperation `json:"operations,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "NextPageToken") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Kind") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "NextPageToken") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Kind") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *GoogleLongrunningListOperationsResponse) MarshalJSON() ([]byte, error) { +func (s GoogleLongrunningListOperationsResponse) MarshalJSON() ([]byte, error) { type NoMethod GoogleLongrunningListOperationsResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // GoogleLongrunningOperation: This resource represents a long-running // operation that is the result of a network API call. type GoogleLongrunningOperation struct { - // Done: If the value is "false", it means the operation is still in - // progress. If "true", the operation is completed, and either "error" - // or "response" is available. + // Done: If the value is "false", it means the operation is still in progress. + // If "true", the operation is completed, and either "error" or "response" is + // available. Done bool `json:"done,omitempty"` - - // Error: The error result of the operation in case of failure or - // cancellation. + // Error: The error result of the operation in case of failure or cancellation. Error *GoogleRpcStatus `json:"error,omitempty"` - + // Kind: The kind of item this is. For operations, this is always + // storage#operation. 
+ Kind string `json:"kind,omitempty"` // Metadata: Service-specific metadata associated with the operation. It - // typically contains progress information and common metadata such as - // create time. Some services might not provide such metadata. Any - // method that returns a long-running operation should document the - // metadata type, if any. + // typically contains progress information and common metadata such as create + // time. Some services might not provide such metadata. Any method that returns + // a long-running operation should document the metadata type, if any. Metadata googleapi.RawMessage `json:"metadata,omitempty"` - - // Name: The server-assigned name, which is only unique within the same - // service that originally returns it. If you use the default HTTP - // mapping, the "name" should be a resource name ending with - // "operations/{operationId}". + // Name: The server-assigned name, which is only unique within the same service + // that originally returns it. If you use the default HTTP mapping, the "name" + // should be a resource name ending with "operations/{operationId}". Name string `json:"name,omitempty"` - - // Response: The normal response of the operation in case of success. If - // the original method returns no data on success, such as "Delete", the - // response is google.protobuf.Empty. If the original method is standard - // Get/Create/Update, the response should be the resource. For other - // methods, the response should have the type "XxxResponse", where "Xxx" - // is the original method name. For example, if the original method name - // is "TakeSnapshot()", the inferred response type is - // "TakeSnapshotResponse". + // Response: The normal response of the operation in case of success. If the + // original method returns no data on success, such as "Delete", the response + // is google.protobuf.Empty. If the original method is standard + // Get/Create/Update, the response should be the resource. For other methods, + // the response should have the type "XxxResponse", where "Xxx" is the original + // method name. For example, if the original method name is "TakeSnapshot()", + // the inferred response type is "TakeSnapshotResponse". Response googleapi.RawMessage `json:"response,omitempty"` + // SelfLink: The link to this long running operation. + SelfLink string `json:"selfLink,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Done") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Done") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Done") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Done") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *GoogleLongrunningOperation) MarshalJSON() ([]byte, error) { +func (s GoogleLongrunningOperation) MarshalJSON() ([]byte, error) { type NoMethod GoogleLongrunningOperation - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// GoogleRpcStatus: The "Status" type defines a logical error model that -// is suitable for different programming environments, including REST -// APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each -// "Status" message contains three pieces of data: error code, error -// message, and error details. You can find out more about this error -// model and how to work with it in the API Design Guide -// (https://cloud.google.com/apis/design/errors). +// GoogleRpcStatus: The "Status" type defines a logical error model that is +// suitable for different programming environments, including REST APIs and RPC +// APIs. It is used by gRPC (https://github.com/grpc). Each "Status" message +// contains three pieces of data: error code, error message, and error details. +// You can find out more about this error model and how to work with it in the +// API Design Guide (https://cloud.google.com/apis/design/errors). type GoogleRpcStatus struct { - // Code: The status code, which should be an enum value of - // google.rpc.Code. + // Code: The status code, which should be an enum value of google.rpc.Code. Code int64 `json:"code,omitempty"` - - // Details: A list of messages that carry the error details. There is a - // common set of message types for APIs to use. + // Details: A list of messages that carry the error details. There is a common + // set of message types for APIs to use. Details []googleapi.RawMessage `json:"details,omitempty"` - - // Message: A developer-facing error message, which should be in - // English. + // Message: A developer-facing error message, which should be in English. Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Code") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
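GoogleLongrunningOperation and GoogleRpcStatus interlock: Done gates whether Error or Response is meaningful, and Response arrives as raw JSON rather than a typed message. An illustrative unpacking helper (summarize is an invented name), built only from types in this file:

```go
package main

import (
	"fmt"
	"log"

	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

func summarize(op *storage.GoogleLongrunningOperation) error {
	if !op.Done {
		return fmt.Errorf("operation %q still running", op.Name)
	}
	if op.Error != nil {
		// On failure, GoogleRpcStatus carries the code/message pair.
		return fmt.Errorf("operation %q failed: rpc code %d: %s",
			op.Name, op.Error.Code, op.Error.Message)
	}
	// On success, Response holds the method-specific result as raw JSON.
	log.Printf("operation %q done: %s", op.Name, string(op.Response))
	return nil
}

func main() {
	op := &storage.GoogleLongrunningOperation{
		Name:     "operations/rename-42",
		Done:     true,
		Response: googleapi.RawMessage(`{"kind":"storage#folder"}`),
	}
	if err := summarize(op); err != nil {
		log.Fatal(err)
	}
}
```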
However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Code") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *GoogleRpcStatus) MarshalJSON() ([]byte, error) { +func (s GoogleRpcStatus) MarshalJSON() ([]byte, error) { type NoMethod GoogleRpcStatus - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// HmacKey: JSON template to produce a JSON-style HMAC Key resource for -// Create responses. +// HmacKey: JSON template to produce a JSON-style HMAC Key resource for Create +// responses. type HmacKey struct { // Kind: The kind of item this is. For HMAC keys, this is always // storage#hmacKey. Kind string `json:"kind,omitempty"` - // Metadata: Key metadata. Metadata *HmacKeyMetadata `json:"metadata,omitempty"` - // Secret: HMAC secret key material. Secret string `json:"secret,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Kind") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Kind") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Kind") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *HmacKey) MarshalJSON() ([]byte, error) { +func (s HmacKey) MarshalJSON() ([]byte, error) { type NoMethod HmacKey - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// HmacKeyMetadata: JSON template to produce a JSON-style HMAC Key -// metadata resource. +// HmacKeyMetadata: JSON template to produce a JSON-style HMAC Key metadata +// resource. 
type HmacKeyMetadata struct { // AccessId: The ID of the HMAC Key. AccessId string `json:"accessId,omitempty"` - // Etag: HTTP 1.1 Entity tag for the HMAC key. Etag string `json:"etag,omitempty"` - - // Id: The ID of the HMAC key, including the Project ID and the Access - // ID. + // Id: The ID of the HMAC key, including the Project ID and the Access ID. Id string `json:"id,omitempty"` - // Kind: The kind of item this is. For HMAC Key metadata, this is always // storage#hmacKeyMetadata. Kind string `json:"kind,omitempty"` - // ProjectId: Project ID owning the service account to which the key // authenticates. ProjectId string `json:"projectId,omitempty"` - // SelfLink: The link to this resource. SelfLink string `json:"selfLink,omitempty"` - - // ServiceAccountEmail: The email address of the key's associated - // service account. + // ServiceAccountEmail: The email address of the key's associated service + // account. ServiceAccountEmail string `json:"serviceAccountEmail,omitempty"` - - // State: The state of the key. Can be one of ACTIVE, INACTIVE, or - // DELETED. + // State: The state of the key. Can be one of ACTIVE, INACTIVE, or DELETED. State string `json:"state,omitempty"` - // TimeCreated: The creation time of the HMAC key in RFC 3339 format. TimeCreated string `json:"timeCreated,omitempty"` - - // Updated: The last modification time of the HMAC key metadata in RFC - // 3339 format. + // Updated: The last modification time of the HMAC key metadata in RFC 3339 + // format. Updated string `json:"updated,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "AccessId") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AccessId") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AccessId") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *HmacKeyMetadata) MarshalJSON() ([]byte, error) { +func (s HmacKeyMetadata) MarshalJSON() ([]byte, error) { type NoMethod HmacKeyMetadata - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // HmacKeysMetadata: A list of hmacKeys. 
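An aside on the receiver change that repeats throughout this file: moving MarshalJSON from a pointer to a value receiver is behavior-preserving, since the NoMethod alias already strips the method set before gensupport.MarshalJSON reflects over the struct; the value receiver simply lets both values and pointers marshal. The ForceSendFields/NullFields contract the new comments link to can be sketched with the HmacKeyMetadata type defined here (field values below are invented):

package main

import (
	"fmt"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	md := storage.HmacKeyMetadata{
		AccessId: "GOOG1EXAMPLE", // hypothetical access ID
		// State holds its zero value; naming it in ForceSendFields
		// sends it anyway instead of omitting it.
		ForceSendFields: []string{"State"},
		// Naming Etag in NullFields emits an explicit JSON null.
		NullFields: []string{"Etag"},
	}
	b, err := md.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// Roughly: {"accessId":"GOOG1EXAMPLE","etag":null,"state":""}
	// (key order may differ); all other empty fields stay omitted.
}

This is the mechanism the deleted comment text described longhand for Patch requests.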
type HmacKeysMetadata struct { // Items: The list of items. Items []*HmacKeyMetadata `json:"items,omitempty"` - // Kind: The kind of item this is. For lists of hmacKeys, this is always // storage#hmacKeysMetadata. Kind string `json:"kind,omitempty"` - - // NextPageToken: The continuation token, used to page through large - // result sets. Provide this value in a subsequent request to return the - // next page of results. + // NextPageToken: The continuation token, used to page through large result + // sets. Provide this value in a subsequent request to return the next page of + // results. NextPageToken string `json:"nextPageToken,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Items") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Items") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Items") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *HmacKeysMetadata) MarshalJSON() ([]byte, error) { +func (s HmacKeysMetadata) MarshalJSON() ([]byte, error) { type NoMethod HmacKeysMetadata - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ManagedFolder: A managed folder. type ManagedFolder struct { // Bucket: The name of the bucket containing this managed folder. Bucket string `json:"bucket,omitempty"` - - // CreateTime: The creation time of the managed folder in RFC 3339 - // format. + // CreateTime: The creation time of the managed folder in RFC 3339 format. CreateTime string `json:"createTime,omitempty"` - - // Id: The ID of the managed folder, including the bucket name and - // managed folder name. + // Id: The ID of the managed folder, including the bucket name and managed + // folder name. Id string `json:"id,omitempty"` - // Kind: The kind of item this is. For managed folders, this is always // storage#managedFolder. Kind string `json:"kind,omitempty"` - - // Metageneration: The version of the metadata for this managed folder. - // Used for preconditions and for detecting changes in metadata. + // Metageneration: The version of the metadata for this managed folder. 
Used + // for preconditions and for detecting changes in metadata. Metageneration int64 `json:"metageneration,omitempty,string"` - - // Name: The name of the managed folder. Required if not specified by - // URL parameter. + // Name: The name of the managed folder. Required if not specified by URL + // parameter. Name string `json:"name,omitempty"` - // SelfLink: The link to this managed folder. SelfLink string `json:"selfLink,omitempty"` - - // UpdateTime: The last update time of the managed folder metadata in - // RFC 3339 format. + // UpdateTime: The last update time of the managed folder metadata in RFC 3339 + // format. UpdateTime string `json:"updateTime,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Bucket") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Bucket") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Bucket") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ManagedFolder) MarshalJSON() ([]byte, error) { +func (s ManagedFolder) MarshalJSON() ([]byte, error) { type NoMethod ManagedFolder - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ManagedFolders: A list of managed folders. type ManagedFolders struct { // Items: The list of items. Items []*ManagedFolder `json:"items,omitempty"` - - // Kind: The kind of item this is. For lists of managed folders, this is - // always storage#managedFolders. + // Kind: The kind of item this is. For lists of managed folders, this is always + // storage#managedFolders. Kind string `json:"kind,omitempty"` - - // NextPageToken: The continuation token, used to page through large - // result sets. Provide this value in a subsequent request to return the - // next page of results. + // NextPageToken: The continuation token, used to page through large result + // sets. Provide this value in a subsequent request to return the next page of + // results. NextPageToken string `json:"nextPageToken,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. 
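ManagedFolder and ManagedFolders mirror the managedFolders collection that this regeneration introduces. A hedged sketch of creating one; the Insert call and its signature are assumed by analogy with the other collections in this file, and all names are invented:

package main

import (
	"context"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx) // Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	mf := &storage.ManagedFolder{Name: "team-a/"} // hypothetical folder name
	created, err := svc.ManagedFolders.Insert("my-bucket", mf).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created %s at metageneration %d", created.Name, created.Metageneration)
}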
+ // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Items") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Items") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Items") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ManagedFolders) MarshalJSON() ([]byte, error) { +func (s ManagedFolders) MarshalJSON() ([]byte, error) { type NoMethod ManagedFolders - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Notification: A subscription to receive Google PubSub notifications. type Notification struct { - // CustomAttributes: An optional list of additional attributes to attach - // to each Cloud PubSub message published for this notification - // subscription. + // CustomAttributes: An optional list of additional attributes to attach to + // each Cloud PubSub message published for this notification subscription. CustomAttributes map[string]string `json:"custom_attributes,omitempty"` - // Etag: HTTP 1.1 Entity tag for this subscription notification. Etag string `json:"etag,omitempty"` - - // EventTypes: If present, only send notifications about listed event - // types. If empty, sent notifications for all event types. + // EventTypes: If present, only send notifications about listed event types. If + // empty, send notifications for all event types. EventTypes []string `json:"event_types,omitempty"` - // Id: The ID of the notification. Id string `json:"id,omitempty"` - // Kind: The kind of item this is. For notifications, this is always // storage#notification. Kind string `json:"kind,omitempty"` - - // ObjectNamePrefix: If present, only apply this notification - // configuration to object names that begin with this prefix. + // ObjectNamePrefix: If present, only apply this notification configuration to + // object names that begin with this prefix. ObjectNamePrefix string `json:"object_name_prefix,omitempty"` - // PayloadFormat: The desired content of the Payload. PayloadFormat string `json:"payload_format,omitempty"` - // SelfLink: The canonical URL of this notification. SelfLink string `json:"selfLink,omitempty"` - // Topic: The Cloud PubSub topic to which this subscription publishes.
// Formatted as: - // '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topi - // c}' + // '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}' Topic string `json:"topic,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CustomAttributes") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CustomAttributes") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "CustomAttributes") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *Notification) MarshalJSON() ([]byte, error) { +func (s Notification) MarshalJSON() ([]byte, error) { type NoMethod Notification - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Notifications: A list of notification subscriptions. type Notifications struct { // Items: The list of items. Items []*Notification `json:"items,omitempty"` - - // Kind: The kind of item this is. For lists of notifications, this is - // always storage#notifications. + // Kind: The kind of item this is. For lists of notifications, this is always + // storage#notifications. Kind string `json:"kind,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Items") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Items") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. 
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Items") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *Notifications) MarshalJSON() ([]byte, error) { +func (s Notifications) MarshalJSON() ([]byte, error) { type NoMethod Notifications - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Object: An object. type Object struct { // Acl: Access controls on the object. Acl []*ObjectAccessControl `json:"acl,omitempty"` - // Bucket: The name of the bucket containing this object. Bucket string `json:"bucket,omitempty"` - - // CacheControl: Cache-Control directive for the object data. If - // omitted, and the object is accessible to all anonymous users, the - // default will be public, max-age=3600. + // CacheControl: Cache-Control directive for the object data. If omitted, and + // the object is accessible to all anonymous users, the default will be public, + // max-age=3600. CacheControl string `json:"cacheControl,omitempty"` - - // ComponentCount: Number of underlying components that make up this - // object. Components are accumulated by compose operations. + // ComponentCount: Number of underlying components that make up this object. + // Components are accumulated by compose operations. ComponentCount int64 `json:"componentCount,omitempty"` - // ContentDisposition: Content-Disposition of the object data. ContentDisposition string `json:"contentDisposition,omitempty"` - // ContentEncoding: Content-Encoding of the object data. ContentEncoding string `json:"contentEncoding,omitempty"` - // ContentLanguage: Content-Language of the object data. ContentLanguage string `json:"contentLanguage,omitempty"` - - // ContentType: Content-Type of the object data. If an object is stored - // without a Content-Type, it is served as application/octet-stream. + // ContentType: Content-Type of the object data. If an object is stored without + // a Content-Type, it is served as application/octet-stream. ContentType string `json:"contentType,omitempty"` - - // Crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; - // encoded using base64 in big-endian byte order. For more information - // about using the CRC32c checksum, see Hashes and ETags: Best - // Practices. + // Crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; encoded using + // base64 in big-endian byte order. For more information about using the CRC32c + // checksum, see Hashes and ETags: Best Practices. Crc32c string `json:"crc32c,omitempty"` - - // CustomTime: A timestamp in RFC 3339 format specified by the user for - // an object. + // CustomTime: A timestamp in RFC 3339 format specified by the user for an + // object. CustomTime string `json:"customTime,omitempty"` - - // CustomerEncryption: Metadata of customer-supplied encryption key, if - // the object is encrypted by such a key. 
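The Notification and Notifications types above model bucket notification configs. A minimal sketch of creating one through the generated Notifications.Insert call, assuming a pre-existing Pub/Sub topic (project, bucket, and topic names are invented; the Topic string follows the format documented on the field above):

package main

import (
	"context"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx) // Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	n := &storage.Notification{
		Topic:            "//pubsub.googleapis.com/projects/my-project/topics/my-topic",
		PayloadFormat:    "JSON_API_V1",
		EventTypes:       []string{"OBJECT_FINALIZE"}, // omit to receive all event types
		ObjectNamePrefix: "uploads/",
	}
	created, err := svc.Notifications.Insert("my-bucket", n).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("notification %s created", created.Id)
}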
+ // CustomerEncryption: Metadata of customer-supplied encryption key, if the + // object is encrypted by such a key. CustomerEncryption *ObjectCustomerEncryption `json:"customerEncryption,omitempty"` - // Etag: HTTP 1.1 Entity tag for the object. Etag string `json:"etag,omitempty"` - - // EventBasedHold: Whether an object is under event-based hold. - // Event-based hold is a way to retain objects until an event occurs, - // which is signified by the hold's release (i.e. this value is set to - // false). After being released (set to false), such objects will be - // subject to bucket-level retention (if any). One sample use case of - // this flag is for banks to hold loan documents for at least 3 years - // after loan is paid in full. Here, bucket-level retention is 3 years - // and the event is the loan being paid in full. In this example, these - // objects will be held intact for any number of years until the event - // has occurred (event-based hold on the object is released) and then 3 - // more years after that. That means retention duration of the objects - // begins from the moment event-based hold transitioned from true to - // false. + // EventBasedHold: Whether an object is under event-based hold. Event-based + // hold is a way to retain objects until an event occurs, which is signified by + // the hold's release (i.e. this value is set to false). After being released + // (set to false), such objects will be subject to bucket-level retention (if + // any). One sample use case of this flag is for banks to hold loan documents + // for at least 3 years after loan is paid in full. Here, bucket-level + // retention is 3 years and the event is the loan being paid in full. In this + // example, these objects will be held intact for any number of years until the + // event has occurred (event-based hold on the object is released) and then 3 + // more years after that. That means retention duration of the objects begins + // from the moment event-based hold transitioned from true to false. EventBasedHold bool `json:"eventBasedHold,omitempty"` - // Generation: The content generation of this object. Used for object // versioning. Generation int64 `json:"generation,omitempty,string"` - - // HardDeleteTime: This is the time (in the future) when the - // soft-deleted object will no longer be restorable. It is equal to the - // soft delete time plus the current soft delete retention duration of - // the bucket. + // HardDeleteTime: This is the time (in the future) when the soft-deleted + // object will no longer be restorable. It is equal to the soft delete time + // plus the current soft delete retention duration of the bucket. HardDeleteTime string `json:"hardDeleteTime,omitempty"` - // Id: The ID of the object, including the bucket name, object name, and // generation number. Id string `json:"id,omitempty"` - - // Kind: The kind of item this is. For objects, this is always - // storage#object. + // Kind: The kind of item this is. For objects, this is always storage#object. Kind string `json:"kind,omitempty"` - - // KmsKeyName: Not currently supported. Specifying the parameter causes - // the request to fail with status code 400 - Bad Request. + // KmsKeyName: Not currently supported. Specifying the parameter causes the + // request to fail with status code 400 - Bad Request. KmsKeyName string `json:"kmsKeyName,omitempty"` - - // Md5Hash: MD5 hash of the data; encoded using base64. For more - // information about using the MD5 hash, see Hashes and ETags: Best - // Practices. 
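The Crc32c and Md5Hash fields carry checksums in fixed encodings: CRC32C over the Castagnoli polynomial, serialized big-endian and base64-encoded, and the raw 16-byte MD5 digest, also base64-encoded. A self-contained helper using only the standard library:

package main

import (
	"crypto/md5"
	"encoding/base64"
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

// gcsChecksums returns values suitable for Object.Crc32c and Object.Md5Hash.
func gcsChecksums(data []byte) (crc32c, md5hash string) {
	sum := crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))
	var be [4]byte
	binary.BigEndian.PutUint32(be[:], sum) // big-endian byte order, per the doc comment
	crc32c = base64.StdEncoding.EncodeToString(be[:])

	digest := md5.Sum(data)
	md5hash = base64.StdEncoding.EncodeToString(digest[:])
	return crc32c, md5hash
}

func main() {
	c, m := gcsChecksums([]byte("hello world"))
	fmt.Println(c, m)
}

Comparing these local values against what the server reports is the integrity check the "Hashes and ETags: Best Practices" reference describes.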
+ // Md5Hash: MD5 hash of the data; encoded using base64. For more information + // about using the MD5 hash, see Hashes and ETags: Best Practices. Md5Hash string `json:"md5Hash,omitempty"` - // MediaLink: Media download link. MediaLink string `json:"mediaLink,omitempty"` - // Metadata: User-provided metadata, in key/value pairs. Metadata map[string]string `json:"metadata,omitempty"` - // Metageneration: The version of the metadata for this object at this - // generation. Used for preconditions and for detecting changes in - // metadata. A metageneration number is only meaningful in the context - // of a particular generation of a particular object. + // generation. Used for preconditions and for detecting changes in metadata. A + // metageneration number is only meaningful in the context of a particular + // generation of a particular object. Metageneration int64 `json:"metageneration,omitempty,string"` - - // Name: The name of the object. Required if not specified by URL - // parameter. + // Name: The name of the object. Required if not specified by URL parameter. Name string `json:"name,omitempty"` - - // Owner: The owner of the object. This will always be the uploader of - // the object. + // Owner: The owner of the object. This will always be the uploader of the + // object. Owner *ObjectOwner `json:"owner,omitempty"` - // Retention: A collection of object level retention parameters. Retention *ObjectRetention `json:"retention,omitempty"` - // RetentionExpirationTime: A server-determined value that specifies the - // earliest time that the object's retention period expires. This value - // is in RFC 3339 format. Note 1: This field is not provided for objects - // with an active event-based hold, since retention expiration is - // unknown until the hold is removed. Note 2: This value can be provided - // even when temporary hold is set (so that the user can reason about - // policy without having to first unset the temporary hold). + // earliest time that the object's retention period expires. This value is in + // RFC 3339 format. Note 1: This field is not provided for objects with an + // active event-based hold, since retention expiration is unknown until the + // hold is removed. Note 2: This value can be provided even when temporary hold + // is set (so that the user can reason about policy without having to first + // unset the temporary hold). RetentionExpirationTime string `json:"retentionExpirationTime,omitempty"` - // SelfLink: The link to this object. SelfLink string `json:"selfLink,omitempty"` - // Size: Content-Length of the data in bytes. Size uint64 `json:"size,omitempty,string"` - - // SoftDeleteTime: The time at which the object became soft-deleted in - // RFC 3339 format. + // SoftDeleteTime: The time at which the object became soft-deleted in RFC 3339 + // format. SoftDeleteTime string `json:"softDeleteTime,omitempty"` - // StorageClass: Storage class of the object. StorageClass string `json:"storageClass,omitempty"` - - // TemporaryHold: Whether an object is under temporary hold. While this - // flag is set to true, the object is protected against deletion and - // overwrites. A common use case of this flag is regulatory - // investigations where objects need to be retained while the - // investigation is ongoing. Note that unlike event-based hold, - // temporary hold does not impact retention expiration time of an - // object. + // TemporaryHold: Whether an object is under temporary hold. 
While this flag is + // set to true, the object is protected against deletion and overwrites. A + // common use case of this flag is regulatory investigations where objects need + // to be retained while the investigation is ongoing. Note that unlike + // event-based hold, temporary hold does not impact retention expiration time + // of an object. TemporaryHold bool `json:"temporaryHold,omitempty"` - // TimeCreated: The creation time of the object in RFC 3339 format. TimeCreated string `json:"timeCreated,omitempty"` - - // TimeDeleted: The time at which the object became noncurrent in RFC - // 3339 format. Will be returned if and only if this version of the - // object has been deleted. + // TimeDeleted: The time at which the object became noncurrent in RFC 3339 + // format. Will be returned if and only if this version of the object has been + // deleted. TimeDeleted string `json:"timeDeleted,omitempty"` - - // TimeStorageClassUpdated: The time at which the object's storage class - // was last changed. When the object is initially created, it will be - // set to timeCreated. + // TimeStorageClassUpdated: The time at which the object's storage class was + // last changed. When the object is initially created, it will be set to + // timeCreated. TimeStorageClassUpdated string `json:"timeStorageClassUpdated,omitempty"` - - // Updated: The modification time of the object metadata in RFC 3339 - // format. Set initially to object creation time and then updated - // whenever any metadata of the object changes. This includes changes - // made by a requester, such as modifying custom metadata, as well as - // changes made by Cloud Storage on behalf of a requester, such as - // changing the storage class based on an Object Lifecycle - // Configuration. + // Updated: The modification time of the object metadata in RFC 3339 format. + // Set initially to object creation time and then updated whenever any metadata + // of the object changes. This includes changes made by a requester, such as + // modifying custom metadata, as well as changes made by Cloud Storage on + // behalf of a requester, such as changing the storage class based on an Object + // Lifecycle Configuration. Updated string `json:"updated,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Acl") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Acl") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Acl") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. 
- // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Acl") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *Object) MarshalJSON() ([]byte, error) { +func (s Object) MarshalJSON() ([]byte, error) { type NoMethod Object - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ObjectCustomerEncryption: Metadata of customer-supplied encryption -// key, if the object is encrypted by such a key. +// ObjectCustomerEncryption: Metadata of customer-supplied encryption key, if +// the object is encrypted by such a key. type ObjectCustomerEncryption struct { // EncryptionAlgorithm: The encryption algorithm. EncryptionAlgorithm string `json:"encryptionAlgorithm,omitempty"` - // KeySha256: SHA256 hash value of the encryption key. KeySha256 string `json:"keySha256,omitempty"` - - // ForceSendFields is a list of field names (e.g. "EncryptionAlgorithm") - // to unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "EncryptionAlgorithm") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "EncryptionAlgorithm") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "EncryptionAlgorithm") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ObjectCustomerEncryption) MarshalJSON() ([]byte, error) { +func (s ObjectCustomerEncryption) MarshalJSON() ([]byte, error) { type NoMethod ObjectCustomerEncryption - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ObjectOwner: The owner of the object. This will always be the -// uploader of the object. +// ObjectOwner: The owner of the object. This will always be the uploader of +// the object. type ObjectOwner struct { // Entity: The entity, in the form user-userId. Entity string `json:"entity,omitempty"` - // EntityId: The ID for the entity. EntityId string `json:"entityId,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Entity") to - // unconditionally include in API requests. 
By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Entity") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Entity") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ObjectOwner) MarshalJSON() ([]byte, error) { +func (s ObjectOwner) MarshalJSON() ([]byte, error) { type NoMethod ObjectOwner - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ObjectRetention: A collection of object level retention parameters. type ObjectRetention struct { - // Mode: The bucket's object retention mode, can only be Unlocked or - // Locked. + // Mode: The bucket's object retention mode, can only be Unlocked or Locked. Mode string `json:"mode,omitempty"` - - // RetainUntilTime: A time in RFC 3339 format until which object - // retention protects this object. + // RetainUntilTime: A time in RFC 3339 format until which object retention + // protects this object. RetainUntilTime string `json:"retainUntilTime,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Mode") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Mode") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Mode") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Mode") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. 
See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ObjectRetention) MarshalJSON() ([]byte, error) { +func (s ObjectRetention) MarshalJSON() ([]byte, error) { type NoMethod ObjectRetention - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ObjectAccessControl: An access-control entry. type ObjectAccessControl struct { // Bucket: The name of the bucket. Bucket string `json:"bucket,omitempty"` - // Domain: The domain associated with the entity, if any. Domain string `json:"domain,omitempty"` - // Email: The email address associated with the entity, if any. Email string `json:"email,omitempty"` - - // Entity: The entity holding the permission, in one of the following - // forms: + // Entity: The entity holding the permission, in one of the following forms: // - user-userId // - user-email // - group-groupId @@ -2790,174 +2299,132 @@ type ObjectAccessControl struct { // - To refer to all members of the Google Apps for Business domain // example.com, the entity would be domain-example.com. Entity string `json:"entity,omitempty"` - // EntityId: The ID for the entity, if any. EntityId string `json:"entityId,omitempty"` - // Etag: HTTP 1.1 Entity tag for the access-control entry. Etag string `json:"etag,omitempty"` - - // Generation: The content generation of the object, if applied to an - // object. + // Generation: The content generation of the object, if applied to an object. Generation int64 `json:"generation,omitempty,string"` - // Id: The ID of the access-control entry. Id string `json:"id,omitempty"` - - // Kind: The kind of item this is. For object access control entries, - // this is always storage#objectAccessControl. + // Kind: The kind of item this is. For object access control entries, this is + // always storage#objectAccessControl. Kind string `json:"kind,omitempty"` - // Object: The name of the object, if applied to an object. Object string `json:"object,omitempty"` - // ProjectTeam: The project team associated with the entity, if any. ProjectTeam *ObjectAccessControlProjectTeam `json:"projectTeam,omitempty"` - // Role: The access permission for the entity. Role string `json:"role,omitempty"` - // SelfLink: The link to this access-control entry. SelfLink string `json:"selfLink,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Bucket") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Bucket") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Bucket") to include in API - // requests with the JSON null value. 
By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ObjectAccessControl) MarshalJSON() ([]byte, error) { +func (s ObjectAccessControl) MarshalJSON() ([]byte, error) { type NoMethod ObjectAccessControl - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ObjectAccessControlProjectTeam: The project team associated with the -// entity, if any. +// ObjectAccessControlProjectTeam: The project team associated with the entity, +// if any. type ObjectAccessControlProjectTeam struct { // ProjectNumber: The project number. ProjectNumber string `json:"projectNumber,omitempty"` - // Team: The team. Team string `json:"team,omitempty"` - // ForceSendFields is a list of field names (e.g. "ProjectNumber") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ProjectNumber") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "ProjectNumber") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ObjectAccessControlProjectTeam) MarshalJSON() ([]byte, error) { +func (s ObjectAccessControlProjectTeam) MarshalJSON() ([]byte, error) { type NoMethod ObjectAccessControlProjectTeam - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // ObjectAccessControls: An access-control list. type ObjectAccessControls struct { // Items: The list of items. Items []*ObjectAccessControl `json:"items,omitempty"` - - // Kind: The kind of item this is. For lists of object access control - // entries, this is always storage#objectAccessControls. + // Kind: The kind of item this is. For lists of object access control entries, + // this is always storage#objectAccessControls. 
Kind string `json:"kind,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Items") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Items") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Items") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ObjectAccessControls) MarshalJSON() ([]byte, error) { +func (s ObjectAccessControls) MarshalJSON() ([]byte, error) { type NoMethod ObjectAccessControls - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Objects: A list of objects. type Objects struct { // Items: The list of items. Items []*Object `json:"items,omitempty"` - // Kind: The kind of item this is. For lists of objects, this is always // storage#objects. Kind string `json:"kind,omitempty"` - - // NextPageToken: The continuation token, used to page through large - // result sets. Provide this value in a subsequent request to return the - // next page of results. + // NextPageToken: The continuation token, used to page through large result + // sets. Provide this value in a subsequent request to return the next page of + // results. NextPageToken string `json:"nextPageToken,omitempty"` - - // Prefixes: The list of prefixes of objects matching-but-not-listed up - // to and including the requested delimiter. + // Prefixes: The list of prefixes of objects matching-but-not-listed up to and + // including the requested delimiter. Prefixes []string `json:"prefixes,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Items") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. 
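Objects is the paged list shape: NextPageToken feeds the next request and, when a delimiter is set, Prefixes plays the role of subdirectories. A sketch of the usual loop over the generated Objects.List call (bucket and prefix are invented):

package main

import (
	"context"
	"fmt"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx) // Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	pageToken := ""
	for {
		call := svc.Objects.List("my-bucket").Prefix("logs/").Delimiter("/")
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		page, err := call.Context(ctx).Do()
		if err != nil {
			log.Fatal(err)
		}
		for _, o := range page.Items {
			fmt.Println(o.Name, o.Size)
		}
		for _, p := range page.Prefixes {
			fmt.Println("prefix:", p) // matching-but-not-listed names
		}
		if page.NextPageToken == "" {
			break
		}
		pageToken = page.NextPageToken
	}
}

The generated list calls also expose a Pages helper that drives the same token loop internally.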
+ // ForceSendFields is a list of field names (e.g. "Items") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Items") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *Objects) MarshalJSON() ([]byte, error) { +func (s Objects) MarshalJSON() ([]byte, error) { type NoMethod Objects - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // Policy: A bucket/object/managedFolder IAM policy. @@ -2965,231 +2432,182 @@ type Policy struct { // Bindings: An association between a role, which comes with a set of // permissions, and members who may assume that role. Bindings []*PolicyBindings `json:"bindings,omitempty"` - // Etag: HTTP 1.1 Entity tag for the policy. Etag string `json:"etag,omitempty"` - - // Kind: The kind of item this is. For policies, this is always - // storage#policy. This field is ignored on input. + // Kind: The kind of item this is. For policies, this is always storage#policy. + // This field is ignored on input. Kind string `json:"kind,omitempty"` - - // ResourceId: The ID of the resource to which this policy belongs. Will - // be of the form projects/_/buckets/bucket for buckets, + // ResourceId: The ID of the resource to which this policy belongs. Will be of + // the form projects/_/buckets/bucket for buckets, // projects/_/buckets/bucket/objects/object for objects, and // projects/_/buckets/bucket/managedFolders/managedFolder. A specific - // generation may be specified by appending #generationNumber to the end - // of the object name, e.g. - // projects/_/buckets/my-bucket/objects/data.txt#17. The current - // generation can be denoted with #0. This field is ignored on input. + // generation may be specified by appending #generationNumber to the end of the + // object name, e.g. projects/_/buckets/my-bucket/objects/data.txt#17. The + // current generation can be denoted with #0. This field is ignored on input. ResourceId string `json:"resourceId,omitempty"` - // Version: The IAM policy format version. Version int64 `json:"version,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Bindings") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. 
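Policy, together with the PolicyBindings documented just below, is what the bucket-level GetIamPolicy/SetIamPolicy calls round-trip. A hedged sketch of appending a binding (bucket name invented); the Etag carried inside the fetched policy is what lets the server reject a concurrent modification on write:

package main

import (
	"context"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx) // Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	policy, err := svc.Buckets.GetIamPolicy("my-bucket").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	policy.Bindings = append(policy.Bindings, &storage.PolicyBindings{
		Role:    "roles/storage.objectViewer", // a new-style IAM role, see the list below
		Members: []string{"allUsers"},
	})
	if _, err := svc.Buckets.SetIamPolicy("my-bucket", policy).Context(ctx).Do(); err != nil {
		log.Fatal(err)
	}
}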
+ // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Bindings") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Bindings") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *Policy) MarshalJSON() ([]byte, error) { +func (s Policy) MarshalJSON() ([]byte, error) { type NoMethod Policy - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } type PolicyBindings struct { - // Condition: The condition that is associated with this binding. NOTE: - // an unsatisfied condition will not allow user access via current - // binding. Different bindings, including their conditions, are examined - // independently. + // Condition: The condition that is associated with this binding. NOTE: an + // unsatisfied condition will not allow user access via current binding. + // Different bindings, including their conditions, are examined independently. Condition *Expr `json:"condition,omitempty"` - - // Members: A collection of identifiers for members who may assume the - // provided role. Recognized identifiers are as follows: - // - allUsers — A special identifier that represents anyone on the - // internet; with or without a Google account. - // - allAuthenticatedUsers — A special identifier that represents - // anyone who is authenticated with a Google account or a service - // account. - // - user:emailid — An email address that represents a specific - // account. For example, user:alice@gmail.com or user:joe@example.com. - // - // - serviceAccount:emailid — An email address that represents a - // service account. For example, + // Members: A collection of identifiers for members who may assume the provided + // role. Recognized identifiers are as follows: + // - allUsers — A special identifier that represents anyone on the internet; + // with or without a Google account. + // - allAuthenticatedUsers — A special identifier that represents anyone who + // is authenticated with a Google account or a service account. + // - user:emailid — An email address that represents a specific account. For + // example, user:alice@gmail.com or user:joe@example.com. + // - serviceAccount:emailid — An email address that represents a service + // account. For example, // serviceAccount:my-other-app@appspot.gserviceaccount.com . - // - group:emailid — An email address that represents a Google group. - // For example, group:admins@example.com. - // - domain:domain — A Google Apps domain name that represents all the - // users of that domain. For example, domain:google.com or - // domain:example.com. - // - projectOwner:projectid — Owners of the given project. 
For - // example, projectOwner:my-example-project - // - projectEditor:projectid — Editors of the given project. For - // example, projectEditor:my-example-project - // - projectViewer:projectid — Viewers of the given project. For - // example, projectViewer:my-example-project + // - group:emailid — An email address that represents a Google group. For + // example, group:admins@example.com. + // - domain:domain — A Google Apps domain name that represents all the users + // of that domain. For example, domain:google.com or domain:example.com. + // - projectOwner:projectid — Owners of the given project. For example, + // projectOwner:my-example-project + // - projectEditor:projectid — Editors of the given project. For example, + // projectEditor:my-example-project + // - projectViewer:projectid — Viewers of the given project. For example, + // projectViewer:my-example-project Members []string `json:"members,omitempty"` - - // Role: The role to which members belong. Two types of roles are - // supported: new IAM roles, which grant permissions that do not map - // directly to those provided by ACLs, and legacy IAM roles, which do - // map directly to ACL permissions. All roles are of the format - // roles/storage.specificRole. + // Role: The role to which members belong. Two types of roles are supported: + // new IAM roles, which grant permissions that do not map directly to those + // provided by ACLs, and legacy IAM roles, which do map directly to ACL + // permissions. All roles are of the format roles/storage.specificRole. // The new IAM roles are: - // - roles/storage.admin — Full control of Google Cloud Storage - // resources. - // - roles/storage.objectViewer — Read-Only access to Google Cloud - // Storage objects. - // - roles/storage.objectCreator — Access to create objects in Google - // Cloud Storage. + // - roles/storage.admin — Full control of Google Cloud Storage resources. + // + // - roles/storage.objectViewer — Read-Only access to Google Cloud Storage + // objects. + // - roles/storage.objectCreator — Access to create objects in Google Cloud + // Storage. // - roles/storage.objectAdmin — Full control of Google Cloud Storage // objects. The legacy IAM roles are: - // - roles/storage.legacyObjectReader — Read-only access to objects - // without listing. Equivalent to an ACL entry on an object with the - // READER role. - // - roles/storage.legacyObjectOwner — Read/write access to existing - // objects without listing. Equivalent to an ACL entry on an object with - // the OWNER role. - // - roles/storage.legacyBucketReader — Read access to buckets with - // object listing. Equivalent to an ACL entry on a bucket with the - // READER role. - // - roles/storage.legacyBucketWriter — Read access to buckets with - // object listing/creation/deletion. Equivalent to an ACL entry on a - // bucket with the WRITER role. - // - roles/storage.legacyBucketOwner — Read and write access to - // existing buckets with object listing/creation/deletion. Equivalent to - // an ACL entry on a bucket with the OWNER role. + // - roles/storage.legacyObjectReader — Read-only access to objects without + // listing. Equivalent to an ACL entry on an object with the READER role. + // - roles/storage.legacyObjectOwner — Read/write access to existing objects + // without listing. Equivalent to an ACL entry on an object with the OWNER + // role. + // - roles/storage.legacyBucketReader — Read access to buckets with object + // listing. Equivalent to an ACL entry on a bucket with the READER role. 
+ // - roles/storage.legacyBucketWriter — Read access to buckets with object + // listing/creation/deletion. Equivalent to an ACL entry on a bucket with the + // WRITER role. + // - roles/storage.legacyBucketOwner — Read and write access to existing + // buckets with object listing/creation/deletion. Equivalent to an ACL entry on + // a bucket with the OWNER role. Role string `json:"role,omitempty"` - // ForceSendFields is a list of field names (e.g. "Condition") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Condition") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Condition") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *PolicyBindings) MarshalJSON() ([]byte, error) { +func (s PolicyBindings) MarshalJSON() ([]byte, error) { type NoMethod PolicyBindings - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // RewriteResponse: A rewrite response. type RewriteResponse struct { - // Done: true if the copy is finished; otherwise, false if the copy is - // in progress. This property is always present in the response. + // Done: true if the copy is finished; otherwise, false if the copy is in + // progress. This property is always present in the response. Done bool `json:"done,omitempty"` - // Kind: The kind of item this is. Kind string `json:"kind,omitempty"` - // ObjectSize: The total size of the object being copied in bytes. This // property is always present in the response. ObjectSize int64 `json:"objectSize,omitempty,string"` - - // Resource: A resource containing the metadata for the copied-to - // object. This property is present in the response only when copying - // completes. + // Resource: A resource containing the metadata for the copied-to object. This + // property is present in the response only when copying completes. Resource *Object `json:"resource,omitempty"` - - // RewriteToken: A token to use in subsequent requests to continue - // copying data. This token is present in the response only when there - // is more data to copy. + // RewriteToken: A token to use in subsequent requests to continue copying + // data. This token is present in the response only when there is more data to + // copy. 
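The Done/RewriteToken pair documented in this hunk is a resumption protocol: the caller keeps re-issuing the rewrite call, feeding each returned token back in, until the service reports Done. A hypothetical sketch against this package, not part of the diff (bucket and object names are placeholders; the empty destination *storage.Object is just for brevity):

package storageexample

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// rewriteObject loops on Objects.Rewrite until the copy completes,
// resuming from the token returned by the previous call.
func rewriteObject(ctx context.Context, svc *storage.Service) error {
	var token string
	for {
		call := svc.Objects.Rewrite("src-bucket", "src.bin", "dst-bucket", "dst.bin",
			&storage.Object{}).Context(ctx) // empty destination metadata; placeholder names
		if token != "" {
			call = call.RewriteToken(token) // resume where the previous call stopped
		}
		resp, err := call.Do()
		if err != nil {
			return err
		}
		if resp.Done {
			return nil // TotalBytesRewritten now matches the object size
		}
		token = resp.RewriteToken // present only while more data remains to copy
	}
}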
RewriteToken string `json:"rewriteToken,omitempty"` - - // TotalBytesRewritten: The total bytes written so far, which can be - // used to provide a waiting user with a progress indicator. This - // property is always present in the response. + // TotalBytesRewritten: The total bytes written so far, which can be used to + // provide a waiting user with a progress indicator. This property is always + // present in the response. TotalBytesRewritten int64 `json:"totalBytesRewritten,omitempty,string"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Done") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. "Done") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Done") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Done") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *RewriteResponse) MarshalJSON() ([]byte, error) { +func (s RewriteResponse) MarshalJSON() ([]byte, error) { type NoMethod RewriteResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// ServiceAccount: A subscription to receive Google PubSub -// notifications. +// ServiceAccount: A subscription to receive Google PubSub notifications. type ServiceAccount struct { // EmailAddress: The ID of the notification. EmailAddress string `json:"email_address,omitempty"` - // Kind: The kind of item this is. For notifications, this is always // storage#notification. Kind string `json:"kind,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "EmailAddress") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. 
+ // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "EmailAddress") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "EmailAddress") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *ServiceAccount) MarshalJSON() ([]byte, error) { +func (s ServiceAccount) MarshalJSON() ([]byte, error) { type NoMethod ServiceAccount - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } // TestIamPermissionsResponse: A @@ -3197,11 +2615,9 @@ func (s *ServiceAccount) MarshalJSON() ([]byte, error) { type TestIamPermissionsResponse struct { // Kind: The kind of item this is. Kind string `json:"kind,omitempty"` - - // Permissions: The permissions held by the caller. Permissions are - // always of the format storage.resource.capability, where resource is - // one of buckets, objects, or managedFolders. The supported permissions - // are as follows: + // Permissions: The permissions held by the caller. Permissions are always of + // the format storage.resource.capability, where resource is one of buckets, + // objects, or managedFolders. The supported permissions are as follows: // - storage.buckets.delete — Delete bucket. // - storage.buckets.get — Read bucket metadata. // - storage.buckets.getIamPolicy — Read bucket IAM policy. @@ -3218,43 +2634,33 @@ type TestIamPermissionsResponse struct { // - storage.objects.update — Update object metadata. // - storage.managedFolders.delete — Delete managed folder. // - storage.managedFolders.get — Read managed folder metadata. - // - storage.managedFolders.getIamPolicy — Read managed folder IAM - // policy. + // - storage.managedFolders.getIamPolicy — Read managed folder IAM policy. + // // - storage.managedFolders.create — Create managed folder. // - storage.managedFolders.list — List managed folders. - // - storage.managedFolders.setIamPolicy — Update managed folder IAM - // policy. + // - storage.managedFolders.setIamPolicy — Update managed folder IAM policy. Permissions []string `json:"permissions,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. + // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Kind") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. 
+ // ForceSendFields is a list of field names (e.g. "Kind") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "Kind") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } -func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { +func (s TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// method id "storage.anywhereCaches.disable": - type AnywhereCachesDisableCall struct { s *Service bucket string @@ -3276,23 +2682,21 @@ func (r *AnywhereCachesService) Disable(bucket string, anywhereCacheId string) * } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *AnywhereCachesDisableCall) Fields(s ...googleapi.Field) *AnywhereCachesDisableCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *AnywhereCachesDisableCall) Context(ctx context.Context) *AnywhereCachesDisableCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *AnywhereCachesDisableCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -3301,12 +2705,7 @@ func (c *AnywhereCachesDisableCall) Header() http.Header { } func (c *AnywhereCachesDisableCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") @@ -3325,12 +2724,10 @@ func (c *AnywhereCachesDisableCall) doRequest(alt string) (*http.Response, error } // Do executes the "storage.anywhereCaches.disable" call. -// Exactly one of *AnywhereCache or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *AnywhereCache.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *AnywhereCache.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *AnywhereCachesDisableCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -3361,43 +2758,8 @@ func (c *AnywhereCachesDisableCall) Do(opts ...googleapi.CallOption) (*AnywhereC return nil, err } return ret, nil - // { - // "description": "Disables an Anywhere Cache instance.", - // "httpMethod": "POST", - // "id": "storage.anywhereCaches.disable", - // "parameterOrder": [ - // "bucket", - // "anywhereCacheId" - // ], - // "parameters": { - // "anywhereCacheId": { - // "description": "The ID of requested Anywhere Cache instance.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "bucket": { - // "description": "Name of the parent bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}/disable", - // "response": { - // "$ref": "AnywhereCache" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.anywhereCaches.get": - type AnywhereCachesGetCall struct { s *Service bucket string @@ -3420,33 +2782,29 @@ func (r *AnywhereCachesService) Get(bucket string, anywhereCacheId string) *Anyw } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *AnywhereCachesGetCall) Fields(s ...googleapi.Field) *AnywhereCachesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. func (c *AnywhereCachesGetCall) IfNoneMatch(entityTag string) *AnywhereCachesGetCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *AnywhereCachesGetCall) Context(ctx context.Context) *AnywhereCachesGetCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
+// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *AnywhereCachesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -3455,12 +2813,7 @@ func (c *AnywhereCachesGetCall) Header() http.Header { } func (c *AnywhereCachesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3482,12 +2835,10 @@ func (c *AnywhereCachesGetCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.anywhereCaches.get" call. -// Exactly one of *AnywhereCache or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *AnywhereCache.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *AnywhereCache.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *AnywhereCachesGetCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -3518,45 +2869,8 @@ func (c *AnywhereCachesGetCall) Do(opts ...googleapi.CallOption) (*AnywhereCache return nil, err } return ret, nil - // { - // "description": "Returns the metadata of an Anywhere Cache instance.", - // "httpMethod": "GET", - // "id": "storage.anywhereCaches.get", - // "parameterOrder": [ - // "bucket", - // "anywhereCacheId" - // ], - // "parameters": { - // "anywhereCacheId": { - // "description": "The ID of requested Anywhere Cache instance.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "bucket": { - // "description": "Name of the parent bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}", - // "response": { - // "$ref": "AnywhereCache" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.anywhereCaches.insert": - type AnywhereCachesInsertCall struct { s *Service bucket string @@ -3577,23 +2891,21 @@ func (r *AnywhereCachesService) Insert(bucket string, anywherecache *AnywhereCac } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. 
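The condensed Fields comment above keeps the important part: the variadic googleapi.Field selector requests a partial response, so the server returns only the named sub-fields. A hypothetical sketch, not part of the diff (the items/anywhereCacheId path follows the standard partial-response syntax; names are placeholders):

package storageexample

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// listCacheIDs fetches only the cache IDs and the paging token instead of
// full AnywhereCache resources.
func listCacheIDs(ctx context.Context, svc *storage.Service, bucket string) ([]string, error) {
	resp, err := svc.AnywhereCaches.List(bucket).
		Fields("items/anywhereCacheId", "nextPageToken"). // partial-response selector
		Context(ctx).
		Do()
	if err != nil {
		return nil, err
	}
	ids := make([]string, 0, len(resp.Items))
	for _, c := range resp.Items {
		ids = append(ids, c.AnywhereCacheId)
	}
	return ids, nil
}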
func (c *AnywhereCachesInsertCall) Fields(s ...googleapi.Field) *AnywhereCachesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *AnywhereCachesInsertCall) Context(ctx context.Context) *AnywhereCachesInsertCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *AnywhereCachesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -3602,18 +2914,12 @@ func (c *AnywhereCachesInsertCall) Header() http.Header { } func (c *AnywhereCachesInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.anywherecache) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches") @@ -3630,12 +2936,11 @@ func (c *AnywhereCachesInsertCall) doRequest(alt string) (*http.Response, error) } // Do executes the "storage.anywhereCaches.insert" call. -// Exactly one of *GoogleLongrunningOperation or error will be non-nil. // Any non-2xx status code is an error. Response headers are in either -// *GoogleLongrunningOperation.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. func (c *AnywhereCachesInsertCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") @@ -3666,39 +2971,8 @@ func (c *AnywhereCachesInsertCall) Do(opts ...googleapi.CallOption) (*GoogleLong return nil, err } return ret, nil - // { - // "description": "Creates an Anywhere Cache instance.", - // "httpMethod": "POST", - // "id": "storage.anywhereCaches.insert", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the parent bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/anywhereCaches", - // "request": { - // "$ref": "AnywhereCache" - // }, - // "response": { - // "$ref": "GoogleLongrunningOperation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.anywhereCaches.list": - type AnywhereCachesListCall struct { s *Service bucket string @@ -3708,8 +2982,8 @@ type AnywhereCachesListCall struct { header_ http.Header } -// List: Returns a list of Anywhere Cache instances of the bucket -// matching the criteria. +// List: Returns a list of Anywhere Cache instances of the bucket matching the +// criteria. // // - bucket: Name of the parent bucket. func (r *AnywhereCachesService) List(bucket string) *AnywhereCachesListCall { @@ -3718,49 +2992,44 @@ func (r *AnywhereCachesService) List(bucket string) *AnywhereCachesListCall { return c } -// PageSize sets the optional parameter "pageSize": Maximum number of -// items to return in a single page of responses. Maximum 1000. +// PageSize sets the optional parameter "pageSize": Maximum number of items to +// return in a single page of responses. Maximum 1000. func (c *AnywhereCachesListCall) PageSize(pageSize int64) *AnywhereCachesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } -// PageToken sets the optional parameter "pageToken": A -// previously-returned page token representing part of the larger set of -// results to view. +// PageToken sets the optional parameter "pageToken": A previously-returned +// page token representing part of the larger set of results to view. func (c *AnywhereCachesListCall) PageToken(pageToken string) *AnywhereCachesListCall { c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *AnywhereCachesListCall) Fields(s ...googleapi.Field) *AnywhereCachesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. func (c *AnywhereCachesListCall) IfNoneMatch(entityTag string) *AnywhereCachesListCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. 
Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *AnywhereCachesListCall) Context(ctx context.Context) *AnywhereCachesListCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *AnywhereCachesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -3769,12 +3038,7 @@ func (c *AnywhereCachesListCall) Header() http.Header { } func (c *AnywhereCachesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -3795,12 +3059,10 @@ func (c *AnywhereCachesListCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.anywhereCaches.list" call. -// Exactly one of *AnywhereCaches or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *AnywhereCaches.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *AnywhereCaches.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *AnywhereCachesListCall) Do(opts ...googleapi.CallOption) (*AnywhereCaches, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -3831,46 +3093,6 @@ func (c *AnywhereCachesListCall) Do(opts ...googleapi.CallOption) (*AnywhereCach return nil, err } return ret, nil - // { - // "description": "Returns a list of Anywhere Cache instances of the bucket matching the criteria.", - // "httpMethod": "GET", - // "id": "storage.anywhereCaches.list", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the parent bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "pageSize": { - // "description": "Maximum number of items to return in a single page of responses. 
Maximum 1000.", - // "format": "int32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "A previously-returned page token representing part of the larger set of results to view.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/anywhereCaches", - // "response": { - // "$ref": "AnywhereCaches" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } // Pages invokes f for each page of results. @@ -3878,7 +3100,7 @@ func (c *AnywhereCachesListCall) Do(opts ...googleapi.CallOption) (*AnywhereCach // The provided context supersedes any context provided to the Context method. func (c *AnywhereCachesListCall) Pages(ctx context.Context, f func(*AnywhereCaches) error) error { c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + defer c.PageToken(c.urlParams_.Get("pageToken")) for { x, err := c.Do() if err != nil { @@ -3894,8 +3116,6 @@ func (c *AnywhereCachesListCall) Pages(ctx context.Context, f func(*AnywhereCach } } -// method id "storage.anywhereCaches.pause": - type AnywhereCachesPauseCall struct { s *Service bucket string @@ -3917,23 +3137,21 @@ func (r *AnywhereCachesService) Pause(bucket string, anywhereCacheId string) *An } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *AnywhereCachesPauseCall) Fields(s ...googleapi.Field) *AnywhereCachesPauseCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *AnywhereCachesPauseCall) Context(ctx context.Context) *AnywhereCachesPauseCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *AnywhereCachesPauseCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -3942,12 +3160,7 @@ func (c *AnywhereCachesPauseCall) Header() http.Header { } func (c *AnywhereCachesPauseCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") @@ -3966,12 +3179,10 @@ func (c *AnywhereCachesPauseCall) doRequest(alt string) (*http.Response, error) } // Do executes the "storage.anywhereCaches.pause" call. -// Exactly one of *AnywhereCache or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *AnywhereCache.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *AnywhereCache.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *AnywhereCachesPauseCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -4002,43 +3213,8 @@ func (c *AnywhereCachesPauseCall) Do(opts ...googleapi.CallOption) (*AnywhereCac return nil, err } return ret, nil - // { - // "description": "Pauses an Anywhere Cache instance.", - // "httpMethod": "POST", - // "id": "storage.anywhereCaches.pause", - // "parameterOrder": [ - // "bucket", - // "anywhereCacheId" - // ], - // "parameters": { - // "anywhereCacheId": { - // "description": "The ID of requested Anywhere Cache instance.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "bucket": { - // "description": "Name of the parent bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}/pause", - // "response": { - // "$ref": "AnywhereCache" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.anywhereCaches.resume": - type AnywhereCachesResumeCall struct { s *Service bucket string @@ -4060,23 +3236,21 @@ func (r *AnywhereCachesService) Resume(bucket string, anywhereCacheId string) *A } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *AnywhereCachesResumeCall) Fields(s ...googleapi.Field) *AnywhereCachesResumeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *AnywhereCachesResumeCall) Context(ctx context.Context) *AnywhereCachesResumeCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
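Context and Header, whose trimmed doc comments recur throughout this hunk, compose before Do: the context bounds the underlying HTTP request, and Header hands back the live header map. A small hypothetical sketch on the Resume call, not part of the diff (the header name is purely illustrative):

package storageexample

import (
	"context"
	"time"

	storage "google.golang.org/api/storage/v1"
)

// resumeCache resumes an Anywhere Cache with a deadline and one extra
// request header attached.
func resumeCache(svc *storage.Service, bucket, cacheID string) (*storage.AnywhereCache, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	call := svc.AnywhereCaches.Resume(bucket, cacheID)
	call.Header().Set("X-Example-Trace", "demo") // illustrative custom header
	return call.Context(ctx).Do()                // cancellation aborts the pending request
}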
func (c *AnywhereCachesResumeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -4085,12 +3259,7 @@ func (c *AnywhereCachesResumeCall) Header() http.Header { } func (c *AnywhereCachesResumeCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") @@ -4109,12 +3278,10 @@ func (c *AnywhereCachesResumeCall) doRequest(alt string) (*http.Response, error) } // Do executes the "storage.anywhereCaches.resume" call. -// Exactly one of *AnywhereCache or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *AnywhereCache.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *AnywhereCache.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *AnywhereCachesResumeCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -4145,43 +3312,8 @@ func (c *AnywhereCachesResumeCall) Do(opts ...googleapi.CallOption) (*AnywhereCa return nil, err } return ret, nil - // { - // "description": "Resumes a paused or disabled Anywhere Cache instance.", - // "httpMethod": "POST", - // "id": "storage.anywhereCaches.resume", - // "parameterOrder": [ - // "bucket", - // "anywhereCacheId" - // ], - // "parameters": { - // "anywhereCacheId": { - // "description": "The ID of requested Anywhere Cache instance.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "bucket": { - // "description": "Name of the parent bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}/resume", - // "response": { - // "$ref": "AnywhereCache" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.anywhereCaches.update": - type AnywhereCachesUpdateCall struct { s *Service bucket string @@ -4192,8 +3324,8 @@ type AnywhereCachesUpdateCall struct { header_ http.Header } -// Update: Updates the config(ttl and admissionPolicy) of an Anywhere -// Cache instance. +// Update: Updates the config(ttl and admissionPolicy) of an Anywhere Cache +// instance. // // - anywhereCacheId: The ID of requested Anywhere Cache instance. // - bucket: Name of the parent bucket. @@ -4206,23 +3338,21 @@ func (r *AnywhereCachesService) Update(bucket string, anywhereCacheId string, an } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *AnywhereCachesUpdateCall) Fields(s ...googleapi.Field) *AnywhereCachesUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *AnywhereCachesUpdateCall) Context(ctx context.Context) *AnywhereCachesUpdateCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *AnywhereCachesUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -4231,18 +3361,12 @@ func (c *AnywhereCachesUpdateCall) Header() http.Header { } func (c *AnywhereCachesUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.anywherecache) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}") @@ -4260,12 +3384,11 @@ func (c *AnywhereCachesUpdateCall) doRequest(alt string) (*http.Response, error) } // Do executes the "storage.anywhereCaches.update" call. -// Exactly one of *GoogleLongrunningOperation or error will be non-nil. // Any non-2xx status code is an error. Response headers are in either -// *GoogleLongrunningOperation.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. func (c *AnywhereCachesUpdateCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") @@ -4296,46 +3419,8 @@ func (c *AnywhereCachesUpdateCall) Do(opts ...googleapi.CallOption) (*GoogleLong return nil, err } return ret, nil - // { - // "description": "Updates the config(ttl and admissionPolicy) of an Anywhere Cache instance.", - // "httpMethod": "PATCH", - // "id": "storage.anywhereCaches.update", - // "parameterOrder": [ - // "bucket", - // "anywhereCacheId" - // ], - // "parameters": { - // "anywhereCacheId": { - // "description": "The ID of requested Anywhere Cache instance.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "bucket": { - // "description": "Name of the parent bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}", - // "request": { - // "$ref": "AnywhereCache" - // }, - // "response": { - // "$ref": "GoogleLongrunningOperation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.bucketAccessControls.delete": - type BucketAccessControlsDeleteCall struct { s *Service bucket string @@ -4345,8 +3430,8 @@ type BucketAccessControlsDeleteCall struct { header_ http.Header } -// Delete: Permanently deletes the ACL entry for the specified entity on -// the specified bucket. +// Delete: Permanently deletes the ACL entry for the specified entity on the +// specified bucket. // // - bucket: Name of a bucket. // - entity: The entity holding the permission. Can be user-userId, @@ -4359,31 +3444,29 @@ func (r *BucketAccessControlsService) Delete(bucket string, entity string) *Buck return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *BucketAccessControlsDeleteCall) UserProject(userProject string) *BucketAccessControlsDeleteCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAccessControlsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *BucketAccessControlsDeleteCall) Context(ctx context.Context) *BucketAccessControlsDeleteCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
func (c *BucketAccessControlsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -4392,12 +3475,7 @@ func (c *BucketAccessControlsDeleteCall) Header() http.Header { } func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") @@ -4427,44 +3505,8 @@ func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error return gensupport.WrapError(err) } return nil - // { - // "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.", - // "httpMethod": "DELETE", - // "id": "storage.bucketAccessControls.delete", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/acl/{entity}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.bucketAccessControls.get": - type BucketAccessControlsGetCall struct { s *Service bucket string @@ -4475,8 +3517,7 @@ type BucketAccessControlsGetCall struct { header_ http.Header } -// Get: Returns the ACL entry for the specified entity on the specified -// bucket. +// Get: Returns the ACL entry for the specified entity on the specified bucket. // // - bucket: Name of a bucket. // - entity: The entity holding the permission. Can be user-userId, @@ -4489,41 +3530,37 @@ func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketA return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *BucketAccessControlsGetCall) UserProject(userProject string) *BucketAccessControlsGetCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccessControlsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. func (c *BucketAccessControlsGetCall) IfNoneMatch(entityTag string) *BucketAccessControlsGetCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *BucketAccessControlsGetCall) Context(ctx context.Context) *BucketAccessControlsGetCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *BucketAccessControlsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -4532,12 +3569,7 @@ func (c *BucketAccessControlsGetCall) Header() http.Header { } func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4559,12 +3591,11 @@ func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, err } // Do executes the "storage.bucketAccessControls.get" call. -// Exactly one of *BucketAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -4595,47 +3626,8 @@ func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketA return nil, err } return ret, nil - // { - // "description": "Returns the ACL entry for the specified entity on the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.bucketAccessControls.get", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/acl/{entity}", - // "response": { - // "$ref": "BucketAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.bucketAccessControls.insert": - type BucketAccessControlsInsertCall struct { s *Service bucket string @@ -4655,31 +3647,29 @@ func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *BucketAccessControlsInsertCall) UserProject(userProject string) *BucketAccessControlsInsertCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAccessControlsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *BucketAccessControlsInsertCall) Context(ctx context.Context) *BucketAccessControlsInsertCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *BucketAccessControlsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -4688,18 +3678,12 @@ func (c *BucketAccessControlsInsertCall) Header() http.Header { } func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") @@ -4716,12 +3700,11 @@ func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, } // Do executes the "storage.bucketAccessControls.insert" call. -// Exactly one of *BucketAccessControl or error will be non-nil. Any -// non-2xx status code is an error. 
Response headers are in either -// *BucketAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -4752,43 +3735,8 @@ func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Buck return nil, err } return ret, nil - // { - // "description": "Creates a new ACL entry on the specified bucket.", - // "httpMethod": "POST", - // "id": "storage.bucketAccessControls.insert", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/acl", - // "request": { - // "$ref": "BucketAccessControl" - // }, - // "response": { - // "$ref": "BucketAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.bucketAccessControls.list": - type BucketAccessControlsListCall struct { s *Service bucket string @@ -4807,41 +3755,37 @@ func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsL return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *BucketAccessControlsListCall) UserProject(userProject string) *BucketAccessControlsListCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAccessControlsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. 
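The shortened IfNoneMatch comment (the old wording, dropped in this hunk, also carried the "In-None-Match" typo) still describes a conditional read: send the ETag you hold and let an HTTP 304 confirm the cached copy. A hypothetical sketch, not part of the diff, using the equivalent setter on the Get call, with googleapi.IsNotModified separating "not modified" from real errors:

package storageexample

import (
	"context"

	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

// refreshACL re-fetches an ACL entry only if it changed since we cached it.
func refreshACL(ctx context.Context, svc *storage.Service, cached *storage.BucketAccessControl) (*storage.BucketAccessControl, error) {
	fresh, err := svc.BucketAccessControls.
		Get(cached.Bucket, cached.Entity).
		IfNoneMatch(cached.Etag). // sends If-None-Match with the stored ETag
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return cached, nil // HTTP 304: the cached entry is still current
	}
	if err != nil {
		return nil, err
	}
	return fresh, nil
}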
func (c *BucketAccessControlsListCall) IfNoneMatch(entityTag string) *BucketAccessControlsListCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *BucketAccessControlsListCall) Context(ctx context.Context) *BucketAccessControlsListCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *BucketAccessControlsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -4850,12 +3794,7 @@ func (c *BucketAccessControlsListCall) Header() http.Header { } func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -4876,12 +3815,11 @@ func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, er } // Do executes the "storage.bucketAccessControls.list" call. -// Exactly one of *BucketAccessControls or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControls.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *BucketAccessControls.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*BucketAccessControls, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -4912,40 +3850,8 @@ func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Bucket return nil, err } return ret, nil - // { - // "description": "Retrieves ACL entries on the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.bucketAccessControls.list", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/acl", - // "response": { - // "$ref": "BucketAccessControls" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.bucketAccessControls.patch": - type BucketAccessControlsPatchCall struct { s *Service bucket string @@ -4970,31 +3876,29 @@ func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucket return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *BucketAccessControlsPatchCall) UserProject(userProject string) *BucketAccessControlsPatchCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAccessControlsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *BucketAccessControlsPatchCall) Context(ctx context.Context) *BucketAccessControlsPatchCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *BucketAccessControlsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -5003,18 +3907,12 @@ func (c *BucketAccessControlsPatchCall) Header() http.Header { } func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") @@ -5032,12 +3930,11 @@ func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, e } // Do executes the "storage.bucketAccessControls.patch" call. -// Exactly one of *BucketAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. 
Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -5068,50 +3965,8 @@ func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Bucke return nil, err } return ret, nil - // { - // "description": "Patches an ACL entry on the specified bucket.", - // "httpMethod": "PATCH", - // "id": "storage.bucketAccessControls.patch", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/acl/{entity}", - // "request": { - // "$ref": "BucketAccessControl" - // }, - // "response": { - // "$ref": "BucketAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.bucketAccessControls.update": - type BucketAccessControlsUpdateCall struct { s *Service bucket string @@ -5136,31 +3991,29 @@ func (r *BucketAccessControlsService) Update(bucket string, entity string, bucke return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *BucketAccessControlsUpdateCall) UserProject(userProject string) *BucketAccessControlsUpdateCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) *BucketAccessControlsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *BucketAccessControlsUpdateCall) Context(ctx context.Context) *BucketAccessControlsUpdateCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
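Reviewer note: Header(), whose regenerated comment ends just above, is the per-call escape hatch for caller-supplied HTTP headers; whatever goes into the map is merged onto the outgoing request when doRequest runs (now via gensupport.SetHeaders). A sketch against the Update call that follows; the header name is purely illustrative, not something this library defines:

// tagACLUpdate updates one ACL entry with an extra request header attached to
// just this call; headers set this way ride along when Do builds the request.
func tagACLUpdate(ctx context.Context, svc *storage.Service, bucket string,
	acl *storage.BucketAccessControl) (*storage.BucketAccessControl, error) {
	call := svc.BucketAccessControls.Update(bucket, "allUsers", acl)
	call.Header().Set("X-Example-Request-Tag", "acl-backfill") // illustrative name
	return call.Context(ctx).Do()
}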
func (c *BucketAccessControlsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -5169,18 +4022,12 @@ func (c *BucketAccessControlsUpdateCall) Header() http.Header { } func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") @@ -5198,12 +4045,11 @@ func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, } // Do executes the "storage.bucketAccessControls.update" call. -// Exactly one of *BucketAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -5234,50 +4080,8 @@ func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Buck return nil, err } return ret, nil - // { - // "description": "Updates an ACL entry on the specified bucket.", - // "httpMethod": "PUT", - // "id": "storage.bucketAccessControls.update", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/acl/{entity}", - // "request": { - // "$ref": "BucketAccessControl" - // }, - // "response": { - // "$ref": "BucketAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.buckets.delete": - type BucketsDeleteCall struct { s *Service bucket string @@ -5295,9 +4099,8 @@ func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall { return c } -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": If set, only deletes the bucket if its -// metageneration matches this value. +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// If set, only deletes the bucket if its metageneration matches this value. func (c *BucketsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsDeleteCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c @@ -5311,31 +4114,29 @@ func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *BucketsDeleteCall) UserProject(userProject string) *BucketsDeleteCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *BucketsDeleteCall) Context(ctx context.Context) *BucketsDeleteCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
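Reviewer note: the doRequest hunk below shows the change this file repeats for every call: the hand-rolled header block (build an http.Header, set x-goog-api-client, copy c.header_, set User-Agent, and for calls with a body set Content-Type) collapses into one gensupport.SetHeaders call, with "" as the content type for body-less requests like this DELETE. gensupport is internal to google.golang.org/api, so none of this is caller-visible; as a reading aid only, here is a stand-in reconstructed from the deleted lines (version strings stubbed out; the real implementation may differ, and net/http is the only import needed):

// setHeaders mirrors what the deleted per-call code did and what
// gensupport.SetHeaders is being handed: base headers first, then the
// caller's per-call headers, then Content-Type only when a body exists.
// This is a reconstruction from the removed lines, not the library source.
func setHeaders(userAgent, contentType string, extra http.Header) http.Header {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/<goversion> gdcl/<libversion>") // stubbed
	for k, v := range extra {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", userAgent)
	if contentType != "" {
		reqHeaders.Set("Content-Type", contentType) // "" means no request body
	}
	return reqHeaders
}

Note the order matches the deleted code: caller headers are copied before User-Agent is set, so the library's User-Agent wins.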
func (c *BucketsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -5344,12 +4145,7 @@ func (c *BucketsDeleteCall) Header() http.Header { } func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") @@ -5378,50 +4174,8 @@ func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { return gensupport.WrapError(err) } return nil - // { - // "description": "Permanently deletes an empty bucket.", - // "httpMethod": "DELETE", - // "id": "storage.buckets.delete", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "If set, only deletes the bucket if its metageneration matches this value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "If set, only deletes the bucket if its metageneration does not match this value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.buckets.get": - type BucketsGetCall struct { s *Service bucket string @@ -5440,10 +4194,9 @@ func (r *BucketsService) Get(bucket string) *BucketsGetCall { return c } -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration matches -// the given value. +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the return of the bucket metadata conditional on whether the bucket's +// current metageneration matches the given value. func (c *BucketsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsGetCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c @@ -5451,15 +4204,15 @@ func (c *BucketsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *Buc // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration does not -// match the given value. +// conditional on whether the bucket's current metageneration does not match +// the given value. func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsGetCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. 
+// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to noAcl. // // Possible values: // @@ -5470,41 +4223,37 @@ func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall { return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *BucketsGetCall) UserProject(userProject string) *BucketsGetCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *BucketsGetCall) Fields(s ...googleapi.Field) *BucketsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. func (c *BucketsGetCall) IfNoneMatch(entityTag string) *BucketsGetCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *BucketsGetCall) Context(ctx context.Context) *BucketsGetCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *BucketsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -5513,12 +4262,7 @@ func (c *BucketsGetCall) Header() http.Header { } func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5539,12 +4283,10 @@ func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.buckets.get" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. +// Any non-2xx status code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -5575,68 +4317,8 @@ func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { return nil, err } return ret, nil - // { - // "description": "Returns metadata for the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.buckets.get", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit owner, acl and defaultObjectAcl properties." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}", - // "response": { - // "$ref": "Bucket" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.buckets.getIamPolicy": - type BucketsGetIamPolicyCall struct { s *Service bucket string @@ -5657,49 +4339,44 @@ func (r *BucketsService) GetIamPolicy(bucket string) *BucketsGetIamPolicyCall { // OptionsRequestedPolicyVersion sets the optional parameter // "optionsRequestedPolicyVersion": The IAM policy format version to be -// returned. If the optionsRequestedPolicyVersion is for an older -// version that doesn't support part of the requested IAM policy, the -// request fails. +// returned. If the optionsRequestedPolicyVersion is for an older version that +// doesn't support part of the requested IAM policy, the request fails. func (c *BucketsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *BucketsGetIamPolicyCall { c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. 
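Reviewer note: userProject, rewrapped just above, is the parameter all the Requester Pays remarks refer to: on a Requester Pays bucket, a request that names no billing project is rejected, so callers thread a project through every call. A sketch for the GetIamPolicy call this section covers; project and bucket names are placeholders:

// getPolicyRequesterPays reads a bucket's IAM policy while billing the
// request to billingProject, as Requester Pays buckets require.
func getPolicyRequesterPays(ctx context.Context, svc *storage.Service,
	bucket, billingProject string) (*storage.Policy, error) {
	return svc.Buckets.GetIamPolicy(bucket).
		UserProject(billingProject).
		Context(ctx).
		Do()
}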
func (c *BucketsGetIamPolicyCall) UserProject(userProject string) *BucketsGetIamPolicyCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *BucketsGetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. func (c *BucketsGetIamPolicyCall) IfNoneMatch(entityTag string) *BucketsGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *BucketsGetIamPolicyCall) Context(ctx context.Context) *BucketsGetIamPolicyCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *BucketsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -5708,12 +4385,7 @@ func (c *BucketsGetIamPolicyCall) Header() http.Header { } func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -5734,12 +4406,10 @@ func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) } // Do executes the "storage.buckets.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. +// Any non-2xx status code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
 	res, err := c.doRequest("json")
@@ -5770,46 +4440,124 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err
 		return nil, err
 	}
 	return ret, nil
-	// {
-	//   "description": "Returns an IAM policy for the specified bucket.",
-	//   "httpMethod": "GET",
-	//   "id": "storage.buckets.getIamPolicy",
-	//   "parameterOrder": [
-	//     "bucket"
-	//   ],
-	//   "parameters": {
-	//     "bucket": {
-	//       "description": "Name of a bucket.",
-	//       "location": "path",
-	//       "required": true,
-	//       "type": "string"
-	//     },
-	//     "optionsRequestedPolicyVersion": {
-	//       "description": "The IAM policy format version to be returned. If the optionsRequestedPolicyVersion is for an older version that doesn't support part of the requested IAM policy, the request fails.",
-	//       "format": "int32",
-	//       "location": "query",
-	//       "minimum": "1",
-	//       "type": "integer"
-	//     },
-	//     "userProject": {
-	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
-	//       "location": "query",
-	//       "type": "string"
-	//     }
-	//   },
-	//   "path": "b/{bucket}/iam",
-	//   "response": {
-	//     "$ref": "Policy"
-	//   },
-	//   "scopes": [
-	//     "https://www.googleapis.com/auth/cloud-platform",
-	//     "https://www.googleapis.com/auth/devstorage.full_control"
-	//   ]
-	// }
-
 }
 
-// method id "storage.buckets.insert":
-
+type BucketsGetStorageLayoutCall struct {
+	s            *Service
+	bucket       string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+	header_      http.Header
+}
+
+// GetStorageLayout: Returns the storage layout configuration for the specified
+// bucket. Note that this operation requires storage.objects.list permission.
+//
+// - bucket: Name of a bucket.
+func (r *BucketsService) GetStorageLayout(bucket string) *BucketsGetStorageLayoutCall {
+	c := &BucketsGetStorageLayoutCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.bucket = bucket
+	return c
+}
+
+// Prefix sets the optional parameter "prefix": An optional prefix used for
+// permission check. It is useful when the caller only has storage.objects.list
+// permission under a specific prefix.
+func (c *BucketsGetStorageLayoutCall) Prefix(prefix string) *BucketsGetStorageLayoutCall {
+	c.urlParams_.Set("prefix", prefix)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *BucketsGetStorageLayoutCall) Fields(s ...googleapi.Field) *BucketsGetStorageLayoutCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets an optional parameter which makes the operation fail if the
+// object's ETag matches the given value. This is useful for getting updates
+// only after the object has changed since the last request.
+func (c *BucketsGetStorageLayoutCall) IfNoneMatch(entityTag string) *BucketsGetStorageLayoutCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method.
+func (c *BucketsGetStorageLayoutCall) Context(ctx context.Context) *BucketsGetStorageLayoutCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
+func (c *BucketsGetStorageLayoutCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *BucketsGetStorageLayoutCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/storageLayout")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.buckets.getStorageLayout" call.
+// Any non-2xx status code is an error. Response headers are in either
+// *BucketStorageLayout.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified was
+// returned.
+func (c *BucketsGetStorageLayoutCall) Do(opts ...googleapi.CallOption) (*BucketStorageLayout, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, gensupport.WrapError(&googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		})
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, gensupport.WrapError(err)
+	}
+	ret := &BucketStorageLayout{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	target := &ret
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
 
 type BucketsInsertCall struct {
 	s *Service
@@ -5829,9 +4577,8 @@ func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsert
 	return c
 }
 
-// EnableObjectRetention sets the optional parameter
-// "enableObjectRetention": When set to true, object retention is
-// enabled for this bucket.
+// EnableObjectRetention sets the optional parameter "enableObjectRetention":
+// When set to true, object retention is enabled for this bucket.
 func (c *BucketsInsertCall) EnableObjectRetention(enableObjectRetention bool) *BucketsInsertCall {
 	c.urlParams_.Set("enableObjectRetention", fmt.Sprint(enableObjectRetention))
 	return c
@@ -5847,25 +4594,25 @@ func (c *BucketsInsertCall) EnableObjectRetention(enableObjectRetention bool) *B
 //	allAuthenticatedUsers get READER access.
 //
 //	"private" - Project team owners get OWNER access.
-//	"projectPrivate" - Project team members get access according to
+//	"projectPrivate" - Project team members get access according to their
 //
-//	their roles.
+//	roles.
 //
-//	"publicRead" - Project team owners get OWNER access, and allUsers
+//	"publicRead" - Project team owners get OWNER access, and allUsers get
 //
-//	get READER access.
+//	READER access.
 //
-//	"publicReadWrite" - Project team owners get OWNER access, and
+//	"publicReadWrite" - Project team owners get OWNER access, and allUsers get
 //
-//	allUsers get WRITER access.
+//	WRITER access.
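Reviewer note: Buckets.GetStorageLayout, added in the hunk above, is the one genuinely new API surface in this regenerated file; nearly everything else in the diff is comment rewrapping, the SetHeaders consolidation, and deletion of the embedded discovery-doc JSON. It wraps GET b/{bucket}/storageLayout and, per its doc comment, needs only storage.objects.list, optionally narrowed with Prefix. A usage sketch; the bucket name and prefix are placeholders, fmt is assumed imported, and the Location/LocationType fields are taken from the BucketStorageLayout struct defined elsewhere in this file, so treat them as assumptions here:

// describeLayout fetches the storage layout of a bucket; Prefix lets a
// caller whose list permission covers only part of the bucket pass the
// permission check.
func describeLayout(ctx context.Context, svc *storage.Service, bucket string) error {
	layout, err := svc.Buckets.GetStorageLayout(bucket).
		Prefix("logs/"). // illustrative prefix
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println(layout.Location, layout.LocationType)
	return nil
}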
func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) return c } // PredefinedDefaultObjectAcl sets the optional parameter -// "predefinedDefaultObjectAcl": Apply a predefined set of default -// object access controls to this bucket. +// "predefinedDefaultObjectAcl": Apply a predefined set of default object +// access controls to this bucket. // // Possible values: // @@ -5873,31 +4620,30 @@ func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCa // // allAuthenticatedUsers get READER access. // -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// "bucketOwnerFullControl" - Object owner gets OWNER access, and project // -// project team owners get OWNER access. +// team owners get OWNER access. // -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// "bucketOwnerRead" - Object owner gets OWNER access, and project team // -// team owners get READER access. +// owners get READER access. // // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // // members get access according to their roles. // -// "publicRead" - Object owner gets OWNER access, and allUsers get +// "publicRead" - Object owner gets OWNER access, and allUsers get READER // -// READER access. +// access. func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall { c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) return c } -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl, unless the bucket resource -// specifies acl or defaultObjectAcl properties, when it defaults to -// full. +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to noAcl, unless the bucket resource specifies acl or +// defaultObjectAcl properties, when it defaults to full. // // Possible values: // @@ -5908,31 +4654,29 @@ func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall { return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. func (c *BucketsInsertCall) UserProject(userProject string) *BucketsInsertCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *BucketsInsertCall) Context(ctx context.Context) *BucketsInsertCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
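Reviewer note: the PredefinedAcl / PredefinedDefaultObjectAcl comment churn above is pure gofmt rewrapping of the enum descriptions; the accepted values themselves are untouched. For a concrete anchor, creating a bucket with both canned policies applied server-side; project and bucket names are placeholders:

// newProjectBucket creates a bucket whose bucket ACL and default object ACL
// come from server-side canned policies instead of hand-written ACL entries.
func newProjectBucket(ctx context.Context, svc *storage.Service,
	project, name string) (*storage.Bucket, error) {
	return svc.Buckets.Insert(project, &storage.Bucket{Name: name}).
		PredefinedAcl("projectPrivate").
		PredefinedDefaultObjectAcl("bucketOwnerRead").
		Context(ctx).
		Do()
}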
func (c *BucketsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -5941,18 +4685,12 @@ func (c *BucketsInsertCall) Header() http.Header { } func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b") @@ -5966,12 +4704,10 @@ func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.buckets.insert" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. +// Any non-2xx status code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -6002,103 +4738,8 @@ func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { return nil, err } return ret, nil - // { - // "description": "Creates a new bucket.", - // "httpMethod": "POST", - // "id": "storage.buckets.insert", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "enableObjectRetention": { - // "default": "false", - // "description": "When set to true, object retention is enabled for this bucket.", - // "location": "query", - // "type": "boolean" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "private", - // "projectPrivate", - // "publicRead", - // "publicReadWrite" - // ], - // "enumDescriptions": [ - // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - // "Project team owners get OWNER access.", - // "Project team members get access according to their roles.", - // "Project team owners get OWNER access, and allUsers get READER access.", - // "Project team owners get OWNER access, and allUsers get WRITER access." 
- // ], - // "location": "query", - // "type": "string" - // }, - // "predefinedDefaultObjectAcl": { - // "description": "Apply a predefined set of default object access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "A valid API project identifier.", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit owner, acl and defaultObjectAcl properties." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b", - // "request": { - // "$ref": "Bucket" - // }, - // "response": { - // "$ref": "Bucket" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.buckets.list": - type BucketsListCall struct { s *Service urlParams_ gensupport.URLParams @@ -6116,31 +4757,30 @@ func (r *BucketsService) List(projectid string) *BucketsListCall { return c } -// MaxResults sets the optional parameter "maxResults": Maximum number -// of buckets to return in a single response. The service will use this -// parameter or 1,000 items, whichever is smaller. +// MaxResults sets the optional parameter "maxResults": Maximum number of +// buckets to return in a single response. The service will use this parameter +// or 1,000 items, whichever is smaller. func (c *BucketsListCall) MaxResults(maxResults int64) *BucketsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// PageToken sets the optional parameter "pageToken": A -// previously-returned page token representing part of the larger set of -// results to view. +// PageToken sets the optional parameter "pageToken": A previously-returned +// page token representing part of the larger set of results to view. func (c *BucketsListCall) PageToken(pageToken string) *BucketsListCall { c.urlParams_.Set("pageToken", pageToken) return c } -// Prefix sets the optional parameter "prefix": Filter results to -// buckets whose names begin with this prefix. +// Prefix sets the optional parameter "prefix": Filter results to buckets whose +// names begin with this prefix. 
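Reviewer note: Prefix, MaxResults, and PageToken above combine with the Pages helper whose diff appears at the end of this method; Pages drives the pageToken loop itself and, per the hunk there, still resets paging through the same deferred PageToken call, just without the old inline comment. A sketch of the usual listing pattern; the project id and prefix are placeholders and fmt is assumed imported:

// listProdBuckets visits every bucket in the project whose name starts with
// "prod-", letting Pages advance pageToken between calls to Do.
func listProdBuckets(ctx context.Context, svc *storage.Service, project string) error {
	return svc.Buckets.List(project).
		Prefix("prod-").
		MaxResults(100).
		Pages(ctx, func(page *storage.Buckets) error {
			for _, b := range page.Items {
				fmt.Println(b.Name)
			}
			return nil // a non-nil return stops iteration early
		})
}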
func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall { c.urlParams_.Set("prefix", prefix) return c } -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to noAcl. // // Possible values: // @@ -6151,41 +4791,37 @@ func (c *BucketsListCall) Projection(projection string) *BucketsListCall { return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. func (c *BucketsListCall) UserProject(userProject string) *BucketsListCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. func (c *BucketsListCall) IfNoneMatch(entityTag string) *BucketsListCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *BucketsListCall) Context(ctx context.Context) *BucketsListCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *BucketsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -6194,12 +4830,7 @@ func (c *BucketsListCall) Header() http.Header { } func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -6217,12 +4848,10 @@ func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.buckets.list" call. -// Exactly one of *Buckets or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Buckets.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. 
+// Any non-2xx status code is an error. Response headers are in either +// *Buckets.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -6253,70 +4882,6 @@ func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { return nil, err } return ret, nil - // { - // "description": "Retrieves a list of buckets for a given project.", - // "httpMethod": "GET", - // "id": "storage.buckets.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "maxResults": { - // "default": "1000", - // "description": "Maximum number of buckets to return in a single response. The service will use this parameter or 1,000 items, whichever is smaller.", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "A previously-returned page token representing part of the larger set of results to view.", - // "location": "query", - // "type": "string" - // }, - // "prefix": { - // "description": "Filter results to buckets whose names begin with this prefix.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "A valid API project identifier.", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit owner, acl and defaultObjectAcl properties." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b", - // "response": { - // "$ref": "Buckets" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } // Pages invokes f for each page of results. @@ -6324,7 +4889,7 @@ func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { // The provided context supersedes any context provided to the Context method. func (c *BucketsListCall) Pages(ctx context.Context, f func(*Buckets) error) error { c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + defer c.PageToken(c.urlParams_.Get("pageToken")) for { x, err := c.Do() if err != nil { @@ -6340,8 +4905,6 @@ func (c *BucketsListCall) Pages(ctx context.Context, f func(*Buckets) error) err } } -// method id "storage.buckets.lockRetentionPolicy": - type BucketsLockRetentionPolicyCall struct { s *Service bucket string @@ -6353,8 +4916,8 @@ type BucketsLockRetentionPolicyCall struct { // LockRetentionPolicy: Locks retention policy on a bucket. // // - bucket: Name of a bucket. -// - ifMetagenerationMatch: Makes the operation conditional on whether -// bucket's current metageneration matches the given value. 
+// - ifMetagenerationMatch: Makes the operation conditional on whether bucket's +// current metageneration matches the given value. func (r *BucketsService) LockRetentionPolicy(bucket string, ifMetagenerationMatch int64) *BucketsLockRetentionPolicyCall { c := &BucketsLockRetentionPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -6362,31 +4925,29 @@ func (r *BucketsService) LockRetentionPolicy(bucket string, ifMetagenerationMatc return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *BucketsLockRetentionPolicyCall) UserProject(userProject string) *BucketsLockRetentionPolicyCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *BucketsLockRetentionPolicyCall) Fields(s ...googleapi.Field) *BucketsLockRetentionPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *BucketsLockRetentionPolicyCall) Context(ctx context.Context) *BucketsLockRetentionPolicyCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *BucketsLockRetentionPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -6395,12 +4956,7 @@ func (c *BucketsLockRetentionPolicyCall) Header() http.Header { } func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") @@ -6418,12 +4974,10 @@ func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, } // Do executes the "storage.buckets.lockRetentionPolicy" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. +// Any non-2xx status code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. 
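Reviewer note: LockRetentionPolicy, unusually for this API, takes ifMetagenerationMatch as a required positional argument rather than an optional setter, because locking is irreversible: conditioning the call on a metageneration observed in a prior read keeps a concurrent metadata change from being locked in unseen. The read-then-lock pattern that implies, sketched below; the bucket name is a placeholder, and Bucket.Metageneration being an int64 is taken from the generated struct elsewhere in this file:

// lockRetention permanently locks a bucket's retention policy, conditioned
// on the metageneration just observed, so a concurrent metadata update
// fails the lock instead of being silently included in it.
func lockRetention(ctx context.Context, svc *storage.Service, bucket string) error {
	b, err := svc.Buckets.Get(bucket).Context(ctx).Do()
	if err != nil {
		return err
	}
	_, err = svc.Buckets.LockRetentionPolicy(bucket, b.Metageneration).
		Context(ctx).
		Do()
	return err
}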
func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -6454,49 +5008,8 @@ func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Buck return nil, err } return ret, nil - // { - // "description": "Locks retention policy on a bucket.", - // "httpMethod": "POST", - // "id": "storage.buckets.lockRetentionPolicy", - // "parameterOrder": [ - // "bucket", - // "ifMetagenerationMatch" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether bucket's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/lockRetentionPolicy", - // "response": { - // "$ref": "Bucket" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.buckets.patch": - type BucketsPatchCall struct { s *Service bucket string @@ -6506,9 +5019,8 @@ type BucketsPatchCall struct { header_ http.Header } -// Patch: Patches a bucket. Changes to the bucket will be readable -// immediately after writing, but configuration changes may take time to -// propagate. +// Patch: Patches a bucket. Changes to the bucket will be readable immediately +// after writing, but configuration changes may take time to propagate. // // - bucket: Name of a bucket. func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall { @@ -6518,10 +5030,9 @@ func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall return c } -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration matches -// the given value. +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the return of the bucket metadata conditional on whether the bucket's +// current metageneration matches the given value. func (c *BucketsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsPatchCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c @@ -6529,8 +5040,8 @@ func (c *BucketsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *B // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration does not -// match the given value. +// conditional on whether the bucket's current metageneration does not match +// the given value. func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsPatchCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c @@ -6546,25 +5057,25 @@ func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int // allAuthenticatedUsers get READER access. 
// // "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to +// "projectPrivate" - Project team members get access according to their // -// their roles. +// roles. // -// "publicRead" - Project team owners get OWNER access, and allUsers +// "publicRead" - Project team owners get OWNER access, and allUsers get // -// get READER access. +// READER access. // -// "publicReadWrite" - Project team owners get OWNER access, and +// "publicReadWrite" - Project team owners get OWNER access, and allUsers get // -// allUsers get WRITER access. +// WRITER access. func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) return c } // PredefinedDefaultObjectAcl sets the optional parameter -// "predefinedDefaultObjectAcl": Apply a predefined set of default -// object access controls to this bucket. +// "predefinedDefaultObjectAcl": Apply a predefined set of default object +// access controls to this bucket. // // Possible values: // @@ -6572,29 +5083,29 @@ func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall // // allAuthenticatedUsers get READER access. // -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// "bucketOwnerFullControl" - Object owner gets OWNER access, and project // -// project team owners get OWNER access. +// team owners get OWNER access. // -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// "bucketOwnerRead" - Object owner gets OWNER access, and project team // -// team owners get READER access. +// owners get READER access. // // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // // members get access according to their roles. // -// "publicRead" - Object owner gets OWNER access, and allUsers get +// "publicRead" - Object owner gets OWNER access, and allUsers get READER // -// READER access. +// access. func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall { c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) return c } -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to full. +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to full. // // Possible values: // @@ -6605,31 +5116,29 @@ func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall { return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *BucketsPatchCall) UserProject(userProject string) *BucketsPatchCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
+// Context sets the context to be used in this call's Do method. func (c *BucketsPatchCall) Context(ctx context.Context) *BucketsPatchCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *BucketsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -6638,18 +5147,12 @@ func (c *BucketsPatchCall) Header() http.Header { } func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") @@ -6666,12 +5169,10 @@ func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.buckets.patch" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. +// Any non-2xx status code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -6702,108 +5203,8 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { return nil, err } return ret, nil - // { - // "description": "Patches a bucket. 
Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", - // "httpMethod": "PATCH", - // "id": "storage.buckets.patch", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "private", - // "projectPrivate", - // "publicRead", - // "publicReadWrite" - // ], - // "enumDescriptions": [ - // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - // "Project team owners get OWNER access.", - // "Project team members get access according to their roles.", - // "Project team owners get OWNER access, and allUsers get READER access.", - // "Project team owners get OWNER access, and allUsers get WRITER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "predefinedDefaultObjectAcl": { - // "description": "Apply a predefined set of default object access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit owner, acl and defaultObjectAcl properties." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}", - // "request": { - // "$ref": "Bucket" - // }, - // "response": { - // "$ref": "Bucket" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.buckets.setIamPolicy": - type BucketsSetIamPolicyCall struct { s *Service bucket string @@ -6823,31 +5224,29 @@ func (r *BucketsService) SetIamPolicy(bucket string, policy *Policy) *BucketsSet return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *BucketsSetIamPolicyCall) UserProject(userProject string) *BucketsSetIamPolicyCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *BucketsSetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *BucketsSetIamPolicyCall) Context(ctx context.Context) *BucketsSetIamPolicyCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *BucketsSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -6856,18 +5255,12 @@ func (c *BucketsSetIamPolicyCall) Header() http.Header { } func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam") @@ -6884,12 +5277,10 @@ func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) } // Do executes the "storage.buckets.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. +// Any non-2xx status code is an error. 
Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -6920,43 +5311,8 @@ func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err return nil, err } return ret, nil - // { - // "description": "Updates an IAM policy for the specified bucket.", - // "httpMethod": "PUT", - // "id": "storage.buckets.setIamPolicy", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/iam", - // "request": { - // "$ref": "Policy" - // }, - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.buckets.testIamPermissions": - type BucketsTestIamPermissionsCall struct { s *Service bucket string @@ -6966,8 +5322,8 @@ type BucketsTestIamPermissionsCall struct { header_ http.Header } -// TestIamPermissions: Tests a set of permissions on the given bucket to -// see which, if any, are held by the caller. +// TestIamPermissions: Tests a set of permissions on the given bucket to see +// which, if any, are held by the caller. // // - bucket: Name of a bucket. // - permissions: Permissions to test. @@ -6978,41 +5334,37 @@ func (r *BucketsService) TestIamPermissions(bucket string, permissions []string) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *BucketsTestIamPermissionsCall) UserProject(userProject string) *BucketsTestIamPermissionsCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *BucketsTestIamPermissionsCall) Fields(s ...googleapi.Field) *BucketsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. 
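
A hedged sketch of the If-None-Match flow the rewritten IfNoneMatch comment describes, continuing the package and imports of the earlier sketch. The cached response, its ETag, and the permission string are placeholders.

func testPermissionsCached(ctx context.Context, svc *storage.Service, cached *storage.TestIamPermissionsResponse, lastETag string) (*storage.TestIamPermissionsResponse, error) {
	call := svc.Buckets.TestIamPermissions("my-bucket", []string{"storage.buckets.get"})
	if lastETag != "" {
		// ETag captured from a prior response's headers; the server answers
		// 304 if the resource has not changed since then.
		call.IfNoneMatch(lastETag)
	}
	resp, err := call.Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		return cached, nil // 304: the cached copy is still current
	}
	if err != nil {
		return nil, err
	}
	return resp, nil
}
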
func (c *BucketsTestIamPermissionsCall) IfNoneMatch(entityTag string) *BucketsTestIamPermissionsCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *BucketsTestIamPermissionsCall) Context(ctx context.Context) *BucketsTestIamPermissionsCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *BucketsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -7021,12 +5373,7 @@ func (c *BucketsTestIamPermissionsCall) Header() http.Header { } func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7047,12 +5394,11 @@ func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, e } // Do executes the "storage.buckets.testIamPermissions" call. -// Exactly one of *TestIamPermissionsResponse or error will be non-nil. // Any non-2xx status code is an error. Response headers are in either -// *TestIamPermissionsResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -7083,51 +5429,8 @@ func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI return nil, err } return ret, nil - // { - // "description": "Tests a set of permissions on the given bucket to see which, if any, are held by the caller.", - // "httpMethod": "GET", - // "id": "storage.buckets.testIamPermissions", - // "parameterOrder": [ - // "bucket", - // "permissions" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "permissions": { - // "description": "Permissions to test.", - // "location": "query", - // "repeated": true, - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/iam/testPermissions", - // "response": { - // "$ref": "TestIamPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.buckets.update": - type BucketsUpdateCall struct { s *Service bucket string @@ -7137,9 +5440,8 @@ type BucketsUpdateCall struct { header_ http.Header } -// Update: Updates a bucket. Changes to the bucket will be readable -// immediately after writing, but configuration changes may take time to -// propagate. +// Update: Updates a bucket. Changes to the bucket will be readable immediately +// after writing, but configuration changes may take time to propagate. // // - bucket: Name of a bucket. func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall { @@ -7149,10 +5451,9 @@ func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCa return c } -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration matches -// the given value. +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the return of the bucket metadata conditional on whether the bucket's +// current metageneration matches the given value. func (c *BucketsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsUpdateCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c @@ -7160,8 +5461,8 @@ func (c *BucketsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) * // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration does not -// match the given value. +// conditional on whether the bucket's current metageneration does not match +// the given value. func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsUpdateCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c @@ -7177,25 +5478,25 @@ func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in // allAuthenticatedUsers get READER access. // // "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to +// "projectPrivate" - Project team members get access according to their // -// their roles. +// roles. // -// "publicRead" - Project team owners get OWNER access, and allUsers +// "publicRead" - Project team owners get OWNER access, and allUsers get // -// get READER access. +// READER access. // -// "publicReadWrite" - Project team owners get OWNER access, and +// "publicReadWrite" - Project team owners get OWNER access, and allUsers get // -// allUsers get WRITER access. +// WRITER access. 
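
As an illustration of the enumerated values above, a sketch (same assumed package and svc as before) that applies predefined ACLs while updating a bucket; the chosen values and bucket name are placeholders, not a recommendation.

func updateWithPredefinedACL(ctx context.Context, svc *storage.Service, bkt *storage.Bucket) (*storage.Bucket, error) {
	// "projectPrivate" and "bucketOwnerRead" are two of the enum values
	// listed in the regenerated comments above.
	return svc.Buckets.Update("my-bucket", bkt).
		PredefinedAcl("projectPrivate").
		PredefinedDefaultObjectAcl("bucketOwnerRead").
		Context(ctx).
		Do()
}
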
func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) return c } // PredefinedDefaultObjectAcl sets the optional parameter -// "predefinedDefaultObjectAcl": Apply a predefined set of default -// object access controls to this bucket. +// "predefinedDefaultObjectAcl": Apply a predefined set of default object +// access controls to this bucket. // // Possible values: // @@ -7203,29 +5504,29 @@ func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCa // // allAuthenticatedUsers get READER access. // -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// "bucketOwnerFullControl" - Object owner gets OWNER access, and project // -// project team owners get OWNER access. +// team owners get OWNER access. // -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// "bucketOwnerRead" - Object owner gets OWNER access, and project team // -// team owners get READER access. +// owners get READER access. // // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // // members get access according to their roles. // -// "publicRead" - Object owner gets OWNER access, and allUsers get +// "publicRead" - Object owner gets OWNER access, and allUsers get READER // -// READER access. +// access. func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall { c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) return c } -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to full. +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to full. // // Possible values: // @@ -7236,31 +5537,29 @@ func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall { return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *BucketsUpdateCall) UserProject(userProject string) *BucketsUpdateCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *BucketsUpdateCall) Fields(s ...googleapi.Field) *BucketsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *BucketsUpdateCall) Context(ctx context.Context) *BucketsUpdateCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
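
A sketch of the Header() hook just described: the returned http.Header is mutable, and doRequest merges it into the outgoing request via gensupport.SetHeaders. The header name below is purely illustrative.

func updateWithExtraHeader(ctx context.Context, svc *storage.Service, bkt *storage.Bucket) (*storage.Bucket, error) {
	call := svc.Buckets.Update("my-bucket", bkt)
	// Mutate the request headers before Do; "X-Example-Trace" is a
	// hypothetical header, not one this API defines.
	call.Header().Set("X-Example-Trace", "debug-123")
	return call.Context(ctx).Do()
}
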
func (c *BucketsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -7269,18 +5568,12 @@ func (c *BucketsUpdateCall) Header() http.Header { } func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") @@ -7297,12 +5590,10 @@ func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.buckets.update" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. +// Any non-2xx status code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -7333,108 +5624,8 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { return nil, err } return ret, nil - // { - // "description": "Updates a bucket. 
Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", - // "httpMethod": "PUT", - // "id": "storage.buckets.update", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "private", - // "projectPrivate", - // "publicRead", - // "publicReadWrite" - // ], - // "enumDescriptions": [ - // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - // "Project team owners get OWNER access.", - // "Project team members get access according to their roles.", - // "Project team owners get OWNER access, and allUsers get READER access.", - // "Project team owners get OWNER access, and allUsers get WRITER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "predefinedDefaultObjectAcl": { - // "description": "Apply a predefined set of default object access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit owner, acl and defaultObjectAcl properties." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}", - // "request": { - // "$ref": "Bucket" - // }, - // "response": { - // "$ref": "Bucket" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.channels.stop": - type ChannelsStopCall struct { s *Service channel *Channel @@ -7451,23 +5642,21 @@ func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall { } // Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ChannelsStopCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -7476,18 +5665,12 @@ func (c *ChannelsStopCall) Header() http.Header { } func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop") @@ -7512,28 +5695,8 @@ func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { return gensupport.WrapError(err) } return nil - // { - // "description": "Stop watching resources through this channel", - // "httpMethod": "POST", - // "id": "storage.channels.stop", - // "path": "channels/stop", - // "request": { - // "$ref": "Channel", - // "parameterName": "resource" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.defaultObjectAccessControls.delete": - type DefaultObjectAccessControlsDeleteCall struct { s *Service bucket string @@ -7543,8 +5706,8 @@ type DefaultObjectAccessControlsDeleteCall struct { header_ http.Header } -// Delete: Permanently deletes the default object ACL entry for the -// specified entity on the specified bucket. +// Delete: Permanently deletes the default object ACL entry for the specified +// entity on the specified bucket. // // - bucket: Name of a bucket. // - entity: The entity holding the permission. Can be user-userId, @@ -7557,31 +5720,29 @@ func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. 
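
The UserProject setters repeated throughout these hunks matter for Requester Pays buckets; a sketch under the same assumptions as above, with both identifiers as placeholders:

func deleteDefaultACLRequesterPays(ctx context.Context, svc *storage.Service) error {
	// A Requester Pays bucket rejects the call unless a billing project
	// is named via userProject.
	return svc.DefaultObjectAccessControls.
		Delete("pays-bucket", "allUsers").
		UserProject("billing-project").
		Context(ctx).
		Do()
}
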
func (c *DefaultObjectAccessControlsDeleteCall) UserProject(userProject string) *DefaultObjectAccessControlsDeleteCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *DefaultObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *DefaultObjectAccessControlsDeleteCall) Context(ctx context.Context) *DefaultObjectAccessControlsDeleteCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -7590,12 +5751,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header { } func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") @@ -7625,44 +5781,8 @@ func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) return gensupport.WrapError(err) } return nil - // { - // "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.", - // "httpMethod": "DELETE", - // "id": "storage.defaultObjectAccessControls.delete", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/defaultObjectAcl/{entity}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.defaultObjectAccessControls.get": - type DefaultObjectAccessControlsGetCall struct { s *Service bucket string @@ -7673,8 +5793,8 @@ type DefaultObjectAccessControlsGetCall struct { header_ http.Header } -// Get: Returns the default object ACL entry for the specified entity on -// the specified bucket. 
+// Get: Returns the default object ACL entry for the specified entity on the +// specified bucket. // // - bucket: Name of a bucket. // - entity: The entity holding the permission. Can be user-userId, @@ -7687,41 +5807,37 @@ func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) * return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *DefaultObjectAccessControlsGetCall) UserProject(userProject string) *DefaultObjectAccessControlsGetCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. func (c *DefaultObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsGetCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *DefaultObjectAccessControlsGetCall) Context(ctx context.Context) *DefaultObjectAccessControlsGetCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *DefaultObjectAccessControlsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -7730,12 +5846,7 @@ func (c *DefaultObjectAccessControlsGetCall) Header() http.Header { } func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -7757,12 +5868,11 @@ func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Respon } // Do executes the "storage.defaultObjectAccessControls.get" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. 
Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -7793,47 +5903,8 @@ func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (* return nil, err } return ret, nil - // { - // "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.defaultObjectAccessControls.get", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/defaultObjectAcl/{entity}", - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.defaultObjectAccessControls.insert": - type DefaultObjectAccessControlsInsertCall struct { s *Service bucket string @@ -7843,8 +5914,7 @@ type DefaultObjectAccessControlsInsertCall struct { header_ http.Header } -// Insert: Creates a new default object ACL entry on the specified -// bucket. +// Insert: Creates a new default object ACL entry on the specified bucket. // // - bucket: Name of a bucket. func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsInsertCall { @@ -7854,31 +5924,29 @@ func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccessc return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *DefaultObjectAccessControlsInsertCall) UserProject(userProject string) *DefaultObjectAccessControlsInsertCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. 
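
Finally, a sketch of the partial-response pattern the Fields comments point at (same assumed package and svc); the bucket name and the ACL entity/role are placeholders.

func insertDefaultACLPartial(ctx context.Context, svc *storage.Service) (*storage.ObjectAccessControl, error) {
	acl := &storage.ObjectAccessControl{Entity: "allUsers", Role: "READER"}
	// Ask the server to return only the fields we read back, per the
	// partial-response URL cited in the comment above.
	return svc.DefaultObjectAccessControls.
		Insert("my-bucket", acl).
		Fields("entity", "role").
		Context(ctx).
		Do()
}
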
func (c *DefaultObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *DefaultObjectAccessControlsInsertCall) Context(ctx context.Context) *DefaultObjectAccessControlsInsertCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -7887,18 +5955,12 @@ func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header { } func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") @@ -7915,12 +5977,11 @@ func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Res } // Do executes the "storage.defaultObjectAccessControls.insert" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -7951,43 +6012,8 @@ func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) return nil, err } return ret, nil - // { - // "description": "Creates a new default object ACL entry on the specified bucket.", - // "httpMethod": "POST", - // "id": "storage.defaultObjectAccessControls.insert", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/defaultObjectAcl", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.defaultObjectAccessControls.list": - type DefaultObjectAccessControlsListCall struct { s *Service bucket string @@ -8006,58 +6032,53 @@ func (r *DefaultObjectAccessControlsService) List(bucket string) *DefaultObjectA return c } -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": If present, only return default ACL listing -// if the bucket's current metageneration matches this value. +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// If present, only return default ACL listing if the bucket's current +// metageneration matches this value. func (c *DefaultObjectAccessControlsListCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *DefaultObjectAccessControlsListCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": If present, only return default ACL -// listing if the bucket's current metageneration does not match the -// given value. +// "ifMetagenerationNotMatch": If present, only return default ACL listing if +// the bucket's current metageneration does not match the given value. func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *DefaultObjectAccessControlsListCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *DefaultObjectAccessControlsListCall) UserProject(userProject string) *DefaultObjectAccessControlsListCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *DefaultObjectAccessControlsListCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. func (c *DefaultObjectAccessControlsListCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsListCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. 
Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *DefaultObjectAccessControlsListCall) Context(ctx context.Context) *DefaultObjectAccessControlsListCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *DefaultObjectAccessControlsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -8066,12 +6087,7 @@ func (c *DefaultObjectAccessControlsListCall) Header() http.Header { } func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8092,12 +6108,11 @@ func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Respo } // Do executes the "storage.defaultObjectAccessControls.list" call. -// Exactly one of *ObjectAccessControls or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControls.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *ObjectAccessControls.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -8128,52 +6143,8 @@ func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) ( return nil, err } return ret, nil - // { - // "description": "Retrieves default object ACL entries on the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.defaultObjectAccessControls.list", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/defaultObjectAcl", - // "response": { - // "$ref": "ObjectAccessControls" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.defaultObjectAccessControls.patch": - type DefaultObjectAccessControlsPatchCall struct { s *Service bucket string @@ -8198,31 +6169,29 @@ func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *DefaultObjectAccessControlsPatchCall) UserProject(userProject string) *DefaultObjectAccessControlsPatchCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *DefaultObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *DefaultObjectAccessControlsPatchCall) Context(ctx context.Context) *DefaultObjectAccessControlsPatchCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -8231,18 +6200,12 @@ func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header { } func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") @@ -8260,12 +6223,11 @@ func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Resp } // Do executes the "storage.defaultObjectAccessControls.patch" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. 
Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -8296,50 +6258,8 @@ func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) return nil, err } return ret, nil - // { - // "description": "Patches a default object ACL entry on the specified bucket.", - // "httpMethod": "PATCH", - // "id": "storage.defaultObjectAccessControls.patch", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/defaultObjectAcl/{entity}", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.defaultObjectAccessControls.update": - type DefaultObjectAccessControlsUpdateCall struct { s *Service bucket string @@ -8364,31 +6284,29 @@ func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *DefaultObjectAccessControlsUpdateCall) UserProject(userProject string) *DefaultObjectAccessControlsUpdateCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *DefaultObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. 
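Reviewer note: every doRequest in this regenerated file swaps the hand-rolled header block for a single gensupport.SetHeaders call, passing "application/json" when the call carries a request body and "" otherwise. A minimal sketch of the behavior being folded away, reconstructed from the removed lines above (the real helper lives in google.golang.org/api/internal/gensupport; this sketch reuses the file's existing gensupport and internal imports):

// setHeadersSketch mirrors the removed per-call boilerplate: build the
// headers, set the x-goog-api-client value, layer on caller-supplied
// overrides, force the standard User-Agent, and add Content-Type only
// when the call actually sends a body (contentType != "").
func setHeadersSketch(userAgent, contentType string, userHeaders http.Header) http.Header {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
	for k, v := range userHeaders {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", userAgent)
	if contentType != "" {
		reqHeaders.Set("Content-Type", contentType)
	}
	return reqHeaders
}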
func (c *DefaultObjectAccessControlsUpdateCall) Context(ctx context.Context) *DefaultObjectAccessControlsUpdateCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -8397,18 +6315,12 @@ func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { } func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") @@ -8426,12 +6338,11 @@ func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Res } // Do executes the "storage.defaultObjectAccessControls.update" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -8462,50 +6373,8 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) return nil, err } return ret, nil - // { - // "description": "Updates a default object ACL entry on the specified bucket.", - // "httpMethod": "PUT", - // "id": "storage.defaultObjectAccessControls.update", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/defaultObjectAcl/{entity}", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.folders.delete": - type FoldersDeleteCall struct { s *Service bucket string @@ -8527,9 +6396,8 @@ func (r *FoldersService) Delete(bucket string, folder string) *FoldersDeleteCall return c } -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": If set, only deletes the folder if its -// metageneration matches this value. +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// If set, only deletes the folder if its metageneration matches this value. func (c *FoldersDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *FoldersDeleteCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c @@ -8544,23 +6412,21 @@ func (c *FoldersDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *FoldersDeleteCall) Fields(s ...googleapi.Field) *FoldersDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *FoldersDeleteCall) Context(ctx context.Context) *FoldersDeleteCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *FoldersDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -8569,12 +6435,7 @@ func (c *FoldersDeleteCall) Header() http.Header { } func (c *FoldersDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") @@ -8604,52 +6465,8 @@ func (c *FoldersDeleteCall) Do(opts ...googleapi.CallOption) error { return gensupport.WrapError(err) } return nil - // { - // "description": "Permanently deletes a folder. 
Only applicable to buckets with hierarchical namespace enabled.", - // "httpMethod": "DELETE", - // "id": "storage.folders.delete", - // "parameterOrder": [ - // "bucket", - // "folder" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the folder resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "folder": { - // "description": "Name of a folder.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "If set, only deletes the folder if its metageneration matches this value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "If set, only deletes the folder if its metageneration does not match this value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/folders/{folder}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.folders.get": - type FoldersGetCall struct { s *Service bucket string @@ -8660,8 +6477,8 @@ type FoldersGetCall struct { header_ http.Header } -// Get: Returns metadata for the specified folder. Only applicable to -// buckets with hierarchical namespace enabled. +// Get: Returns metadata for the specified folder. Only applicable to buckets +// with hierarchical namespace enabled. // // - bucket: Name of the bucket in which the folder resides. // - folder: Name of a folder. @@ -8672,10 +6489,9 @@ func (r *FoldersService) Get(bucket string, folder string) *FoldersGetCall { return c } -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the return of the folder metadata -// conditional on whether the folder's current metageneration matches -// the given value. +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the return of the folder metadata conditional on whether the folder's +// current metageneration matches the given value. func (c *FoldersGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *FoldersGetCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c @@ -8683,41 +6499,37 @@ func (c *FoldersGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *Fol // IfMetagenerationNotMatch sets the optional parameter // "ifMetagenerationNotMatch": Makes the return of the folder metadata -// conditional on whether the folder's current metageneration does not -// match the given value. +// conditional on whether the folder's current metageneration does not match +// the given value. func (c *FoldersGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *FoldersGetCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. 
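Reviewer note: the folders.* surface (hierarchical-namespace buckets) keeps the same fluent builder pattern after regeneration; only the comment wrapping changed. A hypothetical conditional delete, assuming svc is an initialized *storage.Service, ctx is a context.Context, and the bucket/folder names are made up:

// Delete "archive/" only while its metageneration still matches the value
// we last read; if another writer bumped the metadata in the meantime,
// the request fails instead of deleting blindly.
err := svc.Folders.Delete("example-hns-bucket", "archive/").
	IfMetagenerationMatch(42).
	Context(ctx).
	Do()
if err != nil {
	log.Printf("folders.delete: %v", err)
}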
func (c *FoldersGetCall) Fields(s ...googleapi.Field) *FoldersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. func (c *FoldersGetCall) IfNoneMatch(entityTag string) *FoldersGetCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *FoldersGetCall) Context(ctx context.Context) *FoldersGetCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *FoldersGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -8726,12 +6538,7 @@ func (c *FoldersGetCall) Header() http.Header { } func (c *FoldersGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -8753,12 +6560,10 @@ func (c *FoldersGetCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.folders.get" call. -// Exactly one of *Folder or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Folder.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. +// Any non-2xx status code is an error. Response headers are in either +// *Folder.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *FoldersGetCall) Do(opts ...googleapi.CallOption) (*Folder, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -8789,57 +6594,8 @@ func (c *FoldersGetCall) Do(opts ...googleapi.CallOption) (*Folder, error) { return nil, err } return ret, nil - // { - // "description": "Returns metadata for the specified folder. 
Only applicable to buckets with hierarchical namespace enabled.", - // "httpMethod": "GET", - // "id": "storage.folders.get", - // "parameterOrder": [ - // "bucket", - // "folder" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the folder resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "folder": { - // "description": "Name of a folder.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the return of the folder metadata conditional on whether the folder's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the return of the folder metadata conditional on whether the folder's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/folders/{folder}", - // "response": { - // "$ref": "Folder" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.folders.insert": - type FoldersInsertCall struct { s *Service bucket string @@ -8849,8 +6605,8 @@ type FoldersInsertCall struct { header_ http.Header } -// Insert: Creates a new folder. Only applicable to buckets with -// hierarchical namespace enabled. +// Insert: Creates a new folder. Only applicable to buckets with hierarchical +// namespace enabled. // // - bucket: Name of the bucket in which the folder resides. func (r *FoldersService) Insert(bucket string, folder *Folder) *FoldersInsertCall { @@ -8860,31 +6616,29 @@ func (r *FoldersService) Insert(bucket string, folder *Folder) *FoldersInsertCal return c } -// Recursive sets the optional parameter "recursive": If true, any -// parent folder which doesn’t exist will be created automatically. +// Recursive sets the optional parameter "recursive": If true, any parent +// folder which doesn’t exist will be created automatically. func (c *FoldersInsertCall) Recursive(recursive bool) *FoldersInsertCall { c.urlParams_.Set("recursive", fmt.Sprint(recursive)) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *FoldersInsertCall) Fields(s ...googleapi.Field) *FoldersInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *FoldersInsertCall) Context(ctx context.Context) *FoldersInsertCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
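Reviewer note: a sketch of the Recursive option documented above, under the same assumptions as the previous sketch (svc, ctx, and all names are illustrative; Folder.Name is taken to be the folder's full path ending in '/'):

// Create "a/b/c/" in one call; Recursive(true) also creates any missing
// parents ("a/", "a/b/") instead of failing.
folder, err := svc.Folders.Insert("example-hns-bucket", &storage.Folder{
	Name: "a/b/c/",
}).Recursive(true).Context(ctx).Do()
if err != nil {
	log.Printf("folders.insert: %v", err)
}
_ = folder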
func (c *FoldersInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -8893,18 +6647,12 @@ func (c *FoldersInsertCall) Header() http.Header { } func (c *FoldersInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.folder) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders") @@ -8921,12 +6669,10 @@ func (c *FoldersInsertCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.folders.insert" call. -// Exactly one of *Folder or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Folder.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. +// Any non-2xx status code is an error. Response headers are in either +// *Folder.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *FoldersInsertCall) Do(opts ...googleapi.CallOption) (*Folder, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -8957,44 +6703,8 @@ func (c *FoldersInsertCall) Do(opts ...googleapi.CallOption) (*Folder, error) { return nil, err } return ret, nil - // { - // "description": "Creates a new folder. Only applicable to buckets with hierarchical namespace enabled.", - // "httpMethod": "POST", - // "id": "storage.folders.insert", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the folder resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "recursive": { - // "description": "If true, any parent folder which doesn’t exist will be created automatically.", - // "location": "query", - // "type": "boolean" - // } - // }, - // "path": "b/{bucket}/folders", - // "request": { - // "$ref": "Folder" - // }, - // "response": { - // "$ref": "Folder" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.folders.list": - type FoldersListCall struct { s *Service bucket string @@ -9004,8 +6714,8 @@ type FoldersListCall struct { header_ http.Header } -// List: Retrieves a list of folders matching the criteria. Only -// applicable to buckets with hierarchical namespace enabled. +// List: Retrieves a list of folders matching the criteria. Only applicable to +// buckets with hierarchical namespace enabled. // // - bucket: Name of the bucket in which to look for folders. 
func (r *FoldersService) List(bucket string) *FoldersListCall { @@ -9014,84 +6724,79 @@ func (r *FoldersService) List(bucket string) *FoldersListCall { return c } -// Delimiter sets the optional parameter "delimiter": Returns results in -// a directory-like mode. The only supported value is '/'. If set, items -// will only contain folders that either exactly match the prefix, or -// are one level below the prefix. +// Delimiter sets the optional parameter "delimiter": Returns results in a +// directory-like mode. The only supported value is '/'. If set, items will +// only contain folders that either exactly match the prefix, or are one level +// below the prefix. func (c *FoldersListCall) Delimiter(delimiter string) *FoldersListCall { c.urlParams_.Set("delimiter", delimiter) return c } -// EndOffset sets the optional parameter "endOffset": Filter results to -// folders whose names are lexicographically before endOffset. If -// startOffset is also set, the folders listed will have names between -// startOffset (inclusive) and endOffset (exclusive). +// EndOffset sets the optional parameter "endOffset": Filter results to folders +// whose names are lexicographically before endOffset. If startOffset is also +// set, the folders listed will have names between startOffset (inclusive) and +// endOffset (exclusive). func (c *FoldersListCall) EndOffset(endOffset string) *FoldersListCall { c.urlParams_.Set("endOffset", endOffset) return c } -// PageSize sets the optional parameter "pageSize": Maximum number of -// items to return in a single page of responses. +// PageSize sets the optional parameter "pageSize": Maximum number of items to +// return in a single page of responses. func (c *FoldersListCall) PageSize(pageSize int64) *FoldersListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } -// PageToken sets the optional parameter "pageToken": A -// previously-returned page token representing part of the larger set of -// results to view. +// PageToken sets the optional parameter "pageToken": A previously-returned +// page token representing part of the larger set of results to view. func (c *FoldersListCall) PageToken(pageToken string) *FoldersListCall { c.urlParams_.Set("pageToken", pageToken) return c } -// Prefix sets the optional parameter "prefix": Filter results to -// folders whose paths begin with this prefix. If set, the value must -// either be an empty string or end with a '/'. +// Prefix sets the optional parameter "prefix": Filter results to folders whose +// paths begin with this prefix. If set, the value must either be an empty +// string or end with a '/'. func (c *FoldersListCall) Prefix(prefix string) *FoldersListCall { c.urlParams_.Set("prefix", prefix) return c } -// StartOffset sets the optional parameter "startOffset": Filter results -// to folders whose names are lexicographically equal to or after -// startOffset. If endOffset is also set, the folders listed will have -// names between startOffset (inclusive) and endOffset (exclusive). +// StartOffset sets the optional parameter "startOffset": Filter results to +// folders whose names are lexicographically equal to or after startOffset. If +// endOffset is also set, the folders listed will have names between +// startOffset (inclusive) and endOffset (exclusive). func (c *FoldersListCall) StartOffset(startOffset string) *FoldersListCall { c.urlParams_.Set("startOffset", startOffset) return c } // Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *FoldersListCall) Fields(s ...googleapi.Field) *FoldersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. func (c *FoldersListCall) IfNoneMatch(entityTag string) *FoldersListCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *FoldersListCall) Context(ctx context.Context) *FoldersListCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *FoldersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -9100,12 +6805,7 @@ func (c *FoldersListCall) Header() http.Header { } func (c *FoldersListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9126,12 +6826,10 @@ func (c *FoldersListCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.folders.list" call. -// Exactly one of *Folders or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Folders.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. +// Any non-2xx status code is an error. Response headers are in either +// *Folders.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *FoldersListCall) Do(opts ...googleapi.CallOption) (*Folders, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -9162,66 +6860,6 @@ func (c *FoldersListCall) Do(opts ...googleapi.CallOption) (*Folders, error) { return nil, err } return ret, nil - // { - // "description": "Retrieves a list of folders matching the criteria. 
Only applicable to buckets with hierarchical namespace enabled.", - // "httpMethod": "GET", - // "id": "storage.folders.list", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which to look for folders.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "delimiter": { - // "description": "Returns results in a directory-like mode. The only supported value is '/'. If set, items will only contain folders that either exactly match the prefix, or are one level below the prefix.", - // "location": "query", - // "type": "string" - // }, - // "endOffset": { - // "description": "Filter results to folders whose names are lexicographically before endOffset. If startOffset is also set, the folders listed will have names between startOffset (inclusive) and endOffset (exclusive).", - // "location": "query", - // "type": "string" - // }, - // "pageSize": { - // "description": "Maximum number of items to return in a single page of responses.", - // "format": "int32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "A previously-returned page token representing part of the larger set of results to view.", - // "location": "query", - // "type": "string" - // }, - // "prefix": { - // "description": "Filter results to folders whose paths begin with this prefix. If set, the value must either be an empty string or end with a '/'.", - // "location": "query", - // "type": "string" - // }, - // "startOffset": { - // "description": "Filter results to folders whose names are lexicographically equal to or after startOffset. If endOffset is also set, the folders listed will have names between startOffset (inclusive) and endOffset (exclusive).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/folders", - // "response": { - // "$ref": "Folders" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } // Pages invokes f for each page of results. @@ -9229,7 +6867,7 @@ func (c *FoldersListCall) Do(opts ...googleapi.CallOption) (*Folders, error) { // The provided context supersedes any context provided to the Context method. func (c *FoldersListCall) Pages(ctx context.Context, f func(*Folders) error) error { c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + defer c.PageToken(c.urlParams_.Get("pageToken")) for { x, err := c.Do() if err != nil { @@ -9245,8 +6883,6 @@ func (c *FoldersListCall) Pages(ctx context.Context, f func(*Folders) error) err } } -// method id "storage.folders.rename": - type FoldersRenameCall struct { s *Service bucket string @@ -9257,8 +6893,8 @@ type FoldersRenameCall struct { header_ http.Header } -// Rename: Renames a source folder to a destination folder. Only -// applicable to buckets with hierarchical namespace enabled. +// Rename: Renames a source folder to a destination folder. Only applicable to +// buckets with hierarchical namespace enabled. // // - bucket: Name of the bucket in which the folders are in. // - destinationFolder: Name of the destination folder. 
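Reviewer note: the Pages helper above survives the regeneration unchanged apart from losing its trailing "reset paging" comment. A usage sketch under the same assumptions (names hypothetical; Folders.Items is the generated list-response field):

// Walk every folder under "reports/" one page at a time; returning a
// non-nil error from the callback stops the iteration early.
err := svc.Folders.List("example-hns-bucket").
	Prefix("reports/").
	PageSize(100).
	Pages(ctx, func(page *storage.Folders) error {
		for _, f := range page.Items {
			fmt.Println(f.Name)
		}
		return nil
	})
if err != nil {
	log.Printf("folders.list: %v", err)
}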
@@ -9272,41 +6908,37 @@ func (r *FoldersService) Rename(bucket string, sourceFolder string, destinationF } // IfSourceMetagenerationMatch sets the optional parameter -// "ifSourceMetagenerationMatch": Makes the operation conditional on -// whether the source object's current metageneration matches the given -// value. +// "ifSourceMetagenerationMatch": Makes the operation conditional on whether +// the source object's current metageneration matches the given value. func (c *FoldersRenameCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *FoldersRenameCall { c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) return c } // IfSourceMetagenerationNotMatch sets the optional parameter -// "ifSourceMetagenerationNotMatch": Makes the operation conditional on -// whether the source object's current metageneration does not match the -// given value. +// "ifSourceMetagenerationNotMatch": Makes the operation conditional on whether +// the source object's current metageneration does not match the given value. func (c *FoldersRenameCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *FoldersRenameCall { c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *FoldersRenameCall) Fields(s ...googleapi.Field) *FoldersRenameCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *FoldersRenameCall) Context(ctx context.Context) *FoldersRenameCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *FoldersRenameCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -9315,12 +6947,7 @@ func (c *FoldersRenameCall) Header() http.Header { } func (c *FoldersRenameCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") @@ -9340,12 +6967,11 @@ func (c *FoldersRenameCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.folders.rename" call. -// Exactly one of *GoogleLongrunningOperation or error will be non-nil. // Any non-2xx status code is an error. Response headers are in either -// *GoogleLongrunningOperation.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. func (c *FoldersRenameCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -9376,62 +7002,8 @@ func (c *FoldersRenameCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunning return nil, err } return ret, nil - // { - // "description": "Renames a source folder to a destination folder. Only applicable to buckets with hierarchical namespace enabled.", - // "httpMethod": "POST", - // "id": "storage.folders.rename", - // "parameterOrder": [ - // "bucket", - // "sourceFolder", - // "destinationFolder" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the folders are in.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationFolder": { - // "description": "Name of the destination folder.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifSourceMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "sourceFolder": { - // "description": "Name of the source folder.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/folders/{sourceFolder}/renameTo/folders/{destinationFolder}", - // "response": { - // "$ref": "GoogleLongrunningOperation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.managedFolders.delete": - type ManagedFoldersDeleteCall struct { s *Service bucket string @@ -9452,40 +7024,47 @@ func (r *ManagedFoldersService) Delete(bucket string, managedFolder string) *Man return c } -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": If set, only deletes the managed folder if -// its metageneration matches this value. +// AllowNonEmpty sets the optional parameter "allowNonEmpty": Allows the +// deletion of a managed folder even if it is not empty. A managed folder is +// empty if there are no objects or managed folders that it applies to. Callers +// must have storage.managedFolders.setIamPolicy permission. +func (c *ManagedFoldersDeleteCall) AllowNonEmpty(allowNonEmpty bool) *ManagedFoldersDeleteCall { + c.urlParams_.Set("allowNonEmpty", fmt.Sprint(allowNonEmpty)) + return c +} + +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// If set, only deletes the managed folder if its metageneration matches this +// value. func (c *ManagedFoldersDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ManagedFoldersDeleteCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": If set, only deletes the managed folder -// if its metageneration does not match this value. 
+// "ifMetagenerationNotMatch": If set, only deletes the managed folder if its +// metageneration does not match this value. func (c *ManagedFoldersDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ManagedFoldersDeleteCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ManagedFoldersDeleteCall) Fields(s ...googleapi.Field) *ManagedFoldersDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ManagedFoldersDeleteCall) Context(ctx context.Context) *ManagedFoldersDeleteCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ManagedFoldersDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -9494,12 +7073,7 @@ func (c *ManagedFoldersDeleteCall) Header() http.Header { } func (c *ManagedFoldersDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") @@ -9529,52 +7103,8 @@ func (c *ManagedFoldersDeleteCall) Do(opts ...googleapi.CallOption) error { return gensupport.WrapError(err) } return nil - // { - // "description": "Permanently deletes a managed folder.", - // "httpMethod": "DELETE", - // "id": "storage.managedFolders.delete", - // "parameterOrder": [ - // "bucket", - // "managedFolder" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket containing the managed folder.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "If set, only deletes the managed folder if its metageneration matches this value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "If set, only deletes the managed folder if its metageneration does not match this value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "managedFolder": { - // "description": "The managed folder name/path.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/managedFolders/{managedFolder}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.managedFolders.get": - type ManagedFoldersGetCall struct { s *Service bucket string @@ -9596,52 +7126,47 @@ func (r *ManagedFoldersService) 
Get(bucket string, managedFolder string) *Manage return c } -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the return of the managed folder -// metadata conditional on whether the managed folder's current -// metageneration matches the given value. +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the return of the managed folder metadata conditional on whether the +// managed folder's current metageneration matches the given value. func (c *ManagedFoldersGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ManagedFoldersGetCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the return of the managed folder -// metadata conditional on whether the managed folder's current -// metageneration does not match the given value. +// "ifMetagenerationNotMatch": Makes the return of the managed folder metadata +// conditional on whether the managed folder's current metageneration does not +// match the given value. func (c *ManagedFoldersGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ManagedFoldersGetCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ManagedFoldersGetCall) Fields(s ...googleapi.Field) *ManagedFoldersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. func (c *ManagedFoldersGetCall) IfNoneMatch(entityTag string) *ManagedFoldersGetCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ManagedFoldersGetCall) Context(ctx context.Context) *ManagedFoldersGetCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
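Reviewer note: AllowNonEmpty, added two hunks above on ManagedFoldersDeleteCall, is the one genuinely new option in this stretch of the diff (the rest is comment reflow plus the SetHeaders swap). A hypothetical call, same assumptions as the earlier sketches:

// Delete the managed folder "team-a/" even if objects still fall under
// it; per the new doc comment the caller needs the
// storage.managedFolders.setIamPolicy permission for this.
err := svc.ManagedFolders.Delete("example-bucket", "team-a/").
	AllowNonEmpty(true).
	Context(ctx).
	Do()
if err != nil {
	log.Printf("managedFolders.delete: %v", err)
}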
func (c *ManagedFoldersGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -9650,12 +7175,7 @@ func (c *ManagedFoldersGetCall) Header() http.Header { } func (c *ManagedFoldersGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9677,12 +7197,10 @@ func (c *ManagedFoldersGetCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.managedFolders.get" call. -// Exactly one of *ManagedFolder or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ManagedFolder.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *ManagedFolder.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *ManagedFoldersGetCall) Do(opts ...googleapi.CallOption) (*ManagedFolder, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -9713,57 +7231,8 @@ func (c *ManagedFoldersGetCall) Do(opts ...googleapi.CallOption) (*ManagedFolder return nil, err } return ret, nil - // { - // "description": "Returns metadata of the specified managed folder.", - // "httpMethod": "GET", - // "id": "storage.managedFolders.get", - // "parameterOrder": [ - // "bucket", - // "managedFolder" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket containing the managed folder.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the return of the managed folder metadata conditional on whether the managed folder's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the return of the managed folder metadata conditional on whether the managed folder's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "managedFolder": { - // "description": "The managed folder name/path.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/managedFolders/{managedFolder}", - // "response": { - // "$ref": "ManagedFolder" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.managedFolders.getIamPolicy": - type ManagedFoldersGetIamPolicyCall struct { s *Service bucket string @@ -9787,49 +7256,44 @@ func (r 
*ManagedFoldersService) GetIamPolicy(bucket string, managedFolder string // OptionsRequestedPolicyVersion sets the optional parameter // "optionsRequestedPolicyVersion": The IAM policy format version to be -// returned. If the optionsRequestedPolicyVersion is for an older -// version that doesn't support part of the requested IAM policy, the -// request fails. +// returned. If the optionsRequestedPolicyVersion is for an older version that +// doesn't support part of the requested IAM policy, the request fails. func (c *ManagedFoldersGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ManagedFoldersGetIamPolicyCall { c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *ManagedFoldersGetIamPolicyCall) UserProject(userProject string) *ManagedFoldersGetIamPolicyCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ManagedFoldersGetIamPolicyCall) Fields(s ...googleapi.Field) *ManagedFoldersGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. func (c *ManagedFoldersGetIamPolicyCall) IfNoneMatch(entityTag string) *ManagedFoldersGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ManagedFoldersGetIamPolicyCall) Context(ctx context.Context) *ManagedFoldersGetIamPolicyCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
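Reviewer note: a sketch of the OptionsRequestedPolicyVersion knob documented above. The value 3 is my assumption (it is the format current IAM policies with conditions use); the failure behavior in the comment is taken from the doc comment in this hunk:

// Ask for the version 3 policy format; had we requested an older version
// and the policy used features it cannot express, the call would fail
// rather than return a lossy policy.
policy, err := svc.ManagedFolders.GetIamPolicy("example-bucket", "team-a/").
	OptionsRequestedPolicyVersion(3).
	Context(ctx).
	Do()
if err != nil {
	log.Printf("managedFolders.getIamPolicy: %v", err)
}
_ = policy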
func (c *ManagedFoldersGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -9838,12 +7302,7 @@ func (c *ManagedFoldersGetIamPolicyCall) Header() http.Header { } func (c *ManagedFoldersGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -9865,12 +7324,10 @@ func (c *ManagedFoldersGetIamPolicyCall) doRequest(alt string) (*http.Response, } // Do executes the "storage.managedFolders.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. +// Any non-2xx status code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *ManagedFoldersGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -9901,57 +7358,8 @@ func (c *ManagedFoldersGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli return nil, err } return ret, nil - // { - // "description": "Returns an IAM policy for the specified managed folder.", - // "httpMethod": "GET", - // "id": "storage.managedFolders.getIamPolicy", - // "parameterOrder": [ - // "bucket", - // "managedFolder" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket containing the managed folder.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "managedFolder": { - // "description": "The managed folder name/path.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "optionsRequestedPolicyVersion": { - // "description": "The IAM policy format version to be returned. If the optionsRequestedPolicyVersion is for an older version that doesn't support part of the requested IAM policy, the request fails.", - // "format": "int32", - // "location": "query", - // "minimum": "1", - // "type": "integer" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/managedFolders/{managedFolder}/iam", - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.managedFolders.insert": - type ManagedFoldersInsertCall struct { s *Service bucket string @@ -9972,23 +7380,21 @@ func (r *ManagedFoldersService) Insert(bucket string, managedfolder *ManagedFold } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ManagedFoldersInsertCall) Fields(s ...googleapi.Field) *ManagedFoldersInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ManagedFoldersInsertCall) Context(ctx context.Context) *ManagedFoldersInsertCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ManagedFoldersInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -9997,18 +7403,12 @@ func (c *ManagedFoldersInsertCall) Header() http.Header { } func (c *ManagedFoldersInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedfolder) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders") @@ -10025,12 +7425,10 @@ func (c *ManagedFoldersInsertCall) doRequest(alt string) (*http.Response, error) } // Do executes the "storage.managedFolders.insert" call. -// Exactly one of *ManagedFolder or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ManagedFolder.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *ManagedFolder.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. 
func (c *ManagedFoldersInsertCall) Do(opts ...googleapi.CallOption) (*ManagedFolder, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -10061,39 +7459,8 @@ func (c *ManagedFoldersInsertCall) Do(opts ...googleapi.CallOption) (*ManagedFol return nil, err } return ret, nil - // { - // "description": "Creates a new managed folder.", - // "httpMethod": "POST", - // "id": "storage.managedFolders.insert", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket containing the managed folder.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/managedFolders", - // "request": { - // "$ref": "ManagedFolder" - // }, - // "response": { - // "$ref": "ManagedFolder" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.managedFolders.list": - type ManagedFoldersListCall struct { s *Service bucket string @@ -10112,56 +7479,51 @@ func (r *ManagedFoldersService) List(bucket string) *ManagedFoldersListCall { return c } -// PageSize sets the optional parameter "pageSize": Maximum number of -// items to return in a single page of responses. +// PageSize sets the optional parameter "pageSize": Maximum number of items to +// return in a single page of responses. func (c *ManagedFoldersListCall) PageSize(pageSize int64) *ManagedFoldersListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } -// PageToken sets the optional parameter "pageToken": A -// previously-returned page token representing part of the larger set of -// results to view. +// PageToken sets the optional parameter "pageToken": A previously-returned +// page token representing part of the larger set of results to view. func (c *ManagedFoldersListCall) PageToken(pageToken string) *ManagedFoldersListCall { c.urlParams_.Set("pageToken", pageToken) return c } -// Prefix sets the optional parameter "prefix": The managed folder -// name/path prefix to filter the output list of results. +// Prefix sets the optional parameter "prefix": The managed folder name/path +// prefix to filter the output list of results. func (c *ManagedFoldersListCall) Prefix(prefix string) *ManagedFoldersListCall { c.urlParams_.Set("prefix", prefix) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ManagedFoldersListCall) Fields(s ...googleapi.Field) *ManagedFoldersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. 
func (c *ManagedFoldersListCall) IfNoneMatch(entityTag string) *ManagedFoldersListCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ManagedFoldersListCall) Context(ctx context.Context) *ManagedFoldersListCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ManagedFoldersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -10170,12 +7532,7 @@ func (c *ManagedFoldersListCall) Header() http.Header { } func (c *ManagedFoldersListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10196,12 +7553,10 @@ func (c *ManagedFoldersListCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.managedFolders.list" call. -// Exactly one of *ManagedFolders or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ManagedFolders.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *ManagedFolders.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *ManagedFoldersListCall) Do(opts ...googleapi.CallOption) (*ManagedFolders, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") @@ -10232,51 +7587,6 @@ func (c *ManagedFoldersListCall) Do(opts ...googleapi.CallOption) (*ManagedFolde return nil, err } return ret, nil - // { - // "description": "Lists managed folders in the given bucket.", - // "httpMethod": "GET", - // "id": "storage.managedFolders.list", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket containing the managed folder.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "pageSize": { - // "description": "Maximum number of items to return in a single page of responses.", - // "format": "int32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "A previously-returned page token representing part of the larger set of results to view.", - // "location": "query", - // "type": "string" - // }, - // "prefix": { - // "description": "The managed folder name/path prefix to filter the output list of results.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/managedFolders", - // "response": { - // "$ref": "ManagedFolders" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } // Pages invokes f for each page of results. @@ -10284,7 +7594,7 @@ func (c *ManagedFoldersListCall) Do(opts ...googleapi.CallOption) (*ManagedFolde // The provided context supersedes any context provided to the Context method. func (c *ManagedFoldersListCall) Pages(ctx context.Context, f func(*ManagedFolders) error) error { c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + defer c.PageToken(c.urlParams_.Get("pageToken")) for { x, err := c.Do() if err != nil { @@ -10300,8 +7610,6 @@ func (c *ManagedFoldersListCall) Pages(ctx context.Context, f func(*ManagedFolde } } -// method id "storage.managedFolders.setIamPolicy": - type ManagedFoldersSetIamPolicyCall struct { s *Service bucket string @@ -10324,31 +7632,29 @@ func (r *ManagedFoldersService) SetIamPolicy(bucket string, managedFolder string return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *ManagedFoldersSetIamPolicyCall) UserProject(userProject string) *ManagedFoldersSetIamPolicyCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ManagedFoldersSetIamPolicyCall) Fields(s ...googleapi.Field) *ManagedFoldersSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. 
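A hedged usage sketch for the Pages helper shown above: it re-issues the List call until NextPageToken is exhausted, invoking the callback once per page, and the deferred PageToken(...) restores the caller's original paging position afterwards. The bucket name and page size are illustrative:

package main

import (
	"context"
	"fmt"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	// Walk every page of managed folders in the bucket.
	err = svc.ManagedFolders.List("my-bucket").PageSize(100).Pages(ctx,
		func(page *storage.ManagedFolders) error {
			for _, mf := range page.Items {
				fmt.Println(mf.Name)
			}
			return nil // returning an error here stops the iteration
		})
	if err != nil {
		log.Fatal(err)
	}
}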
func (c *ManagedFoldersSetIamPolicyCall) Context(ctx context.Context) *ManagedFoldersSetIamPolicyCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ManagedFoldersSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -10357,18 +7663,12 @@ func (c *ManagedFoldersSetIamPolicyCall) Header() http.Header { } func (c *ManagedFoldersSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}/iam") @@ -10386,12 +7686,10 @@ func (c *ManagedFoldersSetIamPolicyCall) doRequest(alt string) (*http.Response, } // Do executes the "storage.managedFolders.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. +// Any non-2xx status code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *ManagedFoldersSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -10422,50 +7720,8 @@ func (c *ManagedFoldersSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli return nil, err } return ret, nil - // { - // "description": "Updates an IAM policy for the specified managed folder.", - // "httpMethod": "PUT", - // "id": "storage.managedFolders.setIamPolicy", - // "parameterOrder": [ - // "bucket", - // "managedFolder" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket containing the managed folder.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "managedFolder": { - // "description": "The managed folder name/path.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/managedFolders/{managedFolder}/iam", - // "request": { - // "$ref": "Policy" - // }, - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.managedFolders.testIamPermissions": - type ManagedFoldersTestIamPermissionsCall struct { s *Service bucket string @@ -10476,8 +7732,8 @@ type ManagedFoldersTestIamPermissionsCall struct { header_ http.Header } -// TestIamPermissions: Tests a set of permissions on the given managed -// folder to see which, if any, are held by the caller. +// TestIamPermissions: Tests a set of permissions on the given managed folder +// to see which, if any, are held by the caller. // // - bucket: Name of the bucket containing the managed folder. // - managedFolder: The managed folder name/path. @@ -10490,41 +7746,37 @@ func (r *ManagedFoldersService) TestIamPermissions(bucket string, managedFolder return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *ManagedFoldersTestIamPermissionsCall) UserProject(userProject string) *ManagedFoldersTestIamPermissionsCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ManagedFoldersTestIamPermissionsCall) Fields(s ...googleapi.Field) *ManagedFoldersTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. func (c *ManagedFoldersTestIamPermissionsCall) IfNoneMatch(entityTag string) *ManagedFoldersTestIamPermissionsCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ManagedFoldersTestIamPermissionsCall) Context(ctx context.Context) *ManagedFoldersTestIamPermissionsCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
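A hedged usage sketch for the TestIamPermissions call documented above. The constructor in the hunk is truncated at the diff's context width; in the generated client the required repeated "permissions" query parameter is the third argument, as in the Objects analog. Imports and the initialized *storage.Service are as in the previous sketch; the permission names are illustrative:

func heldPermissions(ctx context.Context, svc *storage.Service, bucket, folder string) ([]string, error) {
	resp, err := svc.ManagedFolders.TestIamPermissions(bucket, folder, []string{
		"storage.managedFolders.get",
		"storage.managedFolders.delete",
	}).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	// The response echoes back only the permissions the caller actually holds.
	return resp.Permissions, nil
}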
func (c *ManagedFoldersTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -10533,12 +7785,7 @@ func (c *ManagedFoldersTestIamPermissionsCall) Header() http.Header { } func (c *ManagedFoldersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10560,12 +7807,11 @@ func (c *ManagedFoldersTestIamPermissionsCall) doRequest(alt string) (*http.Resp } // Do executes the "storage.managedFolders.testIamPermissions" call. -// Exactly one of *TestIamPermissionsResponse or error will be non-nil. // Any non-2xx status code is an error. Response headers are in either -// *TestIamPermissionsResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. func (c *ManagedFoldersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -10596,58 +7842,8 @@ func (c *ManagedFoldersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) return nil, err } return ret, nil - // { - // "description": "Tests a set of permissions on the given managed folder to see which, if any, are held by the caller.", - // "httpMethod": "GET", - // "id": "storage.managedFolders.testIamPermissions", - // "parameterOrder": [ - // "bucket", - // "managedFolder", - // "permissions" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket containing the managed folder.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "managedFolder": { - // "description": "The managed folder name/path.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "permissions": { - // "description": "Permissions to test.", - // "location": "query", - // "repeated": true, - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/managedFolders/{managedFolder}/iam/testPermissions", - // "response": { - // "$ref": "TestIamPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.notifications.delete": - type NotificationsDeleteCall struct { s *Service bucket string @@ -10668,31 +7864,29 @@ func (r *NotificationsService) Delete(bucket string, notification string) *Notif return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *NotificationsDeleteCall) UserProject(userProject string) *NotificationsDeleteCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *NotificationsDeleteCall) Fields(s ...googleapi.Field) *NotificationsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *NotificationsDeleteCall) Context(ctx context.Context) *NotificationsDeleteCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
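A short usage sketch for the delete call above: methods with no response body generate a Do that returns only an error, so there is nothing to inspect on success. Assumptions as in the earlier sketches; the billing project is illustrative and only needed for Requester Pays buckets:

func deleteNotification(ctx context.Context, svc *storage.Service, bucket, notificationID string) error {
	return svc.Notifications.Delete(bucket, notificationID).
		UserProject("my-billing-project"). // only for Requester Pays buckets
		Context(ctx).
		Do()
}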
func (c *NotificationsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -10701,12 +7895,7 @@ func (c *NotificationsDeleteCall) Header() http.Header { } func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") @@ -10736,45 +7925,8 @@ func (c *NotificationsDeleteCall) Do(opts ...googleapi.CallOption) error { return gensupport.WrapError(err) } return nil - // { - // "description": "Permanently deletes a notification subscription.", - // "httpMethod": "DELETE", - // "id": "storage.notifications.delete", - // "parameterOrder": [ - // "bucket", - // "notification" - // ], - // "parameters": { - // "bucket": { - // "description": "The parent bucket of the notification.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "notification": { - // "description": "ID of the notification to delete.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/notificationConfigs/{notification}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.notifications.get": - type NotificationsGetCall struct { s *Service bucket string @@ -10796,41 +7948,37 @@ func (r *NotificationsService) Get(bucket string, notification string) *Notifica return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *NotificationsGetCall) UserProject(userProject string) *NotificationsGetCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *NotificationsGetCall) Fields(s ...googleapi.Field) *NotificationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. 
func (c *NotificationsGetCall) IfNoneMatch(entityTag string) *NotificationsGetCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *NotificationsGetCall) Context(ctx context.Context) *NotificationsGetCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *NotificationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -10839,12 +7987,7 @@ func (c *NotificationsGetCall) Header() http.Header { } func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -10866,12 +8009,10 @@ func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.notifications.get" call. -// Exactly one of *Notification or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Notification.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *Notification.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -10902,50 +8043,8 @@ func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, return nil, err } return ret, nil - // { - // "description": "View a notification configuration.", - // "httpMethod": "GET", - // "id": "storage.notifications.get", - // "parameterOrder": [ - // "bucket", - // "notification" - // ], - // "parameters": { - // "bucket": { - // "description": "The parent bucket of the notification.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "notification": { - // "description": "Notification ID", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/notificationConfigs/{notification}", - // "response": { - // "$ref": "Notification" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.notifications.insert": - type NotificationsInsertCall struct { s *Service bucket string @@ -10965,31 +8064,29 @@ func (r *NotificationsService) Insert(bucket string, notification *Notification) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *NotificationsInsertCall) UserProject(userProject string) *NotificationsInsertCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *NotificationsInsertCall) Fields(s ...googleapi.Field) *NotificationsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *NotificationsInsertCall) Context(ctx context.Context) *NotificationsInsertCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *NotificationsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -10998,18 +8095,12 @@ func (c *NotificationsInsertCall) Header() http.Header { } func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.notification) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs") @@ -11026,12 +8117,10 @@ func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) } // Do executes the "storage.notifications.insert" call. -// Exactly one of *Notification or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Notification.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *Notification.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notification, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -11062,44 +8151,8 @@ func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notificatio return nil, err } return ret, nil - // { - // "description": "Creates a notification subscription for a given bucket.", - // "httpMethod": "POST", - // "id": "storage.notifications.insert", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "The parent bucket of the notification.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/notificationConfigs", - // "request": { - // "$ref": "Notification" - // }, - // "response": { - // "$ref": "Notification" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.notifications.list": - type NotificationsListCall struct { s *Service bucket string @@ -11109,8 +8162,7 @@ type NotificationsListCall struct { header_ http.Header } -// List: Retrieves a list of notification subscriptions for a given -// bucket. +// List: Retrieves a list of notification subscriptions for a given bucket. // // - bucket: Name of a Google Cloud Storage bucket. func (r *NotificationsService) List(bucket string) *NotificationsListCall { @@ -11119,41 +8171,37 @@ func (r *NotificationsService) List(bucket string) *NotificationsListCall { return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *NotificationsListCall) UserProject(userProject string) *NotificationsListCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *NotificationsListCall) Fields(s ...googleapi.Field) *NotificationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. 
This is useful for getting updates +// only after the object has changed since the last request. func (c *NotificationsListCall) IfNoneMatch(entityTag string) *NotificationsListCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *NotificationsListCall) Context(ctx context.Context) *NotificationsListCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *NotificationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -11162,12 +8210,7 @@ func (c *NotificationsListCall) Header() http.Header { } func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11188,12 +8231,10 @@ func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.notifications.list" call. -// Exactly one of *Notifications or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Notifications.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *Notifications.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -11224,43 +8265,8 @@ func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications return nil, err } return ret, nil - // { - // "description": "Retrieves a list of notification subscriptions for a given bucket.", - // "httpMethod": "GET", - // "id": "storage.notifications.list", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a Google Cloud Storage bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/notificationConfigs", - // "response": { - // "$ref": "Notifications" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.objectAccessControls.delete": - type ObjectAccessControlsDeleteCall struct { s *Service bucket string @@ -11271,15 +8277,15 @@ type ObjectAccessControlsDeleteCall struct { header_ http.Header } -// Delete: Permanently deletes the ACL entry for the specified entity on -// the specified object. +// Delete: Permanently deletes the ACL entry for the specified entity on the +// specified object. // // - bucket: Name of a bucket. // - entity: The entity holding the permission. Can be user-userId, // user-emailAddress, group-groupId, group-emailAddress, allUsers, or // allAuthenticatedUsers. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts // (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall { c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -11289,39 +8295,37 @@ func (r *ObjectAccessControlsService) Delete(bucket string, object string, entit return c } -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). +// Generation sets the optional parameter "generation": If present, selects a +// specific revision of this object (as opposed to the latest version, the +// default). func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAccessControlsDeleteCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *ObjectAccessControlsDeleteCall) UserProject(userProject string) *ObjectAccessControlsDeleteCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAccessControlsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. 
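A hedged sketch combining the ACL delete call above with its optional Generation parameter, which pins the operation to one object revision instead of the latest. Assumptions as before; the entity value is illustrative:

func dropPublicACL(ctx context.Context, svc *storage.Service, bucket, object string, generation int64) error {
	return svc.ObjectAccessControls.Delete(bucket, object, "allUsers").
		Generation(generation). // target a specific revision, not the latest
		Context(ctx).
		Do()
}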
func (c *ObjectAccessControlsDeleteCall) Context(ctx context.Context) *ObjectAccessControlsDeleteCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ObjectAccessControlsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -11330,12 +8334,7 @@ func (c *ObjectAccessControlsDeleteCall) Header() http.Header { } func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") @@ -11366,57 +8365,8 @@ func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error return gensupport.WrapError(err) } return nil - // { - // "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", - // "httpMethod": "DELETE", - // "id": "storage.objectAccessControls.delete", - // "parameterOrder": [ - // "bucket", - // "object", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/acl/{entity}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.objectAccessControls.get": - type ObjectAccessControlsGetCall struct { s *Service bucket string @@ -11428,15 +8378,14 @@ type ObjectAccessControlsGetCall struct { header_ http.Header } -// Get: Returns the ACL entry for the specified entity on the specified -// object. +// Get: Returns the ACL entry for the specified entity on the specified object. // // - bucket: Name of a bucket. // - entity: The entity holding the permission. Can be user-userId, // user-emailAddress, group-groupId, group-emailAddress, allUsers, or // allAuthenticatedUsers. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts +// - object: Name of the object. 
For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts // (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall { c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -11446,49 +8395,45 @@ func (r *ObjectAccessControlsService) Get(bucket string, object string, entity s return c } -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). +// Generation sets the optional parameter "generation": If present, selects a +// specific revision of this object (as opposed to the latest version, the +// default). func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccessControlsGetCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *ObjectAccessControlsGetCall) UserProject(userProject string) *ObjectAccessControlsGetCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccessControlsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. func (c *ObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *ObjectAccessControlsGetCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ObjectAccessControlsGetCall) Context(ctx context.Context) *ObjectAccessControlsGetCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
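A hedged sketch of the If-None-Match pattern the comments above describe: pass the ETag from a previous response and treat the 304 result as "still current" via googleapi.IsNotModified (from google.golang.org/api/googleapi, assumed imported):

func refreshACLEntry(ctx context.Context, svc *storage.Service, bucket, object, entity, lastETag string) (*storage.ObjectAccessControl, error) {
	acl, err := svc.ObjectAccessControls.Get(bucket, object, entity).
		IfNoneMatch(lastETag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // 304: the cached entry is still valid
	}
	return acl, err
}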
func (c *ObjectAccessControlsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -11497,12 +8442,7 @@ func (c *ObjectAccessControlsGetCall) Header() http.Header { } func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11525,12 +8465,11 @@ func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, err } // Do executes the "storage.objectAccessControls.get" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -11561,60 +8500,8 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA return nil, err } return ret, nil - // { - // "description": "Returns the ACL entry for the specified entity on the specified object.", - // "httpMethod": "GET", - // "id": "storage.objectAccessControls.get", - // "parameterOrder": [ - // "bucket", - // "object", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/acl/{entity}", - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.objectAccessControls.insert": - type ObjectAccessControlsInsertCall struct { s *Service bucket string @@ -11628,8 +8515,8 @@ type ObjectAccessControlsInsertCall struct { // Insert: Creates a new ACL entry on the specified object. // // - bucket: Name of a bucket. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts // (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall { c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -11639,39 +8526,37 @@ func (r *ObjectAccessControlsService) Insert(bucket string, object string, objec return c } -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). +// Generation sets the optional parameter "generation": If present, selects a +// specific revision of this object (as opposed to the latest version, the +// default). func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAccessControlsInsertCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *ObjectAccessControlsInsertCall) UserProject(userProject string) *ObjectAccessControlsInsertCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAccessControlsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ObjectAccessControlsInsertCall) Context(ctx context.Context) *ObjectAccessControlsInsertCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
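A usage sketch for the ACL insert call above. The request body is serialized by googleapi.WithoutDataWrapper.JSONReader in doRequest, with the Content-Type now supplied through gensupport.SetHeaders; the entity and role values are illustrative:

func grantReader(ctx context.Context, svc *storage.Service, bucket, object string) (*storage.ObjectAccessControl, error) {
	return svc.ObjectAccessControls.Insert(bucket, object, &storage.ObjectAccessControl{
		Entity: "user-reader@example.com", // who gets access
		Role:   "READER",                  // what they may do
	}).Context(ctx).Do()
}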
func (c *ObjectAccessControlsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -11680,18 +8565,12 @@ func (c *ObjectAccessControlsInsertCall) Header() http.Header { } func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") @@ -11709,12 +8588,11 @@ func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, } // Do executes the "storage.objectAccessControls.insert" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -11745,56 +8623,8 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje return nil, err } return ret, nil - // { - // "description": "Creates a new ACL entry on the specified object.", - // "httpMethod": "POST", - // "id": "storage.objectAccessControls.insert", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/acl", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.objectAccessControls.list": - type ObjectAccessControlsListCall struct { s *Service bucket string @@ -11808,8 +8638,8 @@ type ObjectAccessControlsListCall struct { // List: Retrieves ACL entries on the specified object. // // - bucket: Name of a bucket. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts // (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall { c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -11818,49 +8648,45 @@ func (r *ObjectAccessControlsService) List(bucket string, object string) *Object return c } -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). +// Generation sets the optional parameter "generation": If present, selects a +// specific revision of this object (as opposed to the latest version, the +// default). func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAccessControlsListCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *ObjectAccessControlsListCall) UserProject(userProject string) *ObjectAccessControlsListCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ObjectAccessControlsListCall) Fields(s ...googleapi.Field) *ObjectAccessControlsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. func (c *ObjectAccessControlsListCall) IfNoneMatch(entityTag string) *ObjectAccessControlsListCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
+// Context sets the context to be used in this call's Do method. func (c *ObjectAccessControlsListCall) Context(ctx context.Context) *ObjectAccessControlsListCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ObjectAccessControlsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -11869,12 +8695,7 @@ func (c *ObjectAccessControlsListCall) Header() http.Header { } func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -11896,12 +8717,11 @@ func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, er } // Do executes the "storage.objectAccessControls.list" call. -// Exactly one of *ObjectAccessControls or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControls.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *ObjectAccessControls.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -11932,53 +8752,8 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object return nil, err } return ret, nil - // { - // "description": "Retrieves ACL entries on the specified object.", - // "httpMethod": "GET", - // "id": "storage.objectAccessControls.list", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/acl", - // "response": { - // "$ref": "ObjectAccessControls" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.objectAccessControls.patch": - type ObjectAccessControlsPatchCall struct { s *Service bucket string @@ -11996,8 +8771,8 @@ type ObjectAccessControlsPatchCall struct { // - entity: The entity holding the permission. Can be user-userId, // user-emailAddress, group-groupId, group-emailAddress, allUsers, or // allAuthenticatedUsers. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts // (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -12008,39 +8783,37 @@ func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity return c } -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). +// Generation sets the optional parameter "generation": If present, selects a +// specific revision of this object (as opposed to the latest version, the +// default). func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAccessControlsPatchCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *ObjectAccessControlsPatchCall) UserProject(userProject string) *ObjectAccessControlsPatchCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAccessControlsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ObjectAccessControlsPatchCall) Context(ctx context.Context) *ObjectAccessControlsPatchCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
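Reviewer note: the rewrapped doc comments above all describe the same call-builder pattern, where optional query parameters (generation, userProject, fields) and the If-None-Match header are chained before Do. A minimal sketch of the ACL list call, assuming Application Default Credentials; the bucket, object, and ETag values are placeholders:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx) // picks up Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	// "my-bucket", "my-object", and the ETag are placeholders.
	acls, err := svc.ObjectAccessControls.List("my-bucket", "my-object").
		IfNoneMatch(`"etag-from-previous-response"`). // fail with 304 if unchanged
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		log.Println("ACL list unchanged since last fetch")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	for _, acl := range acls.Items {
		log.Printf("%s -> %s", acl.Entity, acl.Role)
	}
}
```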
func (c *ObjectAccessControlsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -12049,18 +8822,12 @@ func (c *ObjectAccessControlsPatchCall) Header() http.Header { } func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") @@ -12079,12 +8846,11 @@ func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, e } // Do executes the "storage.objectAccessControls.patch" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -12115,63 +8881,8 @@ func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Objec return nil, err } return ret, nil - // { - // "description": "Patches an ACL entry on the specified object.", - // "httpMethod": "PATCH", - // "id": "storage.objectAccessControls.patch", - // "parameterOrder": [ - // "bucket", - // "object", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/acl/{entity}", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.objectAccessControls.update": - type ObjectAccessControlsUpdateCall struct { s *Service bucket string @@ -12189,8 +8900,8 @@ type ObjectAccessControlsUpdateCall struct { // - entity: The entity holding the permission. Can be user-userId, // user-emailAddress, group-groupId, group-emailAddress, allUsers, or // allAuthenticatedUsers. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts // (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall { c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -12201,39 +8912,37 @@ func (r *ObjectAccessControlsService) Update(bucket string, object string, entit return c } -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). +// Generation sets the optional parameter "generation": If present, selects a +// specific revision of this object (as opposed to the latest version, the +// default). func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAccessControlsUpdateCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *ObjectAccessControlsUpdateCall) UserProject(userProject string) *ObjectAccessControlsUpdateCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *ObjectAccessControlsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ObjectAccessControlsUpdateCall) Context(ctx context.Context) *ObjectAccessControlsUpdateCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
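Patch and Update share the same builder shape; per the JSON descriptors being dropped above, Patch maps to HTTP PATCH and Update to PUT on the same `b/{bucket}/o/{object}/acl/{entity}` path. A hedged sketch of granting allUsers READER via Patch (the helper name and all bucket/project names are placeholders; `svc` comes from `storage.NewService`):

```go
package example

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// makePublicRead grants allUsers READER access on one object via PATCH.
func makePublicRead(ctx context.Context, svc *storage.Service) error {
	acl := &storage.ObjectAccessControl{
		Entity: "allUsers", // any entity form from the doc comment above works
		Role:   "READER",
	}
	_, err := svc.ObjectAccessControls.Patch("my-bucket", "my-object", "allUsers", acl).
		UserProject("my-billing-project"). // only needed for Requester Pays buckets
		Context(ctx).
		Do()
	return err
}
```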
func (c *ObjectAccessControlsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -12242,18 +8951,12 @@ func (c *ObjectAccessControlsUpdateCall) Header() http.Header { } func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") @@ -12272,12 +8975,11 @@ func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, } // Do executes the "storage.objectAccessControls.update" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -12308,63 +9010,8 @@ func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Obje return nil, err } return ret, nil - // { - // "description": "Updates an ACL entry on the specified object.", - // "httpMethod": "PUT", - // "id": "storage.objectAccessControls.update", - // "parameterOrder": [ - // "bucket", - // "object", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/acl/{entity}", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.objects.bulkRestore": - type ObjectsBulkRestoreCall struct { s *Service bucket string @@ -12386,23 +9033,21 @@ func (r *ObjectsService) BulkRestore(bucket string, bulkrestoreobjectsrequest *B } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ObjectsBulkRestoreCall) Fields(s ...googleapi.Field) *ObjectsBulkRestoreCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ObjectsBulkRestoreCall) Context(ctx context.Context) *ObjectsBulkRestoreCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ObjectsBulkRestoreCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -12411,18 +9056,12 @@ func (c *ObjectsBulkRestoreCall) Header() http.Header { } func (c *ObjectsBulkRestoreCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bulkrestoreobjectsrequest) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/bulkRestore") @@ -12439,12 +9078,11 @@ func (c *ObjectsBulkRestoreCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.objects.bulkRestore" call. -// Exactly one of *GoogleLongrunningOperation or error will be non-nil. // Any non-2xx status code is an error. Response headers are in either -// *GoogleLongrunningOperation.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. func (c *ObjectsBulkRestoreCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") @@ -12475,39 +9113,8 @@ func (c *ObjectsBulkRestoreCall) Do(opts ...googleapi.CallOption) (*GoogleLongru return nil, err } return ret, nil - // { - // "description": "Initiates a long-running bulk restore operation on the specified bucket.", - // "httpMethod": "POST", - // "id": "storage.objects.bulkRestore", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/bulkRestore", - // "request": { - // "$ref": "BulkRestoreObjectsRequest" - // }, - // "response": { - // "$ref": "GoogleLongrunningOperation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.objects.compose": - type ObjectsComposeCall struct { s *Service destinationBucket string @@ -12518,14 +9125,13 @@ type ObjectsComposeCall struct { header_ http.Header } -// Compose: Concatenates a list of existing objects into a new object in -// the same bucket. +// Compose: Concatenates a list of existing objects into a new object in the +// same bucket. // -// - destinationBucket: Name of the bucket containing the source -// objects. The destination object is stored in this bucket. -// - destinationObject: Name of the new object. For information about -// how to URL encode object names to be path safe, see Encoding URI -// Path Parts +// - destinationBucket: Name of the bucket containing the source objects. The +// destination object is stored in this bucket. +// - destinationObject: Name of the new object. For information about how to +// URL encode object names to be path safe, see Encoding URI Path Parts // (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall { c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -12536,8 +9142,8 @@ func (r *ObjectsService) Compose(destinationBucket string, destinationObject str } // DestinationPredefinedAcl sets the optional parameter -// "destinationPredefinedAcl": Apply a predefined set of access controls -// to the destination object. +// "destinationPredefinedAcl": Apply a predefined set of access controls to the +// destination object. // // Possible values: // @@ -12545,81 +9151,77 @@ func (r *ObjectsService) Compose(destinationBucket string, destinationObject str // // allAuthenticatedUsers get READER access. // -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// "bucketOwnerFullControl" - Object owner gets OWNER access, and project // -// project team owners get OWNER access. +// team owners get OWNER access. // -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// "bucketOwnerRead" - Object owner gets OWNER access, and project team // -// team owners get READER access. +// owners get READER access. // // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // // members get access according to their roles. // -// "publicRead" - Object owner gets OWNER access, and allUsers get +// "publicRead" - Object owner gets OWNER access, and allUsers get READER // -// READER access. +// access. 
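The rewrapped enum comments above document the canned ACLs accepted by destinationPredefinedAcl. Roughly, a compose call applying one of them could look like the following sketch (helper name and object names are hypothetical):

```go
package example

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// composeParts concatenates two hypothetical part objects into one object
// in the same bucket.
func composeParts(ctx context.Context, svc *storage.Service) (*storage.Object, error) {
	req := &storage.ComposeRequest{
		SourceObjects: []*storage.ComposeRequestSourceObjects{
			{Name: "part-000"},
			{Name: "part-001"},
		},
	}
	return svc.Objects.Compose("my-bucket", "merged.bin", req).
		DestinationPredefinedAcl("projectPrivate"). // one of the enum values above
		IfGenerationMatch(0).                       // succeed only if merged.bin has no live version
		Context(ctx).
		Do()
}
```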
func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall { c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) return c } -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": Makes the +// operation conditional on whether the object's current generation matches the +// given value. Setting to 0 makes the operation succeed only if there are no +// live versions of the object. func (c *ObjectsComposeCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsComposeCall { c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the operation conditional on whether the object's current +// metageneration matches the given value. func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsComposeCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } -// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of -// the Cloud KMS key, of the form -// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// -// that will be used to encrypt the object. Overrides the object -// -// metadata's kms_key_name value, if any. +// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of the +// Cloud KMS key, of the form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that +// will be used to encrypt the object. Overrides the object metadata's +// kms_key_name value, if any. func (c *ObjectsComposeCall) KmsKeyName(kmsKeyName string) *ObjectsComposeCall { c.urlParams_.Set("kmsKeyName", kmsKeyName) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *ObjectsComposeCall) UserProject(userProject string) *ObjectsComposeCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ObjectsComposeCall) Fields(s ...googleapi.Field) *ObjectsComposeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ObjectsComposeCall) Context(ctx context.Context) *ObjectsComposeCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
+// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ObjectsComposeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -12628,18 +9230,12 @@ func (c *ObjectsComposeCall) Header() http.Header { } func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{destinationBucket}/o/{destinationObject}/compose") @@ -12657,12 +9253,10 @@ func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.objects.compose" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. +// Any non-2xx status code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -12693,89 +9287,8 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { return nil, err } return ret, nil - // { - // "description": "Concatenates a list of existing objects into a new object in the same bucket.", - // "httpMethod": "POST", - // "id": "storage.objects.compose", - // "parameterOrder": [ - // "destinationBucket", - // "destinationObject" - // ], - // "parameters": { - // "destinationBucket": { - // "description": "Name of the bucket containing the source objects. The destination object is stored in this bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationObject": { - // "description": "Name of the new object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationPredefinedAcl": { - // "description": "Apply a predefined set of access controls to the destination object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "kmsKeyName": { - // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{destinationBucket}/o/{destinationObject}/compose", - // "request": { - // "$ref": "ComposeRequest" - // }, - // "response": { - // "$ref": "Object" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.objects.copy": - type ObjectsCopyCall struct { s *Service sourceBucket string @@ -12788,22 +9301,20 @@ type ObjectsCopyCall struct { header_ http.Header } -// Copy: Copies a source object to a destination object. Optionally -// overrides metadata. +// Copy: Copies a source object to a destination object. Optionally overrides +// metadata. // -// - destinationBucket: Name of the bucket in which to store the new -// object. Overrides the provided object metadata's bucket value, if -// any.For information about how to URL encode object names to be path -// safe, see Encoding URI Path Parts +// - destinationBucket: Name of the bucket in which to store the new object. +// Overrides the provided object metadata's bucket value, if any.For +// information about how to URL encode object names to be path safe, see +// Encoding URI Path Parts // (https://cloud.google.com/storage/docs/request-endpoints#encoding). // - destinationObject: Name of the new object. 
Required when the object -// metadata is not otherwise provided. Overrides the object metadata's -// name value, if any. -// - sourceBucket: Name of the bucket in which to find the source -// object. -// - sourceObject: Name of the source object. For information about how -// to URL encode object names to be path safe, see Encoding URI Path -// Parts +// metadata is not otherwise provided. Overrides the object metadata's name +// value, if any. +// - sourceBucket: Name of the bucket in which to find the source object. +// - sourceObject: Name of the source object. For information about how to URL +// encode object names to be path safe, see Encoding URI Path Parts // (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall { c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -12815,22 +9326,19 @@ func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinat return c } -// DestinationKmsKeyName sets the optional parameter -// "destinationKmsKeyName": Resource name of the Cloud KMS key, of the -// form -// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// -// that will be used to encrypt the object. Overrides the object -// -// metadata's kms_key_name value, if any. +// DestinationKmsKeyName sets the optional parameter "destinationKmsKeyName": +// Resource name of the Cloud KMS key, of the form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that +// will be used to encrypt the object. Overrides the object metadata's +// kms_key_name value, if any. func (c *ObjectsCopyCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsCopyCall { c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) return c } // DestinationPredefinedAcl sets the optional parameter -// "destinationPredefinedAcl": Apply a predefined set of access controls -// to the destination object. +// "destinationPredefinedAcl": Apply a predefined set of access controls to the +// destination object. // // Possible values: // @@ -12838,103 +9346,97 @@ func (c *ObjectsCopyCall) DestinationKmsKeyName(destinationKmsKeyName string) *O // // allAuthenticatedUsers get READER access. // -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// "bucketOwnerFullControl" - Object owner gets OWNER access, and project // -// project team owners get OWNER access. +// team owners get OWNER access. // -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// "bucketOwnerRead" - Object owner gets OWNER access, and project team // -// team owners get READER access. +// owners get READER access. // // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // // members get access according to their roles. // -// "publicRead" - Object owner gets OWNER access, and allUsers get +// "publicRead" - Object owner gets OWNER access, and allUsers get READER // -// READER access. +// access. func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall { c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) return c } -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the destination object's -// current generation matches the given value. 
Setting to 0 makes the -// operation succeed only if there are no live versions of the object. +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": Makes the +// operation conditional on whether the destination object's current generation +// matches the given value. Setting to 0 makes the operation succeed only if +// there are no live versions of the object. func (c *ObjectsCopyCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsCopyCall { c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the destination object's current generation does not match the given -// value. If no live object exists, the precondition fails. Setting to 0 -// makes the operation succeed only if there is a live version of the -// object. +// IfGenerationNotMatch sets the optional parameter "ifGenerationNotMatch": +// Makes the operation conditional on whether the destination object's current +// generation does not match the given value. If no live object exists, the +// precondition fails. Setting to 0 makes the operation succeed only if there +// is a live version of the object. func (c *ObjectsCopyCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsCopyCall { c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) return c } -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the destination object's current metageneration matches the given -// value. +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the operation conditional on whether the destination object's current +// metageneration matches the given value. func (c *ObjectsCopyCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsCopyCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the destination object's current metageneration does not -// match the given value. +// "ifMetagenerationNotMatch": Makes the operation conditional on whether the +// destination object's current metageneration does not match the given value. func (c *ObjectsCopyCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsCopyCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } // IfSourceGenerationMatch sets the optional parameter -// "ifSourceGenerationMatch": Makes the operation conditional on whether -// the source object's current generation matches the given value. +// "ifSourceGenerationMatch": Makes the operation conditional on whether the +// source object's current generation matches the given value. func (c *ObjectsCopyCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsCopyCall { c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) return c } // IfSourceGenerationNotMatch sets the optional parameter -// "ifSourceGenerationNotMatch": Makes the operation conditional on -// whether the source object's current generation does not match the -// given value. +// "ifSourceGenerationNotMatch": Makes the operation conditional on whether the +// source object's current generation does not match the given value. 
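The generation and metageneration setters documented above compose into compare-and-swap style copies: ifSourceGenerationMatch pins the source revision, and setting ifGenerationMatch to 0 makes the copy create-only. A sketch under those assumptions (bucket and object names are placeholders):

```go
package example

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// copyIfUnchanged copies src.txt only while it still sits at srcGen, and
// only if the destination does not exist yet (ifGenerationMatch == 0).
func copyIfUnchanged(ctx context.Context, svc *storage.Service, srcGen int64) (*storage.Object, error) {
	return svc.Objects.Copy("src-bucket", "src.txt", "dst-bucket", "dst.txt", &storage.Object{}).
		IfSourceGenerationMatch(srcGen).
		IfGenerationMatch(0).
		Context(ctx).
		Do()
}
```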
func (c *ObjectsCopyCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsCopyCall { c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) return c } // IfSourceMetagenerationMatch sets the optional parameter -// "ifSourceMetagenerationMatch": Makes the operation conditional on -// whether the source object's current metageneration matches the given -// value. +// "ifSourceMetagenerationMatch": Makes the operation conditional on whether +// the source object's current metageneration matches the given value. func (c *ObjectsCopyCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsCopyCall { c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) return c } // IfSourceMetagenerationNotMatch sets the optional parameter -// "ifSourceMetagenerationNotMatch": Makes the operation conditional on -// whether the source object's current metageneration does not match the -// given value. +// "ifSourceMetagenerationNotMatch": Makes the operation conditional on whether +// the source object's current metageneration does not match the given value. func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsCopyCall { c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) return c } -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl, unless the object resource -// specifies the acl property, when it defaults to full. +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to noAcl, unless the object resource specifies the acl +// property, when it defaults to full. // // Possible values: // @@ -12945,39 +9447,37 @@ func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall { return c } -// SourceGeneration sets the optional parameter "sourceGeneration": If -// present, selects a specific revision of the source object (as opposed -// to the latest version, the default). +// SourceGeneration sets the optional parameter "sourceGeneration": If present, +// selects a specific revision of the source object (as opposed to the latest +// version, the default). func (c *ObjectsCopyCall) SourceGeneration(sourceGeneration int64) *ObjectsCopyCall { c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *ObjectsCopyCall) UserProject(userProject string) *ObjectsCopyCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ObjectsCopyCall) Fields(s ...googleapi.Field) *ObjectsCopyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. 
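Fields and Projection interact as the comments above describe: Projection("full") also returns owner and ACL properties, while Fields trims the response body using the partial-response syntax linked in the doc comment. For illustration (placeholder names; a sketch, not the canonical usage):

```go
package example

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// copyWithPartialResponse requests the full projection (including ACLs)
// but trims the returned payload to three fields.
func copyWithPartialResponse(ctx context.Context, svc *storage.Service) (*storage.Object, error) {
	return svc.Objects.Copy("src-bucket", "src.txt", "dst-bucket", "dst.txt", &storage.Object{}).
		Projection("full").                  // "noAcl" would omit owner and acl
		Fields("name", "generation", "acl"). // partial-response field mask
		Context(ctx).
		Do()
}
```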
func (c *ObjectsCopyCall) Context(ctx context.Context) *ObjectsCopyCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ObjectsCopyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -12986,18 +9486,12 @@ func (c *ObjectsCopyCall) Header() http.Header { } func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}") @@ -13017,12 +9511,10 @@ func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.objects.copy" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. +// Any non-2xx status code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -13053,158 +9545,8 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { return nil, err } return ret, nil - // { - // "description": "Copies a source object to a destination object. Optionally overrides metadata.", - // "httpMethod": "POST", - // "id": "storage.objects.copy", - // "parameterOrder": [ - // "sourceBucket", - // "sourceObject", - // "destinationBucket", - // "destinationObject" - // ], - // "parameters": { - // "destinationBucket": { - // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationKmsKeyName": { - // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", - // "location": "query", - // "type": "string" - // }, - // "destinationObject": { - // "description": "Name of the new object. 
Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationPredefinedAcl": { - // "description": "Apply a predefined set of access controls to the destination object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceGenerationMatch": { - // "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. 
Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "sourceBucket": { - // "description": "Name of the bucket in which to find the source object.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "sourceGeneration": { - // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "sourceObject": { - // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", - // "request": { - // "$ref": "Object" - // }, - // "response": { - // "$ref": "Object" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.objects.delete": - type ObjectsDeleteCall struct { s *Service bucket string @@ -13214,13 +9556,13 @@ type ObjectsDeleteCall struct { header_ http.Header } -// Delete: Deletes an object and its metadata. Deletions are permanent -// if versioning is not enabled for the bucket, or if the generation -// parameter is used. +// Delete: Deletes an object and its metadata. Deletions are permanent if +// versioning is not enabled for the bucket, or if the generation parameter is +// used. // // - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts // (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall { c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -13229,75 +9571,72 @@ func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall return c } -// Generation sets the optional parameter "generation": If present, -// permanently deletes a specific revision of this object (as opposed to -// the latest version, the default). +// Generation sets the optional parameter "generation": If present, permanently +// deletes a specific revision of this object (as opposed to the latest +// version, the default). func (c *ObjectsDeleteCall) Generation(generation int64) *ObjectsDeleteCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. 
Setting to 0 makes the operation -// succeed only if there are no live versions of the object. +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": Makes the +// operation conditional on whether the object's current generation matches the +// given value. Setting to 0 makes the operation succeed only if there are no +// live versions of the object. func (c *ObjectsDeleteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsDeleteCall { c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. +// IfGenerationNotMatch sets the optional parameter "ifGenerationNotMatch": +// Makes the operation conditional on whether the object's current generation +// does not match the given value. If no live object exists, the precondition +// fails. Setting to 0 makes the operation succeed only if there is a live +// version of the object. func (c *ObjectsDeleteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsDeleteCall { c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) return c } -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the operation conditional on whether the object's current +// metageneration matches the given value. func (c *ObjectsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsDeleteCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. +// "ifMetagenerationNotMatch": Makes the operation conditional on whether the +// object's current metageneration does not match the given value. func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsDeleteCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *ObjectsDeleteCall) UserProject(userProject string) *ObjectsDeleteCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
+// Context sets the context to be used in this call's Do method. func (c *ObjectsDeleteCall) Context(ctx context.Context) *ObjectsDeleteCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ObjectsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -13306,12 +9645,7 @@ func (c *ObjectsDeleteCall) Header() http.Header { } func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") @@ -13341,75 +9675,8 @@ func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { return gensupport.WrapError(err) } return nil - // { - // "description": "Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", - // "httpMethod": "DELETE", - // "id": "storage.objects.delete", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.objects.get": - type ObjectsGetCall struct { s *Service bucket string @@ -13423,8 +9690,8 @@ type ObjectsGetCall struct { // Get: Retrieves an object or its metadata. // // - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts // (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -13433,52 +9700,51 @@ func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { return c } -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). +// Generation sets the optional parameter "generation": If present, selects a +// specific revision of this object (as opposed to the latest version, the +// default). func (c *ObjectsGetCall) Generation(generation int64) *ObjectsGetCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": Makes the +// operation conditional on whether the object's current generation matches the +// given value. Setting to 0 makes the operation succeed only if there are no +// live versions of the object. func (c *ObjectsGetCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsGetCall { c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. +// IfGenerationNotMatch sets the optional parameter "ifGenerationNotMatch": +// Makes the operation conditional on whether the object's current generation +// does not match the given value. If no live object exists, the precondition +// fails. Setting to 0 makes the operation succeed only if there is a live +// version of the object. func (c *ObjectsGetCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsGetCall { c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) return c } -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. 
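// Example (sketch, placeholder names): fetching object metadata with the Get
// call above, combining a metageneration precondition with a partial
// response so only the listed fields travel over the wire.
package main

import (
	"context"
	"fmt"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	obj, err := svc.Objects.Get("example-bucket", "photos/cat.png").
		IfMetagenerationMatch(3). // fail if the metadata changed since metageneration 3
		Fields("name", "size", "updated").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s is %d bytes\n", obj.Name, obj.Size)
}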
+// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the operation conditional on whether the object's current +// metageneration matches the given value. func (c *ObjectsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsGetCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. +// "ifMetagenerationNotMatch": Makes the operation conditional on whether the +// object's current metageneration does not match the given value. func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsGetCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to noAcl. // // Possible values: // @@ -13490,48 +9756,44 @@ func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall { } // SoftDeleted sets the optional parameter "softDeleted": If true, only -// soft-deleted object versions will be listed. The default is false. -// For more information, see Soft Delete. +// soft-deleted object versions will be listed. The default is false. For more +// information, see Soft Delete. func (c *ObjectsGetCall) SoftDeleted(softDeleted bool) *ObjectsGetCall { c.urlParams_.Set("softDeleted", fmt.Sprint(softDeleted)) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *ObjectsGetCall) UserProject(userProject string) *ObjectsGetCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. func (c *ObjectsGetCall) IfNoneMatch(entityTag string) *ObjectsGetCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do and Download -// methods. Any pending HTTP request will be aborted if the provided -// context is canceled. +// Context sets the context to be used in this call's Do and Download methods. 
func (c *ObjectsGetCall) Context(ctx context.Context) *ObjectsGetCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ObjectsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -13540,12 +9802,7 @@ func (c *ObjectsGetCall) Header() http.Header { } func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -13583,12 +9840,10 @@ func (c *ObjectsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, } // Do executes the "storage.objects.get" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. +// Any non-2xx status code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -13619,100 +9874,8 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { return nil, err } return ret, nil - // { - // "description": "Retrieves an object or its metadata.", - // "httpMethod": "GET", - // "id": "storage.objects.get", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "softDeleted": { - // "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see Soft Delete.", - // "location": "query", - // "type": "boolean" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}", - // "response": { - // "$ref": "Object" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsMediaDownload": true, - // "useMediaDownloadService": true - // } - } -// method id "storage.objects.getIamPolicy": - type ObjectsGetIamPolicyCall struct { s *Service bucket string @@ -13726,8 +9889,8 @@ type ObjectsGetIamPolicyCall struct { // GetIamPolicy: Returns an IAM policy for the specified object. // // - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts // (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) GetIamPolicy(bucket string, object string) *ObjectsGetIamPolicyCall { c := &ObjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -13736,49 +9899,45 @@ func (r *ObjectsService) GetIamPolicy(bucket string, object string) *ObjectsGetI return c } -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). +// Generation sets the optional parameter "generation": If present, selects a +// specific revision of this object (as opposed to the latest version, the +// default). 
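// Example (sketch): streaming an object's content with the Get call's
// Download method, which requests alt=media and hands back the raw
// *http.Response; Do, by contrast, returns only the metadata. Bucket and
// object names are placeholders.
package main

import (
	"context"
	"io"
	"log"
	"os"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := svc.Objects.Get("example-bucket", "backups/db.tar.gz").
		Context(ctx).
		Download()
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	// Copy the object's bytes to stdout; a real caller would write to a file.
	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
		log.Fatal(err)
	}
}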
func (c *ObjectsGetIamPolicyCall) Generation(generation int64) *ObjectsGetIamPolicyCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *ObjectsGetIamPolicyCall) UserProject(userProject string) *ObjectsGetIamPolicyCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ObjectsGetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. func (c *ObjectsGetIamPolicyCall) IfNoneMatch(entityTag string) *ObjectsGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ObjectsGetIamPolicyCall) Context(ctx context.Context) *ObjectsGetIamPolicyCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ObjectsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -13787,12 +9946,7 @@ func (c *ObjectsGetIamPolicyCall) Header() http.Header { } func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -13814,12 +9968,10 @@ func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) } // Do executes the "storage.objects.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. +// Any non-2xx status code is an error. 
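// Example (sketch): reading the IAM policy attached to a single object via
// the GetIamPolicy call above. The Bindings field on the returned Policy is
// assumed to carry role/member pairs as elsewhere in this package; all names
// are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	policy, err := svc.Objects.GetIamPolicy("example-bucket", "reports/q3.pdf").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range policy.Bindings {
		fmt.Printf("%s -> %v\n", b.Role, b.Members)
	}
}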
Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -13850,56 +10002,8 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err return nil, err } return ret, nil - // { - // "description": "Returns an IAM policy for the specified object.", - // "httpMethod": "GET", - // "id": "storage.objects.getIamPolicy", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/iam", - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.objects.insert": - type ObjectsInsertCall struct { s *Service bucket string @@ -13913,8 +10017,8 @@ type ObjectsInsertCall struct { // Insert: Stores a new object and metadata. // -// - bucket: Name of the bucket in which to store the new object. -// Overrides the provided object metadata's bucket value, if any. +// - bucket: Name of the bucket in which to store the new object. Overrides the +// provided object metadata's bucket value, if any. func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall { c := &ObjectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -13922,69 +10026,65 @@ func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCal return c } -// ContentEncoding sets the optional parameter "contentEncoding": If -// set, sets the contentEncoding property of the final object to this -// value. Setting this parameter is equivalent to setting the -// contentEncoding metadata property. This can be useful when uploading -// an object with uploadType=media to indicate the encoding of the -// content being uploaded. +// ContentEncoding sets the optional parameter "contentEncoding": If set, sets +// the contentEncoding property of the final object to this value. Setting this +// parameter is equivalent to setting the contentEncoding metadata property. 
+// This can be useful when uploading an object with uploadType=media to +// indicate the encoding of the content being uploaded. func (c *ObjectsInsertCall) ContentEncoding(contentEncoding string) *ObjectsInsertCall { c.urlParams_.Set("contentEncoding", contentEncoding) return c } -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": Makes the +// operation conditional on whether the object's current generation matches the +// given value. Setting to 0 makes the operation succeed only if there are no +// live versions of the object. func (c *ObjectsInsertCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsInsertCall { c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. +// IfGenerationNotMatch sets the optional parameter "ifGenerationNotMatch": +// Makes the operation conditional on whether the object's current generation +// does not match the given value. If no live object exists, the precondition +// fails. Setting to 0 makes the operation succeed only if there is a live +// version of the object. func (c *ObjectsInsertCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsInsertCall { c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) return c } -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the operation conditional on whether the object's current +// metageneration matches the given value. func (c *ObjectsInsertCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsInsertCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. +// "ifMetagenerationNotMatch": Makes the operation conditional on whether the +// object's current metageneration does not match the given value. func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsInsertCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } -// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of -// the Cloud KMS key, of the form -// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// -// that will be used to encrypt the object. Overrides the object -// -// metadata's kms_key_name value, if any. +// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of the +// Cloud KMS key, of the form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that +// will be used to encrypt the object. 
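// Example (sketch): a create-only insert. IfGenerationMatch(0) makes the
// request fail with a precondition error when a live version already exists,
// and KmsKeyName asks the service to encrypt the object with a
// customer-managed key. Every resource name below is a placeholder.
package main

import (
	"context"
	"log"
	"strings"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	obj, err := svc.Objects.Insert("example-bucket", &storage.Object{Name: "configs/app.yaml"}).
		IfGenerationMatch(0). // only create, never overwrite
		KmsKeyName("projects/example/locations/global/keyRings/example-kr/cryptoKeys/example-key").
		Media(strings.NewReader("retries: 3\n")).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created %s at generation %d", obj.Name, obj.Generation)
}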
Overrides the object metadata's +// kms_key_name value, if any. func (c *ObjectsInsertCall) KmsKeyName(kmsKeyName string) *ObjectsInsertCall { c.urlParams_.Set("kmsKeyName", kmsKeyName) return c } -// Name sets the optional parameter "name": Name of the object. Required -// when the object metadata is not otherwise provided. Overrides the -// object metadata's name value, if any. For information about how to -// URL encode object names to be path safe, see Encoding URI Path Parts +// Name sets the optional parameter "name": Name of the object. Required when +// the object metadata is not otherwise provided. Overrides the object +// metadata's name value, if any. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts // (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { c.urlParams_.Set("name", name) @@ -14000,30 +10100,30 @@ func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { // // allAuthenticatedUsers get READER access. // -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// "bucketOwnerFullControl" - Object owner gets OWNER access, and project // -// project team owners get OWNER access. +// team owners get OWNER access. // -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// "bucketOwnerRead" - Object owner gets OWNER access, and project team // -// team owners get READER access. +// owners get READER access. // // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // // members get access according to their roles. // -// "publicRead" - Object owner gets OWNER access, and allUsers get +// "publicRead" - Object owner gets OWNER access, and allUsers get READER // -// READER access. +// access. func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) return c } -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl, unless the object resource -// specifies the acl property, when it defaults to full. +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to noAcl, unless the object resource specifies the acl +// property, when it defaults to full. // // Possible values: // @@ -14034,20 +10134,19 @@ func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall { return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *ObjectsInsertCall) UserProject(userProject string) *ObjectsInsertCall { c.urlParams_.Set("userProject", userProject) return c } -// Media specifies the media to upload in one or more chunks. The chunk -// size may be controlled by supplying a MediaOption generated by +// Media specifies the media to upload in one or more chunks. The chunk size +// may be controlled by supplying a MediaOption generated by // googleapi.ChunkSize. The chunk size defaults to -// googleapi.DefaultUploadChunkSize.The Content-Type header used in the -// upload request will be determined by sniffing the contents of r, -// unless a MediaOption generated by googleapi.ContentType is -// supplied. 
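// Example (sketch): a chunked upload that survives transient failures.
// Supplying googleapi.ChunkSize forces the resumable protocol and buffers
// each chunk, which is the precondition for WithRetry (documented just
// below) being able to resend a failed chunk. The backoff values and file
// name are illustrative assumptions.
package main

import (
	"context"
	"log"
	"os"
	"time"

	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	f, err := os.Open("dump.bin")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	obj, err := svc.Objects.Insert("example-bucket", &storage.Object{Name: "dumps/dump.bin"}).
		Media(f,
			googleapi.ChunkSize(16*1024*1024), // 16 MiB chunks, buffered for retry
			googleapi.ContentType("application/octet-stream")).
		// nil errorFunc falls back to the library's default retry predicate.
		WithRetry(&gax.Backoff{Initial: time.Second, Max: 30 * time.Second, Multiplier: 2}, nil).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("uploaded %s (generation %d)", obj.Name, obj.Generation)
}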
+// googleapi.DefaultUploadChunkSize. The Content-Type header used in the upload +// request will be determined by sniffing the contents of r, unless a +// MediaOption generated by googleapi.ContentType is supplied. // At most one of Media and ResumableMedia may be set. func (c *ObjectsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) *ObjectsInsertCall { if ct := c.object.ContentType; ct != "" { @@ -14057,46 +10156,45 @@ func (c *ObjectsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) return c } -// ResumableMedia specifies the media to upload in chunks and can be -// canceled with ctx. +// ResumableMedia specifies the media to upload in chunks and can be canceled +// with ctx. // // Deprecated: use Media instead. // -// At most one of Media and ResumableMedia may be set. mediaType -// identifies the MIME media type of the upload, such as "image/png". If -// mediaType is "", it will be auto-detected. The provided ctx will -// supersede any context previously provided to the Context method. +// At most one of Media and ResumableMedia may be set. mediaType identifies the +// MIME media type of the upload, such as "image/png". If mediaType is "", it +// will be auto-detected. The provided ctx will supersede any context +// previously provided to the Context method. func (c *ObjectsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *ObjectsInsertCall { c.ctx_ = ctx c.mediaInfo_ = gensupport.NewInfoFromResumableMedia(r, size, mediaType) return c } -// ProgressUpdater provides a callback function that will be called -// after every chunk. It should be a low-latency function in order to -// not slow down the upload operation. This should only be called when -// using ResumableMedia (as opposed to Media). +// ProgressUpdater provides a callback function that will be called after every +// chunk. It should be a low-latency function in order to not slow down the +// upload operation. This should only be called when using ResumableMedia (as +// opposed to Media). func (c *ObjectsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *ObjectsInsertCall { c.mediaInfo_.SetProgressUpdater(pu) return c } -// WithRetry causes the library to retry the initial request of the -// upload(for resumable uploads) or the entire upload (for multipart -// uploads) ifa transient error occurs. This is contingent on ChunkSize -// being > 0 (sothat the input data may be buffered). The backoff -// argument will be used todetermine exponential backoff timing, and the -// errorFunc is used to determinewhich errors are considered retryable. -// By default, exponetial backoff will beapplied using gax defaults, and -// the following errors are retried: +// WithRetry causes the library to retry the initial request of the upload (for +// resumable uploads) or the entire upload (for multipart uploads) if a +// transient error occurs. This is contingent on ChunkSize being > 0 (so that +// the input data may be buffered). The backoff argument will be used +// to determine exponential backoff timing, and the errorFunc is used to +// determine which errors are considered retryable. By default, exponential +// backoff will be applied using gax defaults, and the following errors are +// retried: // // - HTTP responses with codes 408, 429, 502, 503, and 504. // // - Transient network errors such as connection reset and // io.ErrUnexpectedEOF. // -// - Errors which are considered transient using the Temporary() -// interface.
+// - Errors which are considered transient using the Temporary() interface. // // - Wrapped versions of these errors. func (c *ObjectsInsertCall) WithRetry(bo *gax.Backoff, errorFunc func(err error) bool) *ObjectsInsertCall { @@ -14108,16 +10206,14 @@ func (c *ObjectsInsertCall) WithRetry(bo *gax.Backoff, errorFunc func(err error) } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ObjectsInsertCall) Fields(s ...googleapi.Field) *ObjectsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. // This context will supersede any context previously provided to the // ResumableMedia method. func (c *ObjectsInsertCall) Context(ctx context.Context) *ObjectsInsertCall { @@ -14125,8 +10221,8 @@ func (c *ObjectsInsertCall) Context(ctx context.Context) *ObjectsInsertCall { return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ObjectsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -14135,18 +10231,12 @@ func (c *ObjectsInsertCall) Header() http.Header { } func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") @@ -14177,12 +10267,10 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.objects.insert" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. +// Any non-2xx status code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") @@ -14231,133 +10319,8 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { return nil, err } return ret, nil - // { - // "description": "Stores a new object and metadata.", - // "httpMethod": "POST", - // "id": "storage.objects.insert", - // "mediaUpload": { - // "accept": [ - // "*/*" - // ], - // "protocols": { - // "resumable": { - // "multipart": true, - // "path": "/resumable/upload/storage/v1/b/{bucket}/o" - // }, - // "simple": { - // "multipart": true, - // "path": "/upload/storage/v1/b/{bucket}/o" - // } - // } - // }, - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "contentEncoding": { - // "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "kmsKeyName": { - // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", - // "location": "query", - // "type": "string" - // }, - // "name": { - // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "query", - // "type": "string" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o", - // "request": { - // "$ref": "Object" - // }, - // "response": { - // "$ref": "Object" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsMediaUpload": true - // } - } -// method id "storage.objects.list": - type ObjectsListCall struct { s *Service bucket string @@ -14376,30 +10339,29 @@ func (r *ObjectsService) List(bucket string) *ObjectsListCall { return c } -// Delimiter sets the optional parameter "delimiter": Returns results in -// a directory-like mode. items will contain only objects whose names, -// aside from the prefix, do not contain delimiter. Objects whose names, -// aside from the prefix, contain delimiter will have their name, -// truncated after the delimiter, returned in prefixes. Duplicate -// prefixes are omitted. +// Delimiter sets the optional parameter "delimiter": Returns results in a +// directory-like mode. items will contain only objects whose names, aside from +// the prefix, do not contain delimiter. Objects whose names, aside from the +// prefix, contain delimiter will have their name, truncated after the +// delimiter, returned in prefixes. Duplicate prefixes are omitted. func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall { c.urlParams_.Set("delimiter", delimiter) return c } -// EndOffset sets the optional parameter "endOffset": Filter results to -// objects whose names are lexicographically before endOffset. If -// startOffset is also set, the objects listed will have names between -// startOffset (inclusive) and endOffset (exclusive). +// EndOffset sets the optional parameter "endOffset": Filter results to objects +// whose names are lexicographically before endOffset. 
If startOffset is also +// set, the objects listed will have names between startOffset (inclusive) and +// endOffset (exclusive). func (c *ObjectsListCall) EndOffset(endOffset string) *ObjectsListCall { c.urlParams_.Set("endOffset", endOffset) return c } // IncludeFoldersAsPrefixes sets the optional parameter -// "includeFoldersAsPrefixes": Only applicable if delimiter is set to -// '/'. If true, will also include folders and managed folders (besides -// objects) in the returned prefixes. +// "includeFoldersAsPrefixes": Only applicable if delimiter is set to '/'. If +// true, will also include folders and managed folders (besides objects) in the +// returned prefixes. func (c *ObjectsListCall) IncludeFoldersAsPrefixes(includeFoldersAsPrefixes bool) *ObjectsListCall { c.urlParams_.Set("includeFoldersAsPrefixes", fmt.Sprint(includeFoldersAsPrefixes)) return c @@ -14407,47 +10369,45 @@ func (c *ObjectsListCall) IncludeFoldersAsPrefixes(includeFoldersAsPrefixes bool // IncludeTrailingDelimiter sets the optional parameter // "includeTrailingDelimiter": If true, objects that end in exactly one -// instance of delimiter will have their metadata included in items in -// addition to prefixes. +// instance of delimiter will have their metadata included in items in addition +// to prefixes. func (c *ObjectsListCall) IncludeTrailingDelimiter(includeTrailingDelimiter bool) *ObjectsListCall { c.urlParams_.Set("includeTrailingDelimiter", fmt.Sprint(includeTrailingDelimiter)) return c } -// MatchGlob sets the optional parameter "matchGlob": Filter results to -// objects and prefixes that match this glob pattern. +// MatchGlob sets the optional parameter "matchGlob": Filter results to objects +// and prefixes that match this glob pattern. func (c *ObjectsListCall) MatchGlob(matchGlob string) *ObjectsListCall { c.urlParams_.Set("matchGlob", matchGlob) return c } -// MaxResults sets the optional parameter "maxResults": Maximum number -// of items plus prefixes to return in a single page of responses. As -// duplicate prefixes are omitted, fewer total results may be returned -// than requested. The service will use this parameter or 1,000 items, -// whichever is smaller. +// MaxResults sets the optional parameter "maxResults": Maximum number of items +// plus prefixes to return in a single page of responses. As duplicate prefixes +// are omitted, fewer total results may be returned than requested. The service +// will use this parameter or 1,000 items, whichever is smaller. func (c *ObjectsListCall) MaxResults(maxResults int64) *ObjectsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// PageToken sets the optional parameter "pageToken": A -// previously-returned page token representing part of the larger set of -// results to view. +// PageToken sets the optional parameter "pageToken": A previously-returned +// page token representing part of the larger set of results to view. func (c *ObjectsListCall) PageToken(pageToken string) *ObjectsListCall { c.urlParams_.Set("pageToken", pageToken) return c } -// Prefix sets the optional parameter "prefix": Filter results to -// objects whose names begin with this prefix. +// Prefix sets the optional parameter "prefix": Filter results to objects whose +// names begin with this prefix. func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall { c.urlParams_.Set("prefix", prefix) return c } -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. 
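// Example (sketch): one page of a directory-style listing. With Delimiter
// set to "/", Items carries the objects directly under the prefix and
// Prefixes carries the simulated subdirectories; bucket and prefix names are
// placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	objs, err := svc.Objects.List("example-bucket").
		Prefix("photos/2024/").
		Delimiter("/").
		MaxResults(100).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, o := range objs.Items {
		fmt.Println("object:", o.Name)
	}
	for _, p := range objs.Prefixes {
		fmt.Println("prefix:", p)
	}
}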
+// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to noAcl. // // Possible values: // @@ -14459,65 +10419,61 @@ func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { } // SoftDeleted sets the optional parameter "softDeleted": If true, only -// soft-deleted object versions will be listed. The default is false. -// For more information, see Soft Delete. +// soft-deleted object versions will be listed. The default is false. For more +// information, see Soft Delete. func (c *ObjectsListCall) SoftDeleted(softDeleted bool) *ObjectsListCall { c.urlParams_.Set("softDeleted", fmt.Sprint(softDeleted)) return c } -// StartOffset sets the optional parameter "startOffset": Filter results -// to objects whose names are lexicographically equal to or after -// startOffset. If endOffset is also set, the objects listed will have -// names between startOffset (inclusive) and endOffset (exclusive). +// StartOffset sets the optional parameter "startOffset": Filter results to +// objects whose names are lexicographically equal to or after startOffset. If +// endOffset is also set, the objects listed will have names between +// startOffset (inclusive) and endOffset (exclusive). func (c *ObjectsListCall) StartOffset(startOffset string) *ObjectsListCall { c.urlParams_.Set("startOffset", startOffset) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *ObjectsListCall) UserProject(userProject string) *ObjectsListCall { c.urlParams_.Set("userProject", userProject) return c } -// Versions sets the optional parameter "versions": If true, lists all -// versions of an object as distinct results. The default is false. For -// more information, see Object Versioning. +// Versions sets the optional parameter "versions": If true, lists all versions +// of an object as distinct results. The default is false. For more +// information, see Object Versioning. func (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall { c.urlParams_.Set("versions", fmt.Sprint(versions)) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. func (c *ObjectsListCall) IfNoneMatch(entityTag string) *ObjectsListCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
+// Context sets the context to be used in this call's Do method. func (c *ObjectsListCall) Context(ctx context.Context) *ObjectsListCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ObjectsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -14526,12 +10482,7 @@ func (c *ObjectsListCall) Header() http.Header { } func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -14552,12 +10503,10 @@ func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.objects.list" call. -// Exactly one of *Objects or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Objects.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. +// Any non-2xx status code is an error. Response headers are in either +// *Objects.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -14588,111 +10537,6 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { return nil, err } return ret, nil - // { - // "description": "Retrieves a list of objects matching the criteria.", - // "httpMethod": "GET", - // "id": "storage.objects.list", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which to look for objects.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "delimiter": { - // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", - // "location": "query", - // "type": "string" - // }, - // "endOffset": { - // "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", - // "location": "query", - // "type": "string" - // }, - // "includeFoldersAsPrefixes": { - // "description": "Only applicable if delimiter is set to '/'. 
If true, will also include folders and managed folders (besides objects) in the returned prefixes.", - // "location": "query", - // "type": "boolean" - // }, - // "includeTrailingDelimiter": { - // "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", - // "location": "query", - // "type": "boolean" - // }, - // "matchGlob": { - // "description": "Filter results to objects and prefixes that match this glob pattern.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "1000", - // "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "A previously-returned page token representing part of the larger set of results to view.", - // "location": "query", - // "type": "string" - // }, - // "prefix": { - // "description": "Filter results to objects whose names begin with this prefix.", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "softDeleted": { - // "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see Soft Delete.", - // "location": "query", - // "type": "boolean" - // }, - // "startOffset": { - // "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // }, - // "versions": { - // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", - // "location": "query", - // "type": "boolean" - // } - // }, - // "path": "b/{bucket}/o", - // "response": { - // "$ref": "Objects" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsSubscription": true - // } - } // Pages invokes f for each page of results. @@ -14700,7 +10544,7 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { // The provided context supersedes any context provided to the Context method. 
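// Example (sketch): walking a large listing with the Pages helper below,
// which re-issues the call with each NextPageToken until the results are
// exhausted; returning a non-nil error from the callback stops the iteration
// early. Bucket and prefix names are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	total := 0
	err = svc.Objects.List("example-bucket").Prefix("logs/").Pages(ctx, func(page *storage.Objects) error {
		total += len(page.Items)
		return nil // a non-nil error here would stop paging
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("objects under logs/:", total)
}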
func (c *ObjectsListCall) Pages(ctx context.Context, f func(*Objects) error) error { c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + defer c.PageToken(c.urlParams_.Get("pageToken")) for { x, err := c.Do() if err != nil { @@ -14716,8 +10560,6 @@ func (c *ObjectsListCall) Pages(ctx context.Context, f func(*Objects) error) err } } -// method id "storage.objects.patch": - type ObjectsPatchCall struct { s *Service bucket string @@ -14731,8 +10573,8 @@ type ObjectsPatchCall struct { // Patch: Patches an object's metadata. // // - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts // (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -14742,45 +10584,44 @@ func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *O return c } -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). +// Generation sets the optional parameter "generation": If present, selects a +// specific revision of this object (as opposed to the latest version, the +// default). func (c *ObjectsPatchCall) Generation(generation int64) *ObjectsPatchCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": Makes the +// operation conditional on whether the object's current generation matches the +// given value. Setting to 0 makes the operation succeed only if there are no +// live versions of the object. func (c *ObjectsPatchCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsPatchCall { c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. +// IfGenerationNotMatch sets the optional parameter "ifGenerationNotMatch": +// Makes the operation conditional on whether the object's current generation +// does not match the given value. If no live object exists, the precondition +// fails. Setting to 0 makes the operation succeed only if there is a live +// version of the object. func (c *ObjectsPatchCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsPatchCall { c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) return c } -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. 
+// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the operation conditional on whether the object's current +// metageneration matches the given value. func (c *ObjectsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsPatchCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. +// "ifMetagenerationNotMatch": Makes the operation conditional on whether the +// object's current metageneration does not match the given value. func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsPatchCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c @@ -14788,8 +10629,8 @@ func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int // OverrideUnlockedRetention sets the optional parameter // "overrideUnlockedRetention": Must be true to remove the retention -// configuration, reduce its unlocked retention period, or change its -// mode from unlocked to locked. +// configuration, reduce its unlocked retention period, or change its mode from +// unlocked to locked. func (c *ObjectsPatchCall) OverrideUnlockedRetention(overrideUnlockedRetention bool) *ObjectsPatchCall { c.urlParams_.Set("overrideUnlockedRetention", fmt.Sprint(overrideUnlockedRetention)) return c @@ -14804,29 +10645,29 @@ func (c *ObjectsPatchCall) OverrideUnlockedRetention(overrideUnlockedRetention b // // allAuthenticatedUsers get READER access. // -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// "bucketOwnerFullControl" - Object owner gets OWNER access, and project // -// project team owners get OWNER access. +// team owners get OWNER access. // -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// "bucketOwnerRead" - Object owner gets OWNER access, and project team // -// team owners get READER access. +// owners get READER access. // // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // // members get access according to their roles. // -// "publicRead" - Object owner gets OWNER access, and allUsers get +// "publicRead" - Object owner gets OWNER access, and allUsers get READER // -// READER access. +// access. func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) return c } -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to full. +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to full. // // Possible values: // @@ -14837,31 +10678,29 @@ func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall { return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request, for Requester Pays buckets. func (c *ObjectsPatchCall) UserProject(userProject string) *ObjectsPatchCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ObjectsPatchCall) Context(ctx context.Context) *ObjectsPatchCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ObjectsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -14870,18 +10709,12 @@ func (c *ObjectsPatchCall) Header() http.Header { } func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") @@ -14899,12 +10732,10 @@ func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.objects.patch" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. +// Any non-2xx status code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") @@ -14935,124 +10766,12 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { return nil, err } return ret, nil - // { - // "description": "Patches an object's metadata.", - // "httpMethod": "PATCH", - // "id": "storage.objects.patch", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "overrideUnlockedRetention": { - // "description": "Must be true to remove the retention configuration, reduce its unlocked retention period, or change its mode from unlocked to locked.", - // "location": "query", - // "type": "boolean" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. 
Defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}", - // "request": { - // "$ref": "Object" - // }, - // "response": { - // "$ref": "Object" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.objects.restore": - type ObjectsRestoreCall struct { s *Service bucket string object string - object2 *Object urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header @@ -15062,62 +10781,61 @@ type ObjectsRestoreCall struct { // // - bucket: Name of the bucket in which the object resides. // - generation: Selects a specific revision of this object. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -func (r *ObjectsService) Restore(bucket string, object string, object2 *Object) *ObjectsRestoreCall { +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts. +func (r *ObjectsService) Restore(bucket string, object string, generation int64) *ObjectsRestoreCall { c := &ObjectsRestoreCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object - c.object2 = object2 + c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } -// CopySourceAcl sets the optional parameter "copySourceAcl": If true, -// copies the source object's ACL; otherwise, uses the bucket's default -// object ACL. The default is false. +// CopySourceAcl sets the optional parameter "copySourceAcl": If true, copies +// the source object's ACL; otherwise, uses the bucket's default object ACL. +// The default is false. func (c *ObjectsRestoreCall) CopySourceAcl(copySourceAcl bool) *ObjectsRestoreCall { c.urlParams_.Set("copySourceAcl", fmt.Sprint(copySourceAcl)) return c } -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's one live -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": Makes the +// operation conditional on whether the object's one live generation matches +// the given value. Setting to 0 makes the operation succeed only if there are +// no live versions of the object. func (c *ObjectsRestoreCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRestoreCall { c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// none of the object's live generations match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. +// IfGenerationNotMatch sets the optional parameter "ifGenerationNotMatch": +// Makes the operation conditional on whether none of the object's live +// generations match the given value. 
If no live object exists, the +// precondition fails. Setting to 0 makes the operation succeed only if there +// is a live version of the object. func (c *ObjectsRestoreCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRestoreCall { c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) return c } -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's one live metageneration matches the given value. +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the operation conditional on whether the object's one live +// metageneration matches the given value. func (c *ObjectsRestoreCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRestoreCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether none of the object's live metagenerations match the given -// value. +// "ifMetagenerationNotMatch": Makes the operation conditional on whether none +// of the object's live metagenerations match the given value. func (c *ObjectsRestoreCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRestoreCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to full. +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to full. // // Possible values: // @@ -15128,31 +10846,29 @@ func (c *ObjectsRestoreCall) Projection(projection string) *ObjectsRestoreCall { return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *ObjectsRestoreCall) UserProject(userProject string) *ObjectsRestoreCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ObjectsRestoreCall) Fields(s ...googleapi.Field) *ObjectsRestoreCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ObjectsRestoreCall) Context(ctx context.Context) *ObjectsRestoreCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
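Note that this Restore hunk is a breaking signature change, not just a doc reflow: ObjectsService.Restore now takes the soft-deleted generation as a required int64 argument (sent as a query parameter) instead of an *Object request body, and the doRequest hunk that follows stops serializing a JSON body accordingly. A sketch of a call against the new signature, with illustrative bucket, object, and generation values:

    package sketch

    import storage "google.golang.org/api/storage/v1"

    // restoreObject restores one soft-deleted revision. The generation
    // argument is now mandatory; previously callers passed an *Object body.
    func restoreObject(svc *storage.Service, gen int64) (*storage.Object, error) {
        return svc.Objects.Restore("example-bucket", "notes.txt", gen).
            CopySourceAcl(true). // optional: keep the source object's ACL
            Do()
    }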
func (c *ObjectsRestoreCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -15161,18 +10877,8 @@ func (c *ObjectsRestoreCall) Header() http.Header { } func (c *ObjectsRestoreCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/restore") @@ -15190,12 +10896,10 @@ func (c *ObjectsRestoreCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.objects.restore" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. +// Any non-2xx status code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *ObjectsRestoreCall) Do(opts ...googleapi.CallOption) (*Object, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -15226,99 +10930,8 @@ func (c *ObjectsRestoreCall) Do(opts ...googleapi.CallOption) (*Object, error) { return nil, err } return ret, nil - // { - // "description": "Restores a soft-deleted object.", - // "httpMethod": "POST", - // "id": "storage.objects.restore", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "copySourceAcl": { - // "description": "If true, copies the source object's ACL; otherwise, uses the bucket's default object ACL. The default is false.", - // "location": "query", - // "type": "boolean" - // }, - // "generation": { - // "description": "Selects a specific revision of this object.", - // "format": "int64", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's one live generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether none of the object's live generations match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's one live metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether none of the object's live metagenerations match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/restore", - // "request": { - // "$ref": "Object" - // }, - // "response": { - // "$ref": "Object" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.objects.rewrite": - type ObjectsRewriteCall struct { s *Service sourceBucket string @@ -15334,19 +10947,16 @@ type ObjectsRewriteCall struct { // Rewrite: Rewrites a source object to a destination object. Optionally // overrides metadata. // -// - destinationBucket: Name of the bucket in which to store the new -// object. Overrides the provided object metadata's bucket value, if -// any. +// - destinationBucket: Name of the bucket in which to store the new object. +// Overrides the provided object metadata's bucket value, if any. // - destinationObject: Name of the new object. Required when the object -// metadata is not otherwise provided. Overrides the object metadata's -// name value, if any. For information about how to URL encode object -// names to be path safe, see Encoding URI Path Parts +// metadata is not otherwise provided. Overrides the object metadata's name +// value, if any. For information about how to URL encode object names to be +// path safe, see Encoding URI Path Parts // (https://cloud.google.com/storage/docs/request-endpoints#encoding). -// - sourceBucket: Name of the bucket in which to find the source -// object. -// - sourceObject: Name of the source object. For information about how -// to URL encode object names to be path safe, see Encoding URI Path -// Parts +// - sourceBucket: Name of the bucket in which to find the source object. +// - sourceObject: Name of the source object. For information about how to URL +// encode object names to be path safe, see Encoding URI Path Parts // (https://cloud.google.com/storage/docs/request-endpoints#encoding). 
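Across the doRequest hunks throughout this file, the hand-rolled header prologue (x-goog-api-client, caller-supplied headers, User-Agent, and Content-Type when a body is sent) collapses into a single gensupport.SetHeaders call. gensupport is internal to google.golang.org/api, so the following is only a hypothetical reconstruction of the consolidated behavior, pieced together from the deleted lines:

    package sketch

    import "net/http"

    // setHeaders mirrors what each deleted doRequest prologue did by hand;
    // the real helper lives in the internal gensupport package.
    func setHeaders(userAgent, contentType string, extra http.Header) http.Header {
        h := make(http.Header)
        h.Set("x-goog-api-client", "gl-go/... gdcl/...") // version strings elided
        for k, v := range extra {
            h[k] = v // caller headers, as set via the Header() accessor
        }
        h.Set("User-Agent", userAgent)
        if contentType != "" {
            h.Set("Content-Type", contentType) // only for calls carrying a JSON body
        }
        return h
    }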
func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall { c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -15358,22 +10968,19 @@ func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, desti return c } -// DestinationKmsKeyName sets the optional parameter -// "destinationKmsKeyName": Resource name of the Cloud KMS key, of the -// form -// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// -// that will be used to encrypt the object. Overrides the object -// -// metadata's kms_key_name value, if any. +// DestinationKmsKeyName sets the optional parameter "destinationKmsKeyName": +// Resource name of the Cloud KMS key, of the form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that +// will be used to encrypt the object. Overrides the object metadata's +// kms_key_name value, if any. func (c *ObjectsRewriteCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsRewriteCall { c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) return c } // DestinationPredefinedAcl sets the optional parameter -// "destinationPredefinedAcl": Apply a predefined set of access controls -// to the destination object. +// "destinationPredefinedAcl": Apply a predefined set of access controls to the +// destination object. // // Possible values: // @@ -15381,94 +10988,89 @@ func (c *ObjectsRewriteCall) DestinationKmsKeyName(destinationKmsKeyName string) // // allAuthenticatedUsers get READER access. // -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// "bucketOwnerFullControl" - Object owner gets OWNER access, and project // -// project team owners get OWNER access. +// team owners get OWNER access. // -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// "bucketOwnerRead" - Object owner gets OWNER access, and project team // -// team owners get READER access. +// owners get READER access. // // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // // members get access according to their roles. // -// "publicRead" - Object owner gets OWNER access, and allUsers get +// "publicRead" - Object owner gets OWNER access, and allUsers get READER // -// READER access. +// access. func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall { c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) return c } -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": Makes the +// operation conditional on whether the object's current generation matches the +// given value. Setting to 0 makes the operation succeed only if there are no +// live versions of the object. func (c *ObjectsRewriteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRewriteCall { c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. 
If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. +// IfGenerationNotMatch sets the optional parameter "ifGenerationNotMatch": +// Makes the operation conditional on whether the object's current generation +// does not match the given value. If no live object exists, the precondition +// fails. Setting to 0 makes the operation succeed only if there is a live +// version of the object. func (c *ObjectsRewriteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRewriteCall { c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) return c } -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the destination object's current metageneration matches the given -// value. +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the operation conditional on whether the destination object's current +// metageneration matches the given value. func (c *ObjectsRewriteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRewriteCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the destination object's current metageneration does not -// match the given value. +// "ifMetagenerationNotMatch": Makes the operation conditional on whether the +// destination object's current metageneration does not match the given value. func (c *ObjectsRewriteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRewriteCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } // IfSourceGenerationMatch sets the optional parameter -// "ifSourceGenerationMatch": Makes the operation conditional on whether -// the source object's current generation matches the given value. +// "ifSourceGenerationMatch": Makes the operation conditional on whether the +// source object's current generation matches the given value. func (c *ObjectsRewriteCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsRewriteCall { c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) return c } // IfSourceGenerationNotMatch sets the optional parameter -// "ifSourceGenerationNotMatch": Makes the operation conditional on -// whether the source object's current generation does not match the -// given value. +// "ifSourceGenerationNotMatch": Makes the operation conditional on whether the +// source object's current generation does not match the given value. func (c *ObjectsRewriteCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsRewriteCall { c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) return c } // IfSourceMetagenerationMatch sets the optional parameter -// "ifSourceMetagenerationMatch": Makes the operation conditional on -// whether the source object's current metageneration matches the given -// value. +// "ifSourceMetagenerationMatch": Makes the operation conditional on whether +// the source object's current metageneration matches the given value. 
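The if*Match / if*NotMatch setters above all reduce to query parameters that make the rewrite conditional on source or destination state. A sketch of the optimistic-concurrency pattern they enable, assuming a previously observed source generation and illustrative names:

    package sketch

    import storage "google.golang.org/api/storage/v1"

    // rewriteIfUnchanged copies the source only if its generation still matches
    // the value read earlier, guarding against a concurrent overwrite.
    func rewriteIfUnchanged(svc *storage.Service, srcGen int64) (*storage.RewriteResponse, error) {
        return svc.Objects.Rewrite("src-bucket", "a.bin", "dst-bucket", "a.bin", &storage.Object{}).
            IfSourceGenerationMatch(srcGen).
            Do()
    }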
func (c *ObjectsRewriteCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsRewriteCall { c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) return c } // IfSourceMetagenerationNotMatch sets the optional parameter -// "ifSourceMetagenerationNotMatch": Makes the operation conditional on -// whether the source object's current metageneration does not match the -// given value. +// "ifSourceMetagenerationNotMatch": Makes the operation conditional on whether +// the source object's current metageneration does not match the given value. func (c *ObjectsRewriteCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsRewriteCall { c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) return c @@ -15476,21 +11078,20 @@ func (c *ObjectsRewriteCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerati // MaxBytesRewrittenPerCall sets the optional parameter // "maxBytesRewrittenPerCall": The maximum number of bytes that will be -// rewritten per rewrite request. Most callers shouldn't need to specify -// this parameter - it is primarily in place to support testing. If -// specified the value must be an integral multiple of 1 MiB (1048576). -// Also, this only applies to requests where the source and destination -// span locations and/or storage classes. Finally, this value must not -// change across rewrite calls else you'll get an error that the -// rewriteToken is invalid. +// rewritten per rewrite request. Most callers shouldn't need to specify this +// parameter - it is primarily in place to support testing. If specified the +// value must be an integral multiple of 1 MiB (1048576). Also, this only +// applies to requests where the source and destination span locations and/or +// storage classes. Finally, this value must not change across rewrite calls +// else you'll get an error that the rewriteToken is invalid. func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall int64) *ObjectsRewriteCall { c.urlParams_.Set("maxBytesRewrittenPerCall", fmt.Sprint(maxBytesRewrittenPerCall)) return c } -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl, unless the object resource -// specifies the acl property, when it defaults to full. +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to noAcl, unless the object resource specifies the acl +// property, when it defaults to full. // // Possible values: // @@ -15501,50 +11102,47 @@ func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall { return c } -// RewriteToken sets the optional parameter "rewriteToken": Include this -// field (from the previous rewrite response) on each rewrite request -// after the first one, until the rewrite response 'done' flag is true. -// Calls that provide a rewriteToken can omit all other request fields, -// but if included those fields must match the values provided in the -// first rewrite request. +// RewriteToken sets the optional parameter "rewriteToken": Include this field +// (from the previous rewrite response) on each rewrite request after the first +// one, until the rewrite response 'done' flag is true. Calls that provide a +// rewriteToken can omit all other request fields, but if included those fields +// must match the values provided in the first rewrite request. 
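The RewriteToken doc above describes a resumable protocol: large or cross-location rewrites may return done=false plus a token that must be echoed on the next request. A sketch of the usual drive-to-completion loop, with illustrative bucket and object names:

    package sketch

    import (
        "log"

        storage "google.golang.org/api/storage/v1"
    )

    // rewriteAll repeats storage.objects.rewrite until the response reports
    // Done, passing each response's RewriteToken back into the next call.
    func rewriteAll(svc *storage.Service) error {
        call := svc.Objects.Rewrite("src-bucket", "big.bin", "dst-bucket", "big.bin", &storage.Object{})
        for {
            resp, err := call.Do()
            if err != nil {
                return err
            }
            if resp.Done {
                log.Printf("rewrote %d bytes", resp.TotalBytesRewritten)
                return nil
            }
            call.RewriteToken(resp.RewriteToken)
        }
    }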
func (c *ObjectsRewriteCall) RewriteToken(rewriteToken string) *ObjectsRewriteCall { c.urlParams_.Set("rewriteToken", rewriteToken) return c } -// SourceGeneration sets the optional parameter "sourceGeneration": If -// present, selects a specific revision of the source object (as opposed -// to the latest version, the default). +// SourceGeneration sets the optional parameter "sourceGeneration": If present, +// selects a specific revision of the source object (as opposed to the latest +// version, the default). func (c *ObjectsRewriteCall) SourceGeneration(sourceGeneration int64) *ObjectsRewriteCall { c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *ObjectsRewriteCall) UserProject(userProject string) *ObjectsRewriteCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ObjectsRewriteCall) Fields(s ...googleapi.Field) *ObjectsRewriteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ObjectsRewriteCall) Context(ctx context.Context) *ObjectsRewriteCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ObjectsRewriteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -15553,18 +11151,12 @@ func (c *ObjectsRewriteCall) Header() http.Header { } func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}") @@ -15584,12 +11176,11 @@ func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.objects.rewrite" call. -// Exactly one of *RewriteResponse or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *RewriteResponse.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
+// Any non-2xx status code is an error. Response headers are in either +// *RewriteResponse.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -15620,169 +11211,8 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, return nil, err } return ret, nil - // { - // "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", - // "httpMethod": "POST", - // "id": "storage.objects.rewrite", - // "parameterOrder": [ - // "sourceBucket", - // "sourceObject", - // "destinationBucket", - // "destinationObject" - // ], - // "parameters": { - // "destinationBucket": { - // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationKmsKeyName": { - // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", - // "location": "query", - // "type": "string" - // }, - // "destinationObject": { - // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationPredefinedAcl": { - // "description": "Apply a predefined set of access controls to the destination object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceGenerationMatch": { - // "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "maxBytesRewrittenPerCall": { - // "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "rewriteToken": { - // "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. 
Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", - // "location": "query", - // "type": "string" - // }, - // "sourceBucket": { - // "description": "Name of the bucket in which to find the source object.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "sourceGeneration": { - // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "sourceObject": { - // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", - // "request": { - // "$ref": "Object" - // }, - // "response": { - // "$ref": "RewriteResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.objects.setIamPolicy": - type ObjectsSetIamPolicyCall struct { s *Service bucket string @@ -15796,8 +11226,8 @@ type ObjectsSetIamPolicyCall struct { // SetIamPolicy: Updates an IAM policy for the specified object. // // - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts // (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) SetIamPolicy(bucket string, object string, policy *Policy) *ObjectsSetIamPolicyCall { c := &ObjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -15807,39 +11237,37 @@ func (r *ObjectsService) SetIamPolicy(bucket string, object string, policy *Poli return c } -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). +// Generation sets the optional parameter "generation": If present, selects a +// specific revision of this object (as opposed to the latest version, the +// default). func (c *ObjectsSetIamPolicyCall) Generation(generation int64) *ObjectsSetIamPolicyCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *ObjectsSetIamPolicyCall) UserProject(userProject string) *ObjectsSetIamPolicyCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ObjectsSetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ObjectsSetIamPolicyCall) Context(ctx context.Context) *ObjectsSetIamPolicyCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ObjectsSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -15848,18 +11276,12 @@ func (c *ObjectsSetIamPolicyCall) Header() http.Header { } func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") @@ -15877,12 +11299,10 @@ func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) } // Do executes the "storage.objects.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. +// Any non-2xx status code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") @@ -15913,57 +11333,8 @@ func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err return nil, err } return ret, nil - // { - // "description": "Updates an IAM policy for the specified object.", - // "httpMethod": "PUT", - // "id": "storage.objects.setIamPolicy", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/iam", - // "request": { - // "$ref": "Policy" - // }, - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.objects.testIamPermissions": - type ObjectsTestIamPermissionsCall struct { s *Service bucket string @@ -15974,12 +11345,12 @@ type ObjectsTestIamPermissionsCall struct { header_ http.Header } -// TestIamPermissions: Tests a set of permissions on the given object to -// see which, if any, are held by the caller. +// TestIamPermissions: Tests a set of permissions on the given object to see +// which, if any, are held by the caller. // // - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts // (https://cloud.google.com/storage/docs/request-endpoints#encoding). // - permissions: Permissions to test. func (r *ObjectsService) TestIamPermissions(bucket string, object string, permissions []string) *ObjectsTestIamPermissionsCall { @@ -15990,49 +11361,45 @@ func (r *ObjectsService) TestIamPermissions(bucket string, object string, permis return c } -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). +// Generation sets the optional parameter "generation": If present, selects a +// specific revision of this object (as opposed to the latest version, the +// default). func (c *ObjectsTestIamPermissionsCall) Generation(generation int64) *ObjectsTestIamPermissionsCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. 
Required for Requester Pays buckets. func (c *ObjectsTestIamPermissionsCall) UserProject(userProject string) *ObjectsTestIamPermissionsCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ObjectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ObjectsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. func (c *ObjectsTestIamPermissionsCall) IfNoneMatch(entityTag string) *ObjectsTestIamPermissionsCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ObjectsTestIamPermissionsCall) Context(ctx context.Context) *ObjectsTestIamPermissionsCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ObjectsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -16041,12 +11408,7 @@ func (c *ObjectsTestIamPermissionsCall) Header() http.Header { } func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -16068,12 +11430,11 @@ func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, e } // Do executes the "storage.objects.testIamPermissions" call. -// Exactly one of *TestIamPermissionsResponse or error will be non-nil. // Any non-2xx status code is an error. Response headers are in either -// *TestIamPermissionsResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") @@ -16104,64 +11465,8 @@ func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI return nil, err } return ret, nil - // { - // "description": "Tests a set of permissions on the given object to see which, if any, are held by the caller.", - // "httpMethod": "GET", - // "id": "storage.objects.testIamPermissions", - // "parameterOrder": [ - // "bucket", - // "object", - // "permissions" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "permissions": { - // "description": "Permissions to test.", - // "location": "query", - // "repeated": true, - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/iam/testPermissions", - // "response": { - // "$ref": "TestIamPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.objects.update": - type ObjectsUpdateCall struct { s *Service bucket string @@ -16175,8 +11480,8 @@ type ObjectsUpdateCall struct { // Update: Updates an object's metadata. // // - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts // (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall { c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -16186,45 +11491,44 @@ func (r *ObjectsService) Update(bucket string, object string, object2 *Object) * return c } -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). +// Generation sets the optional parameter "generation": If present, selects a +// specific revision of this object (as opposed to the latest version, the +// default). func (c *ObjectsUpdateCall) Generation(generation int64) *ObjectsUpdateCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. 
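// The doRequest bodies throughout this diff collapse their inline header
// setup into gensupport.SetHeaders. Going only by the removed lines, the
// helper should behave like this hypothetical re-implementation (that an
// empty contentType skips Content-Type is an assumption, inferred from the
// calls that pass ""):
func setHeadersSketch(userAgent, contentType string, userHeaders http.Header) http.Header {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
	for k, v := range userHeaders { // caller-supplied headers, as in the removed copy loops
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", userAgent) // set after the copy, so it always wins
	if contentType != "" {
		reqHeaders.Set("Content-Type", contentType) // only for calls that send a body
	}
	return reqHeaders
}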
Setting to 0 makes the operation -// succeed only if there are no live versions of the object. +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": Makes the +// operation conditional on whether the object's current generation matches the +// given value. Setting to 0 makes the operation succeed only if there are no +// live versions of the object. func (c *ObjectsUpdateCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsUpdateCall { c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. +// IfGenerationNotMatch sets the optional parameter "ifGenerationNotMatch": +// Makes the operation conditional on whether the object's current generation +// does not match the given value. If no live object exists, the precondition +// fails. Setting to 0 makes the operation succeed only if there is a live +// version of the object. func (c *ObjectsUpdateCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsUpdateCall { c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) return c } -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the operation conditional on whether the object's current +// metageneration matches the given value. func (c *ObjectsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsUpdateCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. +// "ifMetagenerationNotMatch": Makes the operation conditional on whether the +// object's current metageneration does not match the given value. func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsUpdateCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c @@ -16232,8 +11536,8 @@ func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in // OverrideUnlockedRetention sets the optional parameter // "overrideUnlockedRetention": Must be true to remove the retention -// configuration, reduce its unlocked retention period, or change its -// mode from unlocked to locked. +// configuration, reduce its unlocked retention period, or change its mode from +// unlocked to locked. func (c *ObjectsUpdateCall) OverrideUnlockedRetention(overrideUnlockedRetention bool) *ObjectsUpdateCall { c.urlParams_.Set("overrideUnlockedRetention", fmt.Sprint(overrideUnlockedRetention)) return c @@ -16248,29 +11552,29 @@ func (c *ObjectsUpdateCall) OverrideUnlockedRetention(overrideUnlockedRetention // // allAuthenticatedUsers get READER access. // -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// "bucketOwnerFullControl" - Object owner gets OWNER access, and project // -// project team owners get OWNER access. +// team owners get OWNER access. 
// -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// "bucketOwnerRead" - Object owner gets OWNER access, and project team // -// team owners get READER access. +// owners get READER access. // // "private" - Object owner gets OWNER access. // "projectPrivate" - Object owner gets OWNER access, and project team // // members get access according to their roles. // -// "publicRead" - Object owner gets OWNER access, and allUsers get +// "publicRead" - Object owner gets OWNER access, and allUsers get READER // -// READER access. +// access. func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) return c } -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to full. +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to full. // // Possible values: // @@ -16281,31 +11585,29 @@ func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall { return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *ObjectsUpdateCall) UserProject(userProject string) *ObjectsUpdateCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ObjectsUpdateCall) Context(ctx context.Context) *ObjectsUpdateCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ObjectsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -16314,18 +11616,12 @@ func (c *ObjectsUpdateCall) Header() http.Header { } func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") @@ -16343,12 +11639,10 @@ func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.objects.update" call. -// Exactly one of *Object or error will be non-nil. 
Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. +// Any non-2xx status code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -16379,119 +11673,8 @@ func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { return nil, err } return ret, nil - // { - // "description": "Updates an object's metadata.", - // "httpMethod": "PUT", - // "id": "storage.objects.update", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "overrideUnlockedRetention": { - // "description": "Must be true to remove the retention configuration, reduce its unlocked retention period, or change its mode from unlocked to locked.", - // "location": "query", - // "type": "boolean" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}", - // "request": { - // "$ref": "Object" - // }, - // "response": { - // "$ref": "Object" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.objects.watchAll": - type ObjectsWatchAllCall struct { s *Service bucket string @@ -16511,21 +11694,20 @@ func (r *ObjectsService) WatchAll(bucket string, channel *Channel) *ObjectsWatch return c } -// Delimiter sets the optional parameter "delimiter": Returns results in -// a directory-like mode. items will contain only objects whose names, -// aside from the prefix, do not contain delimiter. Objects whose names, -// aside from the prefix, contain delimiter will have their name, -// truncated after the delimiter, returned in prefixes. Duplicate -// prefixes are omitted. +// Delimiter sets the optional parameter "delimiter": Returns results in a +// directory-like mode. items will contain only objects whose names, aside from +// the prefix, do not contain delimiter. Objects whose names, aside from the +// prefix, contain delimiter will have their name, truncated after the +// delimiter, returned in prefixes. Duplicate prefixes are omitted. func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall { c.urlParams_.Set("delimiter", delimiter) return c } -// EndOffset sets the optional parameter "endOffset": Filter results to -// objects whose names are lexicographically before endOffset. If -// startOffset is also set, the objects listed will have names between -// startOffset (inclusive) and endOffset (exclusive). 
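// A short sketch of the storage.objects.update call documented above,
// combining a metageneration precondition with a predefined ACL; the bucket,
// object and metadata values are placeholders, and a *Service from
// NewService is assumed.
func exampleObjectsUpdate(ctx context.Context, svc *Service) (*Object, error) {
	updated := &Object{
		Metadata: map[string]string{"reviewed": "true"},
	}
	return svc.Objects.Update("example-bucket", "example-object", updated).
		IfMetagenerationMatch(3). // fail instead of clobbering a concurrent metadata edit
		PredefinedAcl("projectPrivate").
		Projection("full"). // return all properties, per the comment above
		Context(ctx).
		Do()
}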
+// EndOffset sets the optional parameter "endOffset": Filter results to objects +// whose names are lexicographically before endOffset. If startOffset is also +// set, the objects listed will have names between startOffset (inclusive) and +// endOffset (exclusive). func (c *ObjectsWatchAllCall) EndOffset(endOffset string) *ObjectsWatchAllCall { c.urlParams_.Set("endOffset", endOffset) return c @@ -16533,40 +11715,38 @@ func (c *ObjectsWatchAllCall) EndOffset(endOffset string) *ObjectsWatchAllCall { // IncludeTrailingDelimiter sets the optional parameter // "includeTrailingDelimiter": If true, objects that end in exactly one -// instance of delimiter will have their metadata included in items in -// addition to prefixes. +// instance of delimiter will have their metadata included in items in addition +// to prefixes. func (c *ObjectsWatchAllCall) IncludeTrailingDelimiter(includeTrailingDelimiter bool) *ObjectsWatchAllCall { c.urlParams_.Set("includeTrailingDelimiter", fmt.Sprint(includeTrailingDelimiter)) return c } -// MaxResults sets the optional parameter "maxResults": Maximum number -// of items plus prefixes to return in a single page of responses. As -// duplicate prefixes are omitted, fewer total results may be returned -// than requested. The service will use this parameter or 1,000 items, -// whichever is smaller. +// MaxResults sets the optional parameter "maxResults": Maximum number of items +// plus prefixes to return in a single page of responses. As duplicate prefixes +// are omitted, fewer total results may be returned than requested. The service +// will use this parameter or 1,000 items, whichever is smaller. func (c *ObjectsWatchAllCall) MaxResults(maxResults int64) *ObjectsWatchAllCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// PageToken sets the optional parameter "pageToken": A -// previously-returned page token representing part of the larger set of -// results to view. +// PageToken sets the optional parameter "pageToken": A previously-returned +// page token representing part of the larger set of results to view. func (c *ObjectsWatchAllCall) PageToken(pageToken string) *ObjectsWatchAllCall { c.urlParams_.Set("pageToken", pageToken) return c } -// Prefix sets the optional parameter "prefix": Filter results to -// objects whose names begin with this prefix. +// Prefix sets the optional parameter "prefix": Filter results to objects whose +// names begin with this prefix. func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall { c.urlParams_.Set("prefix", prefix) return c } -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to noAcl. // // Possible values: // @@ -16577,48 +11757,46 @@ func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall return c } -// StartOffset sets the optional parameter "startOffset": Filter results -// to objects whose names are lexicographically equal to or after -// startOffset. If endOffset is also set, the objects listed will have -// names between startOffset (inclusive) and endOffset (exclusive). +// StartOffset sets the optional parameter "startOffset": Filter results to +// objects whose names are lexicographically equal to or after startOffset. If +// endOffset is also set, the objects listed will have names between +// startOffset (inclusive) and endOffset (exclusive). 
func (c *ObjectsWatchAllCall) StartOffset(startOffset string) *ObjectsWatchAllCall { c.urlParams_.Set("startOffset", startOffset) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. func (c *ObjectsWatchAllCall) UserProject(userProject string) *ObjectsWatchAllCall { c.urlParams_.Set("userProject", userProject) return c } -// Versions sets the optional parameter "versions": If true, lists all -// versions of an object as distinct results. The default is false. For -// more information, see Object Versioning. +// Versions sets the optional parameter "versions": If true, lists all versions +// of an object as distinct results. The default is false. For more +// information, see Object Versioning. func (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall { c.urlParams_.Set("versions", fmt.Sprint(versions)) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ObjectsWatchAllCall) Fields(s ...googleapi.Field) *ObjectsWatchAllCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ObjectsWatchAllCall) Context(ctx context.Context) *ObjectsWatchAllCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ObjectsWatchAllCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -16627,18 +11805,12 @@ func (c *ObjectsWatchAllCall) Header() http.Header { } func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/watch") @@ -16655,12 +11827,10 @@ func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.objects.watchAll" call. -// Exactly one of *Channel or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Channel.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. +// Any non-2xx status code is an error. 
Response headers are in either +// *Channel.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -16691,104 +11861,8 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) return nil, err } return ret, nil - // { - // "description": "Watch for changes on all objects in a bucket.", - // "httpMethod": "POST", - // "id": "storage.objects.watchAll", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which to look for objects.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "delimiter": { - // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", - // "location": "query", - // "type": "string" - // }, - // "endOffset": { - // "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", - // "location": "query", - // "type": "string" - // }, - // "includeTrailingDelimiter": { - // "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", - // "location": "query", - // "type": "boolean" - // }, - // "maxResults": { - // "default": "1000", - // "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "A previously-returned page token representing part of the larger set of results to view.", - // "location": "query", - // "type": "string" - // }, - // "prefix": { - // "description": "Filter results to objects whose names begin with this prefix.", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "startOffset": { - // "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // }, - // "versions": { - // "description": "If true, lists all versions of an object as distinct results. 
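// Sketch of the storage.objects.watchAll call above: registering a web_hook
// notification channel for a prefix, with versioned listing enabled. The
// Channel fields used here follow this package's schema; the UUID and
// endpoint URL are placeholders.
func exampleWatchAll(ctx context.Context, svc *Service) (*Channel, error) {
	ch := &Channel{
		Id:      "01234567-89ab-cdef-0123-456789abcdef", // caller-chosen channel ID
		Type:    "web_hook",
		Address: "https://example.com/gcs-notifications",
	}
	return svc.Objects.WatchAll("example-bucket", ch).
		Prefix("logs/").
		Versions(true). // every object version is returned as a distinct result
		Context(ctx).
		Do()
}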
The default is false. For more information, see Object Versioning.", - // "location": "query", - // "type": "boolean" - // } - // }, - // "path": "b/{bucket}/o/watch", - // "request": { - // "$ref": "Channel", - // "parameterName": "resource" - // }, - // "response": { - // "$ref": "Channel" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsSubscription": true - // } - } -// method id "storage.buckets.operations.cancel": - type OperationsCancelCall struct { s *Service bucket string @@ -16798,9 +11872,9 @@ type OperationsCancelCall struct { header_ http.Header } -// Cancel: Starts asynchronous cancellation on a long-running operation. -// The server makes a best effort to cancel the operation, but success -// is not guaranteed. +// Cancel: Starts asynchronous cancellation on a long-running operation. The +// server makes a best effort to cancel the operation, but success is not +// guaranteed. // // - bucket: The parent bucket of the operation resource. // - operationId: The ID of the operation resource. @@ -16812,23 +11886,21 @@ func (r *OperationsService) Cancel(bucket string, operationId string) *Operation } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *OperationsCancelCall) Fields(s ...googleapi.Field) *OperationsCancelCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *OperationsCancelCall) Context(ctx context.Context) *OperationsCancelCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *OperationsCancelCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -16837,12 +11909,7 @@ func (c *OperationsCancelCall) Header() http.Header { } func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") @@ -16872,40 +11939,8 @@ func (c *OperationsCancelCall) Do(opts ...googleapi.CallOption) error { return gensupport.WrapError(err) } return nil - // { - // "description": "Starts asynchronous cancellation on a long-running operation. 
The server makes a best effort to cancel the operation, but success is not guaranteed.", - // "httpMethod": "POST", - // "id": "storage.buckets.operations.cancel", - // "parameterOrder": [ - // "bucket", - // "operationId" - // ], - // "parameters": { - // "bucket": { - // "description": "The parent bucket of the operation resource.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "operationId": { - // "description": "The ID of the operation resource.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/operations/{operationId}/cancel", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.buckets.operations.get": - type OperationsGetCall struct { s *Service bucket string @@ -16928,33 +11963,29 @@ func (r *OperationsService) Get(bucket string, operationId string) *OperationsGe } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *OperationsGetCall) Fields(s ...googleapi.Field) *OperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. func (c *OperationsGetCall) IfNoneMatch(entityTag string) *OperationsGetCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *OperationsGetCall) Context(ctx context.Context) *OperationsGetCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *OperationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -16963,12 +11994,7 @@ func (c *OperationsGetCall) Header() http.Header { } func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -16990,12 +12016,11 @@ func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.buckets.operations.get" call. -// Exactly one of *GoogleLongrunningOperation or error will be non-nil. 
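// Cancel above is explicitly best effort, so it pairs naturally with a
// follow-up storage.buckets.operations.get to observe whether the operation
// actually stopped; a sketch with placeholder bucket and operation IDs.
func exampleCancelThenGet(ctx context.Context, svc *Service, opID string) (*GoogleLongrunningOperation, error) {
	if err := svc.Operations.Cancel("example-bucket", opID).Context(ctx).Do(); err != nil {
		return nil, err
	}
	// Cancellation may or may not have taken effect; poll the operation's state.
	return svc.Operations.Get("example-bucket", opID).Context(ctx).Do()
}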
// Any non-2xx status code is an error. Response headers are in either -// *GoogleLongrunningOperation.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -17026,45 +12051,8 @@ func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunning return nil, err } return ret, nil - // { - // "description": "Gets the latest state of a long-running operation.", - // "httpMethod": "GET", - // "id": "storage.buckets.operations.get", - // "parameterOrder": [ - // "bucket", - // "operationId" - // ], - // "parameters": { - // "bucket": { - // "description": "The parent bucket of the operation resource.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "operationId": { - // "description": "The ID of the operation resource.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/operations/{operationId}", - // "response": { - // "$ref": "GoogleLongrunningOperation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.buckets.operations.list": - type OperationsListCall struct { s *Service bucket string @@ -17074,8 +12062,7 @@ type OperationsListCall struct { header_ http.Header } -// List: Lists operations that match the specified filter in the -// request. +// List: Lists operations that match the specified filter in the request. // // - bucket: Name of the bucket in which to look for operations. func (r *OperationsService) List(bucket string) *OperationsListCall { @@ -17084,59 +12071,54 @@ func (r *OperationsService) List(bucket string) *OperationsListCall { return c } -// Filter sets the optional parameter "filter": A filter to narrow down -// results to a preferred subset. The filtering language is documented -// in more detail in AIP-160 (https://google.aip.dev/160). +// Filter sets the optional parameter "filter": A filter to narrow down results +// to a preferred subset. The filtering language is documented in more detail +// in AIP-160 (https://google.aip.dev/160). func (c *OperationsListCall) Filter(filter string) *OperationsListCall { c.urlParams_.Set("filter", filter) return c } -// PageSize sets the optional parameter "pageSize": Maximum number of -// items to return in a single page of responses. Fewer total results -// may be returned than requested. The service uses this parameter or -// 100 items, whichever is smaller. +// PageSize sets the optional parameter "pageSize": Maximum number of items to +// return in a single page of responses. Fewer total results may be returned +// than requested. The service uses this parameter or 100 items, whichever is +// smaller. 
func (c *OperationsListCall) PageSize(pageSize int64) *OperationsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } -// PageToken sets the optional parameter "pageToken": A -// previously-returned page token representing part of the larger set of -// results to view. +// PageToken sets the optional parameter "pageToken": A previously-returned +// page token representing part of the larger set of results to view. func (c *OperationsListCall) PageToken(pageToken string) *OperationsListCall { c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *OperationsListCall) Fields(s ...googleapi.Field) *OperationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. func (c *OperationsListCall) IfNoneMatch(entityTag string) *OperationsListCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *OperationsListCall) Context(ctx context.Context) *OperationsListCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *OperationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -17145,12 +12127,7 @@ func (c *OperationsListCall) Header() http.Header { } func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -17171,13 +12148,11 @@ func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.buckets.operations.list" call. -// Exactly one of *GoogleLongrunningListOperationsResponse or error will -// be non-nil. Any non-2xx status code is an error. Response headers are -// in either -// *GoogleLongrunningListOperationsResponse.ServerResponse.Header or (if -// a response was returned at all) in error.(*googleapi.Error).Header. -// Use googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. 
Response headers are in either +// *GoogleLongrunningListOperationsResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningListOperationsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -17208,51 +12183,6 @@ func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunnin return nil, err } return ret, nil - // { - // "description": "Lists operations that match the specified filter in the request.", - // "httpMethod": "GET", - // "id": "storage.buckets.operations.list", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which to look for operations.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "filter": { - // "description": "A filter to narrow down results to a preferred subset. The filtering language is documented in more detail in [AIP-160](https://google.aip.dev/160).", - // "location": "query", - // "type": "string" - // }, - // "pageSize": { - // "description": "Maximum number of items to return in a single page of responses. Fewer total results may be returned than requested. The service uses this parameter or 100 items, whichever is smaller.", - // "format": "int32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "A previously-returned page token representing part of the larger set of results to view.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/operations", - // "response": { - // "$ref": "GoogleLongrunningListOperationsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } // Pages invokes f for each page of results. @@ -17260,7 +12190,7 @@ func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunnin // The provided context supersedes any context provided to the Context method. func (c *OperationsListCall) Pages(ctx context.Context, f func(*GoogleLongrunningListOperationsResponse) error) error { c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + defer c.PageToken(c.urlParams_.Get("pageToken")) for { x, err := c.Do() if err != nil { @@ -17276,8 +12206,6 @@ func (c *OperationsListCall) Pages(ctx context.Context, f func(*GoogleLongrunnin } } -// method id "storage.projects.hmacKeys.create": - type ProjectsHmacKeysCreateCall struct { s *Service projectId string @@ -17297,31 +12225,29 @@ func (r *ProjectsHmacKeysService) Create(projectId string, serviceAccountEmail s return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. 
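// The Pages helper above hides the pageToken bookkeeping. A sketch that
// lists a bucket's operations page by page; the `done = false` filter string
// is a hypothetical AIP-160 expression, and context/fmt imports are assumed.
func exampleListOperations(ctx context.Context, svc *Service) error {
	call := svc.Operations.List("example-bucket").
		Filter(`done = false`).
		PageSize(50)
	return call.Pages(ctx, func(page *GoogleLongrunningListOperationsResponse) error {
		for _, op := range page.Operations {
			fmt.Println(op.Name, "done:", op.Done)
		}
		return nil // a non-nil return stops the iteration early
	})
}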
func (c *ProjectsHmacKeysCreateCall) UserProject(userProject string) *ProjectsHmacKeysCreateCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ProjectsHmacKeysCreateCall) Fields(s ...googleapi.Field) *ProjectsHmacKeysCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ProjectsHmacKeysCreateCall) Context(ctx context.Context) *ProjectsHmacKeysCreateCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ProjectsHmacKeysCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -17330,12 +12256,7 @@ func (c *ProjectsHmacKeysCreateCall) Header() http.Header { } func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") @@ -17353,12 +12274,10 @@ func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, erro } // Do executes the "storage.projects.hmacKeys.create" call. -// Exactly one of *HmacKey or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *HmacKey.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. +// Any non-2xx status code is an error. Response headers are in either +// *HmacKey.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *ProjectsHmacKeysCreateCall) Do(opts ...googleapi.CallOption) (*HmacKey, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") @@ -17389,47 +12308,8 @@ func (c *ProjectsHmacKeysCreateCall) Do(opts ...googleapi.CallOption) (*HmacKey, return nil, err } return ret, nil - // { - // "description": "Creates a new HMAC key for the specified service account.", - // "httpMethod": "POST", - // "id": "storage.projects.hmacKeys.create", - // "parameterOrder": [ - // "projectId", - // "serviceAccountEmail" - // ], - // "parameters": { - // "projectId": { - // "description": "Project ID owning the service account.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "serviceAccountEmail": { - // "description": "Email address of the service account.", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "projects/{projectId}/hmacKeys", - // "response": { - // "$ref": "HmacKey" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.projects.hmacKeys.delete": - type ProjectsHmacKeysDeleteCall struct { s *Service projectId string @@ -17450,31 +12330,29 @@ func (r *ProjectsHmacKeysService) Delete(projectId string, accessId string) *Pro return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. func (c *ProjectsHmacKeysDeleteCall) UserProject(userProject string) *ProjectsHmacKeysDeleteCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ProjectsHmacKeysDeleteCall) Fields(s ...googleapi.Field) *ProjectsHmacKeysDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ProjectsHmacKeysDeleteCall) Context(ctx context.Context) *ProjectsHmacKeysDeleteCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
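// A sketch around the HMAC-key creation above. Create is the only call in
// this family whose Do returns an HmacKey (with its Secret); Get and List
// below return HmacKeyMetadata only, so the secret has to be captured here.
// The project and service-account email are placeholders.
func exampleCreateHmacKey(ctx context.Context, svc *Service) (accessID, secret string, err error) {
	key, err := svc.Projects.HmacKeys.Create(
		"example-project",
		"robot@example-project.iam.gserviceaccount.com",
	).Context(ctx).Do()
	if err != nil {
		return "", "", err
	}
	// key.Secret is not retrievable again through Get or List.
	return key.Metadata.AccessId, key.Secret, nil
}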
func (c *ProjectsHmacKeysDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -17483,12 +12361,7 @@ func (c *ProjectsHmacKeysDeleteCall) Header() http.Header { } func (c *ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") @@ -17518,45 +12391,8 @@ func (c *ProjectsHmacKeysDeleteCall) Do(opts ...googleapi.CallOption) error { return gensupport.WrapError(err) } return nil - // { - // "description": "Deletes an HMAC key.", - // "httpMethod": "DELETE", - // "id": "storage.projects.hmacKeys.delete", - // "parameterOrder": [ - // "projectId", - // "accessId" - // ], - // "parameters": { - // "accessId": { - // "description": "Name of the HMAC key to be deleted.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "projectId": { - // "description": "Project ID owning the requested key", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "projects/{projectId}/hmacKeys/{accessId}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } -// method id "storage.projects.hmacKeys.get": - type ProjectsHmacKeysGetCall struct { s *Service projectId string @@ -17569,9 +12405,8 @@ type ProjectsHmacKeysGetCall struct { // Get: Retrieves an HMAC key's metadata // -// - accessId: Name of the HMAC key. -// - projectId: Project ID owning the service account of the requested -// key. +// - accessId: Name of the HMAC key. +// - projectId: Project ID owning the service account of the requested key. func (r *ProjectsHmacKeysService) Get(projectId string, accessId string) *ProjectsHmacKeysGetCall { c := &ProjectsHmacKeysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -17579,41 +12414,37 @@ func (r *ProjectsHmacKeysService) Get(projectId string, accessId string) *Projec return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. func (c *ProjectsHmacKeysGetCall) UserProject(userProject string) *ProjectsHmacKeysGetCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ProjectsHmacKeysGetCall) Fields(s ...googleapi.Field) *ProjectsHmacKeysGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. 
Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. func (c *ProjectsHmacKeysGetCall) IfNoneMatch(entityTag string) *ProjectsHmacKeysGetCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ProjectsHmacKeysGetCall) Context(ctx context.Context) *ProjectsHmacKeysGetCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ProjectsHmacKeysGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -17622,12 +12453,7 @@ func (c *ProjectsHmacKeysGetCall) Header() http.Header { } func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -17649,12 +12475,11 @@ func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) } // Do executes the "storage.projects.hmacKeys.get" call. -// Exactly one of *HmacKeyMetadata or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HmacKeyMetadata.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *HmacKeyMetadata.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. func (c *ProjectsHmacKeysGetCall) Do(opts ...googleapi.CallOption) (*HmacKeyMetadata, error) { gensupport.SetOptions(c.urlParams_, opts...) 
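// The IfNoneMatch rewrite above preserves the ETag contract: pair it with
// googleapi.IsNotModified, as the removed comment suggested. A sketch that
// re-fetches HMAC-key metadata only when it has changed; cachedETag and the
// IDs are placeholders.
func examplePollHmacKey(ctx context.Context, svc *Service, cachedETag string) (*HmacKeyMetadata, error) {
	meta, err := svc.Projects.HmacKeys.Get("example-project", "example-access-id").
		IfNoneMatch(cachedETag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // unchanged since cachedETag; keep using the cached copy
	}
	return meta, err
}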
res, err := c.doRequest("json") @@ -17685,49 +12510,8 @@ func (c *ProjectsHmacKeysGetCall) Do(opts ...googleapi.CallOption) (*HmacKeyMeta return nil, err } return ret, nil - // { - // "description": "Retrieves an HMAC key's metadata", - // "httpMethod": "GET", - // "id": "storage.projects.hmacKeys.get", - // "parameterOrder": [ - // "projectId", - // "accessId" - // ], - // "parameters": { - // "accessId": { - // "description": "Name of the HMAC key.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "projectId": { - // "description": "Project ID owning the service account of the requested key.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "projects/{projectId}/hmacKeys/{accessId}", - // "response": { - // "$ref": "HmacKeyMetadata" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only" - // ] - // } - } -// method id "storage.projects.hmacKeys.list": - type ProjectsHmacKeysListCall struct { s *Service projectId string @@ -17746,76 +12530,69 @@ func (r *ProjectsHmacKeysService) List(projectId string) *ProjectsHmacKeysListCa return c } -// MaxResults sets the optional parameter "maxResults": Maximum number -// of items to return in a single page of responses. The service uses -// this parameter or 250 items, whichever is smaller. The max number of -// items per page will also be limited by the number of distinct service -// accounts in the response. If the number of service accounts in a -// single response is too high, the page will truncated and a next page -// token will be returned. +// MaxResults sets the optional parameter "maxResults": Maximum number of items +// to return in a single page of responses. The service uses this parameter or +// 250 items, whichever is smaller. The max number of items per page will also +// be limited by the number of distinct service accounts in the response. If +// the number of service accounts in a single response is too high, the page +// will be truncated and a next page token will be returned. func (c *ProjectsHmacKeysListCall) MaxResults(maxResults int64) *ProjectsHmacKeysListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// PageToken sets the optional parameter "pageToken": A -// previously-returned page token representing part of the larger set of -// results to view. +// PageToken sets the optional parameter "pageToken": A previously-returned +// page token representing part of the larger set of results to view. func (c *ProjectsHmacKeysListCall) PageToken(pageToken string) *ProjectsHmacKeysListCall { c.urlParams_.Set("pageToken", pageToken) return c } -// ServiceAccountEmail sets the optional parameter -// "serviceAccountEmail": If present, only keys for the given service -// account are returned. +// ServiceAccountEmail sets the optional parameter "serviceAccountEmail": If +// present, only keys for the given service account are returned.
func (c *ProjectsHmacKeysListCall) ServiceAccountEmail(serviceAccountEmail string) *ProjectsHmacKeysListCall { c.urlParams_.Set("serviceAccountEmail", serviceAccountEmail) return c } -// ShowDeletedKeys sets the optional parameter "showDeletedKeys": -// Whether or not to show keys in the DELETED state. +// ShowDeletedKeys sets the optional parameter "showDeletedKeys": Whether or +// not to show keys in the DELETED state. func (c *ProjectsHmacKeysListCall) ShowDeletedKeys(showDeletedKeys bool) *ProjectsHmacKeysListCall { c.urlParams_.Set("showDeletedKeys", fmt.Sprint(showDeletedKeys)) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. func (c *ProjectsHmacKeysListCall) UserProject(userProject string) *ProjectsHmacKeysListCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ProjectsHmacKeysListCall) Fields(s ...googleapi.Field) *ProjectsHmacKeysListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. func (c *ProjectsHmacKeysListCall) IfNoneMatch(entityTag string) *ProjectsHmacKeysListCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ProjectsHmacKeysListCall) Context(ctx context.Context) *ProjectsHmacKeysListCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ProjectsHmacKeysListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -17824,12 +12601,7 @@ func (c *ProjectsHmacKeysListCall) Header() http.Header { } func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -17850,12 +12622,11 @@ func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) } // Do executes the "storage.projects.hmacKeys.list" call. -// Exactly one of *HmacKeysMetadata or error will be non-nil. Any -// non-2xx status code is an error. 
Response headers are in either -// *HmacKeysMetadata.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *HmacKeysMetadata.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. func (c *ProjectsHmacKeysListCall) Do(opts ...googleapi.CallOption) (*HmacKeysMetadata, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -17886,61 +12657,6 @@ func (c *ProjectsHmacKeysListCall) Do(opts ...googleapi.CallOption) (*HmacKeysMe return nil, err } return ret, nil - // { - // "description": "Retrieves a list of HMAC keys matching the criteria.", - // "httpMethod": "GET", - // "id": "storage.projects.hmacKeys.list", - // "parameterOrder": [ - // "projectId" - // ], - // "parameters": { - // "maxResults": { - // "default": "250", - // "description": "Maximum number of items to return in a single page of responses. The service uses this parameter or 250 items, whichever is smaller. The max number of items per page will also be limited by the number of distinct service accounts in the response. If the number of service accounts in a single response is too high, the page will truncated and a next page token will be returned.", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "A previously-returned page token representing part of the larger set of results to view.", - // "location": "query", - // "type": "string" - // }, - // "projectId": { - // "description": "Name of the project in which to look for HMAC keys.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "serviceAccountEmail": { - // "description": "If present, only keys for the given service account are returned.", - // "location": "query", - // "type": "string" - // }, - // "showDeletedKeys": { - // "description": "Whether or not to show keys in the DELETED state.", - // "location": "query", - // "type": "boolean" - // }, - // "userProject": { - // "description": "The project to be billed for this request.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "projects/{projectId}/hmacKeys", - // "response": { - // "$ref": "HmacKeysMetadata" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only" - // ] - // } - } // Pages invokes f for each page of results. @@ -17948,7 +12664,7 @@ func (c *ProjectsHmacKeysListCall) Do(opts ...googleapi.CallOption) (*HmacKeysMe // The provided context supersedes any context provided to the Context method. 
func (c *ProjectsHmacKeysListCall) Pages(ctx context.Context, f func(*HmacKeysMetadata) error) error { c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + defer c.PageToken(c.urlParams_.Get("pageToken")) for { x, err := c.Do() if err != nil { @@ -17964,8 +12680,6 @@ func (c *ProjectsHmacKeysListCall) Pages(ctx context.Context, f func(*HmacKeysMe } } -// method id "storage.projects.hmacKeys.update": - type ProjectsHmacKeysUpdateCall struct { s *Service projectId string @@ -17979,9 +12693,8 @@ type ProjectsHmacKeysUpdateCall struct { // Update: Updates the state of an HMAC key. See the HMAC Key resource // descriptor for valid states. // -// - accessId: Name of the HMAC key being updated. -// - projectId: Project ID owning the service account of the updated -// key. +// - accessId: Name of the HMAC key being updated. +// - projectId: Project ID owning the service account of the updated key. func (r *ProjectsHmacKeysService) Update(projectId string, accessId string, hmackeymetadata *HmacKeyMetadata) *ProjectsHmacKeysUpdateCall { c := &ProjectsHmacKeysUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -17990,31 +12703,29 @@ func (r *ProjectsHmacKeysService) Update(projectId string, accessId string, hmac return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. func (c *ProjectsHmacKeysUpdateCall) UserProject(userProject string) *ProjectsHmacKeysUpdateCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ProjectsHmacKeysUpdateCall) Fields(s ...googleapi.Field) *ProjectsHmacKeysUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ProjectsHmacKeysUpdateCall) Context(ctx context.Context) *ProjectsHmacKeysUpdateCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
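One behavioral note on the Pages hunk above: only the trailing comment ("reset paging to original point") was dropped; the deferred PageToken call still restores whatever token the caller had set before iteration began. A sketch of driving the list call through Pages:

```go
package example

import (
	"context"
	"fmt"

	storage "google.golang.org/api/storage/v1"
)

// listAllHmacKeys walks every page; Pages calls Do in a loop and follows
// NextPageToken until it is empty.
func listAllHmacKeys(ctx context.Context, svc *storage.Service, projectID string) error {
	return svc.Projects.HmacKeys.List(projectID).
		MaxResults(100).
		Pages(ctx, func(page *storage.HmacKeysMetadata) error {
			for _, key := range page.Items {
				fmt.Println(key.AccessId, key.State)
			}
			return nil // returning a non-nil error stops iteration early
		})
}
```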
func (c *ProjectsHmacKeysUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -18023,18 +12734,12 @@ func (c *ProjectsHmacKeysUpdateCall) Header() http.Header { } func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.hmackeymetadata) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys/{accessId}") @@ -18052,12 +12757,11 @@ func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, erro } // Do executes the "storage.projects.hmacKeys.update" call. -// Exactly one of *HmacKeyMetadata or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HmacKeyMetadata.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *HmacKeyMetadata.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. func (c *ProjectsHmacKeysUpdateCall) Do(opts ...googleapi.CallOption) (*HmacKeyMetadata, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -18088,50 +12792,8 @@ func (c *ProjectsHmacKeysUpdateCall) Do(opts ...googleapi.CallOption) (*HmacKeyM return nil, err } return ret, nil - // { - // "description": "Updates the state of an HMAC key. See the HMAC Key resource descriptor for valid states.", - // "httpMethod": "PUT", - // "id": "storage.projects.hmacKeys.update", - // "parameterOrder": [ - // "projectId", - // "accessId" - // ], - // "parameters": { - // "accessId": { - // "description": "Name of the HMAC key being updated.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "projectId": { - // "description": "Project ID owning the service account of the updated key.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "projects/{projectId}/hmacKeys/{accessId}", - // "request": { - // "$ref": "HmacKeyMetadata" - // }, - // "response": { - // "$ref": "HmacKeyMetadata" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - } -// method id "storage.projects.serviceAccount.get": - type ProjectsServiceAccountGetCall struct { s *Service projectId string @@ -18141,8 +12803,8 @@ type ProjectsServiceAccountGetCall struct { header_ http.Header } -// Get: Get the email address of this project's Google Cloud Storage -// service account. 
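For orientation, Update is the call that flips a key between ACTIVE and INACTIVE (in the GCS HMAC key lifecycle a key must be INACTIVE before it can be deleted). A minimal sketch, with the state string taken from the API's documented values:

```go
package example

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// deactivateHmacKey marks a key INACTIVE so that it can later be deleted.
func deactivateHmacKey(ctx context.Context, svc *storage.Service, projectID, accessID string) (*storage.HmacKeyMetadata, error) {
	return svc.Projects.HmacKeys.Update(projectID, accessID, &storage.HmacKeyMetadata{
		State: "INACTIVE",
	}).Context(ctx).Do()
}
```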
+// Get: Get the email address of this project's Google Cloud Storage service +// account. // // - projectId: Project ID. func (r *ProjectsServiceAccountService) Get(projectId string) *ProjectsServiceAccountGetCall { @@ -18151,41 +12813,37 @@ func (r *ProjectsServiceAccountService) Get(projectId string) *ProjectsServiceAc return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. func (c *ProjectsServiceAccountGetCall) UserProject(userProject string) *ProjectsServiceAccountGetCall { c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. func (c *ProjectsServiceAccountGetCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. func (c *ProjectsServiceAccountGetCall) IfNoneMatch(entityTag string) *ProjectsServiceAccountGetCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. +// Context sets the context to be used in this call's Do method. func (c *ProjectsServiceAccountGetCall) Context(ctx context.Context) *ProjectsServiceAccountGetCall { c.ctx_ = ctx return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. func (c *ProjectsServiceAccountGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) @@ -18194,12 +12852,7 @@ func (c *ProjectsServiceAccountGetCall) Header() http.Header { } func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } @@ -18220,12 +12873,10 @@ func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, e } // Do executes the "storage.projects.serviceAccount.get" call. -// Exactly one of *ServiceAccount or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ServiceAccount.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. 
Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. +// Any non-2xx status code is an error. Response headers are in either +// *ServiceAccount.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*ServiceAccount, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") @@ -18256,37 +12907,4 @@ func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*Servi return nil, err } return ret, nil - // { - // "description": "Get the email address of this project's Google Cloud Storage service account.", - // "httpMethod": "GET", - // "id": "storage.projects.serviceAccount.get", - // "parameterOrder": [ - // "projectId" - // ], - // "parameters": { - // "projectId": { - // "description": "Project ID", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "projects/{projectId}/serviceAccount", - // "response": { - // "$ref": "ServiceAccount" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - } diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index bfc55594..12ebb0a1 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -17,6 +17,10 @@ import ( "sync" "time" + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials" + "cloud.google.com/go/auth/grpctransport" + "cloud.google.com/go/auth/oauth2adapt" "cloud.google.com/go/compute/metadata" "go.opencensus.io/plugin/ocgrpc" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" @@ -79,6 +83,13 @@ func Dial(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, e if o.GRPCConnPool != nil { return o.GRPCConnPool.Conn(), nil } + if o.IsNewAuthLibraryEnabled() { + pool, err := dialPoolNewAuth(ctx, true, 1, o) + if err != nil { + return nil, err + } + return pool.Connection(), nil + } // NOTE(cbro): We removed support for option.WithGRPCConnPool (GRPCConnPoolSize) // on 2020-02-12 because RoundRobin and WithBalancer are deprecated and we need to remove usages of it. 
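The transport hunks that follow all share one shape: when the new cloud.google.com/go/auth stack is enabled (o.IsNewAuthLibraryEnabled()), Dial, DialInsecure and DialPool short-circuit into dialPoolNewAuth and return a connection from the new grpctransport pool; otherwise the legacy path runs unchanged. Callers keep using the same public entry points either way. A sketch using only public options (the endpoint and scope values are examples, not requirements of this patch):

```go
package example

import (
	"context"

	"google.golang.org/api/option"
	gtransport "google.golang.org/api/transport/grpc"
)

// dialExample obtains a *grpc.ClientConn; whether the legacy or the
// new-auth path serves it is an internal detail of the transport package.
func dialExample(ctx context.Context) error {
	conn, err := gtransport.Dial(ctx,
		option.WithEndpoint("storage.googleapis.com:443"),
		option.WithScopes("https://www.googleapis.com/auth/cloud-platform"),
	)
	if err != nil {
		return err
	}
	return conn.Close()
}
```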
// @@ -94,6 +105,13 @@ func DialInsecure(ctx context.Context, opts ...option.ClientOption) (*grpc.Clien if err != nil { return nil, err } + if o.IsNewAuthLibraryEnabled() { + pool, err := dialPoolNewAuth(ctx, false, 1, o) + if err != nil { + return nil, err + } + return pool.Connection(), nil + } return dial(ctx, true, o) } @@ -112,6 +130,18 @@ func DialPool(ctx context.Context, opts ...option.ClientOption) (ConnPool, error if o.GRPCConnPool != nil { return o.GRPCConnPool, nil } + + if o.IsNewAuthLibraryEnabled() { + if o.GRPCConn != nil { + return &singleConnPool{o.GRPCConn}, nil + } + pool, err := dialPoolNewAuth(ctx, true, o.GRPCConnPoolSize, o) + if err != nil { + return nil, err + } + return &poolAdapter{pool}, nil + } + poolSize := o.GRPCConnPoolSize if o.GRPCConn != nil { // WithGRPCConn is technically incompatible with WithGRPCConnectionPool. @@ -141,6 +171,90 @@ func DialPool(ctx context.Context, opts ...option.ClientOption) (ConnPool, error return pool, nil } +// dialPoolNewAuth is an adapter to call new auth library. +func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *internal.DialSettings) (grpctransport.GRPCClientConnPool, error) { + // honor options if set + var creds *auth.Credentials + if ds.InternalCredentials != nil { + creds = oauth2adapt.AuthCredentialsFromOauth2Credentials(ds.InternalCredentials) + } else if ds.Credentials != nil { + creds = oauth2adapt.AuthCredentialsFromOauth2Credentials(ds.Credentials) + } else if ds.AuthCredentials != nil { + creds = ds.AuthCredentials + } else if ds.TokenSource != nil { + credOpts := &auth.CredentialsOptions{ + TokenProvider: oauth2adapt.TokenProviderFromTokenSource(ds.TokenSource), + } + if ds.QuotaProject != "" { + credOpts.QuotaProjectIDProvider = auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return ds.QuotaProject, nil + }) + } + creds = auth.NewCredentials(credOpts) + } + + var skipValidation bool + // If our clients explicitly setup the credential skip validation as it is + // assumed correct + if ds.SkipValidation || ds.InternalCredentials != nil { + skipValidation = true + } + + var aud string + if len(ds.Audiences) > 0 { + aud = ds.Audiences[0] + } + metadata := map[string]string{} + if ds.QuotaProject != "" { + metadata["X-goog-user-project"] = ds.QuotaProject + } + if ds.RequestReason != "" { + metadata["X-goog-request-reason"] = ds.RequestReason + } + + // Defaults for older clients that don't set this value yet + defaultEndpointTemplate := ds.DefaultEndpointTemplate + if defaultEndpointTemplate == "" { + defaultEndpointTemplate = ds.DefaultEndpoint + } + + tokenURL, oauth2Client, err := internal.GetOAuth2Configuration(ctx, ds) + if err != nil { + return nil, err + } + + pool, err := grpctransport.Dial(ctx, secure, &grpctransport.Options{ + DisableTelemetry: ds.TelemetryDisabled, + DisableAuthentication: ds.NoAuth, + Endpoint: ds.Endpoint, + Metadata: metadata, + GRPCDialOpts: ds.GRPCDialOpts, + PoolSize: poolSize, + Credentials: creds, + APIKey: ds.APIKey, + DetectOpts: &credentials.DetectOptions{ + Scopes: ds.Scopes, + Audience: aud, + CredentialsFile: ds.CredentialsFile, + CredentialsJSON: ds.CredentialsJSON, + TokenURL: tokenURL, + Client: oauth2Client, + }, + InternalOptions: &grpctransport.InternalOptions{ + EnableNonDefaultSAForDirectPath: ds.AllowNonDefaultServiceAccount, + EnableDirectPath: ds.EnableDirectPath, + EnableDirectPathXds: ds.EnableDirectPathXds, + EnableJWTWithScope: ds.EnableJwtWithScope, + DefaultAudience: ds.DefaultAudience, + 
+			DefaultEndpointTemplate: defaultEndpointTemplate,
+			DefaultMTLSEndpoint:     ds.DefaultMTLSEndpoint,
+			DefaultScopes:           ds.DefaultScopes,
+			SkipValidation:          skipValidation,
+		},
+	})
+	return pool, err
+}
+
 func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.ClientConn, error) {
 	if o.HTTPClient != nil {
 		return nil, errors.New("unsupported HTTP client specified")
diff --git a/vendor/google.golang.org/api/transport/grpc/pool.go b/vendor/google.golang.org/api/transport/grpc/pool.go
index 4cf94a27..c731293d 100644
--- a/vendor/google.golang.org/api/transport/grpc/pool.go
+++ b/vendor/google.golang.org/api/transport/grpc/pool.go
@@ -9,6 +9,7 @@ import (
 	"fmt"
 	"sync/atomic"
 
+	"cloud.google.com/go/auth/grpctransport"
 	"google.golang.org/api/internal"
 	"google.golang.org/grpc"
 )
@@ -90,3 +91,27 @@ func (m multiError) Error() string {
 	}
 	return fmt.Sprintf("%s (and %d other errors)", s, n-1)
 }
+
+type poolAdapter struct {
+	pool grpctransport.GRPCClientConnPool
+}
+
+func (p *poolAdapter) Conn() *grpc.ClientConn {
+	return p.pool.Connection()
+}
+
+func (p *poolAdapter) Num() int {
+	return p.pool.Len()
+}
+
+func (p *poolAdapter) Close() error {
+	return p.pool.Close()
+}
+
+func (p *poolAdapter) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...grpc.CallOption) error {
+	return p.pool.Invoke(ctx, method, args, reply, opts...)
+}
+
+func (p *poolAdapter) NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+	return p.pool.NewStream(ctx, desc, method, opts...)
+}
diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go
index c4f5e0b1..a36e2431 100644
--- a/vendor/google.golang.org/api/transport/http/dial.go
+++ b/vendor/google.golang.org/api/transport/http/dial.go
@@ -15,6 +15,10 @@ import (
 	"net/http"
 	"time"
 
+	"cloud.google.com/go/auth"
+	"cloud.google.com/go/auth/credentials"
+	"cloud.google.com/go/auth/httptransport"
+	"cloud.google.com/go/auth/oauth2adapt"
 	"go.opencensus.io/plugin/ochttp"
 	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
 	"golang.org/x/net/http2"
@@ -43,6 +47,13 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client,
 		return settings.HTTPClient, endpoint, nil
 	}
+	if settings.IsNewAuthLibraryEnabled() {
+		client, err := newClientNewAuth(ctx, nil, settings)
+		if err != nil {
+			return nil, "", err
+		}
+		return client, endpoint, nil
+	}
 	trans, err := newTransport(ctx, defaultBaseTransport(ctx, clientCertSource, dialTLSContext), settings)
 	if err != nil {
 		return nil, "", err
@@ -50,6 +61,88 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client,
 	return &http.Client{Transport: trans}, endpoint, nil
 }
+
+// newClientNewAuth is an adapter to call new auth library.
+func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal.DialSettings) (*http.Client, error) {
+	// honor options if set
+	var creds *auth.Credentials
+	if ds.InternalCredentials != nil {
+		creds = oauth2adapt.AuthCredentialsFromOauth2Credentials(ds.InternalCredentials)
+	} else if ds.Credentials != nil {
+		creds = oauth2adapt.AuthCredentialsFromOauth2Credentials(ds.Credentials)
+	} else if ds.AuthCredentials != nil {
+		creds = ds.AuthCredentials
+	} else if ds.TokenSource != nil {
+		credOpts := &auth.CredentialsOptions{
+			TokenProvider: oauth2adapt.TokenProviderFromTokenSource(ds.TokenSource),
+		}
+		if ds.QuotaProject != "" {
+			credOpts.QuotaProjectIDProvider = auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) {
+				return ds.QuotaProject, nil
+			})
+		}
+		creds = auth.NewCredentials(credOpts)
+	}
+
+	var skipValidation bool
+	// If our clients explicitly setup the credential skip validation as it is
+	// assumed correct
+	if ds.SkipValidation || ds.InternalCredentials != nil {
+		skipValidation = true
+	}
+
+	// Defaults for older clients that don't set this value yet
+	defaultEndpointTemplate := ds.DefaultEndpointTemplate
+	if defaultEndpointTemplate == "" {
+		defaultEndpointTemplate = ds.DefaultEndpoint
+	}
+
+	var aud string
+	if len(ds.Audiences) > 0 {
+		aud = ds.Audiences[0]
+	}
+	headers := http.Header{}
+	if ds.QuotaProject != "" {
+		headers.Set("X-goog-user-project", ds.QuotaProject)
+	}
+	if ds.RequestReason != "" {
+		headers.Set("X-goog-request-reason", ds.RequestReason)
+	}
+	tokenURL, oauth2Client, err := internal.GetOAuth2Configuration(ctx, ds)
+	if err != nil {
+		return nil, err
+	}
+	client, err := httptransport.NewClient(&httptransport.Options{
+		DisableTelemetry:      ds.TelemetryDisabled,
+		DisableAuthentication: ds.NoAuth,
+		Headers:               headers,
+		Endpoint:              ds.Endpoint,
+		APIKey:                ds.APIKey,
+		Credentials:           creds,
+		ClientCertProvider:    ds.ClientCertSource,
+		BaseRoundTripper:      base,
+		DetectOpts: &credentials.DetectOptions{
+			Scopes:          ds.Scopes,
+			Audience:        aud,
+			CredentialsFile: ds.CredentialsFile,
+			CredentialsJSON: ds.CredentialsJSON,
+			TokenURL:        tokenURL,
+			Client:          oauth2Client,
+		},
+		InternalOptions: &httptransport.InternalOptions{
+			EnableJWTWithScope:      ds.EnableJwtWithScope,
+			DefaultAudience:         ds.DefaultAudience,
+			DefaultEndpointTemplate: defaultEndpointTemplate,
+			DefaultMTLSEndpoint:     ds.DefaultMTLSEndpoint,
+			DefaultScopes:           ds.DefaultScopes,
+			SkipValidation:          skipValidation,
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+	return client, nil
+}
+
 // NewTransport creates an http.RoundTripper for use communicating with a Google
 // cloud service, configured with the given ClientOptions. Its RoundTrip method delegates to base.
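Two things to note in the hunks above: poolAdapter is what lets the new grpctransport pool satisfy this package's pre-existing ConnPool interface (Conn/Num/Close plus the grpc.ClientConnInterface methods), and both transports resolve credentials with the same precedence (InternalCredentials, then Credentials, then AuthCredentials, then a wrapped TokenSource). Caller-facing constructors are untouched; a sketch using only public options (the endpoint, scope, and project values are examples):

```go
package example

import (
	"context"

	"google.golang.org/api/option"
	gtransport "google.golang.org/api/transport/grpc"
	htransport "google.golang.org/api/transport/http"
)

func buildClients(ctx context.Context) error {
	// gRPC: a fixed-size pool; Conn() hands out one of pool.Num() connections.
	pool, err := gtransport.DialPool(ctx,
		option.WithEndpoint("storage.googleapis.com:443"),
		option.WithGRPCConnectionPool(4),
		option.WithScopes("https://www.googleapis.com/auth/cloud-platform"),
	)
	if err != nil {
		return err
	}
	defer pool.Close()

	// HTTP: WithQuotaProject ends up as the X-goog-user-project header on
	// both the legacy and new-auth paths (see the header wiring above).
	client, _, err := htransport.NewClient(ctx,
		option.WithScopes("https://www.googleapis.com/auth/devstorage.read_only"),
		option.WithQuotaProject("my-billing-project"),
	)
	if err != nil {
		return err
	}
	_ = client
	_ = pool.Conn()
	return nil
}
```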
func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.ClientOption) (http.RoundTripper, error) { @@ -60,6 +153,13 @@ func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.Cl if settings.HTTPClient != nil { return nil, errors.New("transport/http: WithHTTPClient passed to NewTransport") } + if settings.IsNewAuthLibraryEnabled() { + client, err := newClientNewAuth(ctx, base, settings) + if err != nil { + return nil, err + } + return client.Transport, nil + } return newTransport(ctx, base, settings) } diff --git a/vendor/google.golang.org/appengine/CONTRIBUTING.md b/vendor/google.golang.org/appengine/CONTRIBUTING.md deleted file mode 100644 index 28969361..00000000 --- a/vendor/google.golang.org/appengine/CONTRIBUTING.md +++ /dev/null @@ -1,88 +0,0 @@ -# Contributing - -1. Sign one of the contributor license agreements below. -1. Get the package: - - `go get -d google.golang.org/appengine` -1. Change into the checked out source: - - `cd $GOPATH/src/google.golang.org/appengine` -1. Fork the repo. -1. Set your fork as a remote: - - `git remote add fork git@github.com:GITHUB_USERNAME/appengine.git` -1. Make changes, commit to your fork. -1. Send a pull request with your changes. - The first line of your commit message is conventionally a one-line summary of the change, prefixed by the primary affected package, and is used as the title of your pull request. - -# Testing - -## Running system tests - -Set the `APPENGINE_DEV_APPSERVER` environment variable to `/path/to/go_appengine/dev_appserver.py`. - -Run tests with `go test`: - -``` -go test -v google.golang.org/appengine/... -``` - -## Contributor License Agreements - -Before we can accept your pull requests you'll need to sign a Contributor -License Agreement (CLA): - -- **If you are an individual writing original source code** and **you own the -intellectual property**, then you'll need to sign an [individual CLA][indvcla]. -- **If you work for a company that wants to allow you to contribute your work**, -then you'll need to sign a [corporate CLA][corpcla]. - -You can sign these electronically (just scroll to the bottom). After that, -we'll be able to accept your pull requests. - -## Contributor Code of Conduct - -As contributors and maintainers of this project, -and in the interest of fostering an open and welcoming community, -we pledge to respect all people who contribute through reporting issues, -posting feature requests, updating documentation, -submitting pull requests or patches, and other activities. - -We are committed to making participation in this project -a harassment-free experience for everyone, -regardless of level of experience, gender, gender identity and expression, -sexual orientation, disability, personal appearance, -body size, race, ethnicity, age, religion, or nationality. - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery -* Personal attacks -* Trolling or insulting/derogatory comments -* Public or private harassment -* Publishing other's private information, -such as physical or electronic -addresses, without explicit permission -* Other unethical or unprofessional conduct. - -Project maintainers have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct. 
-By adopting this Code of Conduct, -project maintainers commit themselves to fairly and consistently -applying these principles to every aspect of managing this project. -Project maintainers who do not follow or enforce the Code of Conduct -may be permanently removed from the project team. - -This code of conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. - -Instances of abusive, harassing, or otherwise unacceptable behavior -may be reported by opening an issue -or contacting one or more of the project maintainers. - -This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, -available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) - -[indvcla]: https://developers.google.com/open-source/cla/individual -[corpcla]: https://developers.google.com/open-source/cla/corporate diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md deleted file mode 100644 index 5ccddd99..00000000 --- a/vendor/google.golang.org/appengine/README.md +++ /dev/null @@ -1,100 +0,0 @@ -# Go App Engine packages - -[![CI Status](https://github.com/golang/appengine/actions/workflows/ci.yml/badge.svg)](https://github.com/golang/appengine/actions/workflows/ci.yml) - -This repository supports the Go runtime on *App Engine standard*. -It provides APIs for interacting with App Engine services. -Its canonical import path is `google.golang.org/appengine`. - -See https://cloud.google.com/appengine/docs/go/ -for more information. - -File issue reports and feature requests on the [GitHub's issue -tracker](https://github.com/golang/appengine/issues). - -## Upgrading an App Engine app to the flexible environment - -This package does not work on *App Engine flexible*. - -There are many differences between the App Engine standard environment and -the flexible environment. - -See the [documentation on upgrading to the flexible environment](https://cloud.google.com/appengine/docs/flexible/go/upgrading). - -## Directory structure - -The top level directory of this repository is the `appengine` package. It -contains the -basic APIs (e.g. `appengine.NewContext`) that apply across APIs. Specific API -packages are in subdirectories (e.g. `datastore`). - -There is an `internal` subdirectory that contains service protocol buffers, -plus packages required for connectivity to make API calls. App Engine apps -should not directly import any package under `internal`. - -## Updating from legacy (`import "appengine"`) packages - -If you're currently using the bare `appengine` packages -(that is, not these ones, imported via `google.golang.org/appengine`), -then you can use the `aefix` tool to help automate an upgrade to these packages. - -Run `go get google.golang.org/appengine/cmd/aefix` to install it. - -### 1. Update import paths - -The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`. -You will need to update your code to use import paths starting with that; for instance, -code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`. - -### 2. Update code using deprecated, removed or modified APIs - -Most App Engine services are available with exactly the same API. -A few APIs were cleaned up, and there are some differences: - -* `appengine.Context` has been replaced with the `Context` type from `context`. 
-* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`. -* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead. -* `appengine.Datacenter` now takes a `context.Context` argument. -* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels. -* `delay.Call` now returns an error. -* `search.FieldLoadSaver` now handles document metadata. -* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the - `context.Context` instead. -* `aetest` no longer declares its own Context type, and uses the standard one instead. -* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been - deprecated and unused for a long time. -* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature. - Use `appengine.ModuleHostname`and `appengine.ModuleName` instead. -* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated. - Use [Google Cloud Storage](https://godoc.org/cloud.google.com/go/storage) if the - feature you require is not present in the new - [blobstore package](https://google.golang.org/appengine/blobstore). -* `appengine/socket` is not required on App Engine flexible environment / Managed VMs. - Use the standard `net` package instead. - -## Key Encode/Decode compatibility to help with datastore library migrations - -Key compatibility updates have been added to help customers transition from google.golang.org/appengine/datastore to cloud.google.com/go/datastore. -The `EnableKeyConversion` enables automatic conversion from a key encoded with cloud.google.com/go/datastore to google.golang.org/appengine/datastore key type. - -### Enabling key conversion - -Enable key conversion by calling `EnableKeyConversion(ctx)` in the `/_ah/start` handler for basic and manual scaling or any handler in automatic scaling. - -#### 1. Basic or manual scaling - -This start handler will enable key conversion for all handlers in the service. - -``` -http.HandleFunc("/_ah/start", func(w http.ResponseWriter, r *http.Request) { - datastore.EnableKeyConversion(appengine.NewContext(r)) -}) -``` - -#### 2. Automatic scaling - -`/_ah/start` is not supported for automatic scaling and `/_ah/warmup` is not guaranteed to run, so you must call `datastore.EnableKeyConversion(appengine.NewContext(r))` -before you use code that needs key conversion. - -You may want to add this to each of your handlers, or introduce middleware where it's called. -`EnableKeyConversion` is safe for concurrent use. Any call to it after the first is ignored. \ No newline at end of file diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go deleted file mode 100644 index 35ba9c89..00000000 --- a/vendor/google.golang.org/appengine/appengine.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package appengine provides basic functionality for Google App Engine. -// -// For more information on how to write Go apps for Google App Engine, see: -// https://cloud.google.com/appengine/docs/go/ -package appengine // import "google.golang.org/appengine" - -import ( - "context" - "net/http" - - "github.com/golang/protobuf/proto" - - "google.golang.org/appengine/internal" -) - -// The gophers party all night; the rabbits provide the beats. 
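The README and CONTRIBUTING files deleted above carried the package's migration guidance; since the whole vendored appengine tree is being dropped, the one takeaway worth preserving is that second-generation runtimes use the standard context machinery. A minimal sketch of the replacement pattern the deleted README described (appengine.NewContext and appengine.Timeout both disappear):

```go
package example

import (
	"context"
	"net/http"
	"time"
)

// handler shows the modern equivalent of appengine.NewContext(r) plus
// appengine.Timeout: take the request's context and bound it yourself.
func handler(w http.ResponseWriter, r *http.Request) {
	ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second)
	defer cancel()
	_ = ctx // pass ctx into any Google API client call
}
```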
- -// Main is the principal entry point for an app running in App Engine. -// -// On App Engine Flexible it installs a trivial health checker if one isn't -// already registered, and starts listening on port 8080 (overridden by the -// $PORT environment variable). -// -// See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests -// for details on how to do your own health checking. -// -// On App Engine Standard it ensures the server has started and is prepared to -// receive requests. -// -// Main never returns. -// -// Main is designed so that the app's main package looks like this: -// -// package main -// -// import ( -// "google.golang.org/appengine" -// -// _ "myapp/package0" -// _ "myapp/package1" -// ) -// -// func main() { -// appengine.Main() -// } -// -// The "myapp/packageX" packages are expected to register HTTP handlers -// in their init functions. -func Main() { - internal.Main() -} - -// Middleware wraps an http handler so that it can make GAE API calls -var Middleware func(http.Handler) http.Handler = internal.Middleware - -// IsDevAppServer reports whether the App Engine app is running in the -// development App Server. -func IsDevAppServer() bool { - return internal.IsDevAppServer() -} - -// IsStandard reports whether the App Engine app is running in the standard -// environment. This includes both the first generation runtimes (<= Go 1.9) -// and the second generation runtimes (>= Go 1.11). -func IsStandard() bool { - return internal.IsStandard() -} - -// IsFlex reports whether the App Engine app is running in the flexible environment. -func IsFlex() bool { - return internal.IsFlex() -} - -// IsAppEngine reports whether the App Engine app is running on App Engine, in either -// the standard or flexible environment. -func IsAppEngine() bool { - return internal.IsAppEngine() -} - -// IsSecondGen reports whether the App Engine app is running on the second generation -// runtimes (>= Go 1.11). -func IsSecondGen() bool { - return internal.IsSecondGen() -} - -// NewContext returns a context for an in-flight HTTP request. -// This function is cheap. -func NewContext(req *http.Request) context.Context { - return internal.ReqContext(req) -} - -// WithContext returns a copy of the parent context -// and associates it with an in-flight HTTP request. -// This function is cheap. -func WithContext(parent context.Context, req *http.Request) context.Context { - return internal.WithContext(parent, req) -} - -// BlobKey is a key for a blobstore blob. -// -// Conceptually, this type belongs in the blobstore package, but it lives in -// the appengine package to avoid a circular dependency: blobstore depends on -// datastore, and datastore needs to refer to the BlobKey type. -type BlobKey string - -// GeoPoint represents a location as latitude/longitude in degrees. -type GeoPoint struct { - Lat, Lng float64 -} - -// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude. -func (g GeoPoint) Valid() bool { - return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180 -} - -// APICallFunc defines a function type for handling an API call. -// See WithCallOverride. -type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error - -// WithAPICallFunc returns a copy of the parent context -// that will cause API calls to invoke f instead of their normal operation. -// -// This is intended for advanced users only. 
-func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context { - return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f)) -} - -// APICall performs an API call. -// -// This is not intended for general use; it is exported for use in conjunction -// with WithAPICallFunc. -func APICall(ctx context.Context, service, method string, in, out proto.Message) error { - return internal.Call(ctx, service, method, in, out) -} diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go deleted file mode 100644 index 6e1d041c..00000000 --- a/vendor/google.golang.org/appengine/appengine_vm.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -//go:build !appengine -// +build !appengine - -package appengine - -import ( - "context" -) - -// BackgroundContext returns a context not associated with a request. -// -// Deprecated: App Engine no longer has a special background context. -// Just use context.Background(). -func BackgroundContext() context.Context { - return context.Background() -} diff --git a/vendor/google.golang.org/appengine/errors.go b/vendor/google.golang.org/appengine/errors.go deleted file mode 100644 index 16d0772e..00000000 --- a/vendor/google.golang.org/appengine/errors.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// This file provides error functions for common API failure modes. - -package appengine - -import ( - "fmt" - - "google.golang.org/appengine/internal" -) - -// IsOverQuota reports whether err represents an API call failure -// due to insufficient available quota. -func IsOverQuota(err error) bool { - callErr, ok := err.(*internal.CallError) - return ok && callErr.Code == 4 -} - -// MultiError is returned by batch operations when there are errors with -// particular elements. Errors will be in a one-to-one correspondence with -// the input elements; successful elements will have a nil entry. -type MultiError []error - -func (m MultiError) Error() string { - s, n := "", 0 - for _, e := range m { - if e != nil { - if n == 0 { - s = e.Error() - } - n++ - } - } - switch n { - case 0: - return "(0 errors)" - case 1: - return s - case 2: - return s + " (and 1 other error)" - } - return fmt.Sprintf("%s (and %d other errors)", s, n-1) -} diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go deleted file mode 100644 index 1202fc1a..00000000 --- a/vendor/google.golang.org/appengine/identity.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package appengine - -import ( - "context" - "time" - - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/app_identity" - modpb "google.golang.org/appengine/internal/modules" -) - -// AppID returns the application ID for the current application. -// The string will be a plain application ID (e.g. "appid"), with a -// domain prefix for custom domain deployments (e.g. "example.com:appid"). 
-func AppID(c context.Context) string { return internal.AppID(c) } - -// DefaultVersionHostname returns the standard hostname of the default version -// of the current application (e.g. "my-app.appspot.com"). This is suitable for -// use in constructing URLs. -func DefaultVersionHostname(c context.Context) string { - return internal.DefaultVersionHostname(c) -} - -// ModuleName returns the module name of the current instance. -func ModuleName(c context.Context) string { - return internal.ModuleName(c) -} - -// ModuleHostname returns a hostname of a module instance. -// If module is the empty string, it refers to the module of the current instance. -// If version is empty, it refers to the version of the current instance if valid, -// or the default version of the module of the current instance. -// If instance is empty, ModuleHostname returns the load-balancing hostname. -func ModuleHostname(c context.Context, module, version, instance string) (string, error) { - req := &modpb.GetHostnameRequest{} - if module != "" { - req.Module = &module - } - if version != "" { - req.Version = &version - } - if instance != "" { - req.Instance = &instance - } - res := &modpb.GetHostnameResponse{} - if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil { - return "", err - } - return *res.Hostname, nil -} - -// VersionID returns the version ID for the current application. -// It will be of the form "X.Y", where X is specified in app.yaml, -// and Y is a number generated when each version of the app is uploaded. -// It does not include a module name. -func VersionID(c context.Context) string { return internal.VersionID(c) } - -// InstanceID returns a mostly-unique identifier for this instance. -func InstanceID() string { return internal.InstanceID() } - -// Datacenter returns an identifier for the datacenter that the instance is running in. -func Datacenter(c context.Context) string { return internal.Datacenter(c) } - -// ServerSoftware returns the App Engine release version. -// In production, it looks like "Google App Engine/X.Y.Z". -// In the development appserver, it looks like "Development/X.Y". -func ServerSoftware() string { return internal.ServerSoftware() } - -// RequestID returns a string that uniquely identifies the request. -func RequestID(c context.Context) string { return internal.RequestID(c) } - -// AccessToken generates an OAuth2 access token for the specified scopes on -// behalf of service account of this application. This token will expire after -// the returned time. -func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) { - req := &pb.GetAccessTokenRequest{Scope: scopes} - res := &pb.GetAccessTokenResponse{} - - err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res) - if err != nil { - return "", time.Time{}, err - } - return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil -} - -// Certificate represents a public certificate for the app. -type Certificate struct { - KeyName string - Data []byte // PEM-encoded X.509 certificate -} - -// PublicCertificates retrieves the public certificates for the app. -// They can be used to verify a signature returned by SignBytes. 
-func PublicCertificates(c context.Context) ([]Certificate, error) { - req := &pb.GetPublicCertificateForAppRequest{} - res := &pb.GetPublicCertificateForAppResponse{} - if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil { - return nil, err - } - var cs []Certificate - for _, pc := range res.PublicCertificateList { - cs = append(cs, Certificate{ - KeyName: pc.GetKeyName(), - Data: []byte(pc.GetX509CertificatePem()), - }) - } - return cs, nil -} - -// ServiceAccount returns a string representing the service account name, in -// the form of an email address (typically app_id@appspot.gserviceaccount.com). -func ServiceAccount(c context.Context) (string, error) { - req := &pb.GetServiceAccountNameRequest{} - res := &pb.GetServiceAccountNameResponse{} - - err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res) - if err != nil { - return "", err - } - return res.GetServiceAccountName(), err -} - -// SignBytes signs bytes using a private key unique to your application. -func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) { - req := &pb.SignForAppRequest{BytesToSign: bytes} - res := &pb.SignForAppResponse{} - - if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil { - return "", nil, err - } - return res.GetKeyName(), res.GetSignatureBytes(), nil -} - -func init() { - internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name) - internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name) -} diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go deleted file mode 100644 index 0569f5dd..00000000 --- a/vendor/google.golang.org/appengine/internal/api.go +++ /dev/null @@ -1,653 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -//go:build !appengine -// +build !appengine - -package internal - -import ( - "bytes" - "context" - "errors" - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - "net/url" - "os" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/golang/protobuf/proto" - - basepb "google.golang.org/appengine/internal/base" - logpb "google.golang.org/appengine/internal/log" - remotepb "google.golang.org/appengine/internal/remote_api" -) - -const ( - apiPath = "/rpc_http" -) - -var ( - // Incoming headers. - ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket") - dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo") - traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context") - curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") - userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP") - remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr") - devRequestIdHeader = http.CanonicalHeaderKey("X-Appengine-Dev-Request-Id") - - // Outgoing headers. 
- apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint") - apiEndpointHeaderValue = []string{"app-engine-apis"} - apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method") - apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"} - apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline") - apiContentType = http.CanonicalHeaderKey("Content-Type") - apiContentTypeValue = []string{"application/octet-stream"} - logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count") - - apiHTTPClient = &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: limitDial, - MaxIdleConns: 1000, - MaxIdleConnsPerHost: 10000, - IdleConnTimeout: 90 * time.Second, - }, - } -) - -func apiURL(ctx context.Context) *url.URL { - host, port := "appengine.googleapis.internal", "10001" - if h := os.Getenv("API_HOST"); h != "" { - host = h - } - if hostOverride := ctx.Value(apiHostOverrideKey); hostOverride != nil { - host = hostOverride.(string) - } - if p := os.Getenv("API_PORT"); p != "" { - port = p - } - if portOverride := ctx.Value(apiPortOverrideKey); portOverride != nil { - port = portOverride.(string) - } - return &url.URL{ - Scheme: "http", - Host: host + ":" + port, - Path: apiPath, - } -} - -// Middleware wraps an http handler so that it can make GAE API calls -func Middleware(next http.Handler) http.Handler { - return handleHTTPMiddleware(executeRequestSafelyMiddleware(next)) -} - -func handleHTTPMiddleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - c := &aeContext{ - req: r, - outHeader: w.Header(), - } - r = r.WithContext(withContext(r.Context(), c)) - c.req = r - - stopFlushing := make(chan int) - - // Patch up RemoteAddr so it looks reasonable. - if addr := r.Header.Get(userIPHeader); addr != "" { - r.RemoteAddr = addr - } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { - r.RemoteAddr = addr - } else { - // Should not normally reach here, but pick a sensible default anyway. - r.RemoteAddr = "127.0.0.1" - } - // The address in the headers will most likely be of these forms: - // 123.123.123.123 - // 2001:db8::1 - // net/http.Request.RemoteAddr is specified to be in "IP:port" form. - if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { - // Assume the remote address is only a host; add a default port. - r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") - } - - if logToLogservice() { - // Start goroutine responsible for flushing app logs. - // This is done after adding c to ctx.m (and stopped before removing it) - // because flushing logs requires making an API call. - go c.logFlusher(stopFlushing) - } - - next.ServeHTTP(c, r) - c.outHeader = nil // make sure header changes aren't respected any more - - flushed := make(chan struct{}) - if logToLogservice() { - stopFlushing <- 1 // any logging beyond this point will be dropped - - // Flush any pending logs asynchronously. - c.pendingLogs.Lock() - flushes := c.pendingLogs.flushes - if len(c.pendingLogs.lines) > 0 { - flushes++ - } - c.pendingLogs.Unlock() - go func() { - defer close(flushed) - // Force a log flush, because with very short requests we - // may not ever flush logs. - c.flushLog(true) - }() - w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) - } - - // Avoid nil Write call if c.Write is never called. 
- if c.outCode != 0 { - w.WriteHeader(c.outCode) - } - if c.outBody != nil { - w.Write(c.outBody) - } - if logToLogservice() { - // Wait for the last flush to complete before returning, - // otherwise the security ticket will not be valid. - <-flushed - } - }) -} - -func executeRequestSafelyMiddleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer func() { - if x := recover(); x != nil { - c := w.(*aeContext) - logf(c, 4, "%s", renderPanic(x)) // 4 == critical - c.outCode = 500 - } - }() - - next.ServeHTTP(w, r) - }) -} - -func renderPanic(x interface{}) string { - buf := make([]byte, 16<<10) // 16 KB should be plenty - buf = buf[:runtime.Stack(buf, false)] - - // Remove the first few stack frames: - // this func - // the recover closure in the caller - // That will root the stack trace at the site of the panic. - const ( - skipStart = "internal.renderPanic" - skipFrames = 2 - ) - start := bytes.Index(buf, []byte(skipStart)) - p := start - for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ { - p = bytes.IndexByte(buf[p+1:], '\n') + p + 1 - if p < 0 { - break - } - } - if p >= 0 { - // buf[start:p+1] is the block to remove. - // Copy buf[p+1:] over buf[start:] and shrink buf. - copy(buf[start:], buf[p+1:]) - buf = buf[:len(buf)-(p+1-start)] - } - - // Add panic heading. - head := fmt.Sprintf("panic: %v\n\n", x) - if len(head) > len(buf) { - // Extremely unlikely to happen. - return head - } - copy(buf[len(head):], buf) - copy(buf, head) - - return string(buf) -} - -// aeContext represents the aeContext of an in-flight HTTP request. -// It implements the appengine.Context and http.ResponseWriter interfaces. -type aeContext struct { - req *http.Request - - outCode int - outHeader http.Header - outBody []byte - - pendingLogs struct { - sync.Mutex - lines []*logpb.UserAppLogLine - flushes int - } -} - -var contextKey = "holds a *context" - -// jointContext joins two contexts in a superficial way. -// It takes values and timeouts from a base context, and only values from another context. -type jointContext struct { - base context.Context - valuesOnly context.Context -} - -func (c jointContext) Deadline() (time.Time, bool) { - return c.base.Deadline() -} - -func (c jointContext) Done() <-chan struct{} { - return c.base.Done() -} - -func (c jointContext) Err() error { - return c.base.Err() -} - -func (c jointContext) Value(key interface{}) interface{} { - if val := c.base.Value(key); val != nil { - return val - } - return c.valuesOnly.Value(key) -} - -// fromContext returns the App Engine context or nil if ctx is not -// derived from an App Engine context. 
-func fromContext(ctx context.Context) *aeContext { - c, _ := ctx.Value(&contextKey).(*aeContext) - return c -} - -func withContext(parent context.Context, c *aeContext) context.Context { - ctx := context.WithValue(parent, &contextKey, c) - if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { - ctx = withNamespace(ctx, ns) - } - return ctx -} - -func toContext(c *aeContext) context.Context { - return withContext(context.Background(), c) -} - -func IncomingHeaders(ctx context.Context) http.Header { - if c := fromContext(ctx); c != nil { - return c.req.Header - } - return nil -} - -func ReqContext(req *http.Request) context.Context { - return req.Context() -} - -func WithContext(parent context.Context, req *http.Request) context.Context { - return jointContext{ - base: parent, - valuesOnly: req.Context(), - } -} - -// RegisterTestRequest registers the HTTP request req for testing, such that -// any API calls are sent to the provided URL. -// It should only be used by aetest package. -func RegisterTestRequest(req *http.Request, apiURL *url.URL, appID string) *http.Request { - ctx := req.Context() - ctx = withAPIHostOverride(ctx, apiURL.Hostname()) - ctx = withAPIPortOverride(ctx, apiURL.Port()) - ctx = WithAppIDOverride(ctx, appID) - - // use the unregistered request as a placeholder so that withContext can read the headers - c := &aeContext{req: req} - c.req = req.WithContext(withContext(ctx, c)) - return c.req -} - -var errTimeout = &CallError{ - Detail: "Deadline exceeded", - Code: int32(remotepb.RpcError_CANCELLED), - Timeout: true, -} - -func (c *aeContext) Header() http.Header { return c.outHeader } - -// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status -// codes do not permit a response body (nor response entity headers such as -// Content-Length, Content-Type, etc). -func bodyAllowedForStatus(status int) bool { - switch { - case status >= 100 && status <= 199: - return false - case status == 204: - return false - case status == 304: - return false - } - return true -} - -func (c *aeContext) Write(b []byte) (int, error) { - if c.outCode == 0 { - c.WriteHeader(http.StatusOK) - } - if len(b) > 0 && !bodyAllowedForStatus(c.outCode) { - return 0, http.ErrBodyNotAllowed - } - c.outBody = append(c.outBody, b...) - return len(b), nil -} - -func (c *aeContext) WriteHeader(code int) { - if c.outCode != 0 { - logf(c, 3, "WriteHeader called multiple times on request.") // error level - return - } - c.outCode = code -} - -func post(ctx context.Context, body []byte, timeout time.Duration) (b []byte, err error) { - apiURL := apiURL(ctx) - hreq := &http.Request{ - Method: "POST", - URL: apiURL, - Header: http.Header{ - apiEndpointHeader: apiEndpointHeaderValue, - apiMethodHeader: apiMethodHeaderValue, - apiContentType: apiContentTypeValue, - apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)}, - }, - Body: ioutil.NopCloser(bytes.NewReader(body)), - ContentLength: int64(len(body)), - Host: apiURL.Host, - } - c := fromContext(ctx) - if c != nil { - if info := c.req.Header.Get(dapperHeader); info != "" { - hreq.Header.Set(dapperHeader, info) - } - if info := c.req.Header.Get(traceHeader); info != "" { - hreq.Header.Set(traceHeader, info) - } - } - - tr := apiHTTPClient.Transport.(*http.Transport) - - var timedOut int32 // atomic; set to 1 if timed out - t := time.AfterFunc(timeout, func() { - atomic.StoreInt32(&timedOut, 1) - tr.CancelRequest(hreq) - }) - defer t.Stop() - defer func() { - // Check if timeout was exceeded. 
- if atomic.LoadInt32(&timedOut) != 0 { - err = errTimeout - } - }() - - hresp, err := apiHTTPClient.Do(hreq) - if err != nil { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge HTTP failed: %v", err), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - defer hresp.Body.Close() - hrespBody, err := ioutil.ReadAll(hresp.Body) - if hresp.StatusCode != 200 { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - if err != nil { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge response bad: %v", err), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - return hrespBody, nil -} - -func Call(ctx context.Context, service, method string, in, out proto.Message) error { - if ns := NamespaceFromContext(ctx); ns != "" { - if fn, ok := NamespaceMods[service]; ok { - fn(in, ns) - } - } - - if f, ctx, ok := callOverrideFromContext(ctx); ok { - return f(ctx, service, method, in, out) - } - - // Handle already-done contexts quickly. - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - c := fromContext(ctx) - - // Apply transaction modifications if we're in a transaction. - if t := transactionFromContext(ctx); t != nil { - if t.finished { - return errors.New("transaction aeContext has expired") - } - applyTransaction(in, &t.transaction) - } - - // Default RPC timeout is 60s. - timeout := 60 * time.Second - if deadline, ok := ctx.Deadline(); ok { - timeout = deadline.Sub(time.Now()) - } - - data, err := proto.Marshal(in) - if err != nil { - return err - } - - ticket := "" - if c != nil { - ticket = c.req.Header.Get(ticketHeader) - if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { - ticket = dri - } - } - req := &remotepb.Request{ - ServiceName: &service, - Method: &method, - Request: data, - RequestId: &ticket, - } - hreqBody, err := proto.Marshal(req) - if err != nil { - return err - } - - hrespBody, err := post(ctx, hreqBody, timeout) - if err != nil { - return err - } - - res := &remotepb.Response{} - if err := proto.Unmarshal(hrespBody, res); err != nil { - return err - } - if res.RpcError != nil { - ce := &CallError{ - Detail: res.RpcError.GetDetail(), - Code: *res.RpcError.Code, - } - switch remotepb.RpcError_ErrorCode(ce.Code) { - case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED: - ce.Timeout = true - } - return ce - } - if res.ApplicationError != nil { - return &APIError{ - Service: *req.ServiceName, - Detail: res.ApplicationError.GetDetail(), - Code: *res.ApplicationError.Code, - } - } - if res.Exception != nil || res.JavaException != nil { - // This shouldn't happen, but let's be defensive. - return &CallError{ - Detail: "service bridge returned exception", - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - return proto.Unmarshal(res.Response, out) -} - -func (c *aeContext) Request() *http.Request { - return c.req -} - -func (c *aeContext) addLogLine(ll *logpb.UserAppLogLine) { - // Truncate long log lines. - // TODO(dsymonds): Check if this is still necessary. 
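A minimal sketch (illustrative only, not part of the vendored file) of the deadline handling in Call above: the RPC timeout defaults to 60 seconds and otherwise honors the caller's context deadline. time.Until(d) is the modern equivalent of the d.Sub(time.Now()) spelling used in the vendored code; rpcTimeout is a hypothetical name.

	func rpcTimeout(ctx context.Context) time.Duration {
		// Default RPC timeout, as in Call above.
		timeout := 60 * time.Second
		if d, ok := ctx.Deadline(); ok {
			timeout = time.Until(d) // same as d.Sub(time.Now())
		}
		return timeout
	}
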
- const lim = 8 << 10 - if len(*ll.Message) > lim { - suffix := fmt.Sprintf("...(length %d)", len(*ll.Message)) - ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix) - } - - c.pendingLogs.Lock() - c.pendingLogs.lines = append(c.pendingLogs.lines, ll) - c.pendingLogs.Unlock() -} - -var logLevelName = map[int64]string{ - 0: "DEBUG", - 1: "INFO", - 2: "WARNING", - 3: "ERROR", - 4: "CRITICAL", -} - -func logf(c *aeContext, level int64, format string, args ...interface{}) { - if c == nil { - panic("not an App Engine aeContext") - } - s := fmt.Sprintf(format, args...) - s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. - if logToLogservice() { - c.addLogLine(&logpb.UserAppLogLine{ - TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), - Level: &level, - Message: &s, - }) - } - // Log to stdout if not deployed - if !IsSecondGen() { - log.Print(logLevelName[level] + ": " + s) - } -} - -// flushLog attempts to flush any pending logs to the appserver. -// It should not be called concurrently. -func (c *aeContext) flushLog(force bool) (flushed bool) { - c.pendingLogs.Lock() - // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. - n, rem := 0, 30<<20 - for ; n < len(c.pendingLogs.lines); n++ { - ll := c.pendingLogs.lines[n] - // Each log line will require about 3 bytes of overhead. - nb := proto.Size(ll) + 3 - if nb > rem { - break - } - rem -= nb - } - lines := c.pendingLogs.lines[:n] - c.pendingLogs.lines = c.pendingLogs.lines[n:] - c.pendingLogs.Unlock() - - if len(lines) == 0 && !force { - // Nothing to flush. - return false - } - - rescueLogs := false - defer func() { - if rescueLogs { - c.pendingLogs.Lock() - c.pendingLogs.lines = append(lines, c.pendingLogs.lines...) - c.pendingLogs.Unlock() - } - }() - - buf, err := proto.Marshal(&logpb.UserAppLogGroup{ - LogLine: lines, - }) - if err != nil { - log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err) - rescueLogs = true - return false - } - - req := &logpb.FlushRequest{ - Logs: buf, - } - res := &basepb.VoidProto{} - c.pendingLogs.Lock() - c.pendingLogs.flushes++ - c.pendingLogs.Unlock() - if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil { - log.Printf("internal.flushLog: Flush RPC: %v", err) - rescueLogs = true - return false - } - return true -} - -const ( - // Log flushing parameters. - flushInterval = 1 * time.Second - forceFlushInterval = 60 * time.Second -) - -func (c *aeContext) logFlusher(stop <-chan int) { - lastFlush := time.Now() - tick := time.NewTicker(flushInterval) - for { - select { - case <-stop: - // Request finished. - tick.Stop() - return - case <-tick.C: - force := time.Now().Sub(lastFlush) > forceFlushInterval - if c.flushLog(force) { - lastFlush = time.Now() - } - } - } -} - -func ContextForTesting(req *http.Request) context.Context { - return toContext(&aeContext{req: req}) -} - -func logToLogservice() bool { - // TODO: replace logservice with json structured logs to $LOG_DIR/app.log.json - // where $LOG_DIR is /var/log in prod and some tmpdir in dev - return os.Getenv("LOG_TO_LOGSERVICE") != "0" -} diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go deleted file mode 100644 index 87c33c79..00000000 --- a/vendor/google.golang.org/appengine/internal/api_classic.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. 
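A minimal sketch (illustrative only, not part of the vendored file) of the byte-budget batching performed by flushLog above: drain pending lines until an estimated budget (~30 MB there) is exhausted, returning the remainder so a failed flush can re-queue it. drainBudget is a hypothetical name, and a string slice stands in for []*logpb.UserAppLogLine; the ~3-byte framing estimate follows the vendored code.

	func drainBudget(lines []string, budget int) (batch, rest []string) {
		n := 0
		for ; n < len(lines); n++ {
			nb := len(lines[n]) + 3 // ~3 bytes of proto framing overhead per line
			if nb > budget {
				break
			}
			budget -= nb
		}
		return lines[:n], lines[n:]
	}
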
-// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -//go:build appengine -// +build appengine - -package internal - -import ( - "context" - "errors" - "fmt" - "net/http" - "time" - - "appengine" - "appengine_internal" - basepb "appengine_internal/base" - - "github.com/golang/protobuf/proto" -) - -var contextKey = "holds an appengine.Context" - -// fromContext returns the App Engine context or nil if ctx is not -// derived from an App Engine context. -func fromContext(ctx context.Context) appengine.Context { - c, _ := ctx.Value(&contextKey).(appengine.Context) - return c -} - -// This is only for classic App Engine adapters. -func ClassicContextFromContext(ctx context.Context) (appengine.Context, error) { - c := fromContext(ctx) - if c == nil { - return nil, errNotAppEngineContext - } - return c, nil -} - -func withContext(parent context.Context, c appengine.Context) context.Context { - ctx := context.WithValue(parent, &contextKey, c) - - s := &basepb.StringProto{} - c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) - if ns := s.GetValue(); ns != "" { - ctx = NamespacedContext(ctx, ns) - } - - return ctx -} - -func IncomingHeaders(ctx context.Context) http.Header { - if c := fromContext(ctx); c != nil { - if req, ok := c.Request().(*http.Request); ok { - return req.Header - } - } - return nil -} - -func ReqContext(req *http.Request) context.Context { - return WithContext(context.Background(), req) -} - -func WithContext(parent context.Context, req *http.Request) context.Context { - c := appengine.NewContext(req) - return withContext(parent, c) -} - -type testingContext struct { - appengine.Context - - req *http.Request -} - -func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" } -func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error { - if service == "__go__" && method == "GetNamespace" { - return nil - } - return fmt.Errorf("testingContext: unsupported Call") -} -func (t *testingContext) Request() interface{} { return t.req } - -func ContextForTesting(req *http.Request) context.Context { - return withContext(context.Background(), &testingContext{req: req}) -} - -func Call(ctx context.Context, service, method string, in, out proto.Message) error { - if ns := NamespaceFromContext(ctx); ns != "" { - if fn, ok := NamespaceMods[service]; ok { - fn(in, ns) - } - } - - if f, ctx, ok := callOverrideFromContext(ctx); ok { - return f(ctx, service, method, in, out) - } - - // Handle already-done contexts quickly. - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errNotAppEngineContext - } - - // Apply transaction modifications if we're in a transaction. 
- if t := transactionFromContext(ctx); t != nil { - if t.finished { - return errors.New("transaction context has expired") - } - applyTransaction(in, &t.transaction) - } - - var opts *appengine_internal.CallOptions - if d, ok := ctx.Deadline(); ok { - opts = &appengine_internal.CallOptions{ - Timeout: d.Sub(time.Now()), - } - } - - err := c.Call(service, method, in, out, opts) - switch v := err.(type) { - case *appengine_internal.APIError: - return &APIError{ - Service: v.Service, - Detail: v.Detail, - Code: v.Code, - } - case *appengine_internal.CallError: - return &CallError{ - Detail: v.Detail, - Code: v.Code, - Timeout: v.Timeout, - } - } - return err -} - -func Middleware(next http.Handler) http.Handler { - panic("Middleware called; this should be impossible") -} - -func logf(c appengine.Context, level int64, format string, args ...interface{}) { - var fn func(format string, args ...interface{}) - switch level { - case 0: - fn = c.Debugf - case 1: - fn = c.Infof - case 2: - fn = c.Warningf - case 3: - fn = c.Errorf - case 4: - fn = c.Criticalf - default: - // This shouldn't happen. - fn = c.Criticalf - } - fn(format, args...) -} diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go deleted file mode 100644 index 5b95c13d..00000000 --- a/vendor/google.golang.org/appengine/internal/api_common.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -import ( - "context" - "errors" - "os" - - "github.com/golang/protobuf/proto" -) - -type ctxKey string - -func (c ctxKey) String() string { - return "appengine context key: " + string(c) -} - -var errNotAppEngineContext = errors.New("not an App Engine context") - -type CallOverrideFunc func(ctx context.Context, service, method string, in, out proto.Message) error - -var callOverrideKey = "holds []CallOverrideFunc" - -func WithCallOverride(ctx context.Context, f CallOverrideFunc) context.Context { - // We avoid appending to any existing call override - // so we don't risk overwriting a popped stack below. - var cofs []CallOverrideFunc - if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok { - cofs = append(cofs, uf...) - } - cofs = append(cofs, f) - return context.WithValue(ctx, &callOverrideKey, cofs) -} - -func callOverrideFromContext(ctx context.Context) (CallOverrideFunc, context.Context, bool) { - cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) - if len(cofs) == 0 { - return nil, nil, false - } - // We found a list of overrides; grab the last, and reconstitute a - // context that will hide it. 
- f := cofs[len(cofs)-1] - ctx = context.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) - return f, ctx, true -} - -type logOverrideFunc func(level int64, format string, args ...interface{}) - -var logOverrideKey = "holds a logOverrideFunc" - -func WithLogOverride(ctx context.Context, f logOverrideFunc) context.Context { - return context.WithValue(ctx, &logOverrideKey, f) -} - -var appIDOverrideKey = "holds a string, being the full app ID" - -func WithAppIDOverride(ctx context.Context, appID string) context.Context { - return context.WithValue(ctx, &appIDOverrideKey, appID) -} - -var apiHostOverrideKey = ctxKey("holds a string, being the alternate API_HOST") - -func withAPIHostOverride(ctx context.Context, apiHost string) context.Context { - return context.WithValue(ctx, apiHostOverrideKey, apiHost) -} - -var apiPortOverrideKey = ctxKey("holds a string, being the alternate API_PORT") - -func withAPIPortOverride(ctx context.Context, apiPort string) context.Context { - return context.WithValue(ctx, apiPortOverrideKey, apiPort) -} - -var namespaceKey = "holds the namespace string" - -func withNamespace(ctx context.Context, ns string) context.Context { - return context.WithValue(ctx, &namespaceKey, ns) -} - -func NamespaceFromContext(ctx context.Context) string { - // If there's no namespace, return the empty string. - ns, _ := ctx.Value(&namespaceKey).(string) - return ns -} - -// FullyQualifiedAppID returns the fully-qualified application ID. -// This may contain a partition prefix (e.g. "s~" for High Replication apps), -// or a domain prefix (e.g. "example.com:"). -func FullyQualifiedAppID(ctx context.Context) string { - if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { - return id - } - return fullyQualifiedAppID(ctx) -} - -func Logf(ctx context.Context, level int64, format string, args ...interface{}) { - if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { - f(level, format, args...) - return - } - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - logf(c, level, format, args...) -} - -// NamespacedContext wraps a Context to support namespaces. -func NamespacedContext(ctx context.Context, namespace string) context.Context { - return withNamespace(ctx, namespace) -} - -// SetTestEnv sets the env variables for testing background ticket in Flex. -func SetTestEnv() func() { - var environ = []struct { - key, value string - }{ - {"GAE_LONG_APP_ID", "my-app-id"}, - {"GAE_MINOR_VERSION", "067924799508853122"}, - {"GAE_MODULE_INSTANCE", "0"}, - {"GAE_MODULE_NAME", "default"}, - {"GAE_MODULE_VERSION", "20150612t184001"}, - } - - for _, v := range environ { - old := os.Getenv(v.key) - os.Setenv(v.key, v.value) - v.value = old - } - return func() { // Restore old environment after the test completes. - for _, v := range environ { - if v.value == "" { - os.Unsetenv(v.key) - continue - } - os.Setenv(v.key, v.value) - } - } -} diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go deleted file mode 100644 index 11df8c07..00000000 --- a/vendor/google.golang.org/appengine/internal/app_id.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
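A worked example (illustrative only, not part of the vendored file) of the full app ID format handled by parseFullAppID just below, "[partition~][domain:]display" — cf. the FullyQualifiedAppID comment above: "s~" for High Replication apps, "example.com:" for domain apps:

	parseFullAppID("s~example.com:myapp") // -> ("s", "example.com", "myapp")
	appID("s~example.com:myapp")          // -> "example.com:myapp"
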
- -package internal - -import ( - "strings" -) - -func parseFullAppID(appid string) (partition, domain, displayID string) { - if i := strings.Index(appid, "~"); i != -1 { - partition, appid = appid[:i], appid[i+1:] - } - if i := strings.Index(appid, ":"); i != -1 { - domain, appid = appid[:i], appid[i+1:] - } - return partition, domain, appid -} - -// appID returns "appid" or "domain.com:appid". -func appID(fullAppID string) string { - _, dom, dis := parseFullAppID(fullAppID) - if dom != "" { - return dom + ":" + dis - } - return dis -} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go deleted file mode 100644 index 9a2ff77a..00000000 --- a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go +++ /dev/null @@ -1,611 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto - -package app_identity - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type AppIdentityServiceError_ErrorCode int32 - -const ( - AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0 - AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9 - AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000 - AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001 - AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002 - AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003 - AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005 - AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006 -) - -var AppIdentityServiceError_ErrorCode_name = map[int32]string{ - 0: "SUCCESS", - 9: "UNKNOWN_SCOPE", - 1000: "BLOB_TOO_LARGE", - 1001: "DEADLINE_EXCEEDED", - 1002: "NOT_A_VALID_APP", - 1003: "UNKNOWN_ERROR", - 1005: "NOT_ALLOWED", - 1006: "NOT_IMPLEMENTED", -} -var AppIdentityServiceError_ErrorCode_value = map[string]int32{ - "SUCCESS": 0, - "UNKNOWN_SCOPE": 9, - "BLOB_TOO_LARGE": 1000, - "DEADLINE_EXCEEDED": 1001, - "NOT_A_VALID_APP": 1002, - "UNKNOWN_ERROR": 1003, - "NOT_ALLOWED": 1005, - "NOT_IMPLEMENTED": 1006, -} - -func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode { - p := new(AppIdentityServiceError_ErrorCode) - *p = x - return p -} -func (x AppIdentityServiceError_ErrorCode) String() string { - return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x)) -} -func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode") - if err != nil { - return err - } - *x = AppIdentityServiceError_ErrorCode(value) - return nil -} -func (AppIdentityServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { - return 
fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{0, 0} -} - -type AppIdentityServiceError struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} } -func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) } -func (*AppIdentityServiceError) ProtoMessage() {} -func (*AppIdentityServiceError) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{0} -} -func (m *AppIdentityServiceError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AppIdentityServiceError.Unmarshal(m, b) -} -func (m *AppIdentityServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AppIdentityServiceError.Marshal(b, m, deterministic) -} -func (dst *AppIdentityServiceError) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppIdentityServiceError.Merge(dst, src) -} -func (m *AppIdentityServiceError) XXX_Size() int { - return xxx_messageInfo_AppIdentityServiceError.Size(m) -} -func (m *AppIdentityServiceError) XXX_DiscardUnknown() { - xxx_messageInfo_AppIdentityServiceError.DiscardUnknown(m) -} - -var xxx_messageInfo_AppIdentityServiceError proto.InternalMessageInfo - -type SignForAppRequest struct { - BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign,json=bytesToSign" json:"bytes_to_sign,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} } -func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) } -func (*SignForAppRequest) ProtoMessage() {} -func (*SignForAppRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{1} -} -func (m *SignForAppRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SignForAppRequest.Unmarshal(m, b) -} -func (m *SignForAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SignForAppRequest.Marshal(b, m, deterministic) -} -func (dst *SignForAppRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SignForAppRequest.Merge(dst, src) -} -func (m *SignForAppRequest) XXX_Size() int { - return xxx_messageInfo_SignForAppRequest.Size(m) -} -func (m *SignForAppRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SignForAppRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SignForAppRequest proto.InternalMessageInfo - -func (m *SignForAppRequest) GetBytesToSign() []byte { - if m != nil { - return m.BytesToSign - } - return nil -} - -type SignForAppResponse struct { - KeyName *string `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"` - SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes,json=signatureBytes" json:"signature_bytes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} } -func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) } -func (*SignForAppResponse) ProtoMessage() {} -func (*SignForAppResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{2} -} -func (m *SignForAppResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SignForAppResponse.Unmarshal(m, b) -} 
-func (m *SignForAppResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SignForAppResponse.Marshal(b, m, deterministic) -} -func (dst *SignForAppResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SignForAppResponse.Merge(dst, src) -} -func (m *SignForAppResponse) XXX_Size() int { - return xxx_messageInfo_SignForAppResponse.Size(m) -} -func (m *SignForAppResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SignForAppResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_SignForAppResponse proto.InternalMessageInfo - -func (m *SignForAppResponse) GetKeyName() string { - if m != nil && m.KeyName != nil { - return *m.KeyName - } - return "" -} - -func (m *SignForAppResponse) GetSignatureBytes() []byte { - if m != nil { - return m.SignatureBytes - } - return nil -} - -type GetPublicCertificateForAppRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPublicCertificateForAppRequest) Reset() { *m = GetPublicCertificateForAppRequest{} } -func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) } -func (*GetPublicCertificateForAppRequest) ProtoMessage() {} -func (*GetPublicCertificateForAppRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{3} -} -func (m *GetPublicCertificateForAppRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPublicCertificateForAppRequest.Unmarshal(m, b) -} -func (m *GetPublicCertificateForAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPublicCertificateForAppRequest.Marshal(b, m, deterministic) -} -func (dst *GetPublicCertificateForAppRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPublicCertificateForAppRequest.Merge(dst, src) -} -func (m *GetPublicCertificateForAppRequest) XXX_Size() int { - return xxx_messageInfo_GetPublicCertificateForAppRequest.Size(m) -} -func (m *GetPublicCertificateForAppRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetPublicCertificateForAppRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPublicCertificateForAppRequest proto.InternalMessageInfo - -type PublicCertificate struct { - KeyName *string `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"` - X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem,json=x509CertificatePem" json:"x509_certificate_pem,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PublicCertificate) Reset() { *m = PublicCertificate{} } -func (m *PublicCertificate) String() string { return proto.CompactTextString(m) } -func (*PublicCertificate) ProtoMessage() {} -func (*PublicCertificate) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{4} -} -func (m *PublicCertificate) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PublicCertificate.Unmarshal(m, b) -} -func (m *PublicCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PublicCertificate.Marshal(b, m, deterministic) -} -func (dst *PublicCertificate) XXX_Merge(src proto.Message) { - xxx_messageInfo_PublicCertificate.Merge(dst, src) -} -func (m *PublicCertificate) XXX_Size() int { - return xxx_messageInfo_PublicCertificate.Size(m) -} -func (m *PublicCertificate) XXX_DiscardUnknown() { - 
xxx_messageInfo_PublicCertificate.DiscardUnknown(m) -} - -var xxx_messageInfo_PublicCertificate proto.InternalMessageInfo - -func (m *PublicCertificate) GetKeyName() string { - if m != nil && m.KeyName != nil { - return *m.KeyName - } - return "" -} - -func (m *PublicCertificate) GetX509CertificatePem() string { - if m != nil && m.X509CertificatePem != nil { - return *m.X509CertificatePem - } - return "" -} - -type GetPublicCertificateForAppResponse struct { - PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list,json=publicCertificateList" json:"public_certificate_list,omitempty"` - MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second,json=maxClientCacheTimeInSecond" json:"max_client_cache_time_in_second,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} } -func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) } -func (*GetPublicCertificateForAppResponse) ProtoMessage() {} -func (*GetPublicCertificateForAppResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{5} -} -func (m *GetPublicCertificateForAppResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPublicCertificateForAppResponse.Unmarshal(m, b) -} -func (m *GetPublicCertificateForAppResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPublicCertificateForAppResponse.Marshal(b, m, deterministic) -} -func (dst *GetPublicCertificateForAppResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPublicCertificateForAppResponse.Merge(dst, src) -} -func (m *GetPublicCertificateForAppResponse) XXX_Size() int { - return xxx_messageInfo_GetPublicCertificateForAppResponse.Size(m) -} -func (m *GetPublicCertificateForAppResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetPublicCertificateForAppResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPublicCertificateForAppResponse proto.InternalMessageInfo - -func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate { - if m != nil { - return m.PublicCertificateList - } - return nil -} - -func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 { - if m != nil && m.MaxClientCacheTimeInSecond != nil { - return *m.MaxClientCacheTimeInSecond - } - return 0 -} - -type GetServiceAccountNameRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} } -func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetServiceAccountNameRequest) ProtoMessage() {} -func (*GetServiceAccountNameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{6} -} -func (m *GetServiceAccountNameRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetServiceAccountNameRequest.Unmarshal(m, b) -} -func (m *GetServiceAccountNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetServiceAccountNameRequest.Marshal(b, m, deterministic) -} -func (dst *GetServiceAccountNameRequest) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_GetServiceAccountNameRequest.Merge(dst, src) -} -func (m *GetServiceAccountNameRequest) XXX_Size() int { - return xxx_messageInfo_GetServiceAccountNameRequest.Size(m) -} -func (m *GetServiceAccountNameRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetServiceAccountNameRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetServiceAccountNameRequest proto.InternalMessageInfo - -type GetServiceAccountNameResponse struct { - ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} } -func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) } -func (*GetServiceAccountNameResponse) ProtoMessage() {} -func (*GetServiceAccountNameResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{7} -} -func (m *GetServiceAccountNameResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetServiceAccountNameResponse.Unmarshal(m, b) -} -func (m *GetServiceAccountNameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetServiceAccountNameResponse.Marshal(b, m, deterministic) -} -func (dst *GetServiceAccountNameResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetServiceAccountNameResponse.Merge(dst, src) -} -func (m *GetServiceAccountNameResponse) XXX_Size() int { - return xxx_messageInfo_GetServiceAccountNameResponse.Size(m) -} -func (m *GetServiceAccountNameResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetServiceAccountNameResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetServiceAccountNameResponse proto.InternalMessageInfo - -func (m *GetServiceAccountNameResponse) GetServiceAccountName() string { - if m != nil && m.ServiceAccountName != nil { - return *m.ServiceAccountName - } - return "" -} - -type GetAccessTokenRequest struct { - Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"` - ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id,json=serviceAccountId" json:"service_account_id,omitempty"` - ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} } -func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) } -func (*GetAccessTokenRequest) ProtoMessage() {} -func (*GetAccessTokenRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{8} -} -func (m *GetAccessTokenRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetAccessTokenRequest.Unmarshal(m, b) -} -func (m *GetAccessTokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetAccessTokenRequest.Marshal(b, m, deterministic) -} -func (dst *GetAccessTokenRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAccessTokenRequest.Merge(dst, src) -} -func (m *GetAccessTokenRequest) XXX_Size() int { - return xxx_messageInfo_GetAccessTokenRequest.Size(m) -} -func (m *GetAccessTokenRequest) XXX_DiscardUnknown() { - 
xxx_messageInfo_GetAccessTokenRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetAccessTokenRequest proto.InternalMessageInfo - -func (m *GetAccessTokenRequest) GetScope() []string { - if m != nil { - return m.Scope - } - return nil -} - -func (m *GetAccessTokenRequest) GetServiceAccountId() int64 { - if m != nil && m.ServiceAccountId != nil { - return *m.ServiceAccountId - } - return 0 -} - -func (m *GetAccessTokenRequest) GetServiceAccountName() string { - if m != nil && m.ServiceAccountName != nil { - return *m.ServiceAccountName - } - return "" -} - -type GetAccessTokenResponse struct { - AccessToken *string `protobuf:"bytes,1,opt,name=access_token,json=accessToken" json:"access_token,omitempty"` - ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time,json=expirationTime" json:"expiration_time,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} } -func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) } -func (*GetAccessTokenResponse) ProtoMessage() {} -func (*GetAccessTokenResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{9} -} -func (m *GetAccessTokenResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetAccessTokenResponse.Unmarshal(m, b) -} -func (m *GetAccessTokenResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetAccessTokenResponse.Marshal(b, m, deterministic) -} -func (dst *GetAccessTokenResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAccessTokenResponse.Merge(dst, src) -} -func (m *GetAccessTokenResponse) XXX_Size() int { - return xxx_messageInfo_GetAccessTokenResponse.Size(m) -} -func (m *GetAccessTokenResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetAccessTokenResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetAccessTokenResponse proto.InternalMessageInfo - -func (m *GetAccessTokenResponse) GetAccessToken() string { - if m != nil && m.AccessToken != nil { - return *m.AccessToken - } - return "" -} - -func (m *GetAccessTokenResponse) GetExpirationTime() int64 { - if m != nil && m.ExpirationTime != nil { - return *m.ExpirationTime - } - return 0 -} - -type GetDefaultGcsBucketNameRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} } -func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {} -func (*GetDefaultGcsBucketNameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{10} -} -func (m *GetDefaultGcsBucketNameRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Unmarshal(m, b) -} -func (m *GetDefaultGcsBucketNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Marshal(b, m, deterministic) -} -func (dst *GetDefaultGcsBucketNameRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDefaultGcsBucketNameRequest.Merge(dst, src) -} -func (m *GetDefaultGcsBucketNameRequest) XXX_Size() int { - return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Size(m) -} -func (m *GetDefaultGcsBucketNameRequest) 
XXX_DiscardUnknown() { - xxx_messageInfo_GetDefaultGcsBucketNameRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetDefaultGcsBucketNameRequest proto.InternalMessageInfo - -type GetDefaultGcsBucketNameResponse struct { - DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name,json=defaultGcsBucketName" json:"default_gcs_bucket_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} } -func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) } -func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {} -func (*GetDefaultGcsBucketNameResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{11} -} -func (m *GetDefaultGcsBucketNameResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Unmarshal(m, b) -} -func (m *GetDefaultGcsBucketNameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Marshal(b, m, deterministic) -} -func (dst *GetDefaultGcsBucketNameResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDefaultGcsBucketNameResponse.Merge(dst, src) -} -func (m *GetDefaultGcsBucketNameResponse) XXX_Size() int { - return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Size(m) -} -func (m *GetDefaultGcsBucketNameResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetDefaultGcsBucketNameResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetDefaultGcsBucketNameResponse proto.InternalMessageInfo - -func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string { - if m != nil && m.DefaultGcsBucketName != nil { - return *m.DefaultGcsBucketName - } - return "" -} - -func init() { - proto.RegisterType((*AppIdentityServiceError)(nil), "appengine.AppIdentityServiceError") - proto.RegisterType((*SignForAppRequest)(nil), "appengine.SignForAppRequest") - proto.RegisterType((*SignForAppResponse)(nil), "appengine.SignForAppResponse") - proto.RegisterType((*GetPublicCertificateForAppRequest)(nil), "appengine.GetPublicCertificateForAppRequest") - proto.RegisterType((*PublicCertificate)(nil), "appengine.PublicCertificate") - proto.RegisterType((*GetPublicCertificateForAppResponse)(nil), "appengine.GetPublicCertificateForAppResponse") - proto.RegisterType((*GetServiceAccountNameRequest)(nil), "appengine.GetServiceAccountNameRequest") - proto.RegisterType((*GetServiceAccountNameResponse)(nil), "appengine.GetServiceAccountNameResponse") - proto.RegisterType((*GetAccessTokenRequest)(nil), "appengine.GetAccessTokenRequest") - proto.RegisterType((*GetAccessTokenResponse)(nil), "appengine.GetAccessTokenResponse") - proto.RegisterType((*GetDefaultGcsBucketNameRequest)(nil), "appengine.GetDefaultGcsBucketNameRequest") - proto.RegisterType((*GetDefaultGcsBucketNameResponse)(nil), "appengine.GetDefaultGcsBucketNameResponse") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/app_identity/app_identity_service.proto", fileDescriptor_app_identity_service_08a6e3f74b04cfa4) -} - -var fileDescriptor_app_identity_service_08a6e3f74b04cfa4 = []byte{ - // 676 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x54, 0xdb, 0x6e, 0xda, 0x58, - 0x14, 0x1d, 0x26, 0x1a, 0x31, 0x6c, 0x12, 0x62, 0xce, 0x90, 0xcb, 0x8c, 0x32, 0xb9, 0x78, 
0x1e, - 0x26, 0x0f, 0x15, 0x89, 0x2a, 0x45, 0x55, 0x1f, 0x8d, 0xed, 0x22, 0x54, 0x07, 0x53, 0x43, 0x9a, - 0xa8, 0x2f, 0xa7, 0xce, 0x61, 0xc7, 0x3d, 0x02, 0x9f, 0xe3, 0xda, 0x87, 0x0a, 0x3e, 0xa2, 0x3f, - 0xd2, 0x9f, 0xe8, 0x5b, 0xbf, 0xa5, 0x17, 0xb5, 0xdf, 0x50, 0xd9, 0x38, 0x5c, 0x92, 0x92, 0x37, - 0xbc, 0xf6, 0x5a, 0xcb, 0x6b, 0x2f, 0x6d, 0x0c, 0x4e, 0x20, 0x65, 0x30, 0xc4, 0x7a, 0x20, 0x87, - 0xbe, 0x08, 0xea, 0x32, 0x0e, 0x4e, 0xfc, 0x28, 0x42, 0x11, 0x70, 0x81, 0x27, 0x5c, 0x28, 0x8c, - 0x85, 0x3f, 0x4c, 0x21, 0xca, 0xfb, 0x28, 0x14, 0x57, 0x93, 0xa5, 0x07, 0x9a, 0x60, 0xfc, 0x8e, - 0x33, 0xac, 0x47, 0xb1, 0x54, 0x92, 0x94, 0x66, 0x5a, 0xfd, 0x53, 0x01, 0x76, 0x8c, 0x28, 0x6a, - 0xe5, 0xc4, 0xee, 0x94, 0x67, 0xc7, 0xb1, 0x8c, 0xf5, 0x0f, 0x05, 0x28, 0x65, 0xbf, 0x4c, 0xd9, - 0x47, 0x52, 0x86, 0x62, 0xf7, 0xc2, 0x34, 0xed, 0x6e, 0x57, 0xfb, 0x8d, 0x54, 0x61, 0xe3, 0xa2, - 0xfd, 0xbc, 0xed, 0x5e, 0xb6, 0x69, 0xd7, 0x74, 0x3b, 0xb6, 0x56, 0x22, 0x7f, 0x41, 0xa5, 0xe1, - 0xb8, 0x0d, 0xda, 0x73, 0x5d, 0xea, 0x18, 0x5e, 0xd3, 0xd6, 0x3e, 0x17, 0xc9, 0x36, 0x54, 0x2d, - 0xdb, 0xb0, 0x9c, 0x56, 0xdb, 0xa6, 0xf6, 0x95, 0x69, 0xdb, 0x96, 0x6d, 0x69, 0x5f, 0x8a, 0xa4, - 0x06, 0x9b, 0x6d, 0xb7, 0x47, 0x0d, 0xfa, 0xd2, 0x70, 0x5a, 0x16, 0x35, 0x3a, 0x1d, 0xed, 0x6b, - 0x91, 0x90, 0xb9, 0xab, 0xed, 0x79, 0xae, 0xa7, 0x7d, 0x2b, 0x12, 0x0d, 0xca, 0x19, 0xd3, 0x71, - 0xdc, 0x4b, 0xdb, 0xd2, 0xbe, 0xcf, 0xb4, 0xad, 0xf3, 0x8e, 0x63, 0x9f, 0xdb, 0xed, 0x9e, 0x6d, - 0x69, 0x3f, 0x8a, 0xfa, 0x13, 0xa8, 0x76, 0x79, 0x20, 0x9e, 0xc9, 0xd8, 0x88, 0x22, 0x0f, 0xdf, - 0x8e, 0x30, 0x51, 0x44, 0x87, 0x8d, 0xeb, 0x89, 0xc2, 0x84, 0x2a, 0x49, 0x13, 0x1e, 0x88, 0xdd, - 0xc2, 0x61, 0xe1, 0x78, 0xdd, 0x2b, 0x67, 0x60, 0x4f, 0xa6, 0x02, 0xfd, 0x0a, 0xc8, 0xa2, 0x30, - 0x89, 0xa4, 0x48, 0x90, 0xfc, 0x0d, 0x7f, 0x0e, 0x70, 0x42, 0x85, 0x1f, 0x62, 0x26, 0x2a, 0x79, - 0xc5, 0x01, 0x4e, 0xda, 0x7e, 0x88, 0xe4, 0x7f, 0xd8, 0x4c, 0xbd, 0x7c, 0x35, 0x8a, 0x91, 0x66, - 0x4e, 0xbb, 0xbf, 0x67, 0xb6, 0x95, 0x19, 0xdc, 0x48, 0x51, 0xfd, 0x3f, 0x38, 0x6a, 0xa2, 0xea, - 0x8c, 0xae, 0x87, 0x9c, 0x99, 0x18, 0x2b, 0x7e, 0xc3, 0x99, 0xaf, 0x70, 0x29, 0xa2, 0xfe, 0x1a, - 0xaa, 0xf7, 0x18, 0x0f, 0xbd, 0xfd, 0x14, 0x6a, 0xe3, 0xb3, 0xd3, 0xa7, 0x94, 0xcd, 0xe9, 0x34, - 0xc2, 0x30, 0x8b, 0x50, 0xf2, 0x48, 0x3a, 0x5b, 0x70, 0xea, 0x60, 0xa8, 0x7f, 0x2c, 0x80, 0xfe, - 0x50, 0x8e, 0x7c, 0xe3, 0x1e, 0xec, 0x44, 0x19, 0x65, 0xc9, 0x7a, 0xc8, 0x13, 0xb5, 0x5b, 0x38, - 0x5c, 0x3b, 0x2e, 0x3f, 0xde, 0xab, 0xcf, 0xce, 0xa6, 0x7e, 0xcf, 0xcc, 0xdb, 0x8a, 0xee, 0x42, - 0x0e, 0x4f, 0x14, 0x31, 0xe1, 0x20, 0xf4, 0xc7, 0x94, 0x0d, 0x39, 0x0a, 0x45, 0x99, 0xcf, 0xde, - 0x20, 0x55, 0x3c, 0x44, 0xca, 0x05, 0x4d, 0x90, 0x49, 0xd1, 0xcf, 0x92, 0xaf, 0x79, 0xff, 0x84, - 0xfe, 0xd8, 0xcc, 0x58, 0x66, 0x4a, 0xea, 0xf1, 0x10, 0x5b, 0xa2, 0x9b, 0x31, 0xf4, 0x7d, 0xd8, - 0x6b, 0xa2, 0xca, 0x6f, 0xd3, 0x60, 0x4c, 0x8e, 0x84, 0x4a, 0xcb, 0xb8, 0xed, 0xf0, 0x05, 0xfc, - 0xbb, 0x62, 0x9e, 0xef, 0x76, 0x0a, 0xb5, 0xfc, 0x1f, 0x40, 0xfd, 0xe9, 0x78, 0xb1, 0x5b, 0x92, - 0xdc, 0x53, 0xea, 0xef, 0x0b, 0xb0, 0xd5, 0x44, 0x65, 0x30, 0x86, 0x49, 0xd2, 0x93, 0x03, 0x14, - 0xb7, 0x37, 0x55, 0x83, 0x3f, 0x12, 0x26, 0x23, 0xcc, 0x5a, 0x29, 0x79, 0xd3, 0x07, 0xf2, 0x08, - 0xc8, 0xdd, 0x37, 0xf0, 0xdb, 0xd5, 0xb4, 0x65, 0xff, 0x56, 0x7f, 0x65, 0x9e, 0xb5, 0x95, 0x79, - 0xfa, 0xb0, 0x7d, 0x37, 0x4e, 0xbe, 0xdb, 0x11, 0xac, 0xfb, 0x19, 0x4c, 0x55, 0x8a, 0xe7, 0x3b, - 0x95, 0xfd, 0x39, 0x35, 0xbd, 0x58, 0x1c, 0x47, 0x3c, 0xf6, 0x15, 0x97, 0x22, 0xab, 0x3f, 0x4f, - 0x56, 0x99, 0xc3, 
0x69, 0xe1, 0xfa, 0x21, 0xec, 0x37, 0x51, 0x59, 0x78, 0xe3, 0x8f, 0x86, 0xaa, - 0xc9, 0x92, 0xc6, 0x88, 0x0d, 0x70, 0xa9, 0xea, 0x2b, 0x38, 0x58, 0xc9, 0xc8, 0x03, 0x9d, 0xc1, - 0x4e, 0x7f, 0x3a, 0xa7, 0x01, 0x4b, 0xe8, 0x75, 0xc6, 0x58, 0xec, 0xbb, 0xd6, 0xff, 0x85, 0xbc, - 0x51, 0x79, 0xb5, 0xbe, 0xf8, 0xc9, 0xfa, 0x19, 0x00, 0x00, 0xff, 0xff, 0x37, 0x4c, 0x56, 0x38, - 0xf3, 0x04, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto deleted file mode 100644 index 19610ca5..00000000 --- a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto2"; -option go_package = "app_identity"; - -package appengine; - -message AppIdentityServiceError { - enum ErrorCode { - SUCCESS = 0; - UNKNOWN_SCOPE = 9; - BLOB_TOO_LARGE = 1000; - DEADLINE_EXCEEDED = 1001; - NOT_A_VALID_APP = 1002; - UNKNOWN_ERROR = 1003; - NOT_ALLOWED = 1005; - NOT_IMPLEMENTED = 1006; - } -} - -message SignForAppRequest { - optional bytes bytes_to_sign = 1; -} - -message SignForAppResponse { - optional string key_name = 1; - optional bytes signature_bytes = 2; -} - -message GetPublicCertificateForAppRequest { -} - -message PublicCertificate { - optional string key_name = 1; - optional string x509_certificate_pem = 2; -} - -message GetPublicCertificateForAppResponse { - repeated PublicCertificate public_certificate_list = 1; - optional int64 max_client_cache_time_in_second = 2; -} - -message GetServiceAccountNameRequest { -} - -message GetServiceAccountNameResponse { - optional string service_account_name = 1; -} - -message GetAccessTokenRequest { - repeated string scope = 1; - optional int64 service_account_id = 2; - optional string service_account_name = 3; -} - -message GetAccessTokenResponse { - optional string access_token = 1; - optional int64 expiration_time = 2; -} - -message GetDefaultGcsBucketNameRequest { -} - -message GetDefaultGcsBucketNameResponse { - optional string default_gcs_bucket_name = 1; -} diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go deleted file mode 100644 index db4777e6..00000000 --- a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go +++ /dev/null @@ -1,308 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/base/api_base.proto - -package base - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type StringProto struct { - Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StringProto) Reset() { *m = StringProto{} } -func (m *StringProto) String() string { return proto.CompactTextString(m) } -func (*StringProto) ProtoMessage() {} -func (*StringProto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{0} -} -func (m *StringProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StringProto.Unmarshal(m, b) -} -func (m *StringProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StringProto.Marshal(b, m, deterministic) -} -func (dst *StringProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_StringProto.Merge(dst, src) -} -func (m *StringProto) XXX_Size() int { - return xxx_messageInfo_StringProto.Size(m) -} -func (m *StringProto) XXX_DiscardUnknown() { - xxx_messageInfo_StringProto.DiscardUnknown(m) -} - -var xxx_messageInfo_StringProto proto.InternalMessageInfo - -func (m *StringProto) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -type Integer32Proto struct { - Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Integer32Proto) Reset() { *m = Integer32Proto{} } -func (m *Integer32Proto) String() string { return proto.CompactTextString(m) } -func (*Integer32Proto) ProtoMessage() {} -func (*Integer32Proto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{1} -} -func (m *Integer32Proto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Integer32Proto.Unmarshal(m, b) -} -func (m *Integer32Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Integer32Proto.Marshal(b, m, deterministic) -} -func (dst *Integer32Proto) XXX_Merge(src proto.Message) { - xxx_messageInfo_Integer32Proto.Merge(dst, src) -} -func (m *Integer32Proto) XXX_Size() int { - return xxx_messageInfo_Integer32Proto.Size(m) -} -func (m *Integer32Proto) XXX_DiscardUnknown() { - xxx_messageInfo_Integer32Proto.DiscardUnknown(m) -} - -var xxx_messageInfo_Integer32Proto proto.InternalMessageInfo - -func (m *Integer32Proto) GetValue() int32 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Integer64Proto struct { - Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Integer64Proto) Reset() { *m = Integer64Proto{} } -func (m *Integer64Proto) String() string { return proto.CompactTextString(m) } -func (*Integer64Proto) ProtoMessage() {} -func (*Integer64Proto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{2} -} -func (m *Integer64Proto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Integer64Proto.Unmarshal(m, b) -} -func (m *Integer64Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Integer64Proto.Marshal(b, m, deterministic) -} -func (dst *Integer64Proto) XXX_Merge(src proto.Message) { - xxx_messageInfo_Integer64Proto.Merge(dst, src) -} -func (m *Integer64Proto) XXX_Size() int 
{ - return xxx_messageInfo_Integer64Proto.Size(m) -} -func (m *Integer64Proto) XXX_DiscardUnknown() { - xxx_messageInfo_Integer64Proto.DiscardUnknown(m) -} - -var xxx_messageInfo_Integer64Proto proto.InternalMessageInfo - -func (m *Integer64Proto) GetValue() int64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type BoolProto struct { - Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BoolProto) Reset() { *m = BoolProto{} } -func (m *BoolProto) String() string { return proto.CompactTextString(m) } -func (*BoolProto) ProtoMessage() {} -func (*BoolProto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{3} -} -func (m *BoolProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BoolProto.Unmarshal(m, b) -} -func (m *BoolProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BoolProto.Marshal(b, m, deterministic) -} -func (dst *BoolProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_BoolProto.Merge(dst, src) -} -func (m *BoolProto) XXX_Size() int { - return xxx_messageInfo_BoolProto.Size(m) -} -func (m *BoolProto) XXX_DiscardUnknown() { - xxx_messageInfo_BoolProto.DiscardUnknown(m) -} - -var xxx_messageInfo_BoolProto proto.InternalMessageInfo - -func (m *BoolProto) GetValue() bool { - if m != nil && m.Value != nil { - return *m.Value - } - return false -} - -type DoubleProto struct { - Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DoubleProto) Reset() { *m = DoubleProto{} } -func (m *DoubleProto) String() string { return proto.CompactTextString(m) } -func (*DoubleProto) ProtoMessage() {} -func (*DoubleProto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{4} -} -func (m *DoubleProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DoubleProto.Unmarshal(m, b) -} -func (m *DoubleProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DoubleProto.Marshal(b, m, deterministic) -} -func (dst *DoubleProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_DoubleProto.Merge(dst, src) -} -func (m *DoubleProto) XXX_Size() int { - return xxx_messageInfo_DoubleProto.Size(m) -} -func (m *DoubleProto) XXX_DiscardUnknown() { - xxx_messageInfo_DoubleProto.DiscardUnknown(m) -} - -var xxx_messageInfo_DoubleProto proto.InternalMessageInfo - -func (m *DoubleProto) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type BytesProto struct { - Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BytesProto) Reset() { *m = BytesProto{} } -func (m *BytesProto) String() string { return proto.CompactTextString(m) } -func (*BytesProto) ProtoMessage() {} -func (*BytesProto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{5} -} -func (m *BytesProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BytesProto.Unmarshal(m, b) -} -func (m *BytesProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BytesProto.Marshal(b, m, deterministic) -} -func (dst 
*BytesProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_BytesProto.Merge(dst, src) -} -func (m *BytesProto) XXX_Size() int { - return xxx_messageInfo_BytesProto.Size(m) -} -func (m *BytesProto) XXX_DiscardUnknown() { - xxx_messageInfo_BytesProto.DiscardUnknown(m) -} - -var xxx_messageInfo_BytesProto proto.InternalMessageInfo - -func (m *BytesProto) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -type VoidProto struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VoidProto) Reset() { *m = VoidProto{} } -func (m *VoidProto) String() string { return proto.CompactTextString(m) } -func (*VoidProto) ProtoMessage() {} -func (*VoidProto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{6} -} -func (m *VoidProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VoidProto.Unmarshal(m, b) -} -func (m *VoidProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VoidProto.Marshal(b, m, deterministic) -} -func (dst *VoidProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_VoidProto.Merge(dst, src) -} -func (m *VoidProto) XXX_Size() int { - return xxx_messageInfo_VoidProto.Size(m) -} -func (m *VoidProto) XXX_DiscardUnknown() { - xxx_messageInfo_VoidProto.DiscardUnknown(m) -} - -var xxx_messageInfo_VoidProto proto.InternalMessageInfo - -func init() { - proto.RegisterType((*StringProto)(nil), "appengine.base.StringProto") - proto.RegisterType((*Integer32Proto)(nil), "appengine.base.Integer32Proto") - proto.RegisterType((*Integer64Proto)(nil), "appengine.base.Integer64Proto") - proto.RegisterType((*BoolProto)(nil), "appengine.base.BoolProto") - proto.RegisterType((*DoubleProto)(nil), "appengine.base.DoubleProto") - proto.RegisterType((*BytesProto)(nil), "appengine.base.BytesProto") - proto.RegisterType((*VoidProto)(nil), "appengine.base.VoidProto") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/base/api_base.proto", fileDescriptor_api_base_9d49f8792e0c1140) -} - -var fileDescriptor_api_base_9d49f8792e0c1140 = []byte{ - // 199 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0xcf, 0x3f, 0x4b, 0xc6, 0x30, - 0x10, 0x06, 0x70, 0x5a, 0xad, 0xb4, 0x57, 0xe9, 0x20, 0x0e, 0x1d, 0xb5, 0x05, 0x71, 0x4a, 0x40, - 0x45, 0x9c, 0x83, 0x8b, 0x9b, 0x28, 0x38, 0xb8, 0x48, 0x8a, 0xc7, 0x11, 0x08, 0xb9, 0x90, 0xa6, - 0x82, 0xdf, 0x5e, 0xda, 0xd2, 0xfa, 0xc2, 0x9b, 0xed, 0xfe, 0xfc, 0xe0, 0xe1, 0x81, 0x27, 0x62, - 0x26, 0x8b, 0x82, 0xd8, 0x6a, 0x47, 0x82, 0x03, 0x49, 0xed, 0x3d, 0x3a, 0x32, 0x0e, 0xa5, 0x71, - 0x11, 0x83, 0xd3, 0x56, 0x0e, 0x7a, 0x44, 0xa9, 0xbd, 0xf9, 0x9a, 0x07, 0xe1, 0x03, 0x47, 0xbe, - 0x68, 0x76, 0x27, 0xe6, 0x6b, 0xd7, 0x43, 0xfd, 0x1e, 0x83, 0x71, 0xf4, 0xba, 0xbc, 0x2f, 0xa1, - 0xf8, 0xd1, 0x76, 0xc2, 0x36, 0xbb, 0xca, 0x6f, 0xab, 0xb7, 0x75, 0xe9, 0x6e, 0xa0, 0x79, 0x71, - 0x11, 0x09, 0xc3, 0xfd, 0x5d, 0xc2, 0x15, 0xc7, 0xee, 0xf1, 0x21, 0xe1, 0x4e, 0x36, 0x77, 0x0d, - 0x95, 0x62, 0xb6, 0x09, 0x52, 0x6e, 0xa4, 0x87, 0xfa, 0x99, 0xa7, 0xc1, 0x62, 0x02, 0x65, 0xff, - 0x79, 0xa0, 0x7e, 0x23, 0x8e, 0xab, 0x69, 0x0f, 0xcd, 0xb9, 0xca, 0xcb, 0xdd, 0xd5, 0x50, 0x7d, - 0xb0, 0xf9, 0x5e, 0x98, 0x3a, 0xfb, 0x3c, 0x9d, 0x9b, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xba, - 0x37, 0x25, 0xea, 0x44, 0x01, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto 
b/vendor/google.golang.org/appengine/internal/base/api_base.proto deleted file mode 100644 index 56cd7a3c..00000000 --- a/vendor/google.golang.org/appengine/internal/base/api_base.proto +++ /dev/null @@ -1,33 +0,0 @@ -// Built-in base types for API calls. Primarily useful as return types. - -syntax = "proto2"; -option go_package = "base"; - -package appengine.base; - -message StringProto { - required string value = 1; -} - -message Integer32Proto { - required int32 value = 1; -} - -message Integer64Proto { - required int64 value = 1; -} - -message BoolProto { - required bool value = 1; -} - -message DoubleProto { - required double value = 1; -} - -message BytesProto { - required bytes value = 1 [ctype=CORD]; -} - -message VoidProto { -} diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go deleted file mode 100644 index 2fb74828..00000000 --- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go +++ /dev/null @@ -1,4367 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto - -package datastore - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Property_Meaning int32 - -const ( - Property_NO_MEANING Property_Meaning = 0 - Property_BLOB Property_Meaning = 14 - Property_TEXT Property_Meaning = 15 - Property_BYTESTRING Property_Meaning = 16 - Property_ATOM_CATEGORY Property_Meaning = 1 - Property_ATOM_LINK Property_Meaning = 2 - Property_ATOM_TITLE Property_Meaning = 3 - Property_ATOM_CONTENT Property_Meaning = 4 - Property_ATOM_SUMMARY Property_Meaning = 5 - Property_ATOM_AUTHOR Property_Meaning = 6 - Property_GD_WHEN Property_Meaning = 7 - Property_GD_EMAIL Property_Meaning = 8 - Property_GEORSS_POINT Property_Meaning = 9 - Property_GD_IM Property_Meaning = 10 - Property_GD_PHONENUMBER Property_Meaning = 11 - Property_GD_POSTALADDRESS Property_Meaning = 12 - Property_GD_RATING Property_Meaning = 13 - Property_BLOBKEY Property_Meaning = 17 - Property_ENTITY_PROTO Property_Meaning = 19 - Property_INDEX_VALUE Property_Meaning = 18 -) - -var Property_Meaning_name = map[int32]string{ - 0: "NO_MEANING", - 14: "BLOB", - 15: "TEXT", - 16: "BYTESTRING", - 1: "ATOM_CATEGORY", - 2: "ATOM_LINK", - 3: "ATOM_TITLE", - 4: "ATOM_CONTENT", - 5: "ATOM_SUMMARY", - 6: "ATOM_AUTHOR", - 7: "GD_WHEN", - 8: "GD_EMAIL", - 9: "GEORSS_POINT", - 10: "GD_IM", - 11: "GD_PHONENUMBER", - 12: "GD_POSTALADDRESS", - 13: "GD_RATING", - 17: "BLOBKEY", - 19: "ENTITY_PROTO", - 18: "INDEX_VALUE", -} -var Property_Meaning_value = map[string]int32{ - "NO_MEANING": 0, - "BLOB": 14, - "TEXT": 15, - "BYTESTRING": 16, - "ATOM_CATEGORY": 1, - "ATOM_LINK": 2, - "ATOM_TITLE": 3, - "ATOM_CONTENT": 4, - "ATOM_SUMMARY": 5, - "ATOM_AUTHOR": 6, - "GD_WHEN": 7, - "GD_EMAIL": 8, - "GEORSS_POINT": 9, - "GD_IM": 10, - "GD_PHONENUMBER": 11, - "GD_POSTALADDRESS": 12, - "GD_RATING": 13, - "BLOBKEY": 17, - "ENTITY_PROTO": 19, - "INDEX_VALUE": 18, 
-} - -func (x Property_Meaning) Enum() *Property_Meaning { - p := new(Property_Meaning) - *p = x - return p -} -func (x Property_Meaning) String() string { - return proto.EnumName(Property_Meaning_name, int32(x)) -} -func (x *Property_Meaning) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning") - if err != nil { - return err - } - *x = Property_Meaning(value) - return nil -} -func (Property_Meaning) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 0} -} - -type Property_FtsTokenizationOption int32 - -const ( - Property_HTML Property_FtsTokenizationOption = 1 - Property_ATOM Property_FtsTokenizationOption = 2 -) - -var Property_FtsTokenizationOption_name = map[int32]string{ - 1: "HTML", - 2: "ATOM", -} -var Property_FtsTokenizationOption_value = map[string]int32{ - "HTML": 1, - "ATOM": 2, -} - -func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption { - p := new(Property_FtsTokenizationOption) - *p = x - return p -} -func (x Property_FtsTokenizationOption) String() string { - return proto.EnumName(Property_FtsTokenizationOption_name, int32(x)) -} -func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption") - if err != nil { - return err - } - *x = Property_FtsTokenizationOption(value) - return nil -} -func (Property_FtsTokenizationOption) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 1} -} - -type EntityProto_Kind int32 - -const ( - EntityProto_GD_CONTACT EntityProto_Kind = 1 - EntityProto_GD_EVENT EntityProto_Kind = 2 - EntityProto_GD_MESSAGE EntityProto_Kind = 3 -) - -var EntityProto_Kind_name = map[int32]string{ - 1: "GD_CONTACT", - 2: "GD_EVENT", - 3: "GD_MESSAGE", -} -var EntityProto_Kind_value = map[string]int32{ - "GD_CONTACT": 1, - "GD_EVENT": 2, - "GD_MESSAGE": 3, -} - -func (x EntityProto_Kind) Enum() *EntityProto_Kind { - p := new(EntityProto_Kind) - *p = x - return p -} -func (x EntityProto_Kind) String() string { - return proto.EnumName(EntityProto_Kind_name, int32(x)) -} -func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind") - if err != nil { - return err - } - *x = EntityProto_Kind(value) - return nil -} -func (EntityProto_Kind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6, 0} -} - -type Index_Property_Direction int32 - -const ( - Index_Property_ASCENDING Index_Property_Direction = 1 - Index_Property_DESCENDING Index_Property_Direction = 2 -) - -var Index_Property_Direction_name = map[int32]string{ - 1: "ASCENDING", - 2: "DESCENDING", -} -var Index_Property_Direction_value = map[string]int32{ - "ASCENDING": 1, - "DESCENDING": 2, -} - -func (x Index_Property_Direction) Enum() *Index_Property_Direction { - p := new(Index_Property_Direction) - *p = x - return p -} -func (x Index_Property_Direction) String() string { - return proto.EnumName(Index_Property_Direction_name, int32(x)) -} -func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction") - if err != nil { - return err - } - *x = Index_Property_Direction(value) - return nil -} -func (Index_Property_Direction) EnumDescriptor() ([]byte, 
[]int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0, 0} -} - -type CompositeIndex_State int32 - -const ( - CompositeIndex_WRITE_ONLY CompositeIndex_State = 1 - CompositeIndex_READ_WRITE CompositeIndex_State = 2 - CompositeIndex_DELETED CompositeIndex_State = 3 - CompositeIndex_ERROR CompositeIndex_State = 4 -) - -var CompositeIndex_State_name = map[int32]string{ - 1: "WRITE_ONLY", - 2: "READ_WRITE", - 3: "DELETED", - 4: "ERROR", -} -var CompositeIndex_State_value = map[string]int32{ - "WRITE_ONLY": 1, - "READ_WRITE": 2, - "DELETED": 3, - "ERROR": 4, -} - -func (x CompositeIndex_State) Enum() *CompositeIndex_State { - p := new(CompositeIndex_State) - *p = x - return p -} -func (x CompositeIndex_State) String() string { - return proto.EnumName(CompositeIndex_State_name, int32(x)) -} -func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State") - if err != nil { - return err - } - *x = CompositeIndex_State(value) - return nil -} -func (CompositeIndex_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9, 0} -} - -type Snapshot_Status int32 - -const ( - Snapshot_INACTIVE Snapshot_Status = 0 - Snapshot_ACTIVE Snapshot_Status = 1 -) - -var Snapshot_Status_name = map[int32]string{ - 0: "INACTIVE", - 1: "ACTIVE", -} -var Snapshot_Status_value = map[string]int32{ - "INACTIVE": 0, - "ACTIVE": 1, -} - -func (x Snapshot_Status) Enum() *Snapshot_Status { - p := new(Snapshot_Status) - *p = x - return p -} -func (x Snapshot_Status) String() string { - return proto.EnumName(Snapshot_Status_name, int32(x)) -} -func (x *Snapshot_Status) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status") - if err != nil { - return err - } - *x = Snapshot_Status(value) - return nil -} -func (Snapshot_Status) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12, 0} -} - -type Query_Hint int32 - -const ( - Query_ORDER_FIRST Query_Hint = 1 - Query_ANCESTOR_FIRST Query_Hint = 2 - Query_FILTER_FIRST Query_Hint = 3 -) - -var Query_Hint_name = map[int32]string{ - 1: "ORDER_FIRST", - 2: "ANCESTOR_FIRST", - 3: "FILTER_FIRST", -} -var Query_Hint_value = map[string]int32{ - "ORDER_FIRST": 1, - "ANCESTOR_FIRST": 2, - "FILTER_FIRST": 3, -} - -func (x Query_Hint) Enum() *Query_Hint { - p := new(Query_Hint) - *p = x - return p -} -func (x Query_Hint) String() string { - return proto.EnumName(Query_Hint_name, int32(x)) -} -func (x *Query_Hint) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint") - if err != nil { - return err - } - *x = Query_Hint(value) - return nil -} -func (Query_Hint) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0} -} - -type Query_Filter_Operator int32 - -const ( - Query_Filter_LESS_THAN Query_Filter_Operator = 1 - Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2 - Query_Filter_GREATER_THAN Query_Filter_Operator = 3 - Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4 - Query_Filter_EQUAL Query_Filter_Operator = 5 - Query_Filter_IN Query_Filter_Operator = 6 - Query_Filter_EXISTS Query_Filter_Operator = 7 -) - -var Query_Filter_Operator_name = map[int32]string{ - 1: "LESS_THAN", - 2: "LESS_THAN_OR_EQUAL", - 3: "GREATER_THAN", - 4: "GREATER_THAN_OR_EQUAL", - 5: "EQUAL", - 6: "IN", - 7: "EXISTS", -} 
-var Query_Filter_Operator_value = map[string]int32{ - "LESS_THAN": 1, - "LESS_THAN_OR_EQUAL": 2, - "GREATER_THAN": 3, - "GREATER_THAN_OR_EQUAL": 4, - "EQUAL": 5, - "IN": 6, - "EXISTS": 7, -} - -func (x Query_Filter_Operator) Enum() *Query_Filter_Operator { - p := new(Query_Filter_Operator) - *p = x - return p -} -func (x Query_Filter_Operator) String() string { - return proto.EnumName(Query_Filter_Operator_name, int32(x)) -} -func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator") - if err != nil { - return err - } - *x = Query_Filter_Operator(value) - return nil -} -func (Query_Filter_Operator) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0, 0} -} - -type Query_Order_Direction int32 - -const ( - Query_Order_ASCENDING Query_Order_Direction = 1 - Query_Order_DESCENDING Query_Order_Direction = 2 -) - -var Query_Order_Direction_name = map[int32]string{ - 1: "ASCENDING", - 2: "DESCENDING", -} -var Query_Order_Direction_value = map[string]int32{ - "ASCENDING": 1, - "DESCENDING": 2, -} - -func (x Query_Order_Direction) Enum() *Query_Order_Direction { - p := new(Query_Order_Direction) - *p = x - return p -} -func (x Query_Order_Direction) String() string { - return proto.EnumName(Query_Order_Direction_name, int32(x)) -} -func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction") - if err != nil { - return err - } - *x = Query_Order_Direction(value) - return nil -} -func (Query_Order_Direction) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1, 0} -} - -type Error_ErrorCode int32 - -const ( - Error_BAD_REQUEST Error_ErrorCode = 1 - Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2 - Error_INTERNAL_ERROR Error_ErrorCode = 3 - Error_NEED_INDEX Error_ErrorCode = 4 - Error_TIMEOUT Error_ErrorCode = 5 - Error_PERMISSION_DENIED Error_ErrorCode = 6 - Error_BIGTABLE_ERROR Error_ErrorCode = 7 - Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8 - Error_CAPABILITY_DISABLED Error_ErrorCode = 9 - Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10 - Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11 -) - -var Error_ErrorCode_name = map[int32]string{ - 1: "BAD_REQUEST", - 2: "CONCURRENT_TRANSACTION", - 3: "INTERNAL_ERROR", - 4: "NEED_INDEX", - 5: "TIMEOUT", - 6: "PERMISSION_DENIED", - 7: "BIGTABLE_ERROR", - 8: "COMMITTED_BUT_STILL_APPLYING", - 9: "CAPABILITY_DISABLED", - 10: "TRY_ALTERNATE_BACKEND", - 11: "SAFE_TIME_TOO_OLD", -} -var Error_ErrorCode_value = map[string]int32{ - "BAD_REQUEST": 1, - "CONCURRENT_TRANSACTION": 2, - "INTERNAL_ERROR": 3, - "NEED_INDEX": 4, - "TIMEOUT": 5, - "PERMISSION_DENIED": 6, - "BIGTABLE_ERROR": 7, - "COMMITTED_BUT_STILL_APPLYING": 8, - "CAPABILITY_DISABLED": 9, - "TRY_ALTERNATE_BACKEND": 10, - "SAFE_TIME_TOO_OLD": 11, -} - -func (x Error_ErrorCode) Enum() *Error_ErrorCode { - p := new(Error_ErrorCode) - *p = x - return p -} -func (x Error_ErrorCode) String() string { - return proto.EnumName(Error_ErrorCode_name, int32(x)) -} -func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode") - if err != nil { - return err - } - *x = Error_ErrorCode(value) - return nil -} -func (Error_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, 
[]int{19, 0} -} - -type PutRequest_AutoIdPolicy int32 - -const ( - PutRequest_CURRENT PutRequest_AutoIdPolicy = 0 - PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1 -) - -var PutRequest_AutoIdPolicy_name = map[int32]string{ - 0: "CURRENT", - 1: "SEQUENTIAL", -} -var PutRequest_AutoIdPolicy_value = map[string]int32{ - "CURRENT": 0, - "SEQUENTIAL": 1, -} - -func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy { - p := new(PutRequest_AutoIdPolicy) - *p = x - return p -} -func (x PutRequest_AutoIdPolicy) String() string { - return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x)) -} -func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy") - if err != nil { - return err - } - *x = PutRequest_AutoIdPolicy(value) - return nil -} -func (PutRequest_AutoIdPolicy) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23, 0} -} - -type BeginTransactionRequest_TransactionMode int32 - -const ( - BeginTransactionRequest_UNKNOWN BeginTransactionRequest_TransactionMode = 0 - BeginTransactionRequest_READ_ONLY BeginTransactionRequest_TransactionMode = 1 - BeginTransactionRequest_READ_WRITE BeginTransactionRequest_TransactionMode = 2 -) - -var BeginTransactionRequest_TransactionMode_name = map[int32]string{ - 0: "UNKNOWN", - 1: "READ_ONLY", - 2: "READ_WRITE", -} -var BeginTransactionRequest_TransactionMode_value = map[string]int32{ - "UNKNOWN": 0, - "READ_ONLY": 1, - "READ_WRITE": 2, -} - -func (x BeginTransactionRequest_TransactionMode) Enum() *BeginTransactionRequest_TransactionMode { - p := new(BeginTransactionRequest_TransactionMode) - *p = x - return p -} -func (x BeginTransactionRequest_TransactionMode) String() string { - return proto.EnumName(BeginTransactionRequest_TransactionMode_name, int32(x)) -} -func (x *BeginTransactionRequest_TransactionMode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_TransactionMode_value, data, "BeginTransactionRequest_TransactionMode") - if err != nil { - return err - } - *x = BeginTransactionRequest_TransactionMode(value) - return nil -} -func (BeginTransactionRequest_TransactionMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36, 0} -} - -type Action struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Action) Reset() { *m = Action{} } -func (m *Action) String() string { return proto.CompactTextString(m) } -func (*Action) ProtoMessage() {} -func (*Action) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{0} -} -func (m *Action) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Action.Unmarshal(m, b) -} -func (m *Action) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Action.Marshal(b, m, deterministic) -} -func (dst *Action) XXX_Merge(src proto.Message) { - xxx_messageInfo_Action.Merge(dst, src) -} -func (m *Action) XXX_Size() int { - return xxx_messageInfo_Action.Size(m) -} -func (m *Action) XXX_DiscardUnknown() { - xxx_messageInfo_Action.DiscardUnknown(m) -} - -var xxx_messageInfo_Action proto.InternalMessageInfo - -type PropertyValue struct { - Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"` - BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" 
json:"booleanValue,omitempty"` - StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"` - Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue,json=pointvalue" json:"pointvalue,omitempty"` - Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue,json=uservalue" json:"uservalue,omitempty"` - Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue,json=referencevalue" json:"referencevalue,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PropertyValue) Reset() { *m = PropertyValue{} } -func (m *PropertyValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue) ProtoMessage() {} -func (*PropertyValue) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1} -} -func (m *PropertyValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PropertyValue.Unmarshal(m, b) -} -func (m *PropertyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PropertyValue.Marshal(b, m, deterministic) -} -func (dst *PropertyValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_PropertyValue.Merge(dst, src) -} -func (m *PropertyValue) XXX_Size() int { - return xxx_messageInfo_PropertyValue.Size(m) -} -func (m *PropertyValue) XXX_DiscardUnknown() { - xxx_messageInfo_PropertyValue.DiscardUnknown(m) -} - -var xxx_messageInfo_PropertyValue proto.InternalMessageInfo - -func (m *PropertyValue) GetInt64Value() int64 { - if m != nil && m.Int64Value != nil { - return *m.Int64Value - } - return 0 -} - -func (m *PropertyValue) GetBooleanValue() bool { - if m != nil && m.BooleanValue != nil { - return *m.BooleanValue - } - return false -} - -func (m *PropertyValue) GetStringValue() string { - if m != nil && m.StringValue != nil { - return *m.StringValue - } - return "" -} - -func (m *PropertyValue) GetDoubleValue() float64 { - if m != nil && m.DoubleValue != nil { - return *m.DoubleValue - } - return 0 -} - -func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue { - if m != nil { - return m.Pointvalue - } - return nil -} - -func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue { - if m != nil { - return m.Uservalue - } - return nil -} - -func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue { - if m != nil { - return m.Referencevalue - } - return nil -} - -type PropertyValue_PointValue struct { - X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"` - Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} } -func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_PointValue) ProtoMessage() {} -func (*PropertyValue_PointValue) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 0} -} -func (m *PropertyValue_PointValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PropertyValue_PointValue.Unmarshal(m, b) -} -func (m *PropertyValue_PointValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PropertyValue_PointValue.Marshal(b, m, 
deterministic) -} -func (dst *PropertyValue_PointValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_PropertyValue_PointValue.Merge(dst, src) -} -func (m *PropertyValue_PointValue) XXX_Size() int { - return xxx_messageInfo_PropertyValue_PointValue.Size(m) -} -func (m *PropertyValue_PointValue) XXX_DiscardUnknown() { - xxx_messageInfo_PropertyValue_PointValue.DiscardUnknown(m) -} - -var xxx_messageInfo_PropertyValue_PointValue proto.InternalMessageInfo - -func (m *PropertyValue_PointValue) GetX() float64 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -func (m *PropertyValue_PointValue) GetY() float64 { - if m != nil && m.Y != nil { - return *m.Y - } - return 0 -} - -type PropertyValue_UserValue struct { - Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"` - AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"` - Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"` - FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"` - FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} } -func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_UserValue) ProtoMessage() {} -func (*PropertyValue_UserValue) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 1} -} -func (m *PropertyValue_UserValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PropertyValue_UserValue.Unmarshal(m, b) -} -func (m *PropertyValue_UserValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PropertyValue_UserValue.Marshal(b, m, deterministic) -} -func (dst *PropertyValue_UserValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_PropertyValue_UserValue.Merge(dst, src) -} -func (m *PropertyValue_UserValue) XXX_Size() int { - return xxx_messageInfo_PropertyValue_UserValue.Size(m) -} -func (m *PropertyValue_UserValue) XXX_DiscardUnknown() { - xxx_messageInfo_PropertyValue_UserValue.DiscardUnknown(m) -} - -var xxx_messageInfo_PropertyValue_UserValue proto.InternalMessageInfo - -func (m *PropertyValue_UserValue) GetEmail() string { - if m != nil && m.Email != nil { - return *m.Email - } - return "" -} - -func (m *PropertyValue_UserValue) GetAuthDomain() string { - if m != nil && m.AuthDomain != nil { - return *m.AuthDomain - } - return "" -} - -func (m *PropertyValue_UserValue) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *PropertyValue_UserValue) GetFederatedIdentity() string { - if m != nil && m.FederatedIdentity != nil { - return *m.FederatedIdentity - } - return "" -} - -func (m *PropertyValue_UserValue) GetFederatedProvider() string { - if m != nil && m.FederatedProvider != nil { - return *m.FederatedProvider - } - return "" -} - -type PropertyValue_ReferenceValue struct { - App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` - NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"` - Pathelement []*PropertyValue_ReferenceValue_PathElement 
`protobuf:"group,14,rep,name=PathElement,json=pathelement" json:"pathelement,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} } -func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_ReferenceValue) ProtoMessage() {} -func (*PropertyValue_ReferenceValue) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2} -} -func (m *PropertyValue_ReferenceValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PropertyValue_ReferenceValue.Unmarshal(m, b) -} -func (m *PropertyValue_ReferenceValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PropertyValue_ReferenceValue.Marshal(b, m, deterministic) -} -func (dst *PropertyValue_ReferenceValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_PropertyValue_ReferenceValue.Merge(dst, src) -} -func (m *PropertyValue_ReferenceValue) XXX_Size() int { - return xxx_messageInfo_PropertyValue_ReferenceValue.Size(m) -} -func (m *PropertyValue_ReferenceValue) XXX_DiscardUnknown() { - xxx_messageInfo_PropertyValue_ReferenceValue.DiscardUnknown(m) -} - -var xxx_messageInfo_PropertyValue_ReferenceValue proto.InternalMessageInfo - -func (m *PropertyValue_ReferenceValue) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *PropertyValue_ReferenceValue) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement { - if m != nil { - return m.Pathelement - } - return nil -} - -type PropertyValue_ReferenceValue_PathElement struct { - Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"` - Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"` - Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PropertyValue_ReferenceValue_PathElement) Reset() { - *m = PropertyValue_ReferenceValue_PathElement{} -} -func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {} -func (*PropertyValue_ReferenceValue_PathElement) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2, 0} -} -func (m *PropertyValue_ReferenceValue_PathElement) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Unmarshal(m, b) -} -func (m *PropertyValue_ReferenceValue_PathElement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Marshal(b, m, deterministic) -} -func (dst *PropertyValue_ReferenceValue_PathElement) XXX_Merge(src proto.Message) { - xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Merge(dst, src) -} -func (m *PropertyValue_ReferenceValue_PathElement) XXX_Size() int { - return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Size(m) -} -func (m *PropertyValue_ReferenceValue_PathElement) XXX_DiscardUnknown() { - xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.DiscardUnknown(m) -} - -var 
xxx_messageInfo_PropertyValue_ReferenceValue_PathElement proto.InternalMessageInfo - -func (m *PropertyValue_ReferenceValue_PathElement) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *PropertyValue_ReferenceValue_PathElement) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -type Property struct { - Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"` - MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri,json=meaningUri" json:"meaning_uri,omitempty"` - Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` - Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"` - Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"` - Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"` - FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,json=ftsTokenizationOption,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"` - Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Property) Reset() { *m = Property{} } -func (m *Property) String() string { return proto.CompactTextString(m) } -func (*Property) ProtoMessage() {} -func (*Property) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2} -} -func (m *Property) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Property.Unmarshal(m, b) -} -func (m *Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Property.Marshal(b, m, deterministic) -} -func (dst *Property) XXX_Merge(src proto.Message) { - xxx_messageInfo_Property.Merge(dst, src) -} -func (m *Property) XXX_Size() int { - return xxx_messageInfo_Property.Size(m) -} -func (m *Property) XXX_DiscardUnknown() { - xxx_messageInfo_Property.DiscardUnknown(m) -} - -var xxx_messageInfo_Property proto.InternalMessageInfo - -const Default_Property_Meaning Property_Meaning = Property_NO_MEANING -const Default_Property_Searchable bool = false -const Default_Property_Locale string = "en" - -func (m *Property) GetMeaning() Property_Meaning { - if m != nil && m.Meaning != nil { - return *m.Meaning - } - return Default_Property_Meaning -} - -func (m *Property) GetMeaningUri() string { - if m != nil && m.MeaningUri != nil { - return *m.MeaningUri - } - return "" -} - -func (m *Property) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *Property) GetValue() *PropertyValue { - if m != nil { - return m.Value - } - return nil -} - -func (m *Property) GetMultiple() bool { - if m != nil && m.Multiple != nil { - return *m.Multiple - } - return false -} - -func (m *Property) GetSearchable() bool { - if m != nil && m.Searchable != nil { - return *m.Searchable - } - return Default_Property_Searchable -} - -func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption { - if m != nil && m.FtsTokenizationOption != nil { - return *m.FtsTokenizationOption - } - return Property_HTML -} - -func (m *Property) GetLocale() string { - 
if m != nil && m.Locale != nil { - return *m.Locale - } - return Default_Property_Locale -} - -type Path struct { - Element []*Path_Element `protobuf:"group,1,rep,name=Element,json=element" json:"element,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Path) Reset() { *m = Path{} } -func (m *Path) String() string { return proto.CompactTextString(m) } -func (*Path) ProtoMessage() {} -func (*Path) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3} -} -func (m *Path) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Path.Unmarshal(m, b) -} -func (m *Path) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Path.Marshal(b, m, deterministic) -} -func (dst *Path) XXX_Merge(src proto.Message) { - xxx_messageInfo_Path.Merge(dst, src) -} -func (m *Path) XXX_Size() int { - return xxx_messageInfo_Path.Size(m) -} -func (m *Path) XXX_DiscardUnknown() { - xxx_messageInfo_Path.DiscardUnknown(m) -} - -var xxx_messageInfo_Path proto.InternalMessageInfo - -func (m *Path) GetElement() []*Path_Element { - if m != nil { - return m.Element - } - return nil -} - -type Path_Element struct { - Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"` - Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"` - Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Path_Element) Reset() { *m = Path_Element{} } -func (m *Path_Element) String() string { return proto.CompactTextString(m) } -func (*Path_Element) ProtoMessage() {} -func (*Path_Element) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3, 0} -} -func (m *Path_Element) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Path_Element.Unmarshal(m, b) -} -func (m *Path_Element) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Path_Element.Marshal(b, m, deterministic) -} -func (dst *Path_Element) XXX_Merge(src proto.Message) { - xxx_messageInfo_Path_Element.Merge(dst, src) -} -func (m *Path_Element) XXX_Size() int { - return xxx_messageInfo_Path_Element.Size(m) -} -func (m *Path_Element) XXX_DiscardUnknown() { - xxx_messageInfo_Path_Element.DiscardUnknown(m) -} - -var xxx_messageInfo_Path_Element proto.InternalMessageInfo - -func (m *Path_Element) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -func (m *Path_Element) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *Path_Element) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -type Reference struct { - App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` - NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"` - Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Reference) Reset() { *m = Reference{} } -func (m *Reference) String() string { return proto.CompactTextString(m) } -func (*Reference) ProtoMessage() {} -func (*Reference) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{4} -} -func (m *Reference) XXX_Unmarshal(b 
[]byte) error { - return xxx_messageInfo_Reference.Unmarshal(m, b) -} -func (m *Reference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Reference.Marshal(b, m, deterministic) -} -func (dst *Reference) XXX_Merge(src proto.Message) { - xxx_messageInfo_Reference.Merge(dst, src) -} -func (m *Reference) XXX_Size() int { - return xxx_messageInfo_Reference.Size(m) -} -func (m *Reference) XXX_DiscardUnknown() { - xxx_messageInfo_Reference.DiscardUnknown(m) -} - -var xxx_messageInfo_Reference proto.InternalMessageInfo - -func (m *Reference) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *Reference) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *Reference) GetPath() *Path { - if m != nil { - return m.Path - } - return nil -} - -type User struct { - Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"` - AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"` - Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"` - FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"` - FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *User) Reset() { *m = User{} } -func (m *User) String() string { return proto.CompactTextString(m) } -func (*User) ProtoMessage() {} -func (*User) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{5} -} -func (m *User) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_User.Unmarshal(m, b) -} -func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_User.Marshal(b, m, deterministic) -} -func (dst *User) XXX_Merge(src proto.Message) { - xxx_messageInfo_User.Merge(dst, src) -} -func (m *User) XXX_Size() int { - return xxx_messageInfo_User.Size(m) -} -func (m *User) XXX_DiscardUnknown() { - xxx_messageInfo_User.DiscardUnknown(m) -} - -var xxx_messageInfo_User proto.InternalMessageInfo - -func (m *User) GetEmail() string { - if m != nil && m.Email != nil { - return *m.Email - } - return "" -} - -func (m *User) GetAuthDomain() string { - if m != nil && m.AuthDomain != nil { - return *m.AuthDomain - } - return "" -} - -func (m *User) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *User) GetFederatedIdentity() string { - if m != nil && m.FederatedIdentity != nil { - return *m.FederatedIdentity - } - return "" -} - -func (m *User) GetFederatedProvider() string { - if m != nil && m.FederatedProvider != nil { - return *m.FederatedProvider - } - return "" -} - -type EntityProto struct { - Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"` - EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group,json=entityGroup" json:"entity_group,omitempty"` - Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"` - Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"` - KindUri *string `protobuf:"bytes,5,opt,name=kind_uri,json=kindUri" json:"kind_uri,omitempty"` - Property []*Property `protobuf:"bytes,14,rep,name=property" 
json:"property,omitempty"` - RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property,json=rawProperty" json:"raw_property,omitempty"` - Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EntityProto) Reset() { *m = EntityProto{} } -func (m *EntityProto) String() string { return proto.CompactTextString(m) } -func (*EntityProto) ProtoMessage() {} -func (*EntityProto) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6} -} -func (m *EntityProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EntityProto.Unmarshal(m, b) -} -func (m *EntityProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EntityProto.Marshal(b, m, deterministic) -} -func (dst *EntityProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_EntityProto.Merge(dst, src) -} -func (m *EntityProto) XXX_Size() int { - return xxx_messageInfo_EntityProto.Size(m) -} -func (m *EntityProto) XXX_DiscardUnknown() { - xxx_messageInfo_EntityProto.DiscardUnknown(m) -} - -var xxx_messageInfo_EntityProto proto.InternalMessageInfo - -func (m *EntityProto) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *EntityProto) GetEntityGroup() *Path { - if m != nil { - return m.EntityGroup - } - return nil -} - -func (m *EntityProto) GetOwner() *User { - if m != nil { - return m.Owner - } - return nil -} - -func (m *EntityProto) GetKind() EntityProto_Kind { - if m != nil && m.Kind != nil { - return *m.Kind - } - return EntityProto_GD_CONTACT -} - -func (m *EntityProto) GetKindUri() string { - if m != nil && m.KindUri != nil { - return *m.KindUri - } - return "" -} - -func (m *EntityProto) GetProperty() []*Property { - if m != nil { - return m.Property - } - return nil -} - -func (m *EntityProto) GetRawProperty() []*Property { - if m != nil { - return m.RawProperty - } - return nil -} - -func (m *EntityProto) GetRank() int32 { - if m != nil && m.Rank != nil { - return *m.Rank - } - return 0 -} - -type CompositeProperty struct { - IndexId *int64 `protobuf:"varint,1,req,name=index_id,json=indexId" json:"index_id,omitempty"` - Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompositeProperty) Reset() { *m = CompositeProperty{} } -func (m *CompositeProperty) String() string { return proto.CompactTextString(m) } -func (*CompositeProperty) ProtoMessage() {} -func (*CompositeProperty) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{7} -} -func (m *CompositeProperty) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CompositeProperty.Unmarshal(m, b) -} -func (m *CompositeProperty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CompositeProperty.Marshal(b, m, deterministic) -} -func (dst *CompositeProperty) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompositeProperty.Merge(dst, src) -} -func (m *CompositeProperty) XXX_Size() int { - return xxx_messageInfo_CompositeProperty.Size(m) -} -func (m *CompositeProperty) XXX_DiscardUnknown() { - xxx_messageInfo_CompositeProperty.DiscardUnknown(m) -} - -var xxx_messageInfo_CompositeProperty proto.InternalMessageInfo - -func (m *CompositeProperty) GetIndexId() int64 { - if m != nil && m.IndexId != nil { 
- return *m.IndexId - } - return 0 -} - -func (m *CompositeProperty) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -type Index struct { - EntityType *string `protobuf:"bytes,1,req,name=entity_type,json=entityType" json:"entity_type,omitempty"` - Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"` - Property []*Index_Property `protobuf:"group,2,rep,name=Property,json=property" json:"property,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Index) Reset() { *m = Index{} } -func (m *Index) String() string { return proto.CompactTextString(m) } -func (*Index) ProtoMessage() {} -func (*Index) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8} -} -func (m *Index) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Index.Unmarshal(m, b) -} -func (m *Index) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Index.Marshal(b, m, deterministic) -} -func (dst *Index) XXX_Merge(src proto.Message) { - xxx_messageInfo_Index.Merge(dst, src) -} -func (m *Index) XXX_Size() int { - return xxx_messageInfo_Index.Size(m) -} -func (m *Index) XXX_DiscardUnknown() { - xxx_messageInfo_Index.DiscardUnknown(m) -} - -var xxx_messageInfo_Index proto.InternalMessageInfo - -func (m *Index) GetEntityType() string { - if m != nil && m.EntityType != nil { - return *m.EntityType - } - return "" -} - -func (m *Index) GetAncestor() bool { - if m != nil && m.Ancestor != nil { - return *m.Ancestor - } - return false -} - -func (m *Index) GetProperty() []*Index_Property { - if m != nil { - return m.Property - } - return nil -} - -type Index_Property struct { - Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` - Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Index_Property) Reset() { *m = Index_Property{} } -func (m *Index_Property) String() string { return proto.CompactTextString(m) } -func (*Index_Property) ProtoMessage() {} -func (*Index_Property) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0} -} -func (m *Index_Property) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Index_Property.Unmarshal(m, b) -} -func (m *Index_Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Index_Property.Marshal(b, m, deterministic) -} -func (dst *Index_Property) XXX_Merge(src proto.Message) { - xxx_messageInfo_Index_Property.Merge(dst, src) -} -func (m *Index_Property) XXX_Size() int { - return xxx_messageInfo_Index_Property.Size(m) -} -func (m *Index_Property) XXX_DiscardUnknown() { - xxx_messageInfo_Index_Property.DiscardUnknown(m) -} - -var xxx_messageInfo_Index_Property proto.InternalMessageInfo - -const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING - -func (m *Index_Property) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *Index_Property) GetDirection() Index_Property_Direction { - if m != nil && m.Direction != nil { - return *m.Direction - } - return Default_Index_Property_Direction -} - -type CompositeIndex struct { - AppId *string 
`protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"` - Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"` - Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"` - State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"` - OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,json=onlyUseIfRequired,def=0" json:"only_use_if_required,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompositeIndex) Reset() { *m = CompositeIndex{} } -func (m *CompositeIndex) String() string { return proto.CompactTextString(m) } -func (*CompositeIndex) ProtoMessage() {} -func (*CompositeIndex) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9} -} -func (m *CompositeIndex) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CompositeIndex.Unmarshal(m, b) -} -func (m *CompositeIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CompositeIndex.Marshal(b, m, deterministic) -} -func (dst *CompositeIndex) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompositeIndex.Merge(dst, src) -} -func (m *CompositeIndex) XXX_Size() int { - return xxx_messageInfo_CompositeIndex.Size(m) -} -func (m *CompositeIndex) XXX_DiscardUnknown() { - xxx_messageInfo_CompositeIndex.DiscardUnknown(m) -} - -var xxx_messageInfo_CompositeIndex proto.InternalMessageInfo - -const Default_CompositeIndex_OnlyUseIfRequired bool = false - -func (m *CompositeIndex) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *CompositeIndex) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *CompositeIndex) GetDefinition() *Index { - if m != nil { - return m.Definition - } - return nil -} - -func (m *CompositeIndex) GetState() CompositeIndex_State { - if m != nil && m.State != nil { - return *m.State - } - return CompositeIndex_WRITE_ONLY -} - -func (m *CompositeIndex) GetOnlyUseIfRequired() bool { - if m != nil && m.OnlyUseIfRequired != nil { - return *m.OnlyUseIfRequired - } - return Default_CompositeIndex_OnlyUseIfRequired -} - -type IndexPostfix struct { - IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value,json=indexValue" json:"index_value,omitempty"` - Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"` - Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IndexPostfix) Reset() { *m = IndexPostfix{} } -func (m *IndexPostfix) String() string { return proto.CompactTextString(m) } -func (*IndexPostfix) ProtoMessage() {} -func (*IndexPostfix) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10} -} -func (m *IndexPostfix) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IndexPostfix.Unmarshal(m, b) -} -func (m *IndexPostfix) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IndexPostfix.Marshal(b, m, deterministic) -} -func (dst *IndexPostfix) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexPostfix.Merge(dst, src) -} -func (m *IndexPostfix) XXX_Size() int { - return xxx_messageInfo_IndexPostfix.Size(m) -} -func (m *IndexPostfix) XXX_DiscardUnknown() { - 
[Deleted hunk elided: a long run of `-` lines removing the legacy github.com/golang/protobuf (pre-apiv2) generated bindings for the vendored appengine datastore_v3 protocol. The removed block defines the messages IndexPostfix (with IndexPostfix_IndexValue), IndexPosition, Snapshot, InternalHeader, Transaction, Query (with Query_Filter and Query_Order), CompiledQuery (with PrimaryScan, MergeJoinScan, and EntityFilter), CompiledCursor (with Position and Position_IndexValue), Cursor, Error, Cost (with CommitCost), GetRequest, GetResponse (with GetResponse_Entity), PutRequest, PutResponse, TouchRequest, TouchResponse, DeleteRequest, DeleteResponse, NextRequest, QueryResult, AllocateIdsRequest, AllocateIdsResponse, CompositeIndices, AddActionsRequest, and AddActionsResponse. Each message carries the generated Reset/String/ProtoMessage/Descriptor methods, the XXX_Unmarshal/XXX_Marshal/XXX_Merge/XXX_Size/XXX_DiscardUnknown plumbing delegating to an xxx_messageInfo_* var, any Default_* constants derived from `def=` field annotations, and nil-checked getters for every field.]
XXX_DiscardUnknown() { - xxx_messageInfo_AddActionsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AddActionsResponse proto.InternalMessageInfo - -type BeginTransactionRequest struct { - Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` - App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` - AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,json=allowMultipleEg,def=0" json:"allow_multiple_eg,omitempty"` - DatabaseId *string `protobuf:"bytes,4,opt,name=database_id,json=databaseId" json:"database_id,omitempty"` - Mode *BeginTransactionRequest_TransactionMode `protobuf:"varint,5,opt,name=mode,enum=appengine.BeginTransactionRequest_TransactionMode,def=0" json:"mode,omitempty"` - PreviousTransaction *Transaction `protobuf:"bytes,7,opt,name=previous_transaction,json=previousTransaction" json:"previous_transaction,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } -func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } -func (*BeginTransactionRequest) ProtoMessage() {} -func (*BeginTransactionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36} -} -func (m *BeginTransactionRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BeginTransactionRequest.Unmarshal(m, b) -} -func (m *BeginTransactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BeginTransactionRequest.Marshal(b, m, deterministic) -} -func (dst *BeginTransactionRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_BeginTransactionRequest.Merge(dst, src) -} -func (m *BeginTransactionRequest) XXX_Size() int { - return xxx_messageInfo_BeginTransactionRequest.Size(m) -} -func (m *BeginTransactionRequest) XXX_DiscardUnknown() { - xxx_messageInfo_BeginTransactionRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_BeginTransactionRequest proto.InternalMessageInfo - -const Default_BeginTransactionRequest_AllowMultipleEg bool = false -const Default_BeginTransactionRequest_Mode BeginTransactionRequest_TransactionMode = BeginTransactionRequest_UNKNOWN - -func (m *BeginTransactionRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *BeginTransactionRequest) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *BeginTransactionRequest) GetAllowMultipleEg() bool { - if m != nil && m.AllowMultipleEg != nil { - return *m.AllowMultipleEg - } - return Default_BeginTransactionRequest_AllowMultipleEg -} - -func (m *BeginTransactionRequest) GetDatabaseId() string { - if m != nil && m.DatabaseId != nil { - return *m.DatabaseId - } - return "" -} - -func (m *BeginTransactionRequest) GetMode() BeginTransactionRequest_TransactionMode { - if m != nil && m.Mode != nil { - return *m.Mode - } - return Default_BeginTransactionRequest_Mode -} - -func (m *BeginTransactionRequest) GetPreviousTransaction() *Transaction { - if m != nil { - return m.PreviousTransaction - } - return nil -} - -type CommitResponse struct { - Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` - Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version,json=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 
`json:"-"` -} - -func (m *CommitResponse) Reset() { *m = CommitResponse{} } -func (m *CommitResponse) String() string { return proto.CompactTextString(m) } -func (*CommitResponse) ProtoMessage() {} -func (*CommitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37} -} -func (m *CommitResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CommitResponse.Unmarshal(m, b) -} -func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic) -} -func (dst *CommitResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CommitResponse.Merge(dst, src) -} -func (m *CommitResponse) XXX_Size() int { - return xxx_messageInfo_CommitResponse.Size(m) -} -func (m *CommitResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CommitResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CommitResponse proto.InternalMessageInfo - -func (m *CommitResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -func (m *CommitResponse) GetVersion() []*CommitResponse_Version { - if m != nil { - return m.Version - } - return nil -} - -type CommitResponse_Version struct { - RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key,json=rootEntityKey" json:"root_entity_key,omitempty"` - Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} } -func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) } -func (*CommitResponse_Version) ProtoMessage() {} -func (*CommitResponse_Version) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37, 0} -} -func (m *CommitResponse_Version) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CommitResponse_Version.Unmarshal(m, b) -} -func (m *CommitResponse_Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CommitResponse_Version.Marshal(b, m, deterministic) -} -func (dst *CommitResponse_Version) XXX_Merge(src proto.Message) { - xxx_messageInfo_CommitResponse_Version.Merge(dst, src) -} -func (m *CommitResponse_Version) XXX_Size() int { - return xxx_messageInfo_CommitResponse_Version.Size(m) -} -func (m *CommitResponse_Version) XXX_DiscardUnknown() { - xxx_messageInfo_CommitResponse_Version.DiscardUnknown(m) -} - -var xxx_messageInfo_CommitResponse_Version proto.InternalMessageInfo - -func (m *CommitResponse_Version) GetRootEntityKey() *Reference { - if m != nil { - return m.RootEntityKey - } - return nil -} - -func (m *CommitResponse_Version) GetVersion() int64 { - if m != nil && m.Version != nil { - return *m.Version - } - return 0 -} - -func init() { - proto.RegisterType((*Action)(nil), "appengine.Action") - proto.RegisterType((*PropertyValue)(nil), "appengine.PropertyValue") - proto.RegisterType((*PropertyValue_PointValue)(nil), "appengine.PropertyValue.PointValue") - proto.RegisterType((*PropertyValue_UserValue)(nil), "appengine.PropertyValue.UserValue") - proto.RegisterType((*PropertyValue_ReferenceValue)(nil), "appengine.PropertyValue.ReferenceValue") - proto.RegisterType((*PropertyValue_ReferenceValue_PathElement)(nil), "appengine.PropertyValue.ReferenceValue.PathElement") - proto.RegisterType((*Property)(nil), "appengine.Property") - proto.RegisterType((*Path)(nil), 
"appengine.Path") - proto.RegisterType((*Path_Element)(nil), "appengine.Path.Element") - proto.RegisterType((*Reference)(nil), "appengine.Reference") - proto.RegisterType((*User)(nil), "appengine.User") - proto.RegisterType((*EntityProto)(nil), "appengine.EntityProto") - proto.RegisterType((*CompositeProperty)(nil), "appengine.CompositeProperty") - proto.RegisterType((*Index)(nil), "appengine.Index") - proto.RegisterType((*Index_Property)(nil), "appengine.Index.Property") - proto.RegisterType((*CompositeIndex)(nil), "appengine.CompositeIndex") - proto.RegisterType((*IndexPostfix)(nil), "appengine.IndexPostfix") - proto.RegisterType((*IndexPostfix_IndexValue)(nil), "appengine.IndexPostfix.IndexValue") - proto.RegisterType((*IndexPosition)(nil), "appengine.IndexPosition") - proto.RegisterType((*Snapshot)(nil), "appengine.Snapshot") - proto.RegisterType((*InternalHeader)(nil), "appengine.InternalHeader") - proto.RegisterType((*Transaction)(nil), "appengine.Transaction") - proto.RegisterType((*Query)(nil), "appengine.Query") - proto.RegisterType((*Query_Filter)(nil), "appengine.Query.Filter") - proto.RegisterType((*Query_Order)(nil), "appengine.Query.Order") - proto.RegisterType((*CompiledQuery)(nil), "appengine.CompiledQuery") - proto.RegisterType((*CompiledQuery_PrimaryScan)(nil), "appengine.CompiledQuery.PrimaryScan") - proto.RegisterType((*CompiledQuery_MergeJoinScan)(nil), "appengine.CompiledQuery.MergeJoinScan") - proto.RegisterType((*CompiledQuery_EntityFilter)(nil), "appengine.CompiledQuery.EntityFilter") - proto.RegisterType((*CompiledCursor)(nil), "appengine.CompiledCursor") - proto.RegisterType((*CompiledCursor_Position)(nil), "appengine.CompiledCursor.Position") - proto.RegisterType((*CompiledCursor_Position_IndexValue)(nil), "appengine.CompiledCursor.Position.IndexValue") - proto.RegisterType((*Cursor)(nil), "appengine.Cursor") - proto.RegisterType((*Error)(nil), "appengine.Error") - proto.RegisterType((*Cost)(nil), "appengine.Cost") - proto.RegisterType((*Cost_CommitCost)(nil), "appengine.Cost.CommitCost") - proto.RegisterType((*GetRequest)(nil), "appengine.GetRequest") - proto.RegisterType((*GetResponse)(nil), "appengine.GetResponse") - proto.RegisterType((*GetResponse_Entity)(nil), "appengine.GetResponse.Entity") - proto.RegisterType((*PutRequest)(nil), "appengine.PutRequest") - proto.RegisterType((*PutResponse)(nil), "appengine.PutResponse") - proto.RegisterType((*TouchRequest)(nil), "appengine.TouchRequest") - proto.RegisterType((*TouchResponse)(nil), "appengine.TouchResponse") - proto.RegisterType((*DeleteRequest)(nil), "appengine.DeleteRequest") - proto.RegisterType((*DeleteResponse)(nil), "appengine.DeleteResponse") - proto.RegisterType((*NextRequest)(nil), "appengine.NextRequest") - proto.RegisterType((*QueryResult)(nil), "appengine.QueryResult") - proto.RegisterType((*AllocateIdsRequest)(nil), "appengine.AllocateIdsRequest") - proto.RegisterType((*AllocateIdsResponse)(nil), "appengine.AllocateIdsResponse") - proto.RegisterType((*CompositeIndices)(nil), "appengine.CompositeIndices") - proto.RegisterType((*AddActionsRequest)(nil), "appengine.AddActionsRequest") - proto.RegisterType((*AddActionsResponse)(nil), "appengine.AddActionsResponse") - proto.RegisterType((*BeginTransactionRequest)(nil), "appengine.BeginTransactionRequest") - proto.RegisterType((*CommitResponse)(nil), "appengine.CommitResponse") - proto.RegisterType((*CommitResponse_Version)(nil), "appengine.CommitResponse.Version") -} - -func init() { - 
proto.RegisterFile("google.golang.org/appengine/internal/datastore/datastore_v3.proto", fileDescriptor_datastore_v3_83b17b80c34f6179) -} - -var fileDescriptor_datastore_v3_83b17b80c34f6179 = []byte{ - // 4156 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x73, 0xe3, 0x46, - 0x76, 0x37, 0xc1, 0xef, 0x47, 0x89, 0x82, 0x5a, 0xf3, 0xc1, 0xa1, 0x3f, 0x46, 0xc6, 0xac, 0x6d, - 0xd9, 0x6b, 0x73, 0x6c, 0xf9, 0x23, 0x5b, 0x4a, 0x76, 0x1d, 0x4a, 0xc4, 0x68, 0x90, 0xa1, 0x48, - 0xb9, 0x09, 0xd9, 0x9e, 0x5c, 0x50, 0x18, 0xa2, 0x29, 0x21, 0x43, 0x02, 0x30, 0x00, 0x6a, 0x46, - 0x93, 0xe4, 0x90, 0x4b, 0x2a, 0x55, 0x5b, 0xa9, 0x1c, 0x92, 0x4a, 0x25, 0xf9, 0x07, 0x72, 0xc8, - 0x39, 0x95, 0xaa, 0x54, 0xf6, 0x98, 0x5b, 0x0e, 0x7b, 0xc9, 0x31, 0x95, 0x73, 0xf2, 0x27, 0x24, - 0x39, 0xa4, 0xfa, 0x75, 0x03, 0x02, 0x28, 0x4a, 0x23, 0x6d, 0xf6, 0x90, 0x13, 0xd1, 0xef, 0xfd, - 0xba, 0xf1, 0xfa, 0xf5, 0xfb, 0x6c, 0x10, 0xba, 0xc7, 0xbe, 0x7f, 0x3c, 0x65, 0x9d, 0x63, 0x7f, - 0x6a, 0x7b, 0xc7, 0x1d, 0x3f, 0x3c, 0x7e, 0x68, 0x07, 0x01, 0xf3, 0x8e, 0x5d, 0x8f, 0x3d, 0x74, - 0xbd, 0x98, 0x85, 0x9e, 0x3d, 0x7d, 0xe8, 0xd8, 0xb1, 0x1d, 0xc5, 0x7e, 0xc8, 0xce, 0x9f, 0xac, - 0xd3, 0xcf, 0x3b, 0x41, 0xe8, 0xc7, 0x3e, 0xa9, 0xa7, 0x13, 0xb4, 0x1a, 0x54, 0xba, 0xe3, 0xd8, - 0xf5, 0x3d, 0xed, 0x1f, 0x2b, 0xb0, 0x7a, 0x18, 0xfa, 0x01, 0x0b, 0xe3, 0xb3, 0x6f, 0xed, 0xe9, - 0x9c, 0x91, 0x77, 0x00, 0x5c, 0x2f, 0xfe, 0xea, 0x0b, 0x1c, 0xb5, 0x0a, 0x9b, 0x85, 0xad, 0x22, - 0xcd, 0x50, 0x88, 0x06, 0x2b, 0xcf, 0x7c, 0x7f, 0xca, 0x6c, 0x4f, 0x20, 0x94, 0xcd, 0xc2, 0x56, - 0x8d, 0xe6, 0x68, 0x64, 0x13, 0x1a, 0x51, 0x1c, 0xba, 0xde, 0xb1, 0x80, 0x14, 0x37, 0x0b, 0x5b, - 0x75, 0x9a, 0x25, 0x71, 0x84, 0xe3, 0xcf, 0x9f, 0x4d, 0x99, 0x40, 0x94, 0x36, 0x0b, 0x5b, 0x05, - 0x9a, 0x25, 0x91, 0x3d, 0x80, 0xc0, 0x77, 0xbd, 0xf8, 0x14, 0x01, 0xe5, 0xcd, 0xc2, 0x16, 0x6c, - 0x3f, 0xe8, 0xa4, 0x7b, 0xe8, 0xe4, 0xa4, 0xee, 0x1c, 0x72, 0x28, 0x3e, 0xd2, 0xcc, 0x34, 0xf2, - 0xdb, 0x50, 0x9f, 0x47, 0x2c, 0x14, 0x6b, 0xd4, 0x70, 0x0d, 0xed, 0xd2, 0x35, 0x8e, 0x22, 0x16, - 0x8a, 0x25, 0xce, 0x27, 0x91, 0x21, 0x34, 0x43, 0x36, 0x61, 0x21, 0xf3, 0xc6, 0x4c, 0x2c, 0xb3, - 0x82, 0xcb, 0x7c, 0x70, 0xe9, 0x32, 0x34, 0x81, 0x8b, 0xb5, 0x16, 0xa6, 0xb7, 0xb7, 0x00, 0xce, - 0x85, 0x25, 0x2b, 0x50, 0x78, 0xd9, 0xaa, 0x6c, 0x2a, 0x5b, 0x05, 0x5a, 0x78, 0xc9, 0x47, 0x67, - 0xad, 0xaa, 0x18, 0x9d, 0xb5, 0xff, 0xa9, 0x00, 0xf5, 0x54, 0x26, 0x72, 0x0b, 0xca, 0x6c, 0x66, - 0xbb, 0xd3, 0x56, 0x7d, 0x53, 0xd9, 0xaa, 0x53, 0x31, 0x20, 0xf7, 0xa1, 0x61, 0xcf, 0xe3, 0x13, - 0xcb, 0xf1, 0x67, 0xb6, 0xeb, 0xb5, 0x00, 0x79, 0xc0, 0x49, 0x3d, 0xa4, 0x90, 0x36, 0xd4, 0x3c, - 0x77, 0xfc, 0xdc, 0xb3, 0x67, 0xac, 0xd5, 0xc0, 0x73, 0x48, 0xc7, 0xe4, 0x13, 0x20, 0x13, 0xe6, - 0xb0, 0xd0, 0x8e, 0x99, 0x63, 0xb9, 0x0e, 0xf3, 0x62, 0x37, 0x3e, 0x6b, 0xdd, 0x46, 0xd4, 0x7a, - 0xca, 0x31, 0x24, 0x23, 0x0f, 0x0f, 0x42, 0xff, 0xd4, 0x75, 0x58, 0xd8, 0xba, 0xb3, 0x00, 0x3f, - 0x94, 0x8c, 0xf6, 0xbf, 0x17, 0xa0, 0x99, 0xd7, 0x05, 0x51, 0xa1, 0x68, 0x07, 0x41, 0x6b, 0x15, - 0xa5, 0xe4, 0x8f, 0xe4, 0x6d, 0x00, 0x2e, 0x8a, 0x15, 0x05, 0xf6, 0x98, 0xb5, 0x6e, 0xe1, 0x5a, - 0x75, 0x4e, 0x19, 0x71, 0x02, 0x39, 0x82, 0x46, 0x60, 0xc7, 0x27, 0x6c, 0xca, 0x66, 0xcc, 0x8b, - 0x5b, 0xcd, 0xcd, 0xe2, 0x16, 0x6c, 0x7f, 0x7e, 0x4d, 0xd5, 0x77, 0x0e, 0xed, 0xf8, 0x44, 0x17, - 0x53, 0x69, 0x76, 0x9d, 0xb6, 0x0e, 0x8d, 0x0c, 0x8f, 0x10, 0x28, 0xc5, 0x67, 0x01, 0x6b, 0xad, - 0xa1, 0x5c, 0xf8, 0x4c, 0x9a, 0xa0, 0xb8, 0x4e, 0x4b, 0x45, 0xf3, 
0x57, 0x5c, 0x87, 0x63, 0x50, - 0x87, 0xeb, 0x28, 0x22, 0x3e, 0x6b, 0xff, 0x51, 0x86, 0x5a, 0x22, 0x00, 0xe9, 0x42, 0x75, 0xc6, - 0x6c, 0xcf, 0xf5, 0x8e, 0xd1, 0x69, 0x9a, 0xdb, 0x6f, 0x2e, 0x11, 0xb3, 0x73, 0x20, 0x20, 0x3b, - 0x30, 0x18, 0x5a, 0x07, 0x7a, 0x77, 0x60, 0x0c, 0xf6, 0x69, 0x32, 0x8f, 0x1f, 0xa6, 0x7c, 0xb4, - 0xe6, 0xa1, 0x8b, 0x9e, 0x55, 0xa7, 0x20, 0x49, 0x47, 0xa1, 0x9b, 0x0a, 0x51, 0x14, 0x82, 0xe2, - 0x21, 0x76, 0xa0, 0x9c, 0xb8, 0x88, 0xb2, 0xd5, 0xd8, 0x6e, 0x5d, 0xa6, 0x1c, 0x2a, 0x60, 0xdc, - 0x20, 0x66, 0xf3, 0x69, 0xec, 0x06, 0x53, 0xee, 0x76, 0xca, 0x56, 0x8d, 0xa6, 0x63, 0xf2, 0x1e, - 0x40, 0xc4, 0xec, 0x70, 0x7c, 0x62, 0x3f, 0x9b, 0xb2, 0x56, 0x85, 0x7b, 0xf6, 0x4e, 0x79, 0x62, - 0x4f, 0x23, 0x46, 0x33, 0x0c, 0x62, 0xc3, 0xdd, 0x49, 0x1c, 0x59, 0xb1, 0xff, 0x9c, 0x79, 0xee, - 0x2b, 0x9b, 0x07, 0x12, 0xcb, 0x0f, 0xf8, 0x0f, 0xfa, 0x58, 0x73, 0xfb, 0xc3, 0x65, 0x5b, 0x7f, - 0x14, 0x47, 0x66, 0x66, 0xc6, 0x10, 0x27, 0xd0, 0xdb, 0x93, 0x65, 0x64, 0xd2, 0x86, 0xca, 0xd4, - 0x1f, 0xdb, 0x53, 0xd6, 0xaa, 0x73, 0x2d, 0xec, 0x28, 0xcc, 0xa3, 0x92, 0xa2, 0xfd, 0xb3, 0x02, - 0x55, 0xa9, 0x47, 0xd2, 0x84, 0x8c, 0x26, 0xd5, 0x37, 0x48, 0x0d, 0x4a, 0xbb, 0xfd, 0xe1, 0xae, - 0xda, 0xe4, 0x4f, 0xa6, 0xfe, 0xbd, 0xa9, 0xae, 0x71, 0xcc, 0xee, 0x53, 0x53, 0x1f, 0x99, 0x94, - 0x63, 0x54, 0xb2, 0x0e, 0xab, 0x5d, 0x73, 0x78, 0x60, 0xed, 0x75, 0x4d, 0x7d, 0x7f, 0x48, 0x9f, - 0xaa, 0x05, 0xb2, 0x0a, 0x75, 0x24, 0xf5, 0x8d, 0xc1, 0x13, 0x55, 0xe1, 0x33, 0x70, 0x68, 0x1a, - 0x66, 0x5f, 0x57, 0x8b, 0x44, 0x85, 0x15, 0x31, 0x63, 0x38, 0x30, 0xf5, 0x81, 0xa9, 0x96, 0x52, - 0xca, 0xe8, 0xe8, 0xe0, 0xa0, 0x4b, 0x9f, 0xaa, 0x65, 0xb2, 0x06, 0x0d, 0xa4, 0x74, 0x8f, 0xcc, - 0xc7, 0x43, 0xaa, 0x56, 0x48, 0x03, 0xaa, 0xfb, 0x3d, 0xeb, 0xbb, 0xc7, 0xfa, 0x40, 0xad, 0x92, - 0x15, 0xa8, 0xed, 0xf7, 0x2c, 0xfd, 0xa0, 0x6b, 0xf4, 0xd5, 0x1a, 0x9f, 0xbd, 0xaf, 0x0f, 0xe9, - 0x68, 0x64, 0x1d, 0x0e, 0x8d, 0x81, 0xa9, 0xd6, 0x49, 0x1d, 0xca, 0xfb, 0x3d, 0xcb, 0x38, 0x50, - 0x81, 0x10, 0x68, 0xee, 0xf7, 0xac, 0xc3, 0xc7, 0xc3, 0x81, 0x3e, 0x38, 0x3a, 0xd8, 0xd5, 0xa9, - 0xda, 0x20, 0xb7, 0x40, 0xe5, 0xb4, 0xe1, 0xc8, 0xec, 0xf6, 0xbb, 0xbd, 0x1e, 0xd5, 0x47, 0x23, - 0x75, 0x85, 0x4b, 0xbd, 0xdf, 0xb3, 0x68, 0xd7, 0xe4, 0xfb, 0x5a, 0xe5, 0x2f, 0xe4, 0x7b, 0x7f, - 0xa2, 0x3f, 0x55, 0xd7, 0xf9, 0x2b, 0xf4, 0x81, 0x69, 0x98, 0x4f, 0xad, 0x43, 0x3a, 0x34, 0x87, - 0xea, 0x06, 0x17, 0xd0, 0x18, 0xf4, 0xf4, 0xef, 0xad, 0x6f, 0xbb, 0xfd, 0x23, 0x5d, 0x25, 0xda, - 0x8f, 0xe1, 0xf6, 0xd2, 0x33, 0xe1, 0xaa, 0x7b, 0x6c, 0x1e, 0xf4, 0xd5, 0x02, 0x7f, 0xe2, 0x9b, - 0x52, 0x15, 0xed, 0x0f, 0xa0, 0xc4, 0x5d, 0x86, 0x7c, 0x06, 0xd5, 0xc4, 0x1b, 0x0b, 0xe8, 0x8d, - 0x77, 0xb3, 0x67, 0x6d, 0xc7, 0x27, 0x9d, 0xc4, 0xe3, 0x12, 0x5c, 0xbb, 0x0b, 0xd5, 0x45, 0x4f, - 0x53, 0x2e, 0x78, 0x5a, 0xf1, 0x82, 0xa7, 0x95, 0x32, 0x9e, 0x66, 0x43, 0x3d, 0xf5, 0xed, 0x9b, - 0x47, 0x91, 0x07, 0x50, 0xe2, 0xde, 0xdf, 0x6a, 0xa2, 0x87, 0xac, 0x2d, 0x08, 0x4c, 0x91, 0xa9, - 0xfd, 0x43, 0x01, 0x4a, 0x3c, 0xda, 0x9e, 0x07, 0xda, 0xc2, 0x15, 0x81, 0x56, 0xb9, 0x32, 0xd0, - 0x16, 0xaf, 0x15, 0x68, 0x2b, 0x37, 0x0b, 0xb4, 0xd5, 0x4b, 0x02, 0xad, 0xf6, 0x67, 0x45, 0x68, - 0xe8, 0x38, 0xf3, 0x10, 0x13, 0xfd, 0xfb, 0x50, 0x7c, 0xce, 0xce, 0x50, 0x3f, 0x8d, 0xed, 0x5b, - 0x99, 0xdd, 0xa6, 0x2a, 0xa4, 0x1c, 0x40, 0xb6, 0x61, 0x45, 0xbc, 0xd0, 0x3a, 0x0e, 0xfd, 0x79, - 0xd0, 0x52, 0x97, 0xab, 0xa7, 0x21, 0x40, 0xfb, 0x1c, 0x43, 0xde, 0x83, 0xb2, 0xff, 0xc2, 0x63, - 0x21, 0xc6, 0xc1, 0x3c, 0x98, 0x2b, 0x8f, 0x0a, 0x2e, 0x79, 0x08, 0xa5, 0xe7, 0xae, 0xe7, 
0xe0, - 0x19, 0xe6, 0x23, 0x61, 0x46, 0xd0, 0xce, 0x13, 0xd7, 0x73, 0x28, 0x02, 0xc9, 0x3d, 0xa8, 0xf1, - 0x5f, 0x8c, 0x7b, 0x65, 0xdc, 0x68, 0x95, 0x8f, 0x79, 0xd0, 0x7b, 0x08, 0xb5, 0x40, 0xc6, 0x10, - 0x4c, 0x00, 0x8d, 0xed, 0x8d, 0x25, 0xe1, 0x85, 0xa6, 0x20, 0xf2, 0x15, 0xac, 0x84, 0xf6, 0x0b, - 0x2b, 0x9d, 0xb4, 0x76, 0xf9, 0xa4, 0x46, 0x68, 0xbf, 0x48, 0x23, 0x38, 0x81, 0x52, 0x68, 0x7b, - 0xcf, 0x5b, 0x64, 0xb3, 0xb0, 0x55, 0xa6, 0xf8, 0xac, 0x7d, 0x01, 0x25, 0x2e, 0x25, 0x8f, 0x08, - 0xfb, 0x3d, 0xf4, 0xff, 0xee, 0x9e, 0xa9, 0x16, 0x12, 0x7f, 0xfe, 0x96, 0x47, 0x03, 0x45, 0x72, - 0x0f, 0xf4, 0xd1, 0xa8, 0xbb, 0xaf, 0xab, 0x45, 0xad, 0x07, 0xeb, 0x7b, 0xfe, 0x2c, 0xf0, 0x23, - 0x37, 0x66, 0xe9, 0xf2, 0xf7, 0xa0, 0xe6, 0x7a, 0x0e, 0x7b, 0x69, 0xb9, 0x0e, 0x9a, 0x56, 0x91, - 0x56, 0x71, 0x6c, 0x38, 0xdc, 0xe4, 0x4e, 0x65, 0x31, 0x55, 0xe4, 0x26, 0x87, 0x03, 0xed, 0x2f, - 0x15, 0x28, 0x1b, 0x1c, 0xc1, 0x8d, 0x4f, 0x9e, 0x14, 0x7a, 0x8f, 0x30, 0x4c, 0x10, 0x24, 0x93, - 0xfb, 0x50, 0x1b, 0x6a, 0xb6, 0x37, 0x66, 0xbc, 0xe2, 0xc3, 0x3c, 0x50, 0xa3, 0xe9, 0x98, 0x7c, - 0x99, 0xd1, 0x9f, 0x82, 0x2e, 0x7b, 0x2f, 0xa3, 0x0a, 0x7c, 0xc1, 0x12, 0x2d, 0xb6, 0xff, 0xaa, - 0x90, 0x49, 0x6e, 0xcb, 0x12, 0x4f, 0x1f, 0xea, 0x8e, 0x1b, 0x32, 0xac, 0x23, 0xe5, 0x41, 0x3f, - 0xb8, 0x74, 0xe1, 0x4e, 0x2f, 0x81, 0xee, 0xd4, 0xbb, 0xa3, 0x3d, 0x7d, 0xd0, 0xe3, 0x99, 0xef, - 0x7c, 0x01, 0xed, 0x23, 0xa8, 0xa7, 0x10, 0x0c, 0xc7, 0x09, 0x48, 0x2d, 0x70, 0xf5, 0xf6, 0xf4, - 0x74, 0xac, 0x68, 0x7f, 0xad, 0x40, 0x33, 0xd5, 0xaf, 0xd0, 0xd0, 0x6d, 0xa8, 0xd8, 0x41, 0x90, - 0xa8, 0xb6, 0x4e, 0xcb, 0x76, 0x10, 0x18, 0x8e, 0x8c, 0x2d, 0x0a, 0x6a, 0x9b, 0xc7, 0x96, 0x4f, - 0x01, 0x1c, 0x36, 0x71, 0x3d, 0x17, 0x85, 0x2e, 0xa2, 0xc1, 0xab, 0x8b, 0x42, 0xd3, 0x0c, 0x86, - 0x7c, 0x09, 0xe5, 0x28, 0xb6, 0x63, 0x91, 0x2b, 0x9b, 0xdb, 0xf7, 0x33, 0xe0, 0xbc, 0x08, 0x9d, - 0x11, 0x87, 0x51, 0x81, 0x26, 0x5f, 0xc1, 0x2d, 0xdf, 0x9b, 0x9e, 0x59, 0xf3, 0x88, 0x59, 0xee, - 0xc4, 0x0a, 0xd9, 0x0f, 0x73, 0x37, 0x64, 0x4e, 0x3e, 0xa7, 0xae, 0x73, 0xc8, 0x51, 0xc4, 0x8c, - 0x09, 0x95, 0x7c, 0xed, 0x6b, 0x28, 0xe3, 0x3a, 0x7c, 0xcf, 0xdf, 0x51, 0xc3, 0xd4, 0xad, 0xe1, - 0xa0, 0xff, 0x54, 0xe8, 0x80, 0xea, 0xdd, 0x9e, 0x85, 0x44, 0x55, 0xe1, 0xc1, 0xbe, 0xa7, 0xf7, - 0x75, 0x53, 0xef, 0xa9, 0x45, 0x9e, 0x3d, 0x74, 0x4a, 0x87, 0x54, 0x2d, 0x69, 0xff, 0x53, 0x80, - 0x15, 0x94, 0xe7, 0xd0, 0x8f, 0xe2, 0x89, 0xfb, 0x92, 0xec, 0x41, 0x43, 0x98, 0xdd, 0xa9, 0x2c, - 0xe8, 0xb9, 0x33, 0x68, 0x8b, 0x7b, 0x96, 0x68, 0x31, 0x90, 0x75, 0xb4, 0x9b, 0x3e, 0x27, 0x21, - 0x45, 0x41, 0xa7, 0xbf, 0x22, 0xa4, 0xbc, 0x05, 0x95, 0x67, 0x6c, 0xe2, 0x87, 0x22, 0x04, 0xd6, - 0x76, 0x4a, 0x71, 0x38, 0x67, 0x54, 0xd2, 0xda, 0x36, 0xc0, 0xf9, 0xfa, 0xe4, 0x01, 0xac, 0x26, - 0xc6, 0x66, 0xa1, 0x71, 0x89, 0x93, 0x5b, 0x49, 0x88, 0x83, 0x5c, 0x75, 0xa3, 0x5c, 0xab, 0xba, - 0xd1, 0xbe, 0x86, 0xd5, 0x64, 0x3f, 0xe2, 0xfc, 0x54, 0x21, 0x79, 0x01, 0x63, 0xca, 0x82, 0x8c, - 0xca, 0x45, 0x19, 0xb5, 0x9f, 0x41, 0x6d, 0xe4, 0xd9, 0x41, 0x74, 0xe2, 0xc7, 0xdc, 0x7a, 0xe2, - 0x48, 0xfa, 0xaa, 0x12, 0x47, 0x9a, 0x06, 0x15, 0x7e, 0x38, 0xf3, 0x88, 0xbb, 0xbf, 0x31, 0xe8, - 0xee, 0x99, 0xc6, 0xb7, 0xba, 0xfa, 0x06, 0x01, 0xa8, 0xc8, 0xe7, 0x82, 0xa6, 0x41, 0xd3, 0x90, - 0xed, 0xd8, 0x63, 0x66, 0x3b, 0x2c, 0xe4, 0x12, 0xfc, 0xe0, 0x47, 0x89, 0x04, 0x3f, 0xf8, 0x91, - 0xf6, 0x17, 0x05, 0x68, 0x98, 0xa1, 0xed, 0x45, 0xb6, 0x30, 0xf7, 0xcf, 0xa0, 0x72, 0x82, 0x58, - 0x74, 0xa3, 0xc6, 0x82, 0x7f, 0x66, 0x17, 0xa3, 0x12, 0x48, 0xee, 0x40, 0xe5, 0xc4, 0xf6, 0x9c, - 0xa9, 0xd0, 0x5a, 
0x85, 0xca, 0x51, 0x92, 0x1b, 0x95, 0xf3, 0xdc, 0xb8, 0x05, 0x2b, 0x33, 0x3b, - 0x7c, 0x6e, 0x8d, 0x4f, 0x6c, 0xef, 0x98, 0x45, 0xf2, 0x60, 0xa4, 0x05, 0x36, 0x38, 0x6b, 0x4f, - 0x70, 0xb4, 0xbf, 0x5f, 0x81, 0xf2, 0x37, 0x73, 0x16, 0x9e, 0x65, 0x04, 0xfa, 0xe0, 0xba, 0x02, - 0xc9, 0x17, 0x17, 0x2e, 0x4b, 0xca, 0x6f, 0x2f, 0x26, 0x65, 0x22, 0x53, 0x84, 0xc8, 0x95, 0x22, - 0x0b, 0x7c, 0x9a, 0x09, 0x63, 0xeb, 0x57, 0xd8, 0xda, 0x79, 0x70, 0x7b, 0x08, 0x95, 0x89, 0x3b, - 0x8d, 0x51, 0x75, 0x8b, 0xd5, 0x08, 0xee, 0xa5, 0xf3, 0x08, 0xd9, 0x54, 0xc2, 0xc8, 0xbb, 0xb0, - 0x22, 0x2a, 0x59, 0xeb, 0x07, 0xce, 0xc6, 0x82, 0x95, 0xf7, 0xa6, 0x48, 0x13, 0xbb, 0xff, 0x18, - 0xca, 0x7e, 0xc8, 0x37, 0x5f, 0xc7, 0x25, 0xef, 0x5c, 0x58, 0x72, 0xc8, 0xb9, 0x54, 0x80, 0xc8, - 0x87, 0x50, 0x3a, 0x71, 0xbd, 0x18, 0xb3, 0x46, 0x73, 0xfb, 0xf6, 0x05, 0xf0, 0x63, 0xd7, 0x8b, - 0x29, 0x42, 0x78, 0x98, 0x1f, 0xfb, 0x73, 0x2f, 0x6e, 0xdd, 0xc5, 0x0c, 0x23, 0x06, 0xe4, 0x1e, - 0x54, 0xfc, 0xc9, 0x24, 0x62, 0x31, 0x76, 0x96, 0xe5, 0x9d, 0xc2, 0xa7, 0x54, 0x12, 0xf8, 0x84, - 0xa9, 0x3b, 0x73, 0x63, 0xec, 0x43, 0xca, 0x54, 0x0c, 0xc8, 0x2e, 0xac, 0x8d, 0xfd, 0x59, 0xe0, - 0x4e, 0x99, 0x63, 0x8d, 0xe7, 0x61, 0xe4, 0x87, 0xad, 0x77, 0x2e, 0x1c, 0xd3, 0x9e, 0x44, 0xec, - 0x21, 0x80, 0x36, 0xc7, 0xb9, 0x31, 0x31, 0x60, 0x83, 0x79, 0x8e, 0xb5, 0xb8, 0xce, 0xfd, 0xd7, - 0xad, 0xb3, 0xce, 0x3c, 0x27, 0x4f, 0x4a, 0xc4, 0xc1, 0x48, 0x68, 0x61, 0xcc, 0x68, 0x6d, 0x60, - 0x90, 0xb9, 0x77, 0x69, 0xac, 0x14, 0xe2, 0x64, 0xc2, 0xf7, 0x6f, 0xc0, 0x2d, 0x19, 0x22, 0xad, - 0x80, 0x85, 0x13, 0x36, 0x8e, 0xad, 0x60, 0x6a, 0x7b, 0x58, 0xca, 0xa5, 0xc6, 0x4a, 0x24, 0xe4, - 0x50, 0x20, 0x0e, 0xa7, 0xb6, 0x47, 0x34, 0xa8, 0x3f, 0x67, 0x67, 0x91, 0xc5, 0x23, 0x29, 0x76, - 0xae, 0x29, 0xba, 0xc6, 0xe9, 0x43, 0x6f, 0x7a, 0x46, 0x7e, 0x02, 0x8d, 0xf8, 0xdc, 0xdb, 0xb0, - 0x61, 0x6d, 0xe4, 0x4e, 0x35, 0xe3, 0x8b, 0x34, 0x0b, 0x25, 0xf7, 0xa1, 0x2a, 0x35, 0xd4, 0xba, - 0x97, 0x5d, 0x3b, 0xa1, 0xf2, 0xc4, 0x3c, 0xb1, 0xdd, 0xa9, 0x7f, 0xca, 0x42, 0x6b, 0x16, 0xb5, - 0xda, 0xe2, 0xb6, 0x24, 0x21, 0x1d, 0x44, 0xdc, 0x4f, 0xa3, 0x38, 0xf4, 0xbd, 0xe3, 0xd6, 0x26, - 0xde, 0x93, 0xc8, 0xd1, 0xc5, 0xe0, 0xf7, 0x2e, 0x66, 0xfe, 0x7c, 0xf0, 0xfb, 0x1c, 0xee, 0x60, - 0x65, 0x66, 0x3d, 0x3b, 0xb3, 0xf2, 0x68, 0x0d, 0xd1, 0x1b, 0xc8, 0xdd, 0x3d, 0x3b, 0xcc, 0x4e, - 0x6a, 0x43, 0xcd, 0x71, 0xa3, 0xd8, 0xf5, 0xc6, 0x71, 0xab, 0x85, 0xef, 0x4c, 0xc7, 0xe4, 0x33, - 0xb8, 0x3d, 0x73, 0x3d, 0x2b, 0xb2, 0x27, 0xcc, 0x8a, 0x5d, 0xee, 0x9b, 0x6c, 0xec, 0x7b, 0x4e, - 0xd4, 0x7a, 0x80, 0x82, 0x93, 0x99, 0xeb, 0x8d, 0xec, 0x09, 0x33, 0xdd, 0x19, 0x1b, 0x09, 0x0e, - 0xf9, 0x08, 0xd6, 0x11, 0x1e, 0xb2, 0x60, 0xea, 0x8e, 0x6d, 0xf1, 0xfa, 0x1f, 0xe1, 0xeb, 0xd7, - 0x38, 0x83, 0x0a, 0x3a, 0xbe, 0xfa, 0x63, 0x68, 0x06, 0x2c, 0x8c, 0xdc, 0x28, 0xb6, 0xa4, 0x45, - 0xbf, 0x97, 0xd5, 0xda, 0xaa, 0x64, 0x0e, 0x91, 0xd7, 0xfe, 0xcf, 0x02, 0x54, 0x84, 0x73, 0x92, - 0x4f, 0x41, 0xf1, 0x03, 0xbc, 0x06, 0x69, 0x6e, 0x6f, 0x5e, 0xe2, 0xc1, 0x9d, 0x61, 0xc0, 0xeb, - 0x5e, 0x3f, 0xa4, 0x8a, 0x1f, 0xdc, 0xb8, 0x28, 0xd4, 0xfe, 0x10, 0x6a, 0xc9, 0x02, 0xbc, 0xbc, - 0xe8, 0xeb, 0xa3, 0x91, 0x65, 0x3e, 0xee, 0x0e, 0xd4, 0x02, 0xb9, 0x03, 0x24, 0x1d, 0x5a, 0x43, - 0x6a, 0xe9, 0xdf, 0x1c, 0x75, 0xfb, 0xaa, 0x82, 0x5d, 0x1a, 0xd5, 0xbb, 0xa6, 0x4e, 0x05, 0xb2, - 0x48, 0xee, 0xc1, 0xed, 0x2c, 0xe5, 0x1c, 0x5c, 0xc2, 0x14, 0x8c, 0x8f, 0x65, 0x52, 0x01, 0xc5, - 0x18, 0xa8, 0x15, 0x9e, 0x16, 0xf4, 0xef, 0x8d, 0x91, 0x39, 0x52, 0xab, 0xed, 0xbf, 0x29, 0x40, - 0x19, 0xc3, 0x06, 0x3f, 0x9f, 0x54, 0x72, 
0x71, 0x5d, 0x73, 0x5e, 0xb9, 0x1a, 0xd9, 0x92, 0xaa, - 0x81, 0x01, 0x65, 0x73, 0x79, 0xf4, 0xf9, 0xb5, 0xd6, 0x53, 0x3f, 0x85, 0x12, 0x8f, 0x52, 0xbc, - 0x43, 0x1c, 0xd2, 0x9e, 0x4e, 0xad, 0x47, 0x06, 0x1d, 0xf1, 0x2a, 0x97, 0x40, 0xb3, 0x3b, 0xd8, - 0xd3, 0x47, 0xe6, 0x30, 0xa1, 0xa1, 0x56, 0x1e, 0x19, 0x7d, 0x33, 0x45, 0x15, 0xb5, 0x9f, 0xd7, - 0x60, 0x35, 0x89, 0x09, 0x22, 0x82, 0x3e, 0x82, 0x46, 0x10, 0xba, 0x33, 0x3b, 0x3c, 0x8b, 0xc6, - 0xb6, 0x87, 0x49, 0x01, 0xb6, 0x7f, 0xb4, 0x24, 0xaa, 0x88, 0x1d, 0x1d, 0x0a, 0xec, 0x68, 0x6c, - 0x7b, 0x34, 0x3b, 0x91, 0xf4, 0x61, 0x75, 0xc6, 0xc2, 0x63, 0xf6, 0x7b, 0xbe, 0xeb, 0xe1, 0x4a, - 0x55, 0x8c, 0xc8, 0xef, 0x5f, 0xba, 0xd2, 0x01, 0x47, 0xff, 0x8e, 0xef, 0x7a, 0xb8, 0x56, 0x7e, - 0x32, 0xf9, 0x04, 0xea, 0xa2, 0x12, 0x72, 0xd8, 0x04, 0x63, 0xc5, 0xb2, 0xda, 0x4f, 0xd4, 0xe8, - 0x3d, 0x36, 0xc9, 0xc4, 0x65, 0xb8, 0x34, 0x2e, 0x37, 0xb2, 0x71, 0xf9, 0xcd, 0x6c, 0x2c, 0x5a, - 0x11, 0x55, 0x78, 0x1a, 0x84, 0x2e, 0x38, 0x7c, 0x6b, 0x89, 0xc3, 0x77, 0x60, 0x23, 0xf1, 0x55, - 0xcb, 0xf5, 0x26, 0xee, 0x4b, 0x2b, 0x72, 0x5f, 0x89, 0xd8, 0x53, 0xa6, 0xeb, 0x09, 0xcb, 0xe0, - 0x9c, 0x91, 0xfb, 0x8a, 0x11, 0x23, 0xe9, 0xe0, 0x64, 0x0e, 0x5c, 0xc5, 0xab, 0xc9, 0xf7, 0x2e, - 0x55, 0x8f, 0x68, 0xbe, 0x64, 0x46, 0xcc, 0x4d, 0x6d, 0xff, 0x52, 0x81, 0x46, 0xe6, 0x1c, 0x78, - 0xf6, 0x16, 0xca, 0x42, 0x61, 0xc5, 0x55, 0x94, 0x50, 0x1f, 0x4a, 0xfa, 0x26, 0xd4, 0xa3, 0xd8, - 0x0e, 0x63, 0x8b, 0x17, 0x57, 0xb2, 0xdd, 0x45, 0xc2, 0x13, 0x76, 0x46, 0x3e, 0x80, 0x35, 0xc1, - 0x74, 0xbd, 0xf1, 0x74, 0x1e, 0xb9, 0xa7, 0xa2, 0x99, 0xaf, 0xd1, 0x26, 0x92, 0x8d, 0x84, 0x4a, - 0xee, 0x42, 0x95, 0x67, 0x21, 0xbe, 0x86, 0x68, 0xfa, 0x2a, 0xcc, 0x73, 0xf8, 0x0a, 0x0f, 0x60, - 0x95, 0x33, 0xce, 0xe7, 0x57, 0xc4, 0x2d, 0x33, 0xf3, 0x9c, 0xf3, 0xd9, 0x1d, 0xd8, 0x10, 0xaf, - 0x09, 0x44, 0xf1, 0x2a, 0x2b, 0xdc, 0x3b, 0xa8, 0xd8, 0x75, 0x64, 0xc9, 0xb2, 0x56, 0x14, 0x9c, - 0x1f, 0x01, 0xcf, 0x5e, 0x0b, 0xe8, 0xbb, 0x22, 0x94, 0x31, 0xcf, 0xc9, 0x61, 0x77, 0xe1, 0x1d, - 0x8e, 0x9d, 0x7b, 0x76, 0x10, 0x4c, 0x5d, 0xe6, 0x58, 0x53, 0xff, 0x18, 0x43, 0x66, 0x14, 0xdb, - 0xb3, 0xc0, 0x9a, 0x47, 0xad, 0x0d, 0x0c, 0x99, 0x6d, 0xe6, 0x39, 0x47, 0x09, 0xa8, 0xef, 0x1f, - 0x9b, 0x09, 0xe4, 0x28, 0x6a, 0xff, 0x3e, 0xac, 0xe6, 0xec, 0x71, 0x41, 0xa7, 0x35, 0x74, 0xfe, - 0x8c, 0x4e, 0xdf, 0x85, 0x95, 0x20, 0x64, 0xe7, 0xa2, 0xd5, 0x51, 0xb4, 0x86, 0xa0, 0x09, 0xb1, - 0xb6, 0x60, 0x05, 0x79, 0x96, 0x20, 0xe6, 0xf3, 0x63, 0x03, 0x59, 0x87, 0xc8, 0x69, 0xbf, 0x80, - 0x95, 0xec, 0x69, 0x93, 0x77, 0x33, 0x69, 0xa1, 0x99, 0xcb, 0x93, 0x69, 0x76, 0x48, 0x2a, 0xb2, - 0xf5, 0x4b, 0x2a, 0x32, 0x72, 0x9d, 0x8a, 0x4c, 0xfb, 0x2f, 0xd9, 0x9c, 0x65, 0x2a, 0x84, 0x9f, - 0x41, 0x2d, 0x90, 0xf5, 0x38, 0x5a, 0x52, 0xfe, 0x12, 0x3e, 0x0f, 0xee, 0x24, 0x95, 0x3b, 0x4d, - 0xe7, 0xb4, 0xff, 0x56, 0x81, 0x5a, 0x5a, 0xd0, 0xe7, 0x2c, 0xef, 0xcd, 0x05, 0xcb, 0x3b, 0x90, - 0x1a, 0x16, 0x0a, 0x7c, 0x1b, 0xa3, 0xc5, 0x27, 0xaf, 0x7f, 0xd7, 0xc5, 0xb6, 0xe7, 0x34, 0xdb, - 0xf6, 0x6c, 0xbe, 0xae, 0xed, 0xf9, 0xe4, 0xa2, 0xc1, 0xbf, 0x95, 0xe9, 0x2d, 0x16, 0xcc, 0xbe, - 0xfd, 0x7d, 0xae, 0x0f, 0xca, 0x26, 0x84, 0x77, 0xc4, 0x7e, 0xd2, 0x84, 0x90, 0xb6, 0x3f, 0xf7, - 0xaf, 0xd7, 0xfe, 0x6c, 0x43, 0x45, 0xea, 0xfc, 0x0e, 0x54, 0x64, 0x4d, 0x27, 0x1b, 0x04, 0x31, - 0x3a, 0x6f, 0x10, 0x0a, 0xb2, 0x4e, 0xd7, 0x7e, 0xae, 0x40, 0x59, 0x0f, 0x43, 0x3f, 0xd4, 0xfe, - 0x48, 0x81, 0x3a, 0x3e, 0xed, 0xf9, 0x0e, 0xe3, 0xd9, 0x60, 0xb7, 0xdb, 0xb3, 0xa8, 0xfe, 0xcd, - 0x91, 0x8e, 0xd9, 0xa0, 0x0d, 0x77, 0xf6, 0x86, 0x83, 0xbd, 0x23, 
0x4a, 0xf5, 0x81, 0x69, 0x99, - 0xb4, 0x3b, 0x18, 0xf1, 0xb6, 0x67, 0x38, 0x50, 0x15, 0x9e, 0x29, 0x8c, 0x81, 0xa9, 0xd3, 0x41, - 0xb7, 0x6f, 0x89, 0x56, 0xb4, 0x88, 0x77, 0xb3, 0xba, 0xde, 0xb3, 0xf0, 0xd6, 0x51, 0x2d, 0xf1, - 0x96, 0xd5, 0x34, 0x0e, 0xf4, 0xe1, 0x91, 0xa9, 0x96, 0xc9, 0x6d, 0x58, 0x3f, 0xd4, 0xe9, 0x81, - 0x31, 0x1a, 0x19, 0xc3, 0x81, 0xd5, 0xd3, 0x07, 0x86, 0xde, 0x53, 0x2b, 0x7c, 0x9d, 0x5d, 0x63, - 0xdf, 0xec, 0xee, 0xf6, 0x75, 0xb9, 0x4e, 0x95, 0x6c, 0xc2, 0x5b, 0x7b, 0xc3, 0x83, 0x03, 0xc3, - 0x34, 0xf5, 0x9e, 0xb5, 0x7b, 0x64, 0x5a, 0x23, 0xd3, 0xe8, 0xf7, 0xad, 0xee, 0xe1, 0x61, 0xff, - 0x29, 0x4f, 0x60, 0x35, 0x72, 0x17, 0x36, 0xf6, 0xba, 0x87, 0xdd, 0x5d, 0xa3, 0x6f, 0x98, 0x4f, - 0xad, 0x9e, 0x31, 0xe2, 0xf3, 0x7b, 0x6a, 0x9d, 0x27, 0x6c, 0x93, 0x3e, 0xb5, 0xba, 0x7d, 0x14, - 0xcd, 0xd4, 0xad, 0xdd, 0xee, 0xde, 0x13, 0x7d, 0xd0, 0x53, 0x81, 0x0b, 0x30, 0xea, 0x3e, 0xd2, - 0x2d, 0x2e, 0x92, 0x65, 0x0e, 0x87, 0xd6, 0xb0, 0xdf, 0x53, 0x1b, 0xda, 0xbf, 0x14, 0xa1, 0xb4, - 0xe7, 0x47, 0x31, 0xf7, 0x46, 0xe1, 0xac, 0x2f, 0x42, 0x37, 0x66, 0xa2, 0x7f, 0x2b, 0x53, 0xd1, - 0x4b, 0x7f, 0x87, 0x24, 0x1e, 0x50, 0x32, 0x10, 0xeb, 0xd9, 0x19, 0xc7, 0x29, 0x88, 0x5b, 0x3b, - 0xc7, 0xed, 0x72, 0xb2, 0x88, 0x68, 0x78, 0x85, 0x23, 0xd7, 0x2b, 0x22, 0x4e, 0x06, 0x61, 0xb9, - 0xe0, 0xc7, 0x40, 0xb2, 0x20, 0xb9, 0x62, 0x09, 0x91, 0x6a, 0x06, 0x29, 0x96, 0xdc, 0x01, 0x18, - 0xfb, 0xb3, 0x99, 0x1b, 0x8f, 0xfd, 0x28, 0x96, 0x5f, 0xc8, 0xda, 0x39, 0x63, 0x8f, 0x62, 0x6e, - 0xf1, 0x33, 0x37, 0xe6, 0x8f, 0x34, 0x83, 0x26, 0x3b, 0x70, 0xcf, 0x0e, 0x82, 0xd0, 0x7f, 0xe9, - 0xce, 0xec, 0x98, 0x59, 0xdc, 0x73, 0xed, 0x63, 0x66, 0x39, 0x6c, 0x1a, 0xdb, 0xd8, 0x13, 0x95, - 0xe9, 0xdd, 0x0c, 0x60, 0x24, 0xf8, 0x3d, 0xce, 0xe6, 0x71, 0xd7, 0x75, 0xac, 0x88, 0xfd, 0x30, - 0xe7, 0x1e, 0x60, 0xcd, 0x03, 0xc7, 0xe6, 0x62, 0xd6, 0x45, 0x96, 0x72, 0x9d, 0x91, 0xe4, 0x1c, - 0x09, 0x46, 0xfb, 0x15, 0xc0, 0xb9, 0x14, 0x64, 0x1b, 0x6e, 0xf3, 0x3a, 0x9e, 0x45, 0x31, 0x73, - 0x2c, 0xb9, 0xdb, 0x60, 0x1e, 0x47, 0x18, 0xe2, 0xcb, 0x74, 0x23, 0x65, 0xca, 0x9b, 0xc2, 0x79, - 0x1c, 0x91, 0x9f, 0x40, 0xeb, 0xc2, 0x1c, 0x87, 0x4d, 0x19, 0x7f, 0x6d, 0x15, 0xa7, 0xdd, 0x59, - 0x98, 0xd6, 0x13, 0x5c, 0xed, 0x4f, 0x14, 0x80, 0x7d, 0x16, 0x53, 0xc1, 0xcd, 0x34, 0xb6, 0x95, - 0xeb, 0x36, 0xb6, 0xef, 0x27, 0x17, 0x08, 0xc5, 0xab, 0x63, 0xc0, 0x42, 0x97, 0xa1, 0xdc, 0xa4, - 0xcb, 0xc8, 0x35, 0x11, 0xc5, 0x2b, 0x9a, 0x88, 0x52, 0xae, 0x89, 0xf8, 0x18, 0x9a, 0xf6, 0x74, - 0xea, 0xbf, 0xe0, 0x05, 0x0d, 0x0b, 0x43, 0xe6, 0xa0, 0x11, 0x9c, 0xd7, 0xdb, 0xc8, 0xec, 0x49, - 0x9e, 0xf6, 0xe7, 0x0a, 0x34, 0x50, 0x15, 0x51, 0xe0, 0x7b, 0x11, 0x23, 0x5f, 0x42, 0x45, 0x5e, - 0x44, 0x8b, 0x8b, 0xfc, 0xb7, 0x33, 0xb2, 0x66, 0x70, 0xb2, 0x68, 0xa0, 0x12, 0xcc, 0x33, 0x42, - 0xe6, 0x75, 0x97, 0x2b, 0x25, 0x45, 0x91, 0xfb, 0x50, 0x73, 0x3d, 0x4b, 0xb4, 0xd4, 0x95, 0x4c, - 0x58, 0xac, 0xba, 0x1e, 0xd6, 0xb2, 0xed, 0x57, 0x50, 0x11, 0x2f, 0x21, 0x9d, 0x54, 0xa6, 0x8b, - 0xfa, 0xcb, 0xdc, 0x1c, 0xa7, 0xc2, 0xc8, 0xc3, 0x29, 0xbd, 0x2e, 0x40, 0xb7, 0xa0, 0x7a, 0xca, - 0x9b, 0x0f, 0xbc, 0xf4, 0xe3, 0xea, 0x4d, 0x86, 0xda, 0x1f, 0x97, 0x00, 0x0e, 0xe7, 0x4b, 0x0c, - 0xa4, 0x71, 0x5d, 0x03, 0xe9, 0xe4, 0xf4, 0xf8, 0x7a, 0x99, 0x7f, 0x75, 0x43, 0x59, 0xd2, 0x69, - 0x17, 0x6f, 0xda, 0x69, 0xdf, 0x87, 0x6a, 0x1c, 0xce, 0xb9, 0xa3, 0x08, 0x63, 0x4a, 0x5b, 0x5a, - 0x49, 0x25, 0x6f, 0x42, 0x79, 0xe2, 0x87, 0x63, 0x86, 0x8e, 0x95, 0xb2, 0x05, 0xed, 0xc2, 0x65, - 0x52, 0xed, 0xb2, 0xcb, 0x24, 0xde, 0xa0, 0x45, 0xf2, 0x1e, 0x0d, 0x0b, 0x99, 0x7c, 0x83, 
0x96, - 0x5c, 0xb1, 0xd1, 0x14, 0x44, 0xbe, 0x81, 0xa6, 0x3d, 0x8f, 0x7d, 0xcb, 0xe5, 0x15, 0xda, 0xd4, - 0x1d, 0x9f, 0x61, 0xd9, 0xdd, 0xcc, 0x7f, 0xaf, 0x4f, 0x0f, 0xaa, 0xd3, 0x9d, 0xc7, 0xbe, 0xe1, - 0x1c, 0x22, 0x72, 0xa7, 0x2a, 0x93, 0x12, 0x5d, 0xb1, 0x33, 0x64, 0xed, 0xc7, 0xb0, 0x92, 0x85, - 0xf1, 0x04, 0x24, 0x81, 0xea, 0x1b, 0x3c, 0x3b, 0x8d, 0x78, 0x6a, 0x1b, 0x98, 0x46, 0xb7, 0xaf, - 0x16, 0xb4, 0x18, 0x1a, 0xb8, 0xbc, 0xf4, 0x8e, 0xeb, 0xba, 0xfd, 0x03, 0x28, 0x61, 0xf8, 0x55, - 0x2e, 0x7c, 0x0f, 0xc1, 0x98, 0x8b, 0xcc, 0xbc, 0xf9, 0x15, 0xb3, 0xe6, 0xf7, 0xdf, 0x05, 0x58, - 0x31, 0xfd, 0xf9, 0xf8, 0xe4, 0xa2, 0x01, 0xc2, 0xaf, 0x3b, 0x42, 0x2d, 0x31, 0x1f, 0xe5, 0xa6, - 0xe6, 0x93, 0x5a, 0x47, 0x71, 0x89, 0x75, 0xdc, 0xf4, 0xcc, 0xb5, 0x2f, 0x60, 0x55, 0x6e, 0x5e, - 0x6a, 0x3d, 0xd1, 0x66, 0xe1, 0x0a, 0x6d, 0x6a, 0xbf, 0x50, 0x60, 0x55, 0xc4, 0xf7, 0xff, 0xbb, - 0xd2, 0x2a, 0x37, 0x0c, 0xeb, 0xe5, 0x1b, 0x5d, 0x1e, 0xfd, 0xbf, 0xf4, 0x34, 0x6d, 0x08, 0xcd, - 0x44, 0x7d, 0x37, 0x50, 0xfb, 0x15, 0x46, 0xfc, 0x8b, 0x02, 0x34, 0x06, 0xec, 0xe5, 0x92, 0x20, - 0x5a, 0xbe, 0xee, 0x71, 0x7c, 0x98, 0x2b, 0x57, 0x1b, 0xdb, 0xeb, 0x59, 0x19, 0xc4, 0xd5, 0x63, - 0x52, 0xc1, 0xa6, 0xb7, 0xa8, 0xca, 0xf2, 0x5b, 0xd4, 0xd2, 0x62, 0xb7, 0x9e, 0xb9, 0xc5, 0x2b, - 0x2e, 0xbb, 0xc5, 0xd3, 0xfe, 0xad, 0x08, 0x0d, 0x6c, 0x90, 0x29, 0x8b, 0xe6, 0xd3, 0x38, 0x27, - 0x4c, 0xe1, 0x6a, 0x61, 0x3a, 0x50, 0x09, 0x71, 0x92, 0x74, 0xa5, 0x4b, 0x83, 0xbf, 0x40, 0x61, - 0x6b, 0xfc, 0xdc, 0x0d, 0x02, 0xe6, 0x58, 0x82, 0x92, 0x14, 0x30, 0x4d, 0x49, 0x16, 0x22, 0x44, - 0xbc, 0xfc, 0x9c, 0xf9, 0x21, 0x4b, 0x51, 0x45, 0xbc, 0x4f, 0x68, 0x70, 0x5a, 0x02, 0xc9, 0xdd, - 0x37, 0x88, 0xca, 0xe0, 0xfc, 0xbe, 0x21, 0xed, 0x35, 0x91, 0x5b, 0x47, 0xae, 0xe8, 0x35, 0x91, - 0xcd, 0xbb, 0xa8, 0x99, 0x3d, 0x9d, 0x5a, 0x7e, 0x10, 0xa1, 0xd3, 0xd4, 0x68, 0x0d, 0x09, 0xc3, - 0x20, 0x22, 0x5f, 0x43, 0x7a, 0x5d, 0x2c, 0x6f, 0xc9, 0xc5, 0x39, 0xb6, 0x2e, 0xbb, 0x58, 0xa0, - 0xab, 0xe3, 0xdc, 0xfd, 0xcf, 0x92, 0x1b, 0xea, 0xca, 0x4d, 0x6f, 0xa8, 0x1f, 0x42, 0x59, 0xc4, - 0xa8, 0xda, 0xeb, 0x62, 0x94, 0xc0, 0x65, 0xed, 0xb3, 0x91, 0xb7, 0xcf, 0x5f, 0x16, 0x80, 0x74, - 0xa7, 0x53, 0x7f, 0x6c, 0xc7, 0xcc, 0x70, 0xa2, 0x8b, 0x66, 0x7a, 0xed, 0xcf, 0x2e, 0x9f, 0x41, - 0x7d, 0xe6, 0x3b, 0x6c, 0x6a, 0x25, 0xdf, 0x94, 0x2e, 0xad, 0x7e, 0x10, 0xc6, 0x5b, 0x52, 0x02, - 0x25, 0xbc, 0xc4, 0x51, 0xb0, 0xee, 0xc0, 0x67, 0xde, 0x84, 0xcd, 0xec, 0x97, 0xb2, 0x14, 0xe1, - 0x8f, 0xa4, 0x03, 0xd5, 0x90, 0x45, 0x2c, 0x3c, 0x65, 0x57, 0x16, 0x55, 0x09, 0x48, 0x7b, 0x06, - 0x1b, 0xb9, 0x1d, 0x49, 0x47, 0xbe, 0x85, 0x5f, 0x2b, 0xc3, 0x58, 0x7e, 0xb4, 0x12, 0x03, 0xfe, - 0x3a, 0xe6, 0x25, 0x9f, 0x41, 0xf9, 0x63, 0xea, 0xf0, 0xc5, 0xab, 0xe2, 0xec, 0x1e, 0xa8, 0x59, - 0x4d, 0xbb, 0x63, 0x0c, 0x36, 0xf2, 0x54, 0x0a, 0xd7, 0x3b, 0x15, 0xed, 0xef, 0x0a, 0xb0, 0xde, - 0x75, 0x1c, 0xf1, 0x77, 0xc3, 0x25, 0xaa, 0x2f, 0x5e, 0x57, 0xf5, 0x0b, 0x81, 0x58, 0x84, 0x89, - 0x6b, 0x05, 0xe2, 0x0f, 0xa1, 0x92, 0xd6, 0x5a, 0xc5, 0x05, 0x77, 0x16, 0x72, 0x51, 0x09, 0xd0, - 0x6e, 0x01, 0xc9, 0x0a, 0x2b, 0xb4, 0xaa, 0xfd, 0x69, 0x11, 0xee, 0xee, 0xb2, 0x63, 0xd7, 0xcb, - 0xbe, 0xe2, 0x57, 0xdf, 0xc9, 0xc5, 0x4f, 0x65, 0x9f, 0xc1, 0xba, 0x28, 0xe4, 0x93, 0x7f, 0x62, - 0x59, 0xec, 0x58, 0x7e, 0x9d, 0x94, 0xb1, 0x6a, 0x0d, 0xf9, 0x07, 0x92, 0xad, 0xe3, 0x7f, 0xc5, - 0x1c, 0x3b, 0xb6, 0x9f, 0xd9, 0x11, 0xb3, 0x5c, 0x47, 0xfe, 0x59, 0x06, 0x12, 0x92, 0xe1, 0x90, - 0x21, 0x94, 0xb8, 0x0d, 0xa2, 0xeb, 0x36, 0xb7, 0xb7, 0x33, 0x62, 0x5d, 0xb2, 0x95, 0xac, 0x02, - 0x0f, 0x7c, 0x87, 
0xed, 0x54, 0x8f, 0x06, 0x4f, 0x06, 0xc3, 0xef, 0x06, 0x14, 0x17, 0x22, 0x06, - 0xdc, 0x0a, 0x42, 0x76, 0xea, 0xfa, 0xf3, 0xc8, 0xca, 0x9e, 0x44, 0xf5, 0xca, 0x94, 0xb8, 0x91, - 0xcc, 0xc9, 0x10, 0xb5, 0x9f, 0xc2, 0xda, 0xc2, 0xcb, 0x78, 0x6d, 0x26, 0x5f, 0xa7, 0xbe, 0x41, - 0x56, 0xa1, 0x8e, 0x1f, 0xbb, 0x97, 0x7f, 0xfb, 0xd6, 0xfe, 0xb5, 0x80, 0x57, 0x4c, 0x33, 0x37, - 0xbe, 0x59, 0x06, 0xfb, 0xcd, 0x7c, 0x06, 0x83, 0xed, 0x77, 0xf3, 0xe6, 0x9b, 0x59, 0xb0, 0xf3, - 0xad, 0x00, 0xa6, 0x41, 0xa4, 0x6d, 0x43, 0x55, 0xd2, 0xc8, 0x6f, 0xc1, 0x5a, 0xe8, 0xfb, 0x71, - 0xd2, 0x89, 0x8a, 0x0e, 0xe4, 0xf2, 0x3f, 0xdb, 0xac, 0x72, 0xb0, 0x48, 0x06, 0x4f, 0xf2, 0xbd, - 0x48, 0x59, 0xfc, 0x0d, 0x44, 0x0e, 0x77, 0x1b, 0xbf, 0x5b, 0x4f, 0xff, 0xb7, 0xfb, 0xbf, 0x01, - 0x00, 0x00, 0xff, 0xff, 0x35, 0x9f, 0x30, 0x98, 0xf2, 0x2b, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto deleted file mode 100644 index 497b4d9a..00000000 --- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto +++ /dev/null @@ -1,551 +0,0 @@ -syntax = "proto2"; -option go_package = "datastore"; - -package appengine; - -message Action{} - -message PropertyValue { - optional int64 int64Value = 1; - optional bool booleanValue = 2; - optional string stringValue = 3; - optional double doubleValue = 4; - - optional group PointValue = 5 { - required double x = 6; - required double y = 7; - } - - optional group UserValue = 8 { - required string email = 9; - required string auth_domain = 10; - optional string nickname = 11; - optional string federated_identity = 21; - optional string federated_provider = 22; - } - - optional group ReferenceValue = 12 { - required string app = 13; - optional string name_space = 20; - repeated group PathElement = 14 { - required string type = 15; - optional int64 id = 16; - optional string name = 17; - } - } -} - -message Property { - enum Meaning { - NO_MEANING = 0; - BLOB = 14; - TEXT = 15; - BYTESTRING = 16; - - ATOM_CATEGORY = 1; - ATOM_LINK = 2; - ATOM_TITLE = 3; - ATOM_CONTENT = 4; - ATOM_SUMMARY = 5; - ATOM_AUTHOR = 6; - - GD_WHEN = 7; - GD_EMAIL = 8; - GEORSS_POINT = 9; - GD_IM = 10; - - GD_PHONENUMBER = 11; - GD_POSTALADDRESS = 12; - - GD_RATING = 13; - - BLOBKEY = 17; - ENTITY_PROTO = 19; - - INDEX_VALUE = 18; - }; - - optional Meaning meaning = 1 [default = NO_MEANING]; - optional string meaning_uri = 2; - - required string name = 3; - - required PropertyValue value = 5; - - required bool multiple = 4; - - optional bool searchable = 6 [default=false]; - - enum FtsTokenizationOption { - HTML = 1; - ATOM = 2; - } - - optional FtsTokenizationOption fts_tokenization_option = 8; - - optional string locale = 9 [default = "en"]; -} - -message Path { - repeated group Element = 1 { - required string type = 2; - optional int64 id = 3; - optional string name = 4; - } -} - -message Reference { - required string app = 13; - optional string name_space = 20; - required Path path = 14; -} - -message User { - required string email = 1; - required string auth_domain = 2; - optional string nickname = 3; - optional string federated_identity = 6; - optional string federated_provider = 7; -} - -message EntityProto { - required Reference key = 13; - required Path entity_group = 16; - optional User owner = 17; - - enum Kind { - GD_CONTACT = 1; - GD_EVENT = 2; - GD_MESSAGE = 3; - } - optional Kind kind = 4; - optional string kind_uri = 5; - - repeated Property property = 14; - 
repeated Property raw_property = 15; - - optional int32 rank = 18; -} - -message CompositeProperty { - required int64 index_id = 1; - repeated string value = 2; -} - -message Index { - required string entity_type = 1; - required bool ancestor = 5; - repeated group Property = 2 { - required string name = 3; - enum Direction { - ASCENDING = 1; - DESCENDING = 2; - } - optional Direction direction = 4 [default = ASCENDING]; - } -} - -message CompositeIndex { - required string app_id = 1; - required int64 id = 2; - required Index definition = 3; - - enum State { - WRITE_ONLY = 1; - READ_WRITE = 2; - DELETED = 3; - ERROR = 4; - } - required State state = 4; - - optional bool only_use_if_required = 6 [default = false]; -} - -message IndexPostfix { - message IndexValue { - required string property_name = 1; - required PropertyValue value = 2; - } - - repeated IndexValue index_value = 1; - - optional Reference key = 2; - - optional bool before = 3 [default=true]; -} - -message IndexPosition { - optional string key = 1; - - optional bool before = 2 [default=true]; -} - -message Snapshot { - enum Status { - INACTIVE = 0; - ACTIVE = 1; - } - - required int64 ts = 1; -} - -message InternalHeader { - optional string qos = 1; -} - -message Transaction { - optional InternalHeader header = 4; - required fixed64 handle = 1; - required string app = 2; - optional bool mark_changes = 3 [default = false]; -} - -message Query { - optional InternalHeader header = 39; - - required string app = 1; - optional string name_space = 29; - - optional string kind = 3; - optional Reference ancestor = 17; - - repeated group Filter = 4 { - enum Operator { - LESS_THAN = 1; - LESS_THAN_OR_EQUAL = 2; - GREATER_THAN = 3; - GREATER_THAN_OR_EQUAL = 4; - EQUAL = 5; - IN = 6; - EXISTS = 7; - } - - required Operator op = 6; - repeated Property property = 14; - } - - optional string search_query = 8; - - repeated group Order = 9 { - enum Direction { - ASCENDING = 1; - DESCENDING = 2; - } - - required string property = 10; - optional Direction direction = 11 [default = ASCENDING]; - } - - enum Hint { - ORDER_FIRST = 1; - ANCESTOR_FIRST = 2; - FILTER_FIRST = 3; - } - optional Hint hint = 18; - - optional int32 count = 23; - - optional int32 offset = 12 [default = 0]; - - optional int32 limit = 16; - - optional CompiledCursor compiled_cursor = 30; - optional CompiledCursor end_compiled_cursor = 31; - - repeated CompositeIndex composite_index = 19; - - optional bool require_perfect_plan = 20 [default = false]; - - optional bool keys_only = 21 [default = false]; - - optional Transaction transaction = 22; - - optional bool compile = 25 [default = false]; - - optional int64 failover_ms = 26; - - optional bool strong = 32; - - repeated string property_name = 33; - - repeated string group_by_property_name = 34; - - optional bool distinct = 24; - - optional int64 min_safe_time_seconds = 35; - - repeated string safe_replica_name = 36; - - optional bool persist_offset = 37 [default=false]; -} - -message CompiledQuery { - required group PrimaryScan = 1 { - optional string index_name = 2; - - optional string start_key = 3; - optional bool start_inclusive = 4; - optional string end_key = 5; - optional bool end_inclusive = 6; - - repeated string start_postfix_value = 22; - repeated string end_postfix_value = 23; - - optional int64 end_unapplied_log_timestamp_us = 19; - } - - repeated group MergeJoinScan = 7 { - required string index_name = 8; - - repeated string prefix_value = 9; - - optional bool value_prefix = 20 [default=false]; - } - - optional 
Index index_def = 21; - - optional int32 offset = 10 [default = 0]; - - optional int32 limit = 11; - - required bool keys_only = 12; - - repeated string property_name = 24; - - optional int32 distinct_infix_size = 25; - - optional group EntityFilter = 13 { - optional bool distinct = 14 [default=false]; - - optional string kind = 17; - optional Reference ancestor = 18; - } -} - -message CompiledCursor { - optional group Position = 2 { - optional string start_key = 27; - - repeated group IndexValue = 29 { - optional string property = 30; - required PropertyValue value = 31; - } - - optional Reference key = 32; - - optional bool start_inclusive = 28 [default=true]; - } -} - -message Cursor { - required fixed64 cursor = 1; - - optional string app = 2; -} - -message Error { - enum ErrorCode { - BAD_REQUEST = 1; - CONCURRENT_TRANSACTION = 2; - INTERNAL_ERROR = 3; - NEED_INDEX = 4; - TIMEOUT = 5; - PERMISSION_DENIED = 6; - BIGTABLE_ERROR = 7; - COMMITTED_BUT_STILL_APPLYING = 8; - CAPABILITY_DISABLED = 9; - TRY_ALTERNATE_BACKEND = 10; - SAFE_TIME_TOO_OLD = 11; - } -} - -message Cost { - optional int32 index_writes = 1; - optional int32 index_write_bytes = 2; - optional int32 entity_writes = 3; - optional int32 entity_write_bytes = 4; - optional group CommitCost = 5 { - optional int32 requested_entity_puts = 6; - optional int32 requested_entity_deletes = 7; - }; - optional int32 approximate_storage_delta = 8; - optional int32 id_sequence_updates = 9; -} - -message GetRequest { - optional InternalHeader header = 6; - - repeated Reference key = 1; - optional Transaction transaction = 2; - - optional int64 failover_ms = 3; - - optional bool strong = 4; - - optional bool allow_deferred = 5 [default=false]; -} - -message GetResponse { - repeated group Entity = 1 { - optional EntityProto entity = 2; - optional Reference key = 4; - - optional int64 version = 3; - } - - repeated Reference deferred = 5; - - optional bool in_order = 6 [default=true]; -} - -message PutRequest { - optional InternalHeader header = 11; - - repeated EntityProto entity = 1; - optional Transaction transaction = 2; - repeated CompositeIndex composite_index = 3; - - optional bool trusted = 4 [default = false]; - - optional bool force = 7 [default = false]; - - optional bool mark_changes = 8 [default = false]; - repeated Snapshot snapshot = 9; - - enum AutoIdPolicy { - CURRENT = 0; - SEQUENTIAL = 1; - } - optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT]; -} - -message PutResponse { - repeated Reference key = 1; - optional Cost cost = 2; - repeated int64 version = 3; -} - -message TouchRequest { - optional InternalHeader header = 10; - - repeated Reference key = 1; - repeated CompositeIndex composite_index = 2; - optional bool force = 3 [default = false]; - repeated Snapshot snapshot = 9; -} - -message TouchResponse { - optional Cost cost = 1; -} - -message DeleteRequest { - optional InternalHeader header = 10; - - repeated Reference key = 6; - optional Transaction transaction = 5; - - optional bool trusted = 4 [default = false]; - - optional bool force = 7 [default = false]; - - optional bool mark_changes = 8 [default = false]; - repeated Snapshot snapshot = 9; -} - -message DeleteResponse { - optional Cost cost = 1; - repeated int64 version = 3; -} - -message NextRequest { - optional InternalHeader header = 5; - - required Cursor cursor = 1; - optional int32 count = 2; - - optional int32 offset = 4 [default = 0]; - - optional bool compile = 3 [default = false]; -} - -message QueryResult { - optional Cursor cursor = 1; - 
- repeated EntityProto result = 2; - - optional int32 skipped_results = 7; - - required bool more_results = 3; - - optional bool keys_only = 4; - - optional bool index_only = 9; - - optional bool small_ops = 10; - - optional CompiledQuery compiled_query = 5; - - optional CompiledCursor compiled_cursor = 6; - - repeated CompositeIndex index = 8; - - repeated int64 version = 11; -} - -message AllocateIdsRequest { - optional InternalHeader header = 4; - - optional Reference model_key = 1; - - optional int64 size = 2; - - optional int64 max = 3; - - repeated Reference reserve = 5; -} - -message AllocateIdsResponse { - required int64 start = 1; - required int64 end = 2; - optional Cost cost = 3; -} - -message CompositeIndices { - repeated CompositeIndex index = 1; -} - -message AddActionsRequest { - optional InternalHeader header = 3; - - required Transaction transaction = 1; - repeated Action action = 2; -} - -message AddActionsResponse { -} - -message BeginTransactionRequest { - optional InternalHeader header = 3; - - required string app = 1; - optional bool allow_multiple_eg = 2 [default = false]; - optional string database_id = 4; - - enum TransactionMode { - UNKNOWN = 0; - READ_ONLY = 1; - READ_WRITE = 2; - } - optional TransactionMode mode = 5 [default = UNKNOWN]; - - optional Transaction previous_transaction = 7; -} - -message CommitResponse { - optional Cost cost = 1; - - repeated group Version = 3 { - required Reference root_entity_key = 4; - required int64 version = 5; - } -} diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go deleted file mode 100644 index 0f95aa91..00000000 --- a/vendor/google.golang.org/appengine/internal/identity.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -import ( - "context" - "os" -) - -var ( - // This is set to true in identity_classic.go, which is behind the appengine build tag. - // The appengine build tag is set for the first generation runtimes (<= Go 1.9) but not - // the second generation runtimes (>= Go 1.11), so this indicates whether we're on a - // first-gen runtime. See IsStandard below for the second-gen check. - appengineStandard bool - - // This is set to true in identity_flex.go, which is behind the appenginevm build tag. - appengineFlex bool -) - -// AppID is the implementation of the wrapper function of the same name in -// ../identity.go. See that file for commentary. -func AppID(c context.Context) string { - return appID(FullyQualifiedAppID(c)) -} - -// IsStandard is the implementation of the wrapper function of the same name in -// ../appengine.go. See that file for commentary. -func IsStandard() bool { - // appengineStandard will be true for first-gen runtimes (<= Go 1.9) but not - // second-gen (>= Go 1.11). - return appengineStandard || IsSecondGen() -} - -// IsSecondGen is the implementation of the wrapper function of the same name in -// ../appengine.go. See that file for commentary. -func IsSecondGen() bool { - // Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime. - return os.Getenv("GAE_ENV") == "standard" -} - -// IsFlex is the implementation of the wrapper function of the same name in -// ../appengine.go. See that file for commentary. 
-func IsFlex() bool { - return appengineFlex -} - -// IsAppEngine is the implementation of the wrapper function of the same name in -// ../appengine.go. See that file for commentary. -func IsAppEngine() bool { - return IsStandard() || IsFlex() -} diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go deleted file mode 100644 index 5ad3548b..00000000 --- a/vendor/google.golang.org/appengine/internal/identity_classic.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -//go:build appengine -// +build appengine - -package internal - -import ( - "context" - - "appengine" -) - -func init() { - appengineStandard = true -} - -func DefaultVersionHostname(ctx context.Context) string { - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - return appengine.DefaultVersionHostname(c) -} - -func Datacenter(_ context.Context) string { return appengine.Datacenter() } -func ServerSoftware() string { return appengine.ServerSoftware() } -func InstanceID() string { return appengine.InstanceID() } -func IsDevAppServer() bool { return appengine.IsDevAppServer() } - -func RequestID(ctx context.Context) string { - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - return appengine.RequestID(c) -} - -func ModuleName(ctx context.Context) string { - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - return appengine.ModuleName(c) -} -func VersionID(ctx context.Context) string { - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - return appengine.VersionID(c) -} - -func fullyQualifiedAppID(ctx context.Context) string { - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - return c.FullyQualifiedAppID() -} diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go deleted file mode 100644 index 4201b6b5..00000000 --- a/vendor/google.golang.org/appengine/internal/identity_flex.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2018 Google LLC. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -//go:build appenginevm -// +build appenginevm - -package internal - -func init() { - appengineFlex = true -} diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go deleted file mode 100644 index 18ddda3a..00000000 --- a/vendor/google.golang.org/appengine/internal/identity_vm.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -//go:build !appengine -// +build !appengine - -package internal - -import ( - "context" - "log" - "net/http" - "os" - "strings" -) - -// These functions are implementations of the wrapper functions -// in ../appengine/identity.go. See that file for commentary. 
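The identity files deleted above select an implementation per runtime with Go build tags (`appengine` for first-generation runtimes, `appenginevm` for Flex, `!appengine` for everything else), while second-generation standard runtimes are detected at run time through the GAE_ENV environment variable. A minimal sketch of that detection pattern, with an illustrative function name that is not part of the vendored API:

package main

import (
	"fmt"
	"os"
)

// isSecondGen mirrors the check in the deleted IsSecondGen: second-generation
// App Engine standard runtimes set GAE_ENV=standard, so no build tag is needed.
// (Illustrative reimplementation, not the vendored function.)
func isSecondGen() bool {
	return os.Getenv("GAE_ENV") == "standard"
}

func main() {
	fmt.Println("second-gen standard runtime:", isSecondGen())
}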
- -const ( - hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname" - hRequestLogId = "X-AppEngine-Request-Log-Id" - hDatacenter = "X-AppEngine-Datacenter" -) - -func ctxHeaders(ctx context.Context) http.Header { - c := fromContext(ctx) - if c == nil { - return nil - } - return c.Request().Header -} - -func DefaultVersionHostname(ctx context.Context) string { - return ctxHeaders(ctx).Get(hDefaultVersionHostname) -} - -func RequestID(ctx context.Context) string { - return ctxHeaders(ctx).Get(hRequestLogId) -} - -func Datacenter(ctx context.Context) string { - if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" { - return dc - } - // If the header isn't set, read zone from the metadata service. - // It has the format projects/[NUMERIC_PROJECT_ID]/zones/[ZONE] - zone, err := getMetadata("instance/zone") - if err != nil { - log.Printf("Datacenter: %v", err) - return "" - } - parts := strings.Split(string(zone), "/") - if len(parts) == 0 { - return "" - } - return parts[len(parts)-1] -} - -func ServerSoftware() string { - // TODO(dsymonds): Remove fallback when we've verified this. - if s := os.Getenv("SERVER_SOFTWARE"); s != "" { - return s - } - if s := os.Getenv("GAE_ENV"); s != "" { - return s - } - return "Google App Engine/1.x.x" -} - -// TODO(dsymonds): Remove the metadata fetches. - -func ModuleName(_ context.Context) string { - if s := os.Getenv("GAE_MODULE_NAME"); s != "" { - return s - } - if s := os.Getenv("GAE_SERVICE"); s != "" { - return s - } - return string(mustGetMetadata("instance/attributes/gae_backend_name")) -} - -func VersionID(_ context.Context) string { - if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { - return s1 + "." + s2 - } - if s1, s2 := os.Getenv("GAE_VERSION"), os.Getenv("GAE_DEPLOYMENT_ID"); s1 != "" && s2 != "" { - return s1 + "." + s2 - } - return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version")) -} - -func InstanceID() string { - if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" { - return s - } - if s := os.Getenv("GAE_INSTANCE"); s != "" { - return s - } - return string(mustGetMetadata("instance/attributes/gae_backend_instance")) -} - -func partitionlessAppID() string { - // gae_project has everything except the partition prefix. - if appID := os.Getenv("GAE_LONG_APP_ID"); appID != "" { - return appID - } - if project := os.Getenv("GOOGLE_CLOUD_PROJECT"); project != "" { - return project - } - return string(mustGetMetadata("instance/attributes/gae_project")) -} - -func fullyQualifiedAppID(_ context.Context) string { - if s := os.Getenv("GAE_APPLICATION"); s != "" { - return s - } - appID := partitionlessAppID() - - part := os.Getenv("GAE_PARTITION") - if part == "" { - part = string(mustGetMetadata("instance/attributes/gae_partition")) - } - - if part != "" { - appID = part + "~" + appID - } - return appID -} - -func IsDevAppServer() bool { - return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" || os.Getenv("GAE_ENV") == "localdev" -} diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go deleted file mode 100644 index 051ea398..00000000 --- a/vendor/google.golang.org/appengine/internal/internal.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
- -// Package internal provides support for package appengine. -// -// Programs should not use this package directly. Its API is not stable. -// Use packages appengine and appengine/* instead. -package internal - -import ( - "fmt" - - "github.com/golang/protobuf/proto" - - remotepb "google.golang.org/appengine/internal/remote_api" -) - -// errorCodeMaps is a map of service name to the error code map for the service. -var errorCodeMaps = make(map[string]map[int32]string) - -// RegisterErrorCodeMap is called from API implementations to register their -// error code map. This should only be called from init functions. -func RegisterErrorCodeMap(service string, m map[int32]string) { - errorCodeMaps[service] = m -} - -type timeoutCodeKey struct { - service string - code int32 -} - -// timeoutCodes is the set of service+code pairs that represent timeouts. -var timeoutCodes = make(map[timeoutCodeKey]bool) - -func RegisterTimeoutErrorCode(service string, code int32) { - timeoutCodes[timeoutCodeKey{service, code}] = true -} - -// APIError is the type returned by appengine.Context's Call method -// when an API call fails in an API-specific way. This may be, for instance, -// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE. -type APIError struct { - Service string - Detail string - Code int32 // API-specific error code -} - -func (e *APIError) Error() string { - if e.Code == 0 { - if e.Detail == "" { - return "APIError " - } - return e.Detail - } - s := fmt.Sprintf("API error %d", e.Code) - if m, ok := errorCodeMaps[e.Service]; ok { - s += " (" + e.Service + ": " + m[e.Code] + ")" - } else { - // Shouldn't happen, but provide a bit more detail if it does. - s = e.Service + " " + s - } - if e.Detail != "" { - s += ": " + e.Detail - } - return s -} - -func (e *APIError) IsTimeout() bool { - return timeoutCodes[timeoutCodeKey{e.Service, e.Code}] -} - -// CallError is the type returned by appengine.Context's Call method when an -// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED. -type CallError struct { - Detail string - Code int32 - // TODO: Remove this if we get a distinguishable error code. - Timeout bool -} - -func (e *CallError) Error() string { - var msg string - switch remotepb.RpcError_ErrorCode(e.Code) { - case remotepb.RpcError_UNKNOWN: - return e.Detail - case remotepb.RpcError_OVER_QUOTA: - msg = "Over quota" - case remotepb.RpcError_CAPABILITY_DISABLED: - msg = "Capability disabled" - case remotepb.RpcError_CANCELLED: - msg = "Canceled" - default: - msg = fmt.Sprintf("Call error %d", e.Code) - } - s := msg + ": " + e.Detail - if e.Timeout { - s += " (timeout)" - } - return s -} - -func (e *CallError) IsTimeout() bool { - return e.Timeout -} - -// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace. -// The function should be prepared to be called on the same message more than once; it should only modify the -// RPC request the first time. -var NamespaceMods = make(map[string]func(m proto.Message, namespace string)) diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go deleted file mode 100644 index 8545ac4a..00000000 --- a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go +++ /dev/null @@ -1,1313 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: google.golang.org/appengine/internal/log/log_service.proto - -package log - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type LogServiceError_ErrorCode int32 - -const ( - LogServiceError_OK LogServiceError_ErrorCode = 0 - LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1 - LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2 -) - -var LogServiceError_ErrorCode_name = map[int32]string{ - 0: "OK", - 1: "INVALID_REQUEST", - 2: "STORAGE_ERROR", -} -var LogServiceError_ErrorCode_value = map[string]int32{ - "OK": 0, - "INVALID_REQUEST": 1, - "STORAGE_ERROR": 2, -} - -func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode { - p := new(LogServiceError_ErrorCode) - *p = x - return p -} -func (x LogServiceError_ErrorCode) String() string { - return proto.EnumName(LogServiceError_ErrorCode_name, int32(x)) -} -func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode") - if err != nil { - return err - } - *x = LogServiceError_ErrorCode(value) - return nil -} -func (LogServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{0, 0} -} - -type LogServiceError struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogServiceError) Reset() { *m = LogServiceError{} } -func (m *LogServiceError) String() string { return proto.CompactTextString(m) } -func (*LogServiceError) ProtoMessage() {} -func (*LogServiceError) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{0} -} -func (m *LogServiceError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogServiceError.Unmarshal(m, b) -} -func (m *LogServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogServiceError.Marshal(b, m, deterministic) -} -func (dst *LogServiceError) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogServiceError.Merge(dst, src) -} -func (m *LogServiceError) XXX_Size() int { - return xxx_messageInfo_LogServiceError.Size(m) -} -func (m *LogServiceError) XXX_DiscardUnknown() { - xxx_messageInfo_LogServiceError.DiscardUnknown(m) -} - -var xxx_messageInfo_LogServiceError proto.InternalMessageInfo - -type UserAppLogLine struct { - TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec,json=timestampUsec" json:"timestamp_usec,omitempty"` - Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` - Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} } -func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) } -func (*UserAppLogLine) ProtoMessage() {} -func (*UserAppLogLine) Descriptor() ([]byte, []int) { - 
return fileDescriptor_log_service_f054fd4b5012319d, []int{1} -} -func (m *UserAppLogLine) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UserAppLogLine.Unmarshal(m, b) -} -func (m *UserAppLogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UserAppLogLine.Marshal(b, m, deterministic) -} -func (dst *UserAppLogLine) XXX_Merge(src proto.Message) { - xxx_messageInfo_UserAppLogLine.Merge(dst, src) -} -func (m *UserAppLogLine) XXX_Size() int { - return xxx_messageInfo_UserAppLogLine.Size(m) -} -func (m *UserAppLogLine) XXX_DiscardUnknown() { - xxx_messageInfo_UserAppLogLine.DiscardUnknown(m) -} - -var xxx_messageInfo_UserAppLogLine proto.InternalMessageInfo - -func (m *UserAppLogLine) GetTimestampUsec() int64 { - if m != nil && m.TimestampUsec != nil { - return *m.TimestampUsec - } - return 0 -} - -func (m *UserAppLogLine) GetLevel() int64 { - if m != nil && m.Level != nil { - return *m.Level - } - return 0 -} - -func (m *UserAppLogLine) GetMessage() string { - if m != nil && m.Message != nil { - return *m.Message - } - return "" -} - -type UserAppLogGroup struct { - LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line,json=logLine" json:"log_line,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} } -func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) } -func (*UserAppLogGroup) ProtoMessage() {} -func (*UserAppLogGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{2} -} -func (m *UserAppLogGroup) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UserAppLogGroup.Unmarshal(m, b) -} -func (m *UserAppLogGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UserAppLogGroup.Marshal(b, m, deterministic) -} -func (dst *UserAppLogGroup) XXX_Merge(src proto.Message) { - xxx_messageInfo_UserAppLogGroup.Merge(dst, src) -} -func (m *UserAppLogGroup) XXX_Size() int { - return xxx_messageInfo_UserAppLogGroup.Size(m) -} -func (m *UserAppLogGroup) XXX_DiscardUnknown() { - xxx_messageInfo_UserAppLogGroup.DiscardUnknown(m) -} - -var xxx_messageInfo_UserAppLogGroup proto.InternalMessageInfo - -func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine { - if m != nil { - return m.LogLine - } - return nil -} - -type FlushRequest struct { - Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FlushRequest) Reset() { *m = FlushRequest{} } -func (m *FlushRequest) String() string { return proto.CompactTextString(m) } -func (*FlushRequest) ProtoMessage() {} -func (*FlushRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{3} -} -func (m *FlushRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FlushRequest.Unmarshal(m, b) -} -func (m *FlushRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FlushRequest.Marshal(b, m, deterministic) -} -func (dst *FlushRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_FlushRequest.Merge(dst, src) -} -func (m *FlushRequest) XXX_Size() int { - return xxx_messageInfo_FlushRequest.Size(m) -} -func (m *FlushRequest) XXX_DiscardUnknown() { - xxx_messageInfo_FlushRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_FlushRequest 
proto.InternalMessageInfo - -func (m *FlushRequest) GetLogs() []byte { - if m != nil { - return m.Logs - } - return nil -} - -type SetStatusRequest struct { - Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} } -func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) } -func (*SetStatusRequest) ProtoMessage() {} -func (*SetStatusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{4} -} -func (m *SetStatusRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetStatusRequest.Unmarshal(m, b) -} -func (m *SetStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetStatusRequest.Marshal(b, m, deterministic) -} -func (dst *SetStatusRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetStatusRequest.Merge(dst, src) -} -func (m *SetStatusRequest) XXX_Size() int { - return xxx_messageInfo_SetStatusRequest.Size(m) -} -func (m *SetStatusRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetStatusRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SetStatusRequest proto.InternalMessageInfo - -func (m *SetStatusRequest) GetStatus() string { - if m != nil && m.Status != nil { - return *m.Status - } - return "" -} - -type LogOffset struct { - RequestId []byte `protobuf:"bytes,1,opt,name=request_id,json=requestId" json:"request_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogOffset) Reset() { *m = LogOffset{} } -func (m *LogOffset) String() string { return proto.CompactTextString(m) } -func (*LogOffset) ProtoMessage() {} -func (*LogOffset) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{5} -} -func (m *LogOffset) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogOffset.Unmarshal(m, b) -} -func (m *LogOffset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogOffset.Marshal(b, m, deterministic) -} -func (dst *LogOffset) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogOffset.Merge(dst, src) -} -func (m *LogOffset) XXX_Size() int { - return xxx_messageInfo_LogOffset.Size(m) -} -func (m *LogOffset) XXX_DiscardUnknown() { - xxx_messageInfo_LogOffset.DiscardUnknown(m) -} - -var xxx_messageInfo_LogOffset proto.InternalMessageInfo - -func (m *LogOffset) GetRequestId() []byte { - if m != nil { - return m.RequestId - } - return nil -} - -type LogLine struct { - Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"` - Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` - LogMessage *string `protobuf:"bytes,3,req,name=log_message,json=logMessage" json:"log_message,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogLine) Reset() { *m = LogLine{} } -func (m *LogLine) String() string { return proto.CompactTextString(m) } -func (*LogLine) ProtoMessage() {} -func (*LogLine) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{6} -} -func (m *LogLine) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogLine.Unmarshal(m, b) -} -func (m *LogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_LogLine.Marshal(b, m, deterministic) -} -func (dst *LogLine) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogLine.Merge(dst, src) -} -func (m *LogLine) XXX_Size() int { - return xxx_messageInfo_LogLine.Size(m) -} -func (m *LogLine) XXX_DiscardUnknown() { - xxx_messageInfo_LogLine.DiscardUnknown(m) -} - -var xxx_messageInfo_LogLine proto.InternalMessageInfo - -func (m *LogLine) GetTime() int64 { - if m != nil && m.Time != nil { - return *m.Time - } - return 0 -} - -func (m *LogLine) GetLevel() int32 { - if m != nil && m.Level != nil { - return *m.Level - } - return 0 -} - -func (m *LogLine) GetLogMessage() string { - if m != nil && m.LogMessage != nil { - return *m.LogMessage - } - return "" -} - -type RequestLog struct { - AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"` - ModuleId *string `protobuf:"bytes,37,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"` - VersionId *string `protobuf:"bytes,2,req,name=version_id,json=versionId" json:"version_id,omitempty"` - RequestId []byte `protobuf:"bytes,3,req,name=request_id,json=requestId" json:"request_id,omitempty"` - Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"` - Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"` - Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"` - StartTime *int64 `protobuf:"varint,6,req,name=start_time,json=startTime" json:"start_time,omitempty"` - EndTime *int64 `protobuf:"varint,7,req,name=end_time,json=endTime" json:"end_time,omitempty"` - Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"` - Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"` - Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"` - Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"` - HttpVersion *string `protobuf:"bytes,12,req,name=http_version,json=httpVersion" json:"http_version,omitempty"` - Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"` - ResponseSize *int64 `protobuf:"varint,14,req,name=response_size,json=responseSize" json:"response_size,omitempty"` - Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"` - UserAgent *string `protobuf:"bytes,16,opt,name=user_agent,json=userAgent" json:"user_agent,omitempty"` - UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry,json=urlMapEntry" json:"url_map_entry,omitempty"` - Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"` - ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles,json=apiMcycles" json:"api_mcycles,omitempty"` - Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"` - Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"` - TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name,json=taskQueueName" json:"task_queue_name,omitempty"` - TaskName *string `protobuf:"bytes,23,opt,name=task_name,json=taskName" json:"task_name,omitempty"` - WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request,json=wasLoadingRequest" json:"was_loading_request,omitempty"` - PendingTime *int64 `protobuf:"varint,25,opt,name=pending_time,json=pendingTime" json:"pending_time,omitempty"` - ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,json=replicaIndex,def=-1" json:"replica_index,omitempty"` - Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" 
json:"finished,omitempty"` - CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key,json=cloneKey" json:"clone_key,omitempty"` - Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"` - LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete,json=linesIncomplete" json:"lines_incomplete,omitempty"` - AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release,json=appEngineRelease" json:"app_engine_release,omitempty"` - ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason,json=exitReason" json:"exit_reason,omitempty"` - WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time,json=wasThrottledForTime" json:"was_throttled_for_time,omitempty"` - WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests,json=wasThrottledForRequests" json:"was_throttled_for_requests,omitempty"` - ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time,json=throttledTime" json:"throttled_time,omitempty"` - ServerName []byte `protobuf:"bytes,34,opt,name=server_name,json=serverName" json:"server_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RequestLog) Reset() { *m = RequestLog{} } -func (m *RequestLog) String() string { return proto.CompactTextString(m) } -func (*RequestLog) ProtoMessage() {} -func (*RequestLog) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{7} -} -func (m *RequestLog) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RequestLog.Unmarshal(m, b) -} -func (m *RequestLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RequestLog.Marshal(b, m, deterministic) -} -func (dst *RequestLog) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestLog.Merge(dst, src) -} -func (m *RequestLog) XXX_Size() int { - return xxx_messageInfo_RequestLog.Size(m) -} -func (m *RequestLog) XXX_DiscardUnknown() { - xxx_messageInfo_RequestLog.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestLog proto.InternalMessageInfo - -const Default_RequestLog_ModuleId string = "default" -const Default_RequestLog_ReplicaIndex int32 = -1 -const Default_RequestLog_Finished bool = true - -func (m *RequestLog) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *RequestLog) GetModuleId() string { - if m != nil && m.ModuleId != nil { - return *m.ModuleId - } - return Default_RequestLog_ModuleId -} - -func (m *RequestLog) GetVersionId() string { - if m != nil && m.VersionId != nil { - return *m.VersionId - } - return "" -} - -func (m *RequestLog) GetRequestId() []byte { - if m != nil { - return m.RequestId - } - return nil -} - -func (m *RequestLog) GetOffset() *LogOffset { - if m != nil { - return m.Offset - } - return nil -} - -func (m *RequestLog) GetIp() string { - if m != nil && m.Ip != nil { - return *m.Ip - } - return "" -} - -func (m *RequestLog) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *RequestLog) GetStartTime() int64 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *RequestLog) GetEndTime() int64 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *RequestLog) GetLatency() int64 { - if m != nil && m.Latency != nil { - return *m.Latency - } - return 0 -} - -func (m *RequestLog) GetMcycles() int64 { - if m != nil && m.Mcycles != nil { - return 
*m.Mcycles - } - return 0 -} - -func (m *RequestLog) GetMethod() string { - if m != nil && m.Method != nil { - return *m.Method - } - return "" -} - -func (m *RequestLog) GetResource() string { - if m != nil && m.Resource != nil { - return *m.Resource - } - return "" -} - -func (m *RequestLog) GetHttpVersion() string { - if m != nil && m.HttpVersion != nil { - return *m.HttpVersion - } - return "" -} - -func (m *RequestLog) GetStatus() int32 { - if m != nil && m.Status != nil { - return *m.Status - } - return 0 -} - -func (m *RequestLog) GetResponseSize() int64 { - if m != nil && m.ResponseSize != nil { - return *m.ResponseSize - } - return 0 -} - -func (m *RequestLog) GetReferrer() string { - if m != nil && m.Referrer != nil { - return *m.Referrer - } - return "" -} - -func (m *RequestLog) GetUserAgent() string { - if m != nil && m.UserAgent != nil { - return *m.UserAgent - } - return "" -} - -func (m *RequestLog) GetUrlMapEntry() string { - if m != nil && m.UrlMapEntry != nil { - return *m.UrlMapEntry - } - return "" -} - -func (m *RequestLog) GetCombined() string { - if m != nil && m.Combined != nil { - return *m.Combined - } - return "" -} - -func (m *RequestLog) GetApiMcycles() int64 { - if m != nil && m.ApiMcycles != nil { - return *m.ApiMcycles - } - return 0 -} - -func (m *RequestLog) GetHost() string { - if m != nil && m.Host != nil { - return *m.Host - } - return "" -} - -func (m *RequestLog) GetCost() float64 { - if m != nil && m.Cost != nil { - return *m.Cost - } - return 0 -} - -func (m *RequestLog) GetTaskQueueName() string { - if m != nil && m.TaskQueueName != nil { - return *m.TaskQueueName - } - return "" -} - -func (m *RequestLog) GetTaskName() string { - if m != nil && m.TaskName != nil { - return *m.TaskName - } - return "" -} - -func (m *RequestLog) GetWasLoadingRequest() bool { - if m != nil && m.WasLoadingRequest != nil { - return *m.WasLoadingRequest - } - return false -} - -func (m *RequestLog) GetPendingTime() int64 { - if m != nil && m.PendingTime != nil { - return *m.PendingTime - } - return 0 -} - -func (m *RequestLog) GetReplicaIndex() int32 { - if m != nil && m.ReplicaIndex != nil { - return *m.ReplicaIndex - } - return Default_RequestLog_ReplicaIndex -} - -func (m *RequestLog) GetFinished() bool { - if m != nil && m.Finished != nil { - return *m.Finished - } - return Default_RequestLog_Finished -} - -func (m *RequestLog) GetCloneKey() []byte { - if m != nil { - return m.CloneKey - } - return nil -} - -func (m *RequestLog) GetLine() []*LogLine { - if m != nil { - return m.Line - } - return nil -} - -func (m *RequestLog) GetLinesIncomplete() bool { - if m != nil && m.LinesIncomplete != nil { - return *m.LinesIncomplete - } - return false -} - -func (m *RequestLog) GetAppEngineRelease() []byte { - if m != nil { - return m.AppEngineRelease - } - return nil -} - -func (m *RequestLog) GetExitReason() int32 { - if m != nil && m.ExitReason != nil { - return *m.ExitReason - } - return 0 -} - -func (m *RequestLog) GetWasThrottledForTime() bool { - if m != nil && m.WasThrottledForTime != nil { - return *m.WasThrottledForTime - } - return false -} - -func (m *RequestLog) GetWasThrottledForRequests() bool { - if m != nil && m.WasThrottledForRequests != nil { - return *m.WasThrottledForRequests - } - return false -} - -func (m *RequestLog) GetThrottledTime() int64 { - if m != nil && m.ThrottledTime != nil { - return *m.ThrottledTime - } - return 0 -} - -func (m *RequestLog) GetServerName() []byte { - if m != nil { - return m.ServerName - } - return nil -} - -type 
LogModuleVersion struct { - ModuleId *string `protobuf:"bytes,1,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"` - VersionId *string `protobuf:"bytes,2,opt,name=version_id,json=versionId" json:"version_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} } -func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) } -func (*LogModuleVersion) ProtoMessage() {} -func (*LogModuleVersion) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{8} -} -func (m *LogModuleVersion) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogModuleVersion.Unmarshal(m, b) -} -func (m *LogModuleVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogModuleVersion.Marshal(b, m, deterministic) -} -func (dst *LogModuleVersion) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogModuleVersion.Merge(dst, src) -} -func (m *LogModuleVersion) XXX_Size() int { - return xxx_messageInfo_LogModuleVersion.Size(m) -} -func (m *LogModuleVersion) XXX_DiscardUnknown() { - xxx_messageInfo_LogModuleVersion.DiscardUnknown(m) -} - -var xxx_messageInfo_LogModuleVersion proto.InternalMessageInfo - -const Default_LogModuleVersion_ModuleId string = "default" - -func (m *LogModuleVersion) GetModuleId() string { - if m != nil && m.ModuleId != nil { - return *m.ModuleId - } - return Default_LogModuleVersion_ModuleId -} - -func (m *LogModuleVersion) GetVersionId() string { - if m != nil && m.VersionId != nil { - return *m.VersionId - } - return "" -} - -type LogReadRequest struct { - AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"` - VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"` - ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version,json=moduleVersion" json:"module_version,omitempty"` - StartTime *int64 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"` - EndTime *int64 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"` - Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"` - RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id,json=requestId" json:"request_id,omitempty"` - MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level,json=minimumLogLevel" json:"minimum_log_level,omitempty"` - IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete,json=includeIncomplete" json:"include_incomplete,omitempty"` - Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"` - CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex,json=combinedLogRegex" json:"combined_log_regex,omitempty"` - HostRegex *string `protobuf:"bytes,15,opt,name=host_regex,json=hostRegex" json:"host_regex,omitempty"` - ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index,json=replicaIndex" json:"replica_index,omitempty"` - IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs,json=includeAppLogs" json:"include_app_logs,omitempty"` - AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request,json=appLogsPerRequest" json:"app_logs_per_request,omitempty"` - IncludeHost *bool `protobuf:"varint,11,opt,name=include_host,json=includeHost" json:"include_host,omitempty"` - IncludeAll *bool 
`protobuf:"varint,12,opt,name=include_all,json=includeAll" json:"include_all,omitempty"` - CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator,json=cacheIterator" json:"cache_iterator,omitempty"` - NumShards *int32 `protobuf:"varint,18,opt,name=num_shards,json=numShards" json:"num_shards,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogReadRequest) Reset() { *m = LogReadRequest{} } -func (m *LogReadRequest) String() string { return proto.CompactTextString(m) } -func (*LogReadRequest) ProtoMessage() {} -func (*LogReadRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{9} -} -func (m *LogReadRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogReadRequest.Unmarshal(m, b) -} -func (m *LogReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogReadRequest.Marshal(b, m, deterministic) -} -func (dst *LogReadRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogReadRequest.Merge(dst, src) -} -func (m *LogReadRequest) XXX_Size() int { - return xxx_messageInfo_LogReadRequest.Size(m) -} -func (m *LogReadRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LogReadRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LogReadRequest proto.InternalMessageInfo - -func (m *LogReadRequest) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *LogReadRequest) GetVersionId() []string { - if m != nil { - return m.VersionId - } - return nil -} - -func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion { - if m != nil { - return m.ModuleVersion - } - return nil -} - -func (m *LogReadRequest) GetStartTime() int64 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *LogReadRequest) GetEndTime() int64 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *LogReadRequest) GetOffset() *LogOffset { - if m != nil { - return m.Offset - } - return nil -} - -func (m *LogReadRequest) GetRequestId() [][]byte { - if m != nil { - return m.RequestId - } - return nil -} - -func (m *LogReadRequest) GetMinimumLogLevel() int32 { - if m != nil && m.MinimumLogLevel != nil { - return *m.MinimumLogLevel - } - return 0 -} - -func (m *LogReadRequest) GetIncludeIncomplete() bool { - if m != nil && m.IncludeIncomplete != nil { - return *m.IncludeIncomplete - } - return false -} - -func (m *LogReadRequest) GetCount() int64 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *LogReadRequest) GetCombinedLogRegex() string { - if m != nil && m.CombinedLogRegex != nil { - return *m.CombinedLogRegex - } - return "" -} - -func (m *LogReadRequest) GetHostRegex() string { - if m != nil && m.HostRegex != nil { - return *m.HostRegex - } - return "" -} - -func (m *LogReadRequest) GetReplicaIndex() int32 { - if m != nil && m.ReplicaIndex != nil { - return *m.ReplicaIndex - } - return 0 -} - -func (m *LogReadRequest) GetIncludeAppLogs() bool { - if m != nil && m.IncludeAppLogs != nil { - return *m.IncludeAppLogs - } - return false -} - -func (m *LogReadRequest) GetAppLogsPerRequest() int32 { - if m != nil && m.AppLogsPerRequest != nil { - return *m.AppLogsPerRequest - } - return 0 -} - -func (m *LogReadRequest) GetIncludeHost() bool { - if m != nil && m.IncludeHost != nil { - return *m.IncludeHost - } - return false -} - -func (m *LogReadRequest) GetIncludeAll() bool { - if m != 
nil && m.IncludeAll != nil { - return *m.IncludeAll - } - return false -} - -func (m *LogReadRequest) GetCacheIterator() bool { - if m != nil && m.CacheIterator != nil { - return *m.CacheIterator - } - return false -} - -func (m *LogReadRequest) GetNumShards() int32 { - if m != nil && m.NumShards != nil { - return *m.NumShards - } - return 0 -} - -type LogReadResponse struct { - Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"` - Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"` - LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time,json=lastEndTime" json:"last_end_time,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogReadResponse) Reset() { *m = LogReadResponse{} } -func (m *LogReadResponse) String() string { return proto.CompactTextString(m) } -func (*LogReadResponse) ProtoMessage() {} -func (*LogReadResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{10} -} -func (m *LogReadResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogReadResponse.Unmarshal(m, b) -} -func (m *LogReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogReadResponse.Marshal(b, m, deterministic) -} -func (dst *LogReadResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogReadResponse.Merge(dst, src) -} -func (m *LogReadResponse) XXX_Size() int { - return xxx_messageInfo_LogReadResponse.Size(m) -} -func (m *LogReadResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LogReadResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LogReadResponse proto.InternalMessageInfo - -func (m *LogReadResponse) GetLog() []*RequestLog { - if m != nil { - return m.Log - } - return nil -} - -func (m *LogReadResponse) GetOffset() *LogOffset { - if m != nil { - return m.Offset - } - return nil -} - -func (m *LogReadResponse) GetLastEndTime() int64 { - if m != nil && m.LastEndTime != nil { - return *m.LastEndTime - } - return 0 -} - -type LogUsageRecord struct { - VersionId *string `protobuf:"bytes,1,opt,name=version_id,json=versionId" json:"version_id,omitempty"` - StartTime *int32 `protobuf:"varint,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"` - EndTime *int32 `protobuf:"varint,3,opt,name=end_time,json=endTime" json:"end_time,omitempty"` - Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` - TotalSize *int64 `protobuf:"varint,5,opt,name=total_size,json=totalSize" json:"total_size,omitempty"` - Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} } -func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) } -func (*LogUsageRecord) ProtoMessage() {} -func (*LogUsageRecord) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{11} -} -func (m *LogUsageRecord) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogUsageRecord.Unmarshal(m, b) -} -func (m *LogUsageRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogUsageRecord.Marshal(b, m, deterministic) -} -func (dst *LogUsageRecord) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogUsageRecord.Merge(dst, src) -} -func (m *LogUsageRecord) XXX_Size() int { - return 
xxx_messageInfo_LogUsageRecord.Size(m) -} -func (m *LogUsageRecord) XXX_DiscardUnknown() { - xxx_messageInfo_LogUsageRecord.DiscardUnknown(m) -} - -var xxx_messageInfo_LogUsageRecord proto.InternalMessageInfo - -func (m *LogUsageRecord) GetVersionId() string { - if m != nil && m.VersionId != nil { - return *m.VersionId - } - return "" -} - -func (m *LogUsageRecord) GetStartTime() int32 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *LogUsageRecord) GetEndTime() int32 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *LogUsageRecord) GetCount() int64 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *LogUsageRecord) GetTotalSize() int64 { - if m != nil && m.TotalSize != nil { - return *m.TotalSize - } - return 0 -} - -func (m *LogUsageRecord) GetRecords() int32 { - if m != nil && m.Records != nil { - return *m.Records - } - return 0 -} - -type LogUsageRequest struct { - AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"` - VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"` - StartTime *int32 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"` - EndTime *int32 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"` - ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,json=resolutionHours,def=1" json:"resolution_hours,omitempty"` - CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions,json=combineVersions" json:"combine_versions,omitempty"` - UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version,json=usageVersion" json:"usage_version,omitempty"` - VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only,json=versionsOnly" json:"versions_only,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} } -func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) } -func (*LogUsageRequest) ProtoMessage() {} -func (*LogUsageRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{12} -} -func (m *LogUsageRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogUsageRequest.Unmarshal(m, b) -} -func (m *LogUsageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogUsageRequest.Marshal(b, m, deterministic) -} -func (dst *LogUsageRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogUsageRequest.Merge(dst, src) -} -func (m *LogUsageRequest) XXX_Size() int { - return xxx_messageInfo_LogUsageRequest.Size(m) -} -func (m *LogUsageRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LogUsageRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LogUsageRequest proto.InternalMessageInfo - -const Default_LogUsageRequest_ResolutionHours uint32 = 1 - -func (m *LogUsageRequest) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *LogUsageRequest) GetVersionId() []string { - if m != nil { - return m.VersionId - } - return nil -} - -func (m *LogUsageRequest) GetStartTime() int32 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *LogUsageRequest) GetEndTime() int32 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *LogUsageRequest) 
GetResolutionHours() uint32 { - if m != nil && m.ResolutionHours != nil { - return *m.ResolutionHours - } - return Default_LogUsageRequest_ResolutionHours -} - -func (m *LogUsageRequest) GetCombineVersions() bool { - if m != nil && m.CombineVersions != nil { - return *m.CombineVersions - } - return false -} - -func (m *LogUsageRequest) GetUsageVersion() int32 { - if m != nil && m.UsageVersion != nil { - return *m.UsageVersion - } - return 0 -} - -func (m *LogUsageRequest) GetVersionsOnly() bool { - if m != nil && m.VersionsOnly != nil { - return *m.VersionsOnly - } - return false -} - -type LogUsageResponse struct { - Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"` - Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} } -func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) } -func (*LogUsageResponse) ProtoMessage() {} -func (*LogUsageResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{13} -} -func (m *LogUsageResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogUsageResponse.Unmarshal(m, b) -} -func (m *LogUsageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogUsageResponse.Marshal(b, m, deterministic) -} -func (dst *LogUsageResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogUsageResponse.Merge(dst, src) -} -func (m *LogUsageResponse) XXX_Size() int { - return xxx_messageInfo_LogUsageResponse.Size(m) -} -func (m *LogUsageResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LogUsageResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LogUsageResponse proto.InternalMessageInfo - -func (m *LogUsageResponse) GetUsage() []*LogUsageRecord { - if m != nil { - return m.Usage - } - return nil -} - -func (m *LogUsageResponse) GetSummary() *LogUsageRecord { - if m != nil { - return m.Summary - } - return nil -} - -func init() { - proto.RegisterType((*LogServiceError)(nil), "appengine.LogServiceError") - proto.RegisterType((*UserAppLogLine)(nil), "appengine.UserAppLogLine") - proto.RegisterType((*UserAppLogGroup)(nil), "appengine.UserAppLogGroup") - proto.RegisterType((*FlushRequest)(nil), "appengine.FlushRequest") - proto.RegisterType((*SetStatusRequest)(nil), "appengine.SetStatusRequest") - proto.RegisterType((*LogOffset)(nil), "appengine.LogOffset") - proto.RegisterType((*LogLine)(nil), "appengine.LogLine") - proto.RegisterType((*RequestLog)(nil), "appengine.RequestLog") - proto.RegisterType((*LogModuleVersion)(nil), "appengine.LogModuleVersion") - proto.RegisterType((*LogReadRequest)(nil), "appengine.LogReadRequest") - proto.RegisterType((*LogReadResponse)(nil), "appengine.LogReadResponse") - proto.RegisterType((*LogUsageRecord)(nil), "appengine.LogUsageRecord") - proto.RegisterType((*LogUsageRequest)(nil), "appengine.LogUsageRequest") - proto.RegisterType((*LogUsageResponse)(nil), "appengine.LogUsageResponse") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/log/log_service.proto", fileDescriptor_log_service_f054fd4b5012319d) -} - -var fileDescriptor_log_service_f054fd4b5012319d = []byte{ - // 1553 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x72, 0xdb, 0xc6, - 0x15, 0x2e, 0x48, 0x51, 0x24, 0x0f, 0x49, 
0x91, 0x5a, 0xcb, 0xce, 0xda, 0xae, 0x6b, 0x1a, 0x4e, - 0x1c, 0xd6, 0x93, 0x48, 0x93, 0xa4, 0x57, 0xca, 0x95, 0xd3, 0x2a, 0x8e, 0x26, 0xb4, 0xd5, 0x40, - 0x72, 0x3a, 0xd3, 0x1b, 0x0c, 0x0a, 0x1c, 0x81, 0x18, 0x2f, 0xb1, 0xc8, 0xee, 0xc2, 0x91, 0x72, - 0xdb, 0xdb, 0x3e, 0x46, 0x1f, 0xa2, 0xaf, 0xd2, 0xb7, 0xe9, 0xec, 0xd9, 0x05, 0x44, 0x2a, 0x4d, - 0xc6, 0x33, 0xb9, 0xe0, 0x10, 0xfb, 0x9d, 0x83, 0xdd, 0xf3, 0xf3, 0x9d, 0x6f, 0x01, 0xc7, 0xb9, - 0x94, 0xb9, 0xc0, 0xc3, 0x5c, 0x8a, 0xa4, 0xcc, 0x0f, 0xa5, 0xca, 0x8f, 0x92, 0xaa, 0xc2, 0x32, - 0x2f, 0x4a, 0x3c, 0x2a, 0x4a, 0x83, 0xaa, 0x4c, 0xc4, 0x91, 0x90, 0xb9, 0xfd, 0xc5, 0x1a, 0xd5, - 0xbb, 0x22, 0xc5, 0xc3, 0x4a, 0x49, 0x23, 0xd9, 0xb0, 0xf5, 0x0c, 0x5f, 0xc3, 0x74, 0x29, 0xf3, - 0x73, 0x67, 0x3e, 0x51, 0x4a, 0xaa, 0xf0, 0x4b, 0x18, 0xd2, 0xc3, 0x9f, 0x65, 0x86, 0x6c, 0x17, - 0x3a, 0x67, 0xdf, 0xce, 0x7e, 0xc7, 0xee, 0xc0, 0xf4, 0xf4, 0xf5, 0xf7, 0x2f, 0x96, 0xa7, 0x7f, - 0x89, 0xa3, 0x93, 0xef, 0xde, 0x9c, 0x9c, 0x5f, 0xcc, 0x02, 0xb6, 0x0f, 0x93, 0xf3, 0x8b, 0xb3, - 0xe8, 0xc5, 0xcb, 0x93, 0xf8, 0x24, 0x8a, 0xce, 0xa2, 0x59, 0x27, 0xcc, 0x61, 0xef, 0x8d, 0x46, - 0xf5, 0xa2, 0xaa, 0x96, 0x32, 0x5f, 0x16, 0x25, 0xb2, 0x8f, 0x60, 0xcf, 0x14, 0x6b, 0xd4, 0x26, - 0x59, 0x57, 0x71, 0xad, 0x31, 0xe5, 0xc1, 0xbc, 0xb3, 0xe8, 0x46, 0x93, 0x16, 0x7d, 0xa3, 0x31, - 0x65, 0x07, 0xd0, 0x13, 0xf8, 0x0e, 0x05, 0xef, 0x90, 0xd5, 0x2d, 0x18, 0x87, 0xfe, 0x1a, 0xb5, - 0x4e, 0x72, 0xe4, 0xdd, 0x79, 0x67, 0x31, 0x8c, 0x9a, 0x65, 0xf8, 0x12, 0xa6, 0x37, 0x07, 0xbd, - 0x54, 0xb2, 0xae, 0xd8, 0x9f, 0x60, 0x60, 0x73, 0x15, 0x45, 0x89, 0xbc, 0x33, 0xef, 0x2e, 0x46, - 0x9f, 0xdf, 0x3f, 0x6c, 0x33, 0x3d, 0xdc, 0x0e, 0x2b, 0xea, 0x0b, 0xf7, 0x10, 0x86, 0x30, 0xfe, - 0x5a, 0xd4, 0x7a, 0x15, 0xe1, 0x0f, 0x35, 0x6a, 0xc3, 0x18, 0xec, 0x08, 0x99, 0x6b, 0x1e, 0xcc, - 0x83, 0xc5, 0x38, 0xa2, 0xe7, 0xf0, 0x39, 0xcc, 0xce, 0xd1, 0x9c, 0x9b, 0xc4, 0xd4, 0xba, 0xf1, - 0xbb, 0x07, 0xbb, 0x9a, 0x00, 0xca, 0x67, 0x18, 0xf9, 0x55, 0xf8, 0x1c, 0x86, 0x4b, 0x99, 0x9f, - 0x5d, 0x5e, 0x6a, 0x34, 0xec, 0x11, 0x80, 0x72, 0xfe, 0x71, 0x91, 0xf9, 0x2d, 0x87, 0x1e, 0x39, - 0xcd, 0xc2, 0x0b, 0xe8, 0x37, 0x65, 0x62, 0xb0, 0x63, 0x0b, 0xe2, 0x8b, 0x43, 0xcf, 0xdb, 0x35, - 0xe9, 0x35, 0x35, 0x79, 0x0c, 0x23, 0x9b, 0xe6, 0x76, 0x5d, 0x40, 0xc8, 0xfc, 0x95, 0x2f, 0xcd, - 0x3f, 0x01, 0xc0, 0x47, 0xb9, 0x94, 0x39, 0xbb, 0x0b, 0xbb, 0x49, 0x55, 0xb9, 0xf3, 0xad, 0x6b, - 0x2f, 0xa9, 0xaa, 0xd3, 0x8c, 0x7d, 0x08, 0xc3, 0xb5, 0xcc, 0x6a, 0x81, 0xd6, 0xf2, 0xd1, 0x3c, - 0x58, 0x0c, 0x8f, 0xfb, 0x19, 0x5e, 0x26, 0xb5, 0x30, 0xd1, 0xc0, 0x59, 0x4e, 0x33, 0x9b, 0xc0, - 0x3b, 0x54, 0xba, 0x90, 0xa5, 0x75, 0xeb, 0xd0, 0x06, 0x43, 0x8f, 0x38, 0xf3, 0x46, 0x7e, 0x36, - 0x94, 0xcd, 0xfc, 0xd8, 0x27, 0xb0, 0x2b, 0xa9, 0x10, 0xfc, 0xe9, 0x3c, 0x58, 0x8c, 0x3e, 0x3f, - 0xd8, 0xe8, 0x47, 0x5b, 0xa4, 0xc8, 0xfb, 0xb0, 0x3d, 0xe8, 0x14, 0x15, 0xdf, 0xa1, 0x33, 0x3a, - 0x45, 0xc5, 0x1e, 0xc0, 0xa0, 0x2c, 0xd2, 0xb7, 0x65, 0xb2, 0x46, 0xde, 0xb3, 0x01, 0x46, 0xed, - 0xda, 0x1e, 0xac, 0x4d, 0xa2, 0x4c, 0x4c, 0x45, 0xdb, 0xa5, 0xa2, 0x0d, 0x09, 0xb9, 0xb0, 0x95, - 0xbb, 0x0f, 0x03, 0x2c, 0x33, 0x67, 0xec, 0x93, 0xb1, 0x8f, 0x65, 0x46, 0x26, 0x0e, 0x7d, 0x91, - 0x18, 0x2c, 0xd3, 0x6b, 0x3e, 0x70, 0x16, 0xbf, 0x24, 0xb2, 0xa5, 0xd7, 0xa9, 0x40, 0xcd, 0x87, - 0xce, 0xe2, 0x97, 0xb6, 0xd7, 0x6b, 0x34, 0x2b, 0x99, 0x71, 0x70, 0xbd, 0x76, 0x2b, 0x1b, 0xa1, - 0x42, 0x2d, 0x6b, 0x95, 0x22, 0x1f, 0x91, 0xa5, 0x5d, 0xb3, 0x27, 0x30, 0x5e, 0x19, 0x53, 0xc5, - 0xbe, 0x58, 0x7c, 0x4c, 0xf6, 0x91, 0xc5, 0xbe, 0x77, 0xd0, 0x06, 
0x85, 0x26, 0xd4, 0x60, 0xbf, - 0x62, 0x4f, 0x61, 0xa2, 0x50, 0x57, 0xb2, 0xd4, 0x18, 0xeb, 0xe2, 0x27, 0xe4, 0x7b, 0x14, 0xce, - 0xb8, 0x01, 0xcf, 0x8b, 0x9f, 0xd0, 0x9d, 0x7d, 0x89, 0x4a, 0xa1, 0xe2, 0x53, 0x57, 0x9d, 0x66, - 0x6d, 0xab, 0x53, 0x6b, 0x54, 0x71, 0x92, 0x63, 0x69, 0xf8, 0x8c, 0xac, 0x43, 0x8b, 0xbc, 0xb0, - 0x00, 0x0b, 0x61, 0x52, 0x2b, 0x11, 0xaf, 0x93, 0x2a, 0xc6, 0xd2, 0xa8, 0x6b, 0xbe, 0xef, 0x62, - 0xab, 0x95, 0x78, 0x95, 0x54, 0x27, 0x16, 0xb2, 0xdb, 0xa7, 0x72, 0xfd, 0x8f, 0xa2, 0xc4, 0x8c, - 0x33, 0x97, 0x5a, 0xb3, 0xb6, 0x0c, 0x4c, 0xaa, 0x22, 0x6e, 0x8a, 0x75, 0x67, 0x1e, 0x2c, 0xba, - 0x11, 0x24, 0x55, 0xf1, 0xca, 0xd7, 0x8b, 0xc1, 0xce, 0x4a, 0x6a, 0xc3, 0x0f, 0xe8, 0x64, 0x7a, - 0xb6, 0x58, 0x6a, 0xb1, 0xbb, 0xf3, 0x60, 0x11, 0x44, 0xf4, 0xcc, 0x9e, 0xc1, 0xd4, 0x24, 0xfa, - 0x6d, 0xfc, 0x43, 0x8d, 0x35, 0xc6, 0xd4, 0xe8, 0x7b, 0xf4, 0xca, 0xc4, 0xc2, 0xdf, 0x59, 0xf4, - 0xb5, 0xed, 0xf6, 0x43, 0x18, 0x92, 0x1f, 0x79, 0x7c, 0xe0, 0x92, 0xb5, 0x00, 0x19, 0x0f, 0xe1, - 0xce, 0x8f, 0x89, 0x8e, 0x85, 0x4c, 0xb2, 0xa2, 0xcc, 0x63, 0xcf, 0x3e, 0xce, 0xe7, 0xc1, 0x62, - 0x10, 0xed, 0xff, 0x98, 0xe8, 0xa5, 0xb3, 0x34, 0x83, 0xfb, 0x04, 0xc6, 0x15, 0x96, 0xe4, 0x4b, - 0xfc, 0xb8, 0x4f, 0xe1, 0x8f, 0x3c, 0x46, 0x1c, 0xf9, 0xd8, 0x36, 0xa0, 0x12, 0x45, 0x9a, 0xc4, - 0x45, 0x99, 0xe1, 0x15, 0x7f, 0x30, 0x0f, 0x16, 0xbd, 0xe3, 0xce, 0xa7, 0x9f, 0xd9, 0x26, 0x90, - 0xe1, 0xd4, 0xe2, 0x6c, 0x0e, 0x83, 0xcb, 0xa2, 0x2c, 0xf4, 0x0a, 0x33, 0xfe, 0xd0, 0x1e, 0x78, - 0xbc, 0x63, 0x54, 0x8d, 0x51, 0x8b, 0xda, 0xd0, 0x53, 0x21, 0x4b, 0x8c, 0xdf, 0xe2, 0x35, 0xff, - 0x3d, 0x09, 0xc0, 0x80, 0x80, 0x6f, 0xf1, 0x9a, 0x3d, 0x83, 0x1d, 0x52, 0xab, 0x47, 0xa4, 0x56, - 0x6c, 0x7b, 0x3a, 0x48, 0xa6, 0xc8, 0xce, 0xfe, 0x08, 0x33, 0xfb, 0xaf, 0xe3, 0xa2, 0x4c, 0xe5, - 0xba, 0x12, 0x68, 0x90, 0x7f, 0x48, 0xf9, 0x4d, 0x09, 0x3f, 0x6d, 0x61, 0xf6, 0x09, 0x30, 0x3b, - 0xed, 0x6e, 0x9b, 0x58, 0xa1, 0xc0, 0x44, 0x23, 0x7f, 0x46, 0x07, 0xcf, 0x92, 0xaa, 0x3a, 0x21, - 0x43, 0xe4, 0x70, 0xdb, 0x49, 0xbc, 0x2a, 0x4c, 0xac, 0x30, 0xd1, 0xb2, 0xe4, 0x7f, 0xb0, 0x69, - 0x46, 0x60, 0xa1, 0x88, 0x10, 0xf6, 0x05, 0xdc, 0xb3, 0xc5, 0x35, 0x2b, 0x25, 0x8d, 0x11, 0x98, - 0xc5, 0x97, 0x52, 0xb9, 0xb2, 0x3d, 0xa6, 0xf3, 0x6d, 0xe9, 0x2f, 0x1a, 0xe3, 0xd7, 0x52, 0x51, - 0xf9, 0xbe, 0x84, 0x07, 0x3f, 0x7f, 0xc9, 0xf7, 0x45, 0xf3, 0x39, 0xbd, 0xf8, 0xc1, 0xad, 0x17, - 0x7d, 0x77, 0x34, 0xdd, 0x17, 0xed, 0x8b, 0x74, 0xd2, 0x13, 0x6a, 0xd0, 0xa4, 0x45, 0xe9, 0x8c, - 0xc7, 0x30, 0xb2, 0x97, 0x1a, 0x2a, 0x47, 0x8a, 0x90, 0x12, 0x04, 0x07, 0x59, 0x5a, 0x84, 0x7f, - 0x83, 0xd9, 0x52, 0xe6, 0xaf, 0x48, 0xc8, 0x9a, 0x81, 0xdb, 0xd2, 0xbc, 0xe0, 0x7d, 0x35, 0x2f, - 0xd8, 0xd2, 0xbc, 0xf0, 0xbf, 0x3d, 0xd8, 0x5b, 0xca, 0x3c, 0xc2, 0x24, 0x6b, 0x28, 0xf5, 0x0b, - 0x12, 0x7b, 0x7b, 0xa3, 0xee, 0xb6, 0x78, 0x7e, 0x05, 0x7b, 0x3e, 0x9a, 0x46, 0x23, 0xee, 0x10, - 0x0f, 0x1e, 0x6e, 0xf3, 0x60, 0x2b, 0x85, 0x68, 0xb2, 0xde, 0xca, 0x68, 0x5b, 0x07, 0xbb, 0x54, - 0xa9, 0x5f, 0xd0, 0xc1, 0x1d, 0x32, 0xb6, 0x3a, 0x78, 0xa3, 0xcd, 0xbd, 0xf7, 0xd0, 0xe6, 0x6d, - 0xa1, 0xdf, 0x9d, 0x77, 0xb7, 0x85, 0xfe, 0x39, 0xec, 0xaf, 0x8b, 0xb2, 0x58, 0xd7, 0xeb, 0x98, - 0xae, 0x60, 0xba, 0xb5, 0xfa, 0xc4, 0xa6, 0xa9, 0x37, 0x58, 0x46, 0xd3, 0xfd, 0xf5, 0x29, 0xb0, - 0xa2, 0x4c, 0x45, 0x9d, 0xe1, 0x26, 0x9d, 0x07, 0x6e, 0x5c, 0xbd, 0x65, 0x83, 0xd0, 0x07, 0xd0, - 0x4b, 0x65, 0x5d, 0x1a, 0x3e, 0xa4, 0xf8, 0xdd, 0xc2, 0xd2, 0xbc, 0x91, 0x23, 0x3a, 0x51, 0x61, - 0x8e, 0x57, 0x7c, 0x8f, 0x7a, 0x35, 0x6b, 0x2c, 0xd4, 0xa5, 0x1c, 0xaf, 0x6c, 0xf4, 0x56, 
0x83, - 0xbc, 0x97, 0x53, 0xcb, 0xa1, 0x45, 0x9c, 0xf9, 0xe9, 0xed, 0x71, 0x9f, 0x51, 0xe4, 0xdb, 0xa3, - 0xbe, 0x80, 0x59, 0x13, 0xb6, 0xed, 0x35, 0x7d, 0x23, 0x00, 0x05, 0xbd, 0xe7, 0x71, 0xf7, 0x75, - 0xa1, 0xd9, 0x11, 0x1c, 0x34, 0x1e, 0x71, 0x85, 0x2d, 0xf3, 0xf9, 0x3e, 0xed, 0xba, 0x9f, 0x38, - 0xb7, 0xbf, 0xa2, 0xda, 0x50, 0xa4, 0x66, 0x6b, 0x92, 0xcd, 0x11, 0x6d, 0x3b, 0xf2, 0xd8, 0x37, - 0x56, 0x29, 0x1f, 0xc3, 0xa8, 0x3d, 0x5d, 0x08, 0x3e, 0x26, 0x0f, 0x68, 0x0e, 0x16, 0xc2, 0x8e, - 0x4d, 0x9a, 0xa4, 0x2b, 0x8c, 0x0b, 0x83, 0x2a, 0x31, 0x52, 0xf1, 0x09, 0xf9, 0x4c, 0x08, 0x3d, - 0xf5, 0xa0, 0xad, 0x44, 0x59, 0xaf, 0x63, 0xbd, 0x4a, 0x54, 0xa6, 0x39, 0xa3, 0x88, 0x86, 0x65, - 0xbd, 0x3e, 0x27, 0x20, 0xfc, 0x57, 0x40, 0xdf, 0x83, 0x8e, 0xdb, 0xee, 0xb2, 0x61, 0x1f, 0x43, - 0x57, 0xc8, 0x9c, 0x07, 0xc4, 0xcd, 0xbb, 0x1b, 0x2c, 0xb9, 0xf9, 0xc6, 0x88, 0xac, 0xc7, 0x06, - 0xa3, 0x3a, 0xef, 0xc1, 0xa8, 0x10, 0x26, 0x22, 0xd1, 0x26, 0x6e, 0xf9, 0xe9, 0xc8, 0x3b, 0xb2, - 0xe0, 0x89, 0xe3, 0x68, 0xf8, 0x9f, 0x80, 0x46, 0xed, 0x8d, 0xfd, 0xac, 0x89, 0x30, 0x95, 0xea, - 0xf6, 0x4c, 0x05, 0xb7, 0x86, 0xf3, 0xd6, 0x3c, 0x74, 0x5c, 0x7e, 0xff, 0x7f, 0x1e, 0xba, 0x64, - 0x6c, 0xe7, 0xa1, 0xe5, 0xd9, 0xce, 0x26, 0xcf, 0x1e, 0x01, 0x18, 0x69, 0x12, 0xe1, 0xee, 0xe1, - 0x9e, 0x9b, 0x2f, 0x42, 0xe8, 0x12, 0xe6, 0xd0, 0x57, 0x14, 0x97, 0xe6, 0xbb, 0x6e, 0x3b, 0xbf, - 0x0c, 0xff, 0xdd, 0xa1, 0x4a, 0xfa, 0xd0, 0x7f, 0x8b, 0x4c, 0xfc, 0x7c, 0xc4, 0x7b, 0xbf, 0x36, - 0xe2, 0xbd, 0xcd, 0x11, 0x9f, 0xd9, 0xcf, 0x11, 0x51, 0x1b, 0xbb, 0xf7, 0x4a, 0xd6, 0x4a, 0x53, - 0x0a, 0x93, 0xe3, 0xe0, 0xb3, 0x68, 0x7a, 0x63, 0xfa, 0xc6, 0x5a, 0xec, 0x25, 0xe3, 0x07, 0xa7, - 0xd1, 0x23, 0x97, 0xd4, 0x20, 0x9a, 0x7a, 0xdc, 0x8b, 0x0e, 0x7d, 0xa0, 0xd4, 0x36, 0xb1, 0x56, - 0xb8, 0xdc, 0xa8, 0x8f, 0x09, 0x6c, 0xa4, 0xe9, 0x29, 0x4c, 0x9a, 0x7d, 0x62, 0x59, 0x8a, 0x6b, - 0x3f, 0xe2, 0xe3, 0x06, 0x3c, 0x2b, 0xc5, 0x75, 0x78, 0x45, 0x2a, 0xed, 0xab, 0xe4, 0x09, 0x77, - 0x04, 0x3d, 0xda, 0xc8, 0x53, 0xee, 0xfe, 0x36, 0x8d, 0x36, 0xc8, 0x10, 0x39, 0x3f, 0xf6, 0x05, - 0xf4, 0x75, 0xbd, 0x5e, 0x27, 0xea, 0xda, 0x33, 0xef, 0x57, 0x5e, 0x69, 0x3c, 0xbf, 0xea, 0xfd, - 0xdd, 0x92, 0xf6, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x70, 0xd9, 0xa0, 0xf8, 0x48, 0x0d, 0x00, - 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto deleted file mode 100644 index 8981dc47..00000000 --- a/vendor/google.golang.org/appengine/internal/log/log_service.proto +++ /dev/null @@ -1,150 +0,0 @@ -syntax = "proto2"; -option go_package = "log"; - -package appengine; - -message LogServiceError { - enum ErrorCode { - OK = 0; - INVALID_REQUEST = 1; - STORAGE_ERROR = 2; - } -} - -message UserAppLogLine { - required int64 timestamp_usec = 1; - required int64 level = 2; - required string message = 3; -} - -message UserAppLogGroup { - repeated UserAppLogLine log_line = 2; -} - -message FlushRequest { - optional bytes logs = 1; -} - -message SetStatusRequest { - required string status = 1; -} - - -message LogOffset { - optional bytes request_id = 1; -} - -message LogLine { - required int64 time = 1; - required int32 level = 2; - required string log_message = 3; -} - -message RequestLog { - required string app_id = 1; - optional string module_id = 37 [default="default"]; - required string version_id = 2; - required bytes request_id = 3; - optional LogOffset offset = 35; - required string ip = 4; - optional string nickname = 5; - required int64 start_time = 6; - required 
int64 end_time = 7; - required int64 latency = 8; - required int64 mcycles = 9; - required string method = 10; - required string resource = 11; - required string http_version = 12; - required int32 status = 13; - required int64 response_size = 14; - optional string referrer = 15; - optional string user_agent = 16; - required string url_map_entry = 17; - required string combined = 18; - optional int64 api_mcycles = 19; - optional string host = 20; - optional double cost = 21; - - optional string task_queue_name = 22; - optional string task_name = 23; - - optional bool was_loading_request = 24; - optional int64 pending_time = 25; - optional int32 replica_index = 26 [default = -1]; - optional bool finished = 27 [default = true]; - optional bytes clone_key = 28; - - repeated LogLine line = 29; - - optional bool lines_incomplete = 36; - optional bytes app_engine_release = 38; - - optional int32 exit_reason = 30; - optional bool was_throttled_for_time = 31; - optional bool was_throttled_for_requests = 32; - optional int64 throttled_time = 33; - - optional bytes server_name = 34; -} - -message LogModuleVersion { - optional string module_id = 1 [default="default"]; - optional string version_id = 2; -} - -message LogReadRequest { - required string app_id = 1; - repeated string version_id = 2; - repeated LogModuleVersion module_version = 19; - - optional int64 start_time = 3; - optional int64 end_time = 4; - optional LogOffset offset = 5; - repeated bytes request_id = 6; - - optional int32 minimum_log_level = 7; - optional bool include_incomplete = 8; - optional int64 count = 9; - - optional string combined_log_regex = 14; - optional string host_regex = 15; - optional int32 replica_index = 16; - - optional bool include_app_logs = 10; - optional int32 app_logs_per_request = 17; - optional bool include_host = 11; - optional bool include_all = 12; - optional bool cache_iterator = 13; - optional int32 num_shards = 18; -} - -message LogReadResponse { - repeated RequestLog log = 1; - optional LogOffset offset = 2; - optional int64 last_end_time = 3; -} - -message LogUsageRecord { - optional string version_id = 1; - optional int32 start_time = 2; - optional int32 end_time = 3; - optional int64 count = 4; - optional int64 total_size = 5; - optional int32 records = 6; -} - -message LogUsageRequest { - required string app_id = 1; - repeated string version_id = 2; - optional int32 start_time = 3; - optional int32 end_time = 4; - optional uint32 resolution_hours = 5 [default = 1]; - optional bool combine_versions = 6; - optional int32 usage_version = 7; - optional bool versions_only = 8; -} - -message LogUsageResponse { - repeated LogUsageRecord usage = 1; - optional LogUsageRecord summary = 2; -} diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go deleted file mode 100644 index afd0ae84..00000000 --- a/vendor/google.golang.org/appengine/internal/main.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
- -//go:build appengine -// +build appengine - -package internal - -import ( - "appengine_internal" -) - -func Main() { - MainPath = "" - appengine_internal.Main() -} diff --git a/vendor/google.golang.org/appengine/internal/main_common.go b/vendor/google.golang.org/appengine/internal/main_common.go deleted file mode 100644 index 357dce4d..00000000 --- a/vendor/google.golang.org/appengine/internal/main_common.go +++ /dev/null @@ -1,7 +0,0 @@ -package internal - -// MainPath stores the file path of the main package. On App Engine Standard -// using Go version 1.9 and below, this will be unset. On App Engine Flex and -// App Engine Standard second-gen (Go 1.11 and above), this will be the -// filepath to package main. -var MainPath string diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go deleted file mode 100644 index 86a8caf0..00000000 --- a/vendor/google.golang.org/appengine/internal/main_vm.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -//go:build !appengine -// +build !appengine - -package internal - -import ( - "io" - "log" - "net/http" - "net/url" - "os" - "path/filepath" - "runtime" -) - -func Main() { - MainPath = filepath.Dir(findMainPath()) - installHealthChecker(http.DefaultServeMux) - - port := "8080" - if s := os.Getenv("PORT"); s != "" { - port = s - } - - host := "" - if IsDevAppServer() { - host = "127.0.0.1" - } - if err := http.ListenAndServe(host+":"+port, Middleware(http.DefaultServeMux)); err != nil { - log.Fatalf("http.ListenAndServe: %v", err) - } -} - -// Find the path to package main by looking at the root Caller. -func findMainPath() string { - pc := make([]uintptr, 100) - n := runtime.Callers(2, pc) - frames := runtime.CallersFrames(pc[:n]) - for { - frame, more := frames.Next() - // Tests won't have package main, instead they have testing.tRunner - if frame.Function == "main.main" || frame.Function == "testing.tRunner" { - return frame.File - } - if !more { - break - } - } - return "" -} - -func installHealthChecker(mux *http.ServeMux) { - // If no health check handler has been installed by this point, add a trivial one. - const healthPath = "/_ah/health" - hreq := &http.Request{ - Method: "GET", - URL: &url.URL{ - Path: healthPath, - }, - } - if _, pat := mux.Handler(hreq); pat != healthPath { - mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) { - io.WriteString(w, "ok") - }) - } -} diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go deleted file mode 100644 index c4ba63bb..00000000 --- a/vendor/google.golang.org/appengine/internal/metadata.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -// This file has code for accessing metadata. -// -// References: -// https://cloud.google.com/compute/docs/metadata - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/url" -) - -const ( - metadataHost = "metadata" - metadataPath = "/computeMetadata/v1/" -) - -var ( - metadataRequestHeaders = http.Header{ - "Metadata-Flavor": []string{"Google"}, - } -) - -// TODO(dsymonds): Do we need to support default values, like Python? 
-func mustGetMetadata(key string) []byte { - b, err := getMetadata(key) - if err != nil { - panic(fmt.Sprintf("Metadata fetch failed for '%s': %v", key, err)) - } - return b -} - -func getMetadata(key string) ([]byte, error) { - // TODO(dsymonds): May need to use url.Parse to support keys with query args. - req := &http.Request{ - Method: "GET", - URL: &url.URL{ - Scheme: "http", - Host: metadataHost, - Path: metadataPath + key, - }, - Header: metadataRequestHeaders, - Host: metadataHost, - } - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode != 200 { - return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode) - } - return ioutil.ReadAll(resp.Body) -} diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go deleted file mode 100644 index ddfc0c04..00000000 --- a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go +++ /dev/null @@ -1,786 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/modules/modules_service.proto - -package modules - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type ModulesServiceError_ErrorCode int32 - -const ( - ModulesServiceError_OK ModulesServiceError_ErrorCode = 0 - ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1 - ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2 - ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3 - ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4 - ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5 -) - -var ModulesServiceError_ErrorCode_name = map[int32]string{ - 0: "OK", - 1: "INVALID_MODULE", - 2: "INVALID_VERSION", - 3: "INVALID_INSTANCES", - 4: "TRANSIENT_ERROR", - 5: "UNEXPECTED_STATE", -} -var ModulesServiceError_ErrorCode_value = map[string]int32{ - "OK": 0, - "INVALID_MODULE": 1, - "INVALID_VERSION": 2, - "INVALID_INSTANCES": 3, - "TRANSIENT_ERROR": 4, - "UNEXPECTED_STATE": 5, -} - -func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode { - p := new(ModulesServiceError_ErrorCode) - *p = x - return p -} -func (x ModulesServiceError_ErrorCode) String() string { - return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x)) -} -func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode") - if err != nil { - return err - } - *x = ModulesServiceError_ErrorCode(value) - return nil -} -func (ModulesServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{0, 0} -} - -type ModulesServiceError struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ModulesServiceError) 
Reset() { *m = ModulesServiceError{} } -func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) } -func (*ModulesServiceError) ProtoMessage() {} -func (*ModulesServiceError) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{0} -} -func (m *ModulesServiceError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ModulesServiceError.Unmarshal(m, b) -} -func (m *ModulesServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ModulesServiceError.Marshal(b, m, deterministic) -} -func (dst *ModulesServiceError) XXX_Merge(src proto.Message) { - xxx_messageInfo_ModulesServiceError.Merge(dst, src) -} -func (m *ModulesServiceError) XXX_Size() int { - return xxx_messageInfo_ModulesServiceError.Size(m) -} -func (m *ModulesServiceError) XXX_DiscardUnknown() { - xxx_messageInfo_ModulesServiceError.DiscardUnknown(m) -} - -var xxx_messageInfo_ModulesServiceError proto.InternalMessageInfo - -type GetModulesRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} } -func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) } -func (*GetModulesRequest) ProtoMessage() {} -func (*GetModulesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{1} -} -func (m *GetModulesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetModulesRequest.Unmarshal(m, b) -} -func (m *GetModulesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetModulesRequest.Marshal(b, m, deterministic) -} -func (dst *GetModulesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetModulesRequest.Merge(dst, src) -} -func (m *GetModulesRequest) XXX_Size() int { - return xxx_messageInfo_GetModulesRequest.Size(m) -} -func (m *GetModulesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetModulesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetModulesRequest proto.InternalMessageInfo - -type GetModulesResponse struct { - Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} } -func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) } -func (*GetModulesResponse) ProtoMessage() {} -func (*GetModulesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{2} -} -func (m *GetModulesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetModulesResponse.Unmarshal(m, b) -} -func (m *GetModulesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetModulesResponse.Marshal(b, m, deterministic) -} -func (dst *GetModulesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetModulesResponse.Merge(dst, src) -} -func (m *GetModulesResponse) XXX_Size() int { - return xxx_messageInfo_GetModulesResponse.Size(m) -} -func (m *GetModulesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetModulesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetModulesResponse proto.InternalMessageInfo - -func (m *GetModulesResponse) GetModule() []string { - if m != nil { - return m.Module - } - return nil -} - -type GetVersionsRequest struct { 
- Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} } -func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) } -func (*GetVersionsRequest) ProtoMessage() {} -func (*GetVersionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{3} -} -func (m *GetVersionsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetVersionsRequest.Unmarshal(m, b) -} -func (m *GetVersionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetVersionsRequest.Marshal(b, m, deterministic) -} -func (dst *GetVersionsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetVersionsRequest.Merge(dst, src) -} -func (m *GetVersionsRequest) XXX_Size() int { - return xxx_messageInfo_GetVersionsRequest.Size(m) -} -func (m *GetVersionsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetVersionsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetVersionsRequest proto.InternalMessageInfo - -func (m *GetVersionsRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -type GetVersionsResponse struct { - Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} } -func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) } -func (*GetVersionsResponse) ProtoMessage() {} -func (*GetVersionsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{4} -} -func (m *GetVersionsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetVersionsResponse.Unmarshal(m, b) -} -func (m *GetVersionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetVersionsResponse.Marshal(b, m, deterministic) -} -func (dst *GetVersionsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetVersionsResponse.Merge(dst, src) -} -func (m *GetVersionsResponse) XXX_Size() int { - return xxx_messageInfo_GetVersionsResponse.Size(m) -} -func (m *GetVersionsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetVersionsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetVersionsResponse proto.InternalMessageInfo - -func (m *GetVersionsResponse) GetVersion() []string { - if m != nil { - return m.Version - } - return nil -} - -type GetDefaultVersionRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} } -func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) } -func (*GetDefaultVersionRequest) ProtoMessage() {} -func (*GetDefaultVersionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{5} -} -func (m *GetDefaultVersionRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetDefaultVersionRequest.Unmarshal(m, b) -} -func (m *GetDefaultVersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - 
return xxx_messageInfo_GetDefaultVersionRequest.Marshal(b, m, deterministic) -} -func (dst *GetDefaultVersionRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDefaultVersionRequest.Merge(dst, src) -} -func (m *GetDefaultVersionRequest) XXX_Size() int { - return xxx_messageInfo_GetDefaultVersionRequest.Size(m) -} -func (m *GetDefaultVersionRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetDefaultVersionRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetDefaultVersionRequest proto.InternalMessageInfo - -func (m *GetDefaultVersionRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -type GetDefaultVersionResponse struct { - Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} } -func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) } -func (*GetDefaultVersionResponse) ProtoMessage() {} -func (*GetDefaultVersionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{6} -} -func (m *GetDefaultVersionResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetDefaultVersionResponse.Unmarshal(m, b) -} -func (m *GetDefaultVersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetDefaultVersionResponse.Marshal(b, m, deterministic) -} -func (dst *GetDefaultVersionResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDefaultVersionResponse.Merge(dst, src) -} -func (m *GetDefaultVersionResponse) XXX_Size() int { - return xxx_messageInfo_GetDefaultVersionResponse.Size(m) -} -func (m *GetDefaultVersionResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetDefaultVersionResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetDefaultVersionResponse proto.InternalMessageInfo - -func (m *GetDefaultVersionResponse) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type GetNumInstancesRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} } -func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) } -func (*GetNumInstancesRequest) ProtoMessage() {} -func (*GetNumInstancesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{7} -} -func (m *GetNumInstancesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetNumInstancesRequest.Unmarshal(m, b) -} -func (m *GetNumInstancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetNumInstancesRequest.Marshal(b, m, deterministic) -} -func (dst *GetNumInstancesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetNumInstancesRequest.Merge(dst, src) -} -func (m *GetNumInstancesRequest) XXX_Size() int { - return xxx_messageInfo_GetNumInstancesRequest.Size(m) -} -func (m *GetNumInstancesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetNumInstancesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetNumInstancesRequest 
proto.InternalMessageInfo - -func (m *GetNumInstancesRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *GetNumInstancesRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type GetNumInstancesResponse struct { - Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} } -func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) } -func (*GetNumInstancesResponse) ProtoMessage() {} -func (*GetNumInstancesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{8} -} -func (m *GetNumInstancesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetNumInstancesResponse.Unmarshal(m, b) -} -func (m *GetNumInstancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetNumInstancesResponse.Marshal(b, m, deterministic) -} -func (dst *GetNumInstancesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetNumInstancesResponse.Merge(dst, src) -} -func (m *GetNumInstancesResponse) XXX_Size() int { - return xxx_messageInfo_GetNumInstancesResponse.Size(m) -} -func (m *GetNumInstancesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetNumInstancesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetNumInstancesResponse proto.InternalMessageInfo - -func (m *GetNumInstancesResponse) GetInstances() int64 { - if m != nil && m.Instances != nil { - return *m.Instances - } - return 0 -} - -type SetNumInstancesRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} } -func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) } -func (*SetNumInstancesRequest) ProtoMessage() {} -func (*SetNumInstancesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{9} -} -func (m *SetNumInstancesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetNumInstancesRequest.Unmarshal(m, b) -} -func (m *SetNumInstancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetNumInstancesRequest.Marshal(b, m, deterministic) -} -func (dst *SetNumInstancesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetNumInstancesRequest.Merge(dst, src) -} -func (m *SetNumInstancesRequest) XXX_Size() int { - return xxx_messageInfo_SetNumInstancesRequest.Size(m) -} -func (m *SetNumInstancesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetNumInstancesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SetNumInstancesRequest proto.InternalMessageInfo - -func (m *SetNumInstancesRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *SetNumInstancesRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -func (m 
*SetNumInstancesRequest) GetInstances() int64 { - if m != nil && m.Instances != nil { - return *m.Instances - } - return 0 -} - -type SetNumInstancesResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} } -func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) } -func (*SetNumInstancesResponse) ProtoMessage() {} -func (*SetNumInstancesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{10} -} -func (m *SetNumInstancesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetNumInstancesResponse.Unmarshal(m, b) -} -func (m *SetNumInstancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetNumInstancesResponse.Marshal(b, m, deterministic) -} -func (dst *SetNumInstancesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetNumInstancesResponse.Merge(dst, src) -} -func (m *SetNumInstancesResponse) XXX_Size() int { - return xxx_messageInfo_SetNumInstancesResponse.Size(m) -} -func (m *SetNumInstancesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SetNumInstancesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_SetNumInstancesResponse proto.InternalMessageInfo - -type StartModuleRequest struct { - Module *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} } -func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) } -func (*StartModuleRequest) ProtoMessage() {} -func (*StartModuleRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{11} -} -func (m *StartModuleRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StartModuleRequest.Unmarshal(m, b) -} -func (m *StartModuleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StartModuleRequest.Marshal(b, m, deterministic) -} -func (dst *StartModuleRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StartModuleRequest.Merge(dst, src) -} -func (m *StartModuleRequest) XXX_Size() int { - return xxx_messageInfo_StartModuleRequest.Size(m) -} -func (m *StartModuleRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StartModuleRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_StartModuleRequest proto.InternalMessageInfo - -func (m *StartModuleRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *StartModuleRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type StartModuleResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} } -func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) } -func (*StartModuleResponse) ProtoMessage() {} -func (*StartModuleResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{12} -} -func (m *StartModuleResponse) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_StartModuleResponse.Unmarshal(m, b) -} -func (m *StartModuleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StartModuleResponse.Marshal(b, m, deterministic) -} -func (dst *StartModuleResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StartModuleResponse.Merge(dst, src) -} -func (m *StartModuleResponse) XXX_Size() int { - return xxx_messageInfo_StartModuleResponse.Size(m) -} -func (m *StartModuleResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StartModuleResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_StartModuleResponse proto.InternalMessageInfo - -type StopModuleRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} } -func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) } -func (*StopModuleRequest) ProtoMessage() {} -func (*StopModuleRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{13} -} -func (m *StopModuleRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StopModuleRequest.Unmarshal(m, b) -} -func (m *StopModuleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StopModuleRequest.Marshal(b, m, deterministic) -} -func (dst *StopModuleRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StopModuleRequest.Merge(dst, src) -} -func (m *StopModuleRequest) XXX_Size() int { - return xxx_messageInfo_StopModuleRequest.Size(m) -} -func (m *StopModuleRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StopModuleRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_StopModuleRequest proto.InternalMessageInfo - -func (m *StopModuleRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *StopModuleRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type StopModuleResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} } -func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) } -func (*StopModuleResponse) ProtoMessage() {} -func (*StopModuleResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{14} -} -func (m *StopModuleResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StopModuleResponse.Unmarshal(m, b) -} -func (m *StopModuleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StopModuleResponse.Marshal(b, m, deterministic) -} -func (dst *StopModuleResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StopModuleResponse.Merge(dst, src) -} -func (m *StopModuleResponse) XXX_Size() int { - return xxx_messageInfo_StopModuleResponse.Size(m) -} -func (m *StopModuleResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StopModuleResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_StopModuleResponse proto.InternalMessageInfo - -type GetHostnameRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" 
json:"version,omitempty"` - Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} } -func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) } -func (*GetHostnameRequest) ProtoMessage() {} -func (*GetHostnameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{15} -} -func (m *GetHostnameRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetHostnameRequest.Unmarshal(m, b) -} -func (m *GetHostnameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetHostnameRequest.Marshal(b, m, deterministic) -} -func (dst *GetHostnameRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetHostnameRequest.Merge(dst, src) -} -func (m *GetHostnameRequest) XXX_Size() int { - return xxx_messageInfo_GetHostnameRequest.Size(m) -} -func (m *GetHostnameRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetHostnameRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetHostnameRequest proto.InternalMessageInfo - -func (m *GetHostnameRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *GetHostnameRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -func (m *GetHostnameRequest) GetInstance() string { - if m != nil && m.Instance != nil { - return *m.Instance - } - return "" -} - -type GetHostnameResponse struct { - Hostname *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} } -func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) } -func (*GetHostnameResponse) ProtoMessage() {} -func (*GetHostnameResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{16} -} -func (m *GetHostnameResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetHostnameResponse.Unmarshal(m, b) -} -func (m *GetHostnameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetHostnameResponse.Marshal(b, m, deterministic) -} -func (dst *GetHostnameResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetHostnameResponse.Merge(dst, src) -} -func (m *GetHostnameResponse) XXX_Size() int { - return xxx_messageInfo_GetHostnameResponse.Size(m) -} -func (m *GetHostnameResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetHostnameResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetHostnameResponse proto.InternalMessageInfo - -func (m *GetHostnameResponse) GetHostname() string { - if m != nil && m.Hostname != nil { - return *m.Hostname - } - return "" -} - -func init() { - proto.RegisterType((*ModulesServiceError)(nil), "appengine.ModulesServiceError") - proto.RegisterType((*GetModulesRequest)(nil), "appengine.GetModulesRequest") - proto.RegisterType((*GetModulesResponse)(nil), "appengine.GetModulesResponse") - proto.RegisterType((*GetVersionsRequest)(nil), "appengine.GetVersionsRequest") - proto.RegisterType((*GetVersionsResponse)(nil), "appengine.GetVersionsResponse") - proto.RegisterType((*GetDefaultVersionRequest)(nil), 
"appengine.GetDefaultVersionRequest") - proto.RegisterType((*GetDefaultVersionResponse)(nil), "appengine.GetDefaultVersionResponse") - proto.RegisterType((*GetNumInstancesRequest)(nil), "appengine.GetNumInstancesRequest") - proto.RegisterType((*GetNumInstancesResponse)(nil), "appengine.GetNumInstancesResponse") - proto.RegisterType((*SetNumInstancesRequest)(nil), "appengine.SetNumInstancesRequest") - proto.RegisterType((*SetNumInstancesResponse)(nil), "appengine.SetNumInstancesResponse") - proto.RegisterType((*StartModuleRequest)(nil), "appengine.StartModuleRequest") - proto.RegisterType((*StartModuleResponse)(nil), "appengine.StartModuleResponse") - proto.RegisterType((*StopModuleRequest)(nil), "appengine.StopModuleRequest") - proto.RegisterType((*StopModuleResponse)(nil), "appengine.StopModuleResponse") - proto.RegisterType((*GetHostnameRequest)(nil), "appengine.GetHostnameRequest") - proto.RegisterType((*GetHostnameResponse)(nil), "appengine.GetHostnameResponse") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/modules/modules_service.proto", fileDescriptor_modules_service_9cd3bffe4e91c59a) -} - -var fileDescriptor_modules_service_9cd3bffe4e91c59a = []byte{ - // 457 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xc1, 0x6f, 0xd3, 0x30, - 0x14, 0xc6, 0x69, 0x02, 0xdb, 0xf2, 0x0e, 0x90, 0x3a, 0x5b, 0xd7, 0x4d, 0x1c, 0x50, 0x4e, 0x1c, - 0x50, 0x2b, 0x90, 0x10, 0xe7, 0xae, 0x35, 0x25, 0xb0, 0xa5, 0x28, 0xce, 0x2a, 0xc4, 0xa5, 0x0a, - 0xdb, 0x23, 0x8b, 0x94, 0xda, 0xc1, 0x76, 0x77, 0xe4, 0xbf, 0xe0, 0xff, 0x45, 0x4b, 0xed, 0xb6, - 0x81, 0x4e, 0x45, 0x68, 0xa7, 0xe4, 0x7d, 0xfe, 0xfc, 0x7b, 0x9f, 0x5f, 0xac, 0xc0, 0x59, 0x2e, - 0x44, 0x5e, 0x62, 0x2f, 0x17, 0x65, 0xc6, 0xf3, 0x9e, 0x90, 0x79, 0x3f, 0xab, 0x2a, 0xe4, 0x79, - 0xc1, 0xb1, 0x5f, 0x70, 0x8d, 0x92, 0x67, 0x65, 0x7f, 0x2e, 0xae, 0x17, 0x25, 0x2a, 0xfb, 0x9c, - 0x29, 0x94, 0xb7, 0xc5, 0x15, 0xf6, 0x2a, 0x29, 0xb4, 0x20, 0xde, 0x6a, 0x47, 0xf8, 0xab, 0x05, - 0xc1, 0xc5, 0xd2, 0xc4, 0x96, 0x1e, 0x2a, 0xa5, 0x90, 0xe1, 0x4f, 0xf0, 0xea, 0x97, 0xa1, 0xb8, - 0x46, 0xb2, 0x07, 0xce, 0xe4, 0x93, 0xff, 0x88, 0x10, 0x78, 0x1a, 0xc5, 0xd3, 0xc1, 0x79, 0x34, - 0x9a, 0x5d, 0x4c, 0x46, 0x97, 0xe7, 0xd4, 0x6f, 0x91, 0x00, 0x9e, 0x59, 0x6d, 0x4a, 0x13, 0x16, - 0x4d, 0x62, 0xdf, 0x21, 0x47, 0xd0, 0xb6, 0x62, 0x14, 0xb3, 0x74, 0x10, 0x0f, 0x29, 0xf3, 0xdd, - 0x3b, 0x6f, 0x9a, 0x0c, 0x62, 0x16, 0xd1, 0x38, 0x9d, 0xd1, 0x24, 0x99, 0x24, 0xfe, 0x63, 0x72, - 0x08, 0xfe, 0x65, 0x4c, 0xbf, 0x7c, 0xa6, 0xc3, 0x94, 0x8e, 0x66, 0x2c, 0x1d, 0xa4, 0xd4, 0x7f, - 0x12, 0x06, 0xd0, 0x1e, 0xa3, 0x36, 0xc9, 0x12, 0xfc, 0xb1, 0x40, 0xa5, 0xc3, 0x57, 0x40, 0x36, - 0x45, 0x55, 0x09, 0xae, 0x90, 0x74, 0x60, 0x6f, 0x79, 0xcc, 0x6e, 0xeb, 0x85, 0xfb, 0xd2, 0x4b, - 0x4c, 0x65, 0xdc, 0x53, 0x94, 0xaa, 0x10, 0xdc, 0x32, 0x1a, 0xee, 0xd6, 0x86, 0xbb, 0x0f, 0x41, - 0xc3, 0x6d, 0xe0, 0x5d, 0xd8, 0xbf, 0x5d, 0x6a, 0x86, 0x6e, 0xcb, 0xf0, 0x0d, 0x74, 0xc7, 0xa8, - 0x47, 0xf8, 0x3d, 0x5b, 0x94, 0x76, 0xdf, 0xae, 0x26, 0x6f, 0xe1, 0x64, 0xcb, 0x9e, 0x6d, 0xad, - 0x9c, 0xcd, 0x56, 0x1f, 0xa1, 0x33, 0x46, 0x1d, 0x2f, 0xe6, 0x11, 0x57, 0x3a, 0xe3, 0x57, 0xb8, - 0xeb, 0x34, 0x9b, 0x2c, 0xa7, 0x5e, 0x58, 0xb1, 0xde, 0xc1, 0xf1, 0x5f, 0x2c, 0x13, 0xe0, 0x39, - 0x78, 0x85, 0x15, 0xeb, 0x08, 0x6e, 0xb2, 0x16, 0xc2, 0x1b, 0xe8, 0xb0, 0x07, 0x0a, 0xd1, 0xec, - 0xe4, 0xfe, 0xd9, 0xe9, 0x04, 0x8e, 0xd9, 0xf6, 0x88, 0xe1, 0x7b, 0x20, 0x4c, 0x67, 0xd2, 0xdc, - 0x81, 0x6d, 0x01, 0x9c, 0xfb, 0x02, 0x34, 
0x26, 0x7a, 0x04, 0x41, 0x83, 0x63, 0xf0, 0x14, 0xda, - 0x4c, 0x8b, 0xea, 0x7e, 0xfa, 0xbf, 0xcd, 0xf8, 0xf0, 0x2e, 0xe5, 0x1a, 0x63, 0xe0, 0xdf, 0xea, - 0xfb, 0xf8, 0x41, 0x28, 0xcd, 0xb3, 0xf9, 0xff, 0xd3, 0xc9, 0x29, 0x1c, 0xd8, 0x59, 0x75, 0xdd, - 0x7a, 0x69, 0x55, 0x87, 0xaf, 0xeb, 0x5b, 0xbc, 0xee, 0x61, 0xbe, 0xec, 0x29, 0x1c, 0xdc, 0x18, - 0xcd, 0x8c, 0x68, 0x55, 0x9f, 0x79, 0x5f, 0xf7, 0xcd, 0x5f, 0xe2, 0x77, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x6e, 0xbc, 0xe0, 0x61, 0x5c, 0x04, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto deleted file mode 100644 index d29f0065..00000000 --- a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto +++ /dev/null @@ -1,80 +0,0 @@ -syntax = "proto2"; -option go_package = "modules"; - -package appengine; - -message ModulesServiceError { - enum ErrorCode { - OK = 0; - INVALID_MODULE = 1; - INVALID_VERSION = 2; - INVALID_INSTANCES = 3; - TRANSIENT_ERROR = 4; - UNEXPECTED_STATE = 5; - } -} - -message GetModulesRequest { -} - -message GetModulesResponse { - repeated string module = 1; -} - -message GetVersionsRequest { - optional string module = 1; -} - -message GetVersionsResponse { - repeated string version = 1; -} - -message GetDefaultVersionRequest { - optional string module = 1; -} - -message GetDefaultVersionResponse { - required string version = 1; -} - -message GetNumInstancesRequest { - optional string module = 1; - optional string version = 2; -} - -message GetNumInstancesResponse { - required int64 instances = 1; -} - -message SetNumInstancesRequest { - optional string module = 1; - optional string version = 2; - required int64 instances = 3; -} - -message SetNumInstancesResponse {} - -message StartModuleRequest { - required string module = 1; - required string version = 2; -} - -message StartModuleResponse {} - -message StopModuleRequest { - optional string module = 1; - optional string version = 2; -} - -message StopModuleResponse {} - -message GetHostnameRequest { - optional string module = 1; - optional string version = 2; - optional string instance = 3; -} - -message GetHostnameResponse { - required string hostname = 1; -} - diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go deleted file mode 100644 index fe429720..00000000 --- a/vendor/google.golang.org/appengine/internal/net.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -// This file implements a network dialer that limits the number of concurrent connections. -// It is only used for API calls. - -import ( - "log" - "net" - "runtime" - "sync" - "time" -) - -var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable. - -func limitRelease() { - // non-blocking - select { - case <-limitSem: - default: - // This should not normally happen. - log.Print("appengine: unbalanced limitSem release!") - } -} - -func limitDial(network, addr string) (net.Conn, error) { - limitSem <- 1 - - // Dial with a timeout in case the API host is MIA. - // The connection should normally be very fast. 
- conn, err := net.DialTimeout(network, addr, 10*time.Second) - if err != nil { - limitRelease() - return nil, err - } - lc := &limitConn{Conn: conn} - runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required - return lc, nil -} - -type limitConn struct { - close sync.Once - net.Conn -} - -func (lc *limitConn) Close() error { - defer lc.close.Do(func() { - limitRelease() - runtime.SetFinalizer(lc, nil) - }) - return lc.Conn.Close() -} diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh deleted file mode 100644 index 2fdb546a..00000000 --- a/vendor/google.golang.org/appengine/internal/regen.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -e -# -# This script rebuilds the generated code for the protocol buffers. -# To run this you will need protoc and goprotobuf installed; -# see https://github.com/golang/protobuf for instructions. - -PKG=google.golang.org/appengine - -function die() { - echo 1>&2 $* - exit 1 -} - -# Sanity check that the right tools are accessible. -for tool in go protoc protoc-gen-go; do - q=$(which $tool) || die "didn't find $tool" - echo 1>&2 "$tool: $q" -done - -echo -n 1>&2 "finding package dir... " -pkgdir=$(go list -f '{{.Dir}}' $PKG) -echo 1>&2 $pkgdir -base=$(echo $pkgdir | sed "s,/$PKG\$,,") -echo 1>&2 "base: $base" -cd $base - -# Run protoc once per package. -for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do - echo 1>&2 "* $dir" - protoc --go_out=. $dir/*.proto -done - -for f in $(find $PKG/internal -name '*.pb.go'); do - # Remove proto.RegisterEnum calls. - # These cause duplicate registration panics when these packages - # are used on classic App Engine. proto.RegisterEnum only affects - # parsing the text format; we don't care about that. - # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17 - sed -i '/proto.RegisterEnum/d' $f -done diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go deleted file mode 100644 index 8d782a38..00000000 --- a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go +++ /dev/null @@ -1,361 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/remote_api/remote_api.proto - -package remote_api - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type RpcError_ErrorCode int32 - -const ( - RpcError_UNKNOWN RpcError_ErrorCode = 0 - RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1 - RpcError_PARSE_ERROR RpcError_ErrorCode = 2 - RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3 - RpcError_OVER_QUOTA RpcError_ErrorCode = 4 - RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5 - RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6 - RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7 - RpcError_BAD_REQUEST RpcError_ErrorCode = 8 - RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9 - RpcError_CANCELLED RpcError_ErrorCode = 10 - RpcError_REPLAY_ERROR RpcError_ErrorCode = 11 - RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12 -) - -var RpcError_ErrorCode_name = map[int32]string{ - 0: "UNKNOWN", - 1: "CALL_NOT_FOUND", - 2: "PARSE_ERROR", - 3: "SECURITY_VIOLATION", - 4: "OVER_QUOTA", - 5: "REQUEST_TOO_LARGE", - 6: "CAPABILITY_DISABLED", - 7: "FEATURE_DISABLED", - 8: "BAD_REQUEST", - 9: "RESPONSE_TOO_LARGE", - 10: "CANCELLED", - 11: "REPLAY_ERROR", - 12: "DEADLINE_EXCEEDED", -} -var RpcError_ErrorCode_value = map[string]int32{ - "UNKNOWN": 0, - "CALL_NOT_FOUND": 1, - "PARSE_ERROR": 2, - "SECURITY_VIOLATION": 3, - "OVER_QUOTA": 4, - "REQUEST_TOO_LARGE": 5, - "CAPABILITY_DISABLED": 6, - "FEATURE_DISABLED": 7, - "BAD_REQUEST": 8, - "RESPONSE_TOO_LARGE": 9, - "CANCELLED": 10, - "REPLAY_ERROR": 11, - "DEADLINE_EXCEEDED": 12, -} - -func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode { - p := new(RpcError_ErrorCode) - *p = x - return p -} -func (x RpcError_ErrorCode) String() string { - return proto.EnumName(RpcError_ErrorCode_name, int32(x)) -} -func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode") - if err != nil { - return err - } - *x = RpcError_ErrorCode(value) - return nil -} -func (RpcError_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_remote_api_1978114ec33a273d, []int{2, 0} -} - -type Request struct { - ServiceName *string `protobuf:"bytes,2,req,name=service_name,json=serviceName" json:"service_name,omitempty"` - Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"` - Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"` - RequestId *string `protobuf:"bytes,5,opt,name=request_id,json=requestId" json:"request_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Request) Reset() { *m = Request{} } -func (m *Request) String() string { return proto.CompactTextString(m) } -func (*Request) ProtoMessage() {} -func (*Request) Descriptor() ([]byte, []int) { - return fileDescriptor_remote_api_1978114ec33a273d, []int{0} -} -func (m *Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Request.Unmarshal(m, b) -} -func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Request.Marshal(b, m, deterministic) -} -func (dst *Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_Request.Merge(dst, src) -} -func (m *Request) XXX_Size() int { - return xxx_messageInfo_Request.Size(m) -} -func (m *Request) XXX_DiscardUnknown() { - xxx_messageInfo_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_Request proto.InternalMessageInfo - -func (m *Request) GetServiceName() string { - if m != nil && m.ServiceName != nil { - return *m.ServiceName - } - return "" -} - -func 
(m *Request) GetMethod() string { - if m != nil && m.Method != nil { - return *m.Method - } - return "" -} - -func (m *Request) GetRequest() []byte { - if m != nil { - return m.Request - } - return nil -} - -func (m *Request) GetRequestId() string { - if m != nil && m.RequestId != nil { - return *m.RequestId - } - return "" -} - -type ApplicationError struct { - Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` - Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ApplicationError) Reset() { *m = ApplicationError{} } -func (m *ApplicationError) String() string { return proto.CompactTextString(m) } -func (*ApplicationError) ProtoMessage() {} -func (*ApplicationError) Descriptor() ([]byte, []int) { - return fileDescriptor_remote_api_1978114ec33a273d, []int{1} -} -func (m *ApplicationError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ApplicationError.Unmarshal(m, b) -} -func (m *ApplicationError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ApplicationError.Marshal(b, m, deterministic) -} -func (dst *ApplicationError) XXX_Merge(src proto.Message) { - xxx_messageInfo_ApplicationError.Merge(dst, src) -} -func (m *ApplicationError) XXX_Size() int { - return xxx_messageInfo_ApplicationError.Size(m) -} -func (m *ApplicationError) XXX_DiscardUnknown() { - xxx_messageInfo_ApplicationError.DiscardUnknown(m) -} - -var xxx_messageInfo_ApplicationError proto.InternalMessageInfo - -func (m *ApplicationError) GetCode() int32 { - if m != nil && m.Code != nil { - return *m.Code - } - return 0 -} - -func (m *ApplicationError) GetDetail() string { - if m != nil && m.Detail != nil { - return *m.Detail - } - return "" -} - -type RpcError struct { - Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` - Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RpcError) Reset() { *m = RpcError{} } -func (m *RpcError) String() string { return proto.CompactTextString(m) } -func (*RpcError) ProtoMessage() {} -func (*RpcError) Descriptor() ([]byte, []int) { - return fileDescriptor_remote_api_1978114ec33a273d, []int{2} -} -func (m *RpcError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RpcError.Unmarshal(m, b) -} -func (m *RpcError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RpcError.Marshal(b, m, deterministic) -} -func (dst *RpcError) XXX_Merge(src proto.Message) { - xxx_messageInfo_RpcError.Merge(dst, src) -} -func (m *RpcError) XXX_Size() int { - return xxx_messageInfo_RpcError.Size(m) -} -func (m *RpcError) XXX_DiscardUnknown() { - xxx_messageInfo_RpcError.DiscardUnknown(m) -} - -var xxx_messageInfo_RpcError proto.InternalMessageInfo - -func (m *RpcError) GetCode() int32 { - if m != nil && m.Code != nil { - return *m.Code - } - return 0 -} - -func (m *RpcError) GetDetail() string { - if m != nil && m.Detail != nil { - return *m.Detail - } - return "" -} - -type Response struct { - Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"` - Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"` - ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error,json=applicationError" 
json:"application_error,omitempty"` - JavaException []byte `protobuf:"bytes,4,opt,name=java_exception,json=javaException" json:"java_exception,omitempty"` - RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error,json=rpcError" json:"rpc_error,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Response) Reset() { *m = Response{} } -func (m *Response) String() string { return proto.CompactTextString(m) } -func (*Response) ProtoMessage() {} -func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_remote_api_1978114ec33a273d, []int{3} -} -func (m *Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Response.Unmarshal(m, b) -} -func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Response.Marshal(b, m, deterministic) -} -func (dst *Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_Response.Merge(dst, src) -} -func (m *Response) XXX_Size() int { - return xxx_messageInfo_Response.Size(m) -} -func (m *Response) XXX_DiscardUnknown() { - xxx_messageInfo_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_Response proto.InternalMessageInfo - -func (m *Response) GetResponse() []byte { - if m != nil { - return m.Response - } - return nil -} - -func (m *Response) GetException() []byte { - if m != nil { - return m.Exception - } - return nil -} - -func (m *Response) GetApplicationError() *ApplicationError { - if m != nil { - return m.ApplicationError - } - return nil -} - -func (m *Response) GetJavaException() []byte { - if m != nil { - return m.JavaException - } - return nil -} - -func (m *Response) GetRpcError() *RpcError { - if m != nil { - return m.RpcError - } - return nil -} - -func init() { - proto.RegisterType((*Request)(nil), "remote_api.Request") - proto.RegisterType((*ApplicationError)(nil), "remote_api.ApplicationError") - proto.RegisterType((*RpcError)(nil), "remote_api.RpcError") - proto.RegisterType((*Response)(nil), "remote_api.Response") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/remote_api/remote_api.proto", fileDescriptor_remote_api_1978114ec33a273d) -} - -var fileDescriptor_remote_api_1978114ec33a273d = []byte{ - // 531 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x51, 0x6e, 0xd3, 0x40, - 0x10, 0x86, 0xb1, 0x9b, 0x34, 0xf1, 0xc4, 0x2d, 0xdb, 0xa5, 0x14, 0x0b, 0x15, 0x29, 0x44, 0x42, - 0xca, 0x53, 0x2a, 0x38, 0x00, 0x62, 0x63, 0x6f, 0x91, 0x85, 0x65, 0xa7, 0x6b, 0xbb, 0x50, 0x5e, - 0x56, 0x2b, 0x67, 0x65, 0x8c, 0x12, 0xaf, 0xd9, 0x98, 0x8a, 0x17, 0x6e, 0xc0, 0xb5, 0x38, 0x0c, - 0xb7, 0x40, 0x36, 0x6e, 0x63, 0xf5, 0x89, 0xb7, 0x7f, 0x7e, 0x7b, 0xe6, 0x1b, 0xcd, 0xcc, 0xc2, - 0xbb, 0x5c, 0xa9, 0x7c, 0x23, 0x17, 0xb9, 0xda, 0x88, 0x32, 0x5f, 0x28, 0x9d, 0x5f, 0x88, 0xaa, - 0x92, 0x65, 0x5e, 0x94, 0xf2, 0xa2, 0x28, 0x6b, 0xa9, 0x4b, 0xb1, 0xb9, 0xd0, 0x72, 0xab, 0x6a, - 0xc9, 0x45, 0x55, 0xf4, 0xe4, 0xa2, 0xd2, 0xaa, 0x56, 0x18, 0xf6, 0xce, 0xec, 0x27, 0x8c, 0x98, - 0xfc, 0xf6, 0x5d, 0xee, 0x6a, 0xfc, 0x12, 0xec, 0x9d, 0xd4, 0xb7, 0x45, 0x26, 0x79, 0x29, 0xb6, - 0xd2, 0x31, 0xa7, 0xe6, 0xdc, 0x62, 0x93, 0xce, 0x0b, 0xc5, 0x56, 0xe2, 0x33, 0x38, 0xdc, 0xca, - 0xfa, 0x8b, 0x5a, 0x3b, 0x07, 0xed, 0xc7, 0x2e, 0xc2, 0x0e, 0x8c, 0xf4, 0xbf, 0x2a, 0xce, 0x60, - 0x6a, 0xce, 0x6d, 0x76, 0x17, 0xe2, 0x17, 0x00, 0x9d, 0xe4, 0xc5, 0xda, 0x19, 0x4e, 0x8d, 0xb9, - 0xc5, 0xac, 0xce, 0xf1, 0xd7, 0xb3, 0xb7, 0x80, 0x48, 
0x55, 0x6d, 0x8a, 0x4c, 0xd4, 0x85, 0x2a, - 0xa9, 0xd6, 0x4a, 0x63, 0x0c, 0x83, 0x4c, 0xad, 0xa5, 0x63, 0x4c, 0xcd, 0xf9, 0x90, 0xb5, 0xba, - 0x01, 0xaf, 0x65, 0x2d, 0x8a, 0x4d, 0xd7, 0x55, 0x17, 0xcd, 0x7e, 0x9b, 0x30, 0x66, 0x55, 0xf6, - 0x7f, 0x89, 0x46, 0x2f, 0xf1, 0x97, 0x09, 0x56, 0x9b, 0xe5, 0x36, 0x7f, 0x4d, 0x60, 0x94, 0x86, - 0x1f, 0xc2, 0xe8, 0x63, 0x88, 0x1e, 0x61, 0x0c, 0xc7, 0x2e, 0x09, 0x02, 0x1e, 0x46, 0x09, 0xbf, - 0x8c, 0xd2, 0xd0, 0x43, 0x06, 0x7e, 0x0c, 0x93, 0x15, 0x61, 0x31, 0xe5, 0x94, 0xb1, 0x88, 0x21, - 0x13, 0x9f, 0x01, 0x8e, 0xa9, 0x9b, 0x32, 0x3f, 0xb9, 0xe1, 0xd7, 0x7e, 0x14, 0x90, 0xc4, 0x8f, - 0x42, 0x74, 0x80, 0x8f, 0x01, 0xa2, 0x6b, 0xca, 0xf8, 0x55, 0x1a, 0x25, 0x04, 0x0d, 0xf0, 0x53, - 0x38, 0x61, 0xf4, 0x2a, 0xa5, 0x71, 0xc2, 0x93, 0x28, 0xe2, 0x01, 0x61, 0xef, 0x29, 0x1a, 0xe2, - 0x67, 0xf0, 0xc4, 0x25, 0x2b, 0xb2, 0xf4, 0x83, 0xa6, 0x80, 0xe7, 0xc7, 0x64, 0x19, 0x50, 0x0f, - 0x1d, 0xe2, 0x53, 0x40, 0x97, 0x94, 0x24, 0x29, 0xa3, 0x7b, 0x77, 0xd4, 0xe0, 0x97, 0xc4, 0xe3, - 0x5d, 0x25, 0x34, 0x6e, 0xf0, 0x8c, 0xc6, 0xab, 0x28, 0x8c, 0x69, 0xaf, 0xae, 0x85, 0x8f, 0xc0, - 0x72, 0x49, 0xe8, 0xd2, 0xa0, 0xc9, 0x03, 0x8c, 0xc0, 0x66, 0x74, 0x15, 0x90, 0x9b, 0xae, 0xef, - 0x49, 0xd3, 0x8f, 0x47, 0x89, 0x17, 0xf8, 0x21, 0xe5, 0xf4, 0x93, 0x4b, 0xa9, 0x47, 0x3d, 0x64, - 0xcf, 0xfe, 0x18, 0x30, 0x66, 0x72, 0x57, 0xa9, 0x72, 0x27, 0xf1, 0x73, 0x18, 0xeb, 0x4e, 0x3b, - 0xc6, 0xd4, 0x98, 0xdb, 0xec, 0x3e, 0xc6, 0xe7, 0x60, 0xc9, 0x1f, 0x99, 0xac, 0x9a, 0x75, 0xb5, - 0x23, 0xb5, 0xd9, 0xde, 0xc0, 0x3e, 0x9c, 0x88, 0xfd, 0x3a, 0xb9, 0x6c, 0x06, 0xec, 0x1c, 0x4c, - 0x8d, 0xf9, 0xe4, 0xcd, 0xf9, 0xa2, 0x77, 0x87, 0x0f, 0x77, 0xce, 0x90, 0x78, 0x78, 0x05, 0xaf, - 0xe0, 0xf8, 0xab, 0xb8, 0x15, 0x7c, 0x4f, 0x1b, 0xb4, 0xb4, 0xa3, 0xc6, 0xa5, 0xf7, 0xc4, 0xd7, - 0x60, 0xe9, 0x2a, 0xeb, 0x48, 0xc3, 0x96, 0x74, 0xda, 0x27, 0xdd, 0x1d, 0x07, 0x1b, 0xeb, 0x4e, - 0x2d, 0xed, 0xcf, 0xbd, 0x07, 0xf0, 0x37, 0x00, 0x00, 0xff, 0xff, 0x38, 0xd1, 0x0f, 0x22, 0x4f, - 0x03, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto deleted file mode 100644 index f21763a4..00000000 --- a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto2"; -option go_package = "remote_api"; - -package remote_api; - -message Request { - required string service_name = 2; - required string method = 3; - required bytes request = 4; - optional string request_id = 5; -} - -message ApplicationError { - required int32 code = 1; - required string detail = 2; -} - -message RpcError { - enum ErrorCode { - UNKNOWN = 0; - CALL_NOT_FOUND = 1; - PARSE_ERROR = 2; - SECURITY_VIOLATION = 3; - OVER_QUOTA = 4; - REQUEST_TOO_LARGE = 5; - CAPABILITY_DISABLED = 6; - FEATURE_DISABLED = 7; - BAD_REQUEST = 8; - RESPONSE_TOO_LARGE = 9; - CANCELLED = 10; - REPLAY_ERROR = 11; - DEADLINE_EXCEEDED = 12; - } - required int32 code = 1; - optional string detail = 2; -} - -message Response { - optional bytes response = 1; - optional bytes exception = 2; - optional ApplicationError application_error = 3; - optional bytes java_exception = 4; - optional RpcError rpc_error = 5; -} diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go deleted file mode 100644 index 2ae8ab9f..00000000 --- a/vendor/google.golang.org/appengine/internal/transaction.go +++ /dev/null @@ -1,115 +0,0 @@ -// 
Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -// This file implements hooks for applying datastore transactions. - -import ( - "context" - "errors" - "reflect" - - "github.com/golang/protobuf/proto" - - basepb "google.golang.org/appengine/internal/base" - pb "google.golang.org/appengine/internal/datastore" -) - -var transactionSetters = make(map[reflect.Type]reflect.Value) - -// RegisterTransactionSetter registers a function that sets transaction information -// in a protocol buffer message. f should be a function with two arguments, -// the first being a protocol buffer type, and the second being *datastore.Transaction. -func RegisterTransactionSetter(f interface{}) { - v := reflect.ValueOf(f) - transactionSetters[v.Type().In(0)] = v -} - -// applyTransaction applies the transaction t to message pb -// by using the relevant setter passed to RegisterTransactionSetter. -func applyTransaction(pb proto.Message, t *pb.Transaction) { - v := reflect.ValueOf(pb) - if f, ok := transactionSetters[v.Type()]; ok { - f.Call([]reflect.Value{v, reflect.ValueOf(t)}) - } -} - -var transactionKey = "used for *Transaction" - -func transactionFromContext(ctx context.Context) *transaction { - t, _ := ctx.Value(&transactionKey).(*transaction) - return t -} - -func withTransaction(ctx context.Context, t *transaction) context.Context { - return context.WithValue(ctx, &transactionKey, t) -} - -type transaction struct { - transaction pb.Transaction - finished bool -} - -var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") - -func RunTransactionOnce(c context.Context, f func(context.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { - if transactionFromContext(c) != nil { - return nil, errors.New("nested transactions are not supported") - } - - // Begin the transaction. - t := &transaction{} - req := &pb.BeginTransactionRequest{ - App: proto.String(FullyQualifiedAppID(c)), - } - if xg { - req.AllowMultipleEg = proto.Bool(true) - } - if previousTransaction != nil { - req.PreviousTransaction = previousTransaction - } - if readOnly { - req.Mode = pb.BeginTransactionRequest_READ_ONLY.Enum() - } else { - req.Mode = pb.BeginTransactionRequest_READ_WRITE.Enum() - } - if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil { - return nil, err - } - - // Call f, rolling back the transaction if f returns a non-nil error, or panics. - // The panic is not recovered. - defer func() { - if t.finished { - return - } - t.finished = true - // Ignore the error return value, since we are already returning a non-nil - // error (or we're panicking). - Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{}) - }() - if err := f(withTransaction(c, t)); err != nil { - return &t.transaction, err - } - t.finished = true - - // Commit the transaction. - res := &pb.CommitResponse{} - err := Call(c, "datastore_v3", "Commit", &t.transaction, res) - if ae, ok := err.(*APIError); ok { - /* TODO: restore this conditional - if appengine.IsDevAppServer() { - */ - // The Python Dev AppServer raises an ApplicationError with error code 2 (which is - // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.". - if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." 
{ - return &t.transaction, ErrConcurrentTransaction - } - if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) { - return &t.transaction, ErrConcurrentTransaction - } - } - return &t.transaction, err -} diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go deleted file mode 100644 index 5f727750..00000000 --- a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go +++ /dev/null @@ -1,527 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto - -package urlfetch - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type URLFetchServiceError_ErrorCode int32 - -const ( - URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0 - URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1 - URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2 - URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3 - URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4 - URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5 - URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6 - URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7 - URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8 - URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9 - URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10 - URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11 - URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12 -) - -var URLFetchServiceError_ErrorCode_name = map[int32]string{ - 0: "OK", - 1: "INVALID_URL", - 2: "FETCH_ERROR", - 3: "UNSPECIFIED_ERROR", - 4: "RESPONSE_TOO_LARGE", - 5: "DEADLINE_EXCEEDED", - 6: "SSL_CERTIFICATE_ERROR", - 7: "DNS_ERROR", - 8: "CLOSED", - 9: "INTERNAL_TRANSIENT_ERROR", - 10: "TOO_MANY_REDIRECTS", - 11: "MALFORMED_REPLY", - 12: "CONNECTION_ERROR", -} -var URLFetchServiceError_ErrorCode_value = map[string]int32{ - "OK": 0, - "INVALID_URL": 1, - "FETCH_ERROR": 2, - "UNSPECIFIED_ERROR": 3, - "RESPONSE_TOO_LARGE": 4, - "DEADLINE_EXCEEDED": 5, - "SSL_CERTIFICATE_ERROR": 6, - "DNS_ERROR": 7, - "CLOSED": 8, - "INTERNAL_TRANSIENT_ERROR": 9, - "TOO_MANY_REDIRECTS": 10, - "MALFORMED_REPLY": 11, - "CONNECTION_ERROR": 12, -} - -func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode { - p := new(URLFetchServiceError_ErrorCode) - *p = x - return p -} -func (x URLFetchServiceError_ErrorCode) String() string { - return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x)) -} -func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode") - if err != nil { - return err - } - *x = URLFetchServiceError_ErrorCode(value) - 
return nil -} -func (URLFetchServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0, 0} -} - -type URLFetchRequest_RequestMethod int32 - -const ( - URLFetchRequest_GET URLFetchRequest_RequestMethod = 1 - URLFetchRequest_POST URLFetchRequest_RequestMethod = 2 - URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3 - URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4 - URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5 - URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6 -) - -var URLFetchRequest_RequestMethod_name = map[int32]string{ - 1: "GET", - 2: "POST", - 3: "HEAD", - 4: "PUT", - 5: "DELETE", - 6: "PATCH", -} -var URLFetchRequest_RequestMethod_value = map[string]int32{ - "GET": 1, - "POST": 2, - "HEAD": 3, - "PUT": 4, - "DELETE": 5, - "PATCH": 6, -} - -func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod { - p := new(URLFetchRequest_RequestMethod) - *p = x - return p -} -func (x URLFetchRequest_RequestMethod) String() string { - return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x)) -} -func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod") - if err != nil { - return err - } - *x = URLFetchRequest_RequestMethod(value) - return nil -} -func (URLFetchRequest_RequestMethod) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0} -} - -type URLFetchServiceError struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} } -func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) } -func (*URLFetchServiceError) ProtoMessage() {} -func (*URLFetchServiceError) Descriptor() ([]byte, []int) { - return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0} -} -func (m *URLFetchServiceError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_URLFetchServiceError.Unmarshal(m, b) -} -func (m *URLFetchServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_URLFetchServiceError.Marshal(b, m, deterministic) -} -func (dst *URLFetchServiceError) XXX_Merge(src proto.Message) { - xxx_messageInfo_URLFetchServiceError.Merge(dst, src) -} -func (m *URLFetchServiceError) XXX_Size() int { - return xxx_messageInfo_URLFetchServiceError.Size(m) -} -func (m *URLFetchServiceError) XXX_DiscardUnknown() { - xxx_messageInfo_URLFetchServiceError.DiscardUnknown(m) -} - -var xxx_messageInfo_URLFetchServiceError proto.InternalMessageInfo - -type URLFetchRequest struct { - Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"` - Url *string `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"` - Header []*URLFetchRequest_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"` - Payload []byte `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"` - FollowRedirects *bool `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"` - Deadline *float64 `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"` - MustValidateServerCertificate *bool `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"` 
- XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} } -func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) } -func (*URLFetchRequest) ProtoMessage() {} -func (*URLFetchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1} -} -func (m *URLFetchRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_URLFetchRequest.Unmarshal(m, b) -} -func (m *URLFetchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_URLFetchRequest.Marshal(b, m, deterministic) -} -func (dst *URLFetchRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_URLFetchRequest.Merge(dst, src) -} -func (m *URLFetchRequest) XXX_Size() int { - return xxx_messageInfo_URLFetchRequest.Size(m) -} -func (m *URLFetchRequest) XXX_DiscardUnknown() { - xxx_messageInfo_URLFetchRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_URLFetchRequest proto.InternalMessageInfo - -const Default_URLFetchRequest_FollowRedirects bool = true -const Default_URLFetchRequest_MustValidateServerCertificate bool = true - -func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod { - if m != nil && m.Method != nil { - return *m.Method - } - return URLFetchRequest_GET -} - -func (m *URLFetchRequest) GetUrl() string { - if m != nil && m.Url != nil { - return *m.Url - } - return "" -} - -func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header { - if m != nil { - return m.Header - } - return nil -} - -func (m *URLFetchRequest) GetPayload() []byte { - if m != nil { - return m.Payload - } - return nil -} - -func (m *URLFetchRequest) GetFollowRedirects() bool { - if m != nil && m.FollowRedirects != nil { - return *m.FollowRedirects - } - return Default_URLFetchRequest_FollowRedirects -} - -func (m *URLFetchRequest) GetDeadline() float64 { - if m != nil && m.Deadline != nil { - return *m.Deadline - } - return 0 -} - -func (m *URLFetchRequest) GetMustValidateServerCertificate() bool { - if m != nil && m.MustValidateServerCertificate != nil { - return *m.MustValidateServerCertificate - } - return Default_URLFetchRequest_MustValidateServerCertificate -} - -type URLFetchRequest_Header struct { - Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"` - Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} } -func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) } -func (*URLFetchRequest_Header) ProtoMessage() {} -func (*URLFetchRequest_Header) Descriptor() ([]byte, []int) { - return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0} -} -func (m *URLFetchRequest_Header) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_URLFetchRequest_Header.Unmarshal(m, b) -} -func (m *URLFetchRequest_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_URLFetchRequest_Header.Marshal(b, m, deterministic) -} -func (dst *URLFetchRequest_Header) XXX_Merge(src proto.Message) { - xxx_messageInfo_URLFetchRequest_Header.Merge(dst, src) -} -func (m *URLFetchRequest_Header) XXX_Size() int { - return xxx_messageInfo_URLFetchRequest_Header.Size(m) -} -func (m *URLFetchRequest_Header) XXX_DiscardUnknown() { - 
xxx_messageInfo_URLFetchRequest_Header.DiscardUnknown(m) -} - -var xxx_messageInfo_URLFetchRequest_Header proto.InternalMessageInfo - -func (m *URLFetchRequest_Header) GetKey() string { - if m != nil && m.Key != nil { - return *m.Key - } - return "" -} - -func (m *URLFetchRequest_Header) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -type URLFetchResponse struct { - Content []byte `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"` - StatusCode *int32 `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"` - Header []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"` - ContentWasTruncated *bool `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"` - ExternalBytesSent *int64 `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"` - ExternalBytesReceived *int64 `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"` - FinalUrl *string `protobuf:"bytes,9,opt,name=FinalUrl" json:"FinalUrl,omitempty"` - ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"` - ApiBytesSent *int64 `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"` - ApiBytesReceived *int64 `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} } -func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) } -func (*URLFetchResponse) ProtoMessage() {} -func (*URLFetchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2} -} -func (m *URLFetchResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_URLFetchResponse.Unmarshal(m, b) -} -func (m *URLFetchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_URLFetchResponse.Marshal(b, m, deterministic) -} -func (dst *URLFetchResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_URLFetchResponse.Merge(dst, src) -} -func (m *URLFetchResponse) XXX_Size() int { - return xxx_messageInfo_URLFetchResponse.Size(m) -} -func (m *URLFetchResponse) XXX_DiscardUnknown() { - xxx_messageInfo_URLFetchResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_URLFetchResponse proto.InternalMessageInfo - -const Default_URLFetchResponse_ContentWasTruncated bool = false -const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0 -const Default_URLFetchResponse_ApiBytesSent int64 = 0 -const Default_URLFetchResponse_ApiBytesReceived int64 = 0 - -func (m *URLFetchResponse) GetContent() []byte { - if m != nil { - return m.Content - } - return nil -} - -func (m *URLFetchResponse) GetStatusCode() int32 { - if m != nil && m.StatusCode != nil { - return *m.StatusCode - } - return 0 -} - -func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header { - if m != nil { - return m.Header - } - return nil -} - -func (m *URLFetchResponse) GetContentWasTruncated() bool { - if m != nil && m.ContentWasTruncated != nil { - return *m.ContentWasTruncated - } - return Default_URLFetchResponse_ContentWasTruncated -} - -func (m *URLFetchResponse) GetExternalBytesSent() int64 { - if m != nil && m.ExternalBytesSent != nil { - return *m.ExternalBytesSent - } - return 0 -} - -func (m 
*URLFetchResponse) GetExternalBytesReceived() int64 { - if m != nil && m.ExternalBytesReceived != nil { - return *m.ExternalBytesReceived - } - return 0 -} - -func (m *URLFetchResponse) GetFinalUrl() string { - if m != nil && m.FinalUrl != nil { - return *m.FinalUrl - } - return "" -} - -func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 { - if m != nil && m.ApiCpuMilliseconds != nil { - return *m.ApiCpuMilliseconds - } - return Default_URLFetchResponse_ApiCpuMilliseconds -} - -func (m *URLFetchResponse) GetApiBytesSent() int64 { - if m != nil && m.ApiBytesSent != nil { - return *m.ApiBytesSent - } - return Default_URLFetchResponse_ApiBytesSent -} - -func (m *URLFetchResponse) GetApiBytesReceived() int64 { - if m != nil && m.ApiBytesReceived != nil { - return *m.ApiBytesReceived - } - return Default_URLFetchResponse_ApiBytesReceived -} - -type URLFetchResponse_Header struct { - Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"` - Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} } -func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) } -func (*URLFetchResponse_Header) ProtoMessage() {} -func (*URLFetchResponse_Header) Descriptor() ([]byte, []int) { - return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2, 0} -} -func (m *URLFetchResponse_Header) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_URLFetchResponse_Header.Unmarshal(m, b) -} -func (m *URLFetchResponse_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_URLFetchResponse_Header.Marshal(b, m, deterministic) -} -func (dst *URLFetchResponse_Header) XXX_Merge(src proto.Message) { - xxx_messageInfo_URLFetchResponse_Header.Merge(dst, src) -} -func (m *URLFetchResponse_Header) XXX_Size() int { - return xxx_messageInfo_URLFetchResponse_Header.Size(m) -} -func (m *URLFetchResponse_Header) XXX_DiscardUnknown() { - xxx_messageInfo_URLFetchResponse_Header.DiscardUnknown(m) -} - -var xxx_messageInfo_URLFetchResponse_Header proto.InternalMessageInfo - -func (m *URLFetchResponse_Header) GetKey() string { - if m != nil && m.Key != nil { - return *m.Key - } - return "" -} - -func (m *URLFetchResponse_Header) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -func init() { - proto.RegisterType((*URLFetchServiceError)(nil), "appengine.URLFetchServiceError") - proto.RegisterType((*URLFetchRequest)(nil), "appengine.URLFetchRequest") - proto.RegisterType((*URLFetchRequest_Header)(nil), "appengine.URLFetchRequest.Header") - proto.RegisterType((*URLFetchResponse)(nil), "appengine.URLFetchResponse") - proto.RegisterType((*URLFetchResponse_Header)(nil), "appengine.URLFetchResponse.Header") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto", fileDescriptor_urlfetch_service_b245a7065f33bced) -} - -var fileDescriptor_urlfetch_service_b245a7065f33bced = []byte{ - // 770 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xdd, 0x6e, 0xe3, 0x54, - 0x10, 0xc6, 0x76, 0x7e, 0xa7, 0x5d, 0x7a, 0x76, 0xb6, 0x45, 0x66, 0xb5, 0xa0, 0x10, 0x09, 0x29, - 0x17, 0x90, 0x2e, 0x2b, 0x24, 0x44, 0xaf, 0x70, 0xed, 0x93, 0xad, 0xa9, 0x63, 0x47, 0xc7, 0x4e, - 0x61, 0xb9, 0xb1, 0xac, 0x78, 0x9a, 
0x5a, 0xb2, 0xec, 0x60, 0x9f, 0x2c, 0xf4, 0x35, 0x78, 0x0d, - 0xde, 0x87, 0xa7, 0xe1, 0x02, 0x9d, 0xc4, 0xc9, 0x6e, 0xbb, 0xd1, 0x4a, 0x5c, 0x65, 0xe6, 0x9b, - 0xef, 0xcc, 0x99, 0x7c, 0xdf, 0xf8, 0x80, 0xb3, 0x2c, 0xcb, 0x65, 0x4e, 0xe3, 0x65, 0x99, 0x27, - 0xc5, 0x72, 0x5c, 0x56, 0xcb, 0xf3, 0x64, 0xb5, 0xa2, 0x62, 0x99, 0x15, 0x74, 0x9e, 0x15, 0x92, - 0xaa, 0x22, 0xc9, 0xcf, 0xd7, 0x55, 0x7e, 0x4b, 0x72, 0x71, 0xb7, 0x0f, 0xe2, 0x9a, 0xaa, 0xb7, - 0xd9, 0x82, 0xc6, 0xab, 0xaa, 0x94, 0x25, 0xf6, 0xf7, 0x67, 0x86, 0x7f, 0xeb, 0x70, 0x3a, 0x17, - 0xde, 0x44, 0xb1, 0xc2, 0x2d, 0x89, 0x57, 0x55, 0x59, 0x0d, 0xff, 0xd2, 0xa1, 0xbf, 0x89, 0xec, - 0x32, 0x25, 0xec, 0x80, 0x1e, 0x5c, 0xb3, 0x4f, 0xf0, 0x04, 0x8e, 0x5c, 0xff, 0xc6, 0xf2, 0x5c, - 0x27, 0x9e, 0x0b, 0x8f, 0x69, 0x0a, 0x98, 0xf0, 0xc8, 0xbe, 0x8a, 0xb9, 0x10, 0x81, 0x60, 0x3a, - 0x9e, 0xc1, 0xd3, 0xb9, 0x1f, 0xce, 0xb8, 0xed, 0x4e, 0x5c, 0xee, 0x34, 0xb0, 0x81, 0x9f, 0x01, - 0x0a, 0x1e, 0xce, 0x02, 0x3f, 0xe4, 0x71, 0x14, 0x04, 0xb1, 0x67, 0x89, 0xd7, 0x9c, 0xb5, 0x14, - 0xdd, 0xe1, 0x96, 0xe3, 0xb9, 0x3e, 0x8f, 0xf9, 0xaf, 0x36, 0xe7, 0x0e, 0x77, 0x58, 0x1b, 0x3f, - 0x87, 0xb3, 0x30, 0xf4, 0x62, 0x9b, 0x8b, 0xc8, 0x9d, 0xb8, 0xb6, 0x15, 0xf1, 0xa6, 0x53, 0x07, - 0x9f, 0x40, 0xdf, 0xf1, 0xc3, 0x26, 0xed, 0x22, 0x40, 0xc7, 0xf6, 0x82, 0x90, 0x3b, 0xac, 0x87, - 0x2f, 0xc0, 0x74, 0xfd, 0x88, 0x0b, 0xdf, 0xf2, 0xe2, 0x48, 0x58, 0x7e, 0xe8, 0x72, 0x3f, 0x6a, - 0x98, 0x7d, 0x35, 0x82, 0xba, 0x79, 0x6a, 0xf9, 0x6f, 0x62, 0xc1, 0x1d, 0x57, 0x70, 0x3b, 0x0a, - 0x19, 0xe0, 0x33, 0x38, 0x99, 0x5a, 0xde, 0x24, 0x10, 0x53, 0xee, 0xc4, 0x82, 0xcf, 0xbc, 0x37, - 0xec, 0x08, 0x4f, 0x81, 0xd9, 0x81, 0xef, 0x73, 0x3b, 0x72, 0x03, 0xbf, 0x69, 0x71, 0x3c, 0xfc, - 0xc7, 0x80, 0x93, 0x9d, 0x5a, 0x82, 0x7e, 0x5f, 0x53, 0x2d, 0xf1, 0x27, 0xe8, 0x4c, 0x49, 0xde, - 0x95, 0xa9, 0xa9, 0x0d, 0xf4, 0xd1, 0xa7, 0xaf, 0x46, 0xe3, 0xbd, 0xba, 0xe3, 0x47, 0xdc, 0x71, - 0xf3, 0xbb, 0xe5, 0x8b, 0xe6, 0x1c, 0x32, 0x30, 0xe6, 0x55, 0x6e, 0xea, 0x03, 0x7d, 0xd4, 0x17, - 0x2a, 0xc4, 0x1f, 0xa1, 0x73, 0x47, 0x49, 0x4a, 0x95, 0x69, 0x0c, 0x8c, 0x11, 0xbc, 0xfa, 0xea, - 0x23, 0x3d, 0xaf, 0x36, 0x44, 0xd1, 0x1c, 0xc0, 0x17, 0xd0, 0x9d, 0x25, 0xf7, 0x79, 0x99, 0xa4, - 0x66, 0x67, 0xa0, 0x8d, 0x8e, 0x2f, 0xf5, 0x9e, 0x26, 0x76, 0x10, 0x8e, 0xe1, 0x64, 0x52, 0xe6, - 0x79, 0xf9, 0x87, 0xa0, 0x34, 0xab, 0x68, 0x21, 0x6b, 0xb3, 0x3b, 0xd0, 0x46, 0xbd, 0x8b, 0x96, - 0xac, 0xd6, 0x24, 0x1e, 0x17, 0xf1, 0x39, 0xf4, 0x1c, 0x4a, 0xd2, 0x3c, 0x2b, 0xc8, 0xec, 0x0d, - 0xb4, 0x91, 0x26, 0xf6, 0x39, 0xfe, 0x0c, 0x5f, 0x4c, 0xd7, 0xb5, 0xbc, 0x49, 0xf2, 0x2c, 0x4d, - 0x24, 0xa9, 0xed, 0xa1, 0xca, 0xa6, 0x4a, 0x66, 0xb7, 0xd9, 0x22, 0x91, 0x64, 0xf6, 0xdf, 0xeb, - 0xfc, 0x71, 0xea, 0xf3, 0x97, 0xd0, 0xd9, 0xfe, 0x0f, 0x25, 0xc6, 0x35, 0xdd, 0x9b, 0xad, 0xad, - 0x18, 0xd7, 0x74, 0x8f, 0xa7, 0xd0, 0xbe, 0x49, 0xf2, 0x35, 0x99, 0xed, 0x0d, 0xb6, 0x4d, 0x86, - 0x1e, 0x3c, 0x79, 0xa0, 0x26, 0x76, 0xc1, 0x78, 0xcd, 0x23, 0xa6, 0x61, 0x0f, 0x5a, 0xb3, 0x20, - 0x8c, 0x98, 0xae, 0xa2, 0x2b, 0x6e, 0x39, 0xcc, 0x50, 0xc5, 0xd9, 0x3c, 0x62, 0x2d, 0xb5, 0x2e, - 0x0e, 0xf7, 0x78, 0xc4, 0x59, 0x1b, 0xfb, 0xd0, 0x9e, 0x59, 0x91, 0x7d, 0xc5, 0x3a, 0xc3, 0x7f, - 0x0d, 0x60, 0xef, 0x84, 0xad, 0x57, 0x65, 0x51, 0x13, 0x9a, 0xd0, 0xb5, 0xcb, 0x42, 0x52, 0x21, - 0x4d, 0x4d, 0x49, 0x29, 0x76, 0x29, 0x7e, 0x09, 0x10, 0xca, 0x44, 0xae, 0x6b, 0xf5, 0x71, 0x6c, - 0x8c, 0x6b, 0x8b, 0xf7, 0x10, 0xbc, 0x78, 0xe4, 0xdf, 0xf0, 0xa0, 0x7f, 0xdb, 0x6b, 0x1e, 0x1b, - 0xf8, 0x03, 0x3c, 0x6b, 0xae, 0xf9, 0x25, 0xa9, 0xa3, 0x6a, 
0x5d, 0x28, 0x81, 0xb6, 0x66, 0xf6, - 0x2e, 0xda, 0xb7, 0x49, 0x5e, 0x93, 0x38, 0xc4, 0xc0, 0x6f, 0xe0, 0x29, 0xff, 0x73, 0xfb, 0x02, - 0x5c, 0xde, 0x4b, 0xaa, 0x43, 0x35, 0xb8, 0x72, 0xd7, 0x10, 0x1f, 0x16, 0xf0, 0x7b, 0x38, 0x7b, - 0x00, 0x0a, 0x5a, 0x50, 0xf6, 0x96, 0xd2, 0x8d, 0xcd, 0x86, 0x38, 0x5c, 0x54, 0xfb, 0x30, 0xc9, - 0x8a, 0x24, 0x57, 0xfb, 0xaa, 0xec, 0xed, 0x8b, 0x7d, 0x8e, 0xdf, 0x01, 0x5a, 0xab, 0xcc, 0x5e, - 0xad, 0xa7, 0x59, 0x9e, 0x67, 0x35, 0x2d, 0xca, 0x22, 0xad, 0x4d, 0x50, 0xed, 0x2e, 0xb4, 0x97, - 0xe2, 0x40, 0x11, 0xbf, 0x86, 0x63, 0x6b, 0x95, 0xbd, 0x9b, 0xf6, 0x68, 0x47, 0x7e, 0x00, 0xe3, - 0xb7, 0xc0, 0x76, 0xf9, 0x7e, 0xcc, 0xe3, 0x1d, 0xf5, 0x83, 0xd2, 0xff, 0x5f, 0xa6, 0x4b, 0xf8, - 0xad, 0xb7, 0x7b, 0x2a, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x1d, 0x9f, 0x6d, 0x24, 0x63, 0x05, - 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto deleted file mode 100644 index f695edf6..00000000 --- a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto2"; -option go_package = "urlfetch"; - -package appengine; - -message URLFetchServiceError { - enum ErrorCode { - OK = 0; - INVALID_URL = 1; - FETCH_ERROR = 2; - UNSPECIFIED_ERROR = 3; - RESPONSE_TOO_LARGE = 4; - DEADLINE_EXCEEDED = 5; - SSL_CERTIFICATE_ERROR = 6; - DNS_ERROR = 7; - CLOSED = 8; - INTERNAL_TRANSIENT_ERROR = 9; - TOO_MANY_REDIRECTS = 10; - MALFORMED_REPLY = 11; - CONNECTION_ERROR = 12; - } -} - -message URLFetchRequest { - enum RequestMethod { - GET = 1; - POST = 2; - HEAD = 3; - PUT = 4; - DELETE = 5; - PATCH = 6; - } - required RequestMethod Method = 1; - required string Url = 2; - repeated group Header = 3 { - required string Key = 4; - required string Value = 5; - } - optional bytes Payload = 6 [ctype=CORD]; - - optional bool FollowRedirects = 7 [default=true]; - - optional double Deadline = 8; - - optional bool MustValidateServerCertificate = 9 [default=true]; -} - -message URLFetchResponse { - optional bytes Content = 1; - required int32 StatusCode = 2; - repeated group Header = 3 { - required string Key = 4; - required string Value = 5; - } - optional bool ContentWasTruncated = 6 [default=false]; - optional int64 ExternalBytesSent = 7; - optional int64 ExternalBytesReceived = 8; - - optional string FinalUrl = 9; - - optional int64 ApiCpuMilliseconds = 10 [default=0]; - optional int64 ApiBytesSent = 11 [default=0]; - optional int64 ApiBytesReceived = 12 [default=0]; -} diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go deleted file mode 100644 index 6f169be4..00000000 --- a/vendor/google.golang.org/appengine/namespace.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2012 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package appengine - -import ( - "context" - "fmt" - "regexp" - - "google.golang.org/appengine/internal" -) - -// Namespace returns a replacement context that operates within the given namespace. -func Namespace(c context.Context, namespace string) (context.Context, error) { - if !validNamespace.MatchString(namespace) { - return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace) - } - return internal.NamespacedContext(c, namespace), nil -} - -// validNamespace matches valid namespace names. 
-var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`) diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go deleted file mode 100644 index fcf3ad0a..00000000 --- a/vendor/google.golang.org/appengine/timeout.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2013 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package appengine - -import "context" - -// IsTimeoutError reports whether err is a timeout error. -func IsTimeoutError(err error) bool { - if err == context.DeadlineExceeded { - return true - } - if t, ok := err.(interface { - IsTimeout() bool - }); ok { - return t.IsTimeout() - } - return false -} diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go deleted file mode 100644 index 6c0d7241..00000000 --- a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go +++ /dev/null @@ -1,209 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package urlfetch provides an http.RoundTripper implementation -// for fetching URLs via App Engine's urlfetch service. -package urlfetch // import "google.golang.org/appengine/urlfetch" - -import ( - "context" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/urlfetch" -) - -// Transport is an implementation of http.RoundTripper for -// App Engine. Users should generally create an http.Client using -// this transport and use the Client rather than using this transport -// directly. -type Transport struct { - Context context.Context - - // Controls whether the application checks the validity of SSL certificates - // over HTTPS connections. A value of false (the default) instructs the - // application to send a request to the server only if the certificate is - // valid and signed by a trusted certificate authority (CA), and also - // includes a hostname that matches the certificate. A value of true - // instructs the application to perform no certificate validation. - AllowInvalidServerCertificate bool -} - -// Verify statically that *Transport implements http.RoundTripper. -var _ http.RoundTripper = (*Transport)(nil) - -// Client returns an *http.Client using a default urlfetch Transport. This -// client will check the validity of SSL certificates. -// -// Any deadline of the provided context will be used for requests through this client. -// If the client does not have a deadline, then an App Engine default of 60 second is used. -func Client(ctx context.Context) *http.Client { - return &http.Client{ - Transport: &Transport{ - Context: ctx, - }, - } -} - -type bodyReader struct { - content []byte - truncated bool - closed bool -} - -// ErrTruncatedBody is the error returned after the final Read() from a -// response's Body if the body has been truncated by App Engine's proxy. 
-var ErrTruncatedBody = errors.New("urlfetch: truncated body") - -func statusCodeToText(code int) string { - if t := http.StatusText(code); t != "" { - return t - } - return strconv.Itoa(code) -} - -func (br *bodyReader) Read(p []byte) (n int, err error) { - if br.closed { - if br.truncated { - return 0, ErrTruncatedBody - } - return 0, io.EOF - } - n = copy(p, br.content) - if n > 0 { - br.content = br.content[n:] - return - } - if br.truncated { - br.closed = true - return 0, ErrTruncatedBody - } - return 0, io.EOF -} - -func (br *bodyReader) Close() error { - br.closed = true - br.content = nil - return nil -} - -// A map of the URL Fetch-accepted methods that take a request body. -var methodAcceptsRequestBody = map[string]bool{ - "POST": true, - "PUT": true, - "PATCH": true, -} - -// urlString returns a valid string given a URL. This function is necessary because -// the String method of URL doesn't correctly handle URLs with non-empty Opaque values. -// See http://code.google.com/p/go/issues/detail?id=4860. -func urlString(u *url.URL) string { - if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") { - return u.String() - } - aux := *u - aux.Opaque = "//" + aux.Host + aux.Opaque - return aux.String() -} - -// RoundTrip issues a single HTTP request and returns its response. Per the -// http.RoundTripper interface, RoundTrip only returns an error if there -// was an unsupported request or the URL Fetch proxy fails. -// Note that HTTP response codes such as 5xx, 403, 404, etc are not -// errors as far as the transport is concerned and will be returned -// with err set to nil. -func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) { - methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method] - if !ok { - return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method) - } - - method := pb.URLFetchRequest_RequestMethod(methNum) - - freq := &pb.URLFetchRequest{ - Method: &method, - Url: proto.String(urlString(req.URL)), - FollowRedirects: proto.Bool(false), // http.Client's responsibility - MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate), - } - if deadline, ok := t.Context.Deadline(); ok { - freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds()) - } - - for k, vals := range req.Header { - for _, val := range vals { - freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{ - Key: proto.String(k), - Value: proto.String(val), - }) - } - } - if methodAcceptsRequestBody[req.Method] && req.Body != nil { - // Avoid a []byte copy if req.Body has a Bytes method. - switch b := req.Body.(type) { - case interface { - Bytes() []byte - }: - freq.Payload = b.Bytes() - default: - freq.Payload, err = ioutil.ReadAll(req.Body) - if err != nil { - return nil, err - } - } - } - - fres := &pb.URLFetchResponse{} - if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil { - return nil, err - } - - res = &http.Response{} - res.StatusCode = int(*fres.StatusCode) - res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode)) - res.Header = make(http.Header) - res.Request = req - - // Faked: - res.ProtoMajor = 1 - res.ProtoMinor = 1 - res.Proto = "HTTP/1.1" - res.Close = true - - for _, h := range fres.Header { - hkey := http.CanonicalHeaderKey(*h.Key) - hval := *h.Value - if hkey == "Content-Length" { - // Will get filled in below for all but HEAD requests. 
- if req.Method == "HEAD" { - res.ContentLength, _ = strconv.ParseInt(hval, 10, 64) - } - continue - } - res.Header.Add(hkey, hval) - } - - if req.Method != "HEAD" { - res.ContentLength = int64(len(fres.Content)) - } - - truncated := fres.GetContentWasTruncated() - res.Body = &bodyReader{content: fres.Content, truncated: truncated} - return -} - -func init() { - internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name) - internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED)) -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go index 191bea48..8b462f3d 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go @@ -1,4 +1,4 @@ -// Copyright 2015 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v4.24.4 // source: google/api/annotations.proto package annotations diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index d5dccb93..636edb46 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -409,6 +409,9 @@ type Publishing struct { // Optional link to proto reference documentation. Example: // https://cloud.google.com/pubsub/lite/docs/reference/rpc ProtoReferenceDocumentationUri string `protobuf:"bytes,110,opt,name=proto_reference_documentation_uri,json=protoReferenceDocumentationUri,proto3" json:"proto_reference_documentation_uri,omitempty"` + // Optional link to REST reference documentation. Example: + // https://cloud.google.com/pubsub/lite/docs/reference/rest + RestReferenceDocumentationUri string `protobuf:"bytes,111,opt,name=rest_reference_documentation_uri,json=restReferenceDocumentationUri,proto3" json:"rest_reference_documentation_uri,omitempty"` } func (x *Publishing) Reset() { @@ -513,6 +516,13 @@ func (x *Publishing) GetProtoReferenceDocumentationUri() string { return "" } +func (x *Publishing) GetRestReferenceDocumentationUri() string { + if x != nil { + return x.RestReferenceDocumentationUri + } + return "" +} + // Settings for Java client libraries. type JavaSettings struct { state protoimpl.MessageState @@ -1210,6 +1220,14 @@ var file_google_api_client_proto_extTypes = []protoimpl.ExtensionInfo{ Tag: "bytes,1050,opt,name=oauth_scopes", Filename: "google/api/client.proto", }, + { + ExtendedType: (*descriptorpb.ServiceOptions)(nil), + ExtensionType: (*string)(nil), + Field: 525000001, + Name: "google.api.api_version", + Tag: "bytes,525000001,opt,name=api_version", + Filename: "google/api/client.proto", + }, } // Extension fields to descriptorpb.MethodOptions. 
@@ -1291,6 +1309,23 @@ var ( // // optional string oauth_scopes = 1050; E_OauthScopes = &file_google_api_client_proto_extTypes[2] + // The API version of this service, which should be sent by version-aware + // clients to the service. This allows services to abide by the schema and + // behavior of the service at the time this API version was deployed. + // The format of the API version must be treated as opaque by clients. + // Services may use a format with an apparent structure, but clients must + // not rely on this to determine components within an API version, or attempt + // to construct other valid API versions. Note that this is for upcoming + // functionality and may not be implemented for all services. + // + // Example: + // + // service Foo { + // option (google.api.api_version) = "v1_20230821_preview"; + // } + // + // optional string api_version = 525000001; + E_ApiVersion = &file_google_api_client_proto_extTypes[3] ) var File_google_api_client_proto protoreflect.FileDescriptor @@ -1355,7 +1390,7 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x22, 0xab, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x69, + 0x6e, 0x67, 0x73, 0x22, 0xf4, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, @@ -1390,154 +1425,163 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, - 0x69, 0x22, 0x9a, 0x02, 0x0a, 0x0c, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, - 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, - 0x72, 0x61, 0x72, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, - 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x44, 
0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x12, 0x47, 0x0a, 0x20, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x72, 0x65, 0x73, + 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x22, 0x9a, 0x02, 0x0a, 0x0c, 0x4a, + 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6c, + 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x50, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, + 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, - 0x0a, 0x0b, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, - 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, - 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, + 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, + 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, + 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0b, 0x43, 0x70, 0x70, 
0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, - 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, - 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, - 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, - 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, - 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, - 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, - 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, - 0x61, 0x74, 0x75, 0x72, 
0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, - 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, - 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, + 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, - 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, - 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, - 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, - 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, - 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, - 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, - 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, - 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, - 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, - 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 
0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, - 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, - 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, - 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, - 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, - 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, + 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, + 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, + 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, + 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, + 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, + 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, + 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, + 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, + 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, + 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, + 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 
0x64, 0x77, 0x72, + 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, + 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, + 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xc2, + 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, + 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, + 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, + 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, + 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, + 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, + 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, + 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, + 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, - 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, - 0x74, 
0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, - 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, - 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, - 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, - 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, - 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, - 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, - 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, - 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, - 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, - 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, - 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, - 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, - 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, - 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, - 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, - 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, - 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, - 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, - 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, - 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x41, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, + 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, + 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, + 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, + 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, + 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, + 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, + 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, + 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53, + 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f, + 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x12, + 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, 0x0a, + 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, + 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, + 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, + 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, + 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 
0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, + 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, + 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, + 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75, + 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x69, + 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( @@ -1610,10 +1654,11 @@ var file_google_api_client_proto_depIdxs = []int32{ 20, // 28: google.api.method_signature:extendee -> google.protobuf.MethodOptions 21, // 29: google.api.default_host:extendee -> google.protobuf.ServiceOptions 21, // 30: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions - 31, // [31:31] is the sub-list for method output_type - 31, // [31:31] is the sub-list for method input_type - 31, // [31:31] is the sub-list for extension type_name - 28, // [28:31] is the sub-list for extension extendee + 21, // 31: google.api.api_version:extendee -> google.protobuf.ServiceOptions + 32, // [32:32] is the sub-list for method output_type + 32, // [32:32] is the sub-list for method input_type + 32, // [32:32] is the sub-list for extension type_name + 28, // [28:32] is the sub-list for extension extendee 0, // [0:28] is the sub-list for field type_name } @@ -1787,7 +1832,7 @@ func file_google_api_client_proto_init() { RawDescriptor: file_google_api_client_proto_rawDesc, NumEnums: 2, NumMessages: 16, - NumExtensions: 3, + NumExtensions: 4, NumServices: 0, }, GoTypes: file_google_api_client_proto_goTypes, diff --git 
a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go index 6ce01ac9..08505ba3 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.12 +// protoc v4.24.4 // source: google/api/field_behavior.proto package annotations @@ -195,21 +195,21 @@ var file_google_api_field_behavior_proto_rawDesc = []byte{ 0x0e, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x4e, 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4e, - 0x54, 0x49, 0x46, 0x49, 0x45, 0x52, 0x10, 0x08, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, + 0x54, 0x49, 0x46, 0x49, 0x45, 0x52, 0x10, 0x08, 0x3a, 0x64, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x52, 0x0d, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x70, 0x0a, 0x0e, 0x63, 0x6f, - 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x12, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, - 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x02, 0x10, 0x00, 0x52, + 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x70, + 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x42, 0x12, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, + 0x62, 0x06, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go index d02e6bbc..d339dfb0 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.12 +// protoc v4.24.4 // source: google/api/field_info.proto package annotations @@ -56,9 +56,9 @@ const ( FieldInfo_IPV4 FieldInfo_Format = 2 // Internet Protocol v6 value as defined by [RFC // 2460](https://datatracker.ietf.org/doc/html/rfc2460). The value may be - // normalized to entirely lowercase letters, and zero-padded partial and - // empty octets. For example, the value `2001:DB8::` would be normalized to - // `2001:0db8:0:0`. + // normalized to entirely lowercase letters with zeros compressed, following + // [RFC 5952](https://datatracker.ietf.org/doc/html/rfc5952). For example, + // the value `2001:0DB8:0::0` would be normalized to `2001:db8::`. FieldInfo_IPV6 FieldInfo_Format = 3 // An IP address in either v4 or v6 format as described by the individual // values defined herein. See the comments on the IPV4 and IPV6 types for diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go index 8a0e1c34..76ea76df 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v4.24.4 // source: google/api/http.proto package annotations diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go index bbcc12d2..7a3fd93f 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v4.24.4 // source: google/api/resource.proto package annotations diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go index 9a9ae04c..1d8397b0 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v4.24.4 // source: google/api/routing.proto package annotations diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go index 3543268f..e7d3805e 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v4.24.4 // source: google/api/httpbody.proto package httpbody diff --git a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go index 45494866..498020e3 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v4.24.4 // source: google/api/launch_stage.proto package api diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go index cc5d52fb..bd46edbe 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v4.24.4 // source: google/rpc/code.proto package code diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go index 7bd161e4..3e562182 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v4.24.4 // source: google/rpc/error_details.proto package errdetails diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index a6b50818..6ad1b1c1 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v4.24.4 // source: google/rpc/status.proto package status diff --git a/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go b/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go index 72afd8b0..c7bef08a 100644 --- a/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v4.24.4 // source: google/type/date.proto package date diff --git a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go index 38ef56f7..7d57f34b 100644 --- a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v4.24.4 // source: google/type/expr.proto package expr diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 608aa6e1..0854d298 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -66,7 +66,7 @@ How to get your contributions merged smoothly and quickly. - **All tests need to be passing** before your change can be merged. 
We recommend you **run tests locally** before creating your PR to catch breakages early on. - - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors + - `./scripts/vet.sh` to catch vet errors - `go test -cpu 1,4 -timeout 7m ./...` to run the tests - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md index c6672c0a..6a8a0778 100644 --- a/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -9,6 +9,7 @@ for general contribution guidelines. ## Maintainers (in alphabetical order) +- [atollena](https://github.com/atollena), Datadog, Inc. - [cesarghali](https://github.com/cesarghali), Google LLC - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index 1f896092..be38384f 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -30,17 +30,20 @@ testdeps: GO111MODULE=on go get -d -v -t google.golang.org/grpc/... vet: vetdeps - ./vet.sh + ./scripts/vet.sh vetdeps: - ./vet.sh -install + ./scripts/vet.sh -install .PHONY: \ all \ build \ clean \ + deps \ proto \ test \ + testsubmodule \ testrace \ + testdeps \ vet \ vetdeps diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index ab0fbb79..b572707c 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -10,7 +10,7 @@ RPC framework that puts mobile and HTTP/2 first. For more information see the ## Prerequisites -- **[Go][]**: any one of the **three latest major** [releases][go-releases]. +- **[Go][]**: any one of the **two latest major** [releases][go-releases]. ## Installation diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index d79560a2..f391744f 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -54,13 +54,14 @@ var ( // an init() function), and is not thread-safe. If multiple Balancers are // registered with the same name, the one registered last will take effect. func Register(b Builder) { - if strings.ToLower(b.Name()) != b.Name() { + name := strings.ToLower(b.Name()) + if name != b.Name() { // TODO: Skip the use of strings.ToLower() to index the map after v1.59 // is released to switch to case sensitive balancer registry. Also, // remove this warning and update the docstrings for Register and Get. logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name()) } - m[strings.ToLower(b.Name())] = b + m[name] = b } // unregisterForTesting deletes the balancer with the given name from the @@ -232,8 +233,8 @@ type BuildOptions struct { // implementations which do not communicate with a remote load balancer // server can ignore this field. Authority string - // ChannelzParentID is the parent ClientConn's channelz ID. - ChannelzParentID *channelz.Identifier + // ChannelzParent is the parent ClientConn's channelz channel. + ChannelzParent channelz.Identifier // CustomUserAgent is the custom user agent set on the parent ClientConn. // The balancer should set the same custom user agent if it creates a // ClientConn. 
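The Register change above still lowercases balancer names before indexing the registry, and it keeps the warning that grpc-go intends to move to a case-sensitive registry. A custom balancer should therefore already register under an all-lowercase name from an init() function. Below is a minimal sketch of such a registration using the balancer/base helpers; the package name, the "my_balancer" name, and the trivial pick-the-first-ready-SubConn policy are illustrative assumptions, not part of this change:

package mybalancer // hypothetical package, for illustration only

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

// Name is deliberately all lowercase: Register lowercases names today, so a
// lowercase name keeps behavior identical once the registry becomes case
// sensitive.
const Name = "my_balancer"

type pickerBuilder struct{}

// Build snapshots the ready SubConns into a picker. base.NewBalancerBuilder
// only calls Build once at least one SubConn is READY.
func (pickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
	scs := make([]balancer.SubConn, 0, len(info.ReadySCs))
	for sc := range info.ReadySCs {
		scs = append(scs, sc)
	}
	return &picker{scs: scs}
}

type picker struct{ scs []balancer.SubConn }

// Pick implements a trivial policy for the sketch: always use the first
// ready SubConn.
func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	if len(p.scs) == 0 {
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	}
	return balancer.PickResult{SubConn: p.scs[0]}, nil
}

func init() {
	balancer.Register(base.NewBalancerBuilder(Name, pickerBuilder{}, base.Config{}))
}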
diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index 32989b3a..0adc9886 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // source: grpc/lb/v1/load_balancer.proto diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go index d8ec6539..57a792a7 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v4.25.2 // source: grpc/lb/v1/load_balancer.proto @@ -34,8 +34,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 const ( LoadBalancer_BalanceLoad_FullMethodName = "/grpc.lb.v1.LoadBalancer/BalanceLoad" @@ -58,11 +58,12 @@ func NewLoadBalancerClient(cc grpc.ClientConnInterface) LoadBalancerClient { } func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) { - stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], LoadBalancer_BalanceLoad_FullMethodName, opts...) + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], LoadBalancer_BalanceLoad_FullMethodName, cOpts...) 
if err != nil { return nil, err } - x := &loadBalancerBalanceLoadClient{stream} + x := &loadBalancerBalanceLoadClient{ClientStream: stream} return x, nil } @@ -116,7 +117,7 @@ func RegisterLoadBalancerServer(s grpc.ServiceRegistrar, srv LoadBalancerServer) } func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{stream}) + return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{ServerStream: stream}) } type LoadBalancer_BalanceLoadServer interface { diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go index 8942c313..96a57c8c 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go @@ -21,14 +21,14 @@ package grpclb import ( "encoding/json" - "google.golang.org/grpc" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/serviceconfig" ) const ( roundRobinName = roundrobin.Name - pickFirstName = grpc.PickFirstBalancerName + pickFirstName = pickfirst.Name ) type grpclbServiceConfig struct { diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go index 20c5f2ec..671bc663 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go @@ -19,13 +19,13 @@ package grpclb import ( + "math/rand" "sync" "sync/atomic" "google.golang.org/grpc/balancer" lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/status" ) @@ -112,7 +112,7 @@ type rrPicker struct { func newRRPicker(readySCs []balancer.SubConn) *rrPicker { return &rrPicker{ subConns: readySCs, - subConnsNext: grpcrand.Intn(len(readySCs)), + subConnsNext: rand.Intn(len(readySCs)), } } @@ -147,7 +147,7 @@ func newLBPicker(serverList []*lbpb.Server, readySCs []balancer.SubConn, stats * return &lbPicker{ serverList: serverList, subConns: readySCs, - subConnsNext: grpcrand.Intn(len(readySCs)), + subConnsNext: rand.Intn(len(readySCs)), stats: stats, } } diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go index f8b5229c..506fae0d 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go @@ -246,7 +246,7 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() error { // Explicitly set pickfirst as the balancer. dopts = append(dopts, grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"pick_first"}`)) dopts = append(dopts, grpc.WithResolvers(lb.manualResolver)) - dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID)) + dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParent)) // Enable Keepalive for grpclb client. 
dopts = append(dopts, grpc.WithKeepaliveParams(keepalive.ClientParameters{ diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go similarity index 71% rename from vendor/google.golang.org/grpc/pickfirst.go rename to vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index 5128f936..07527603 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -16,54 +16,60 @@ * */ -package grpc +// Package pickfirst contains the pick_first load balancing policy. +package pickfirst import ( "encoding/json" "errors" "fmt" + "math/rand" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" internalgrpclog "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) -const ( - // PickFirstBalancerName is the name of the pick_first balancer. - PickFirstBalancerName = "pick_first" - logPrefix = "[pick-first-lb %p] " -) - -func newPickfirstBuilder() balancer.Builder { - return &pickfirstBuilder{} +func init() { + balancer.Register(pickfirstBuilder{}) + internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } } +var logger = grpclog.Component("pick-first-lb") + +const ( + // Name is the name of the pick_first balancer. + Name = "pick_first" + logPrefix = "[pick-first-lb %p] " +) + type pickfirstBuilder struct{} -func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { b := &pickfirstBalancer{cc: cc} b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b } -func (*pickfirstBuilder) Name() string { - return PickFirstBalancerName +func (pickfirstBuilder) Name() string { + return Name } type pfConfig struct { serviceconfig.LoadBalancingConfig `json:"-"` // If set to true, instructs the LB policy to shuffle the order of the list - // of addresses received from the name resolver before attempting to + // of endpoints received from the name resolver before attempting to // connect to them. ShuffleAddressList bool `json:"shuffleAddressList"` } -func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { +func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { var cfg pfConfig if err := json.Unmarshal(js, &cfg); err != nil { return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) @@ -97,9 +103,14 @@ func (b *pickfirstBalancer) ResolverError(err error) { }) } +type Shuffler interface { + ShuffleAddressListForTesting(n int, swap func(i, j int)) +} + +func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } + func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { - addrs := state.ResolverState.Addresses - if len(addrs) == 0 { + if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { // The resolver reported an empty address list. Treat it like an error by // calling b.ResolverError. 
if b.subConn != nil { @@ -111,22 +122,49 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - // We don't have to guard this block with the env var because ParseConfig // already does so. cfg, ok := state.BalancerConfig.(pfConfig) if state.BalancerConfig != nil && !ok { return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) } - if cfg.ShuffleAddressList { - addrs = append([]resolver.Address{}, addrs...) - grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) - } if b.logger.V(2) { b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) } + var addrs []resolver.Address + if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 { + // Perform the optional shuffling described in gRFC A62. The shuffling will + // change the order of endpoints but not touch the order of the addresses + // within each endpoint. - A61 + if cfg.ShuffleAddressList { + endpoints = append([]resolver.Endpoint{}, endpoints...) + internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + } + + // "Flatten the list by concatenating the ordered list of addresses for each + // of the endpoints, in order." - A61 + for _, endpoint := range endpoints { + // "In the flattened list, interleave addresses from the two address + // families, as per RFC-8304 section 4." - A61 + // TODO: support the above language. + addrs = append(addrs, endpoint.Addresses...) + } + } else { + // Endpoints not set, process addresses until we migrate resolver + // emissions fully to Endpoints. The top channel does wrap emitted + // addresses with endpoints, however some balancers such as weighted + // target do not forward the corresponding correct endpoints down/split + // endpoints properly. Once all balancers correctly forward endpoints + // down, this else conditional can be deleted. + addrs = state.ResolverState.Addresses + if cfg.ShuffleAddressList { + addrs = append([]resolver.Address{}, addrs...) + rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + } + } + if b.subConn != nil { b.cc.UpdateAddresses(b.subConn, addrs) return nil @@ -243,7 +281,3 @@ func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { i.subConn.Connect() return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } - -func init() { - balancer.Register(newPickfirstBuilder()) -} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index f7031ad2..260255d3 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,12 +22,12 @@ package roundrobin import ( + "math/rand" "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/grpcrand" ) // Name is the name of round_robin balancer. @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list.
- next: uint32(grpcrand.Intn(len(scs))), + next: uint32(rand.Intn(len(scs))), } } diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index b5e30cff..4161fdf4 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -21,7 +21,6 @@ package grpc import ( "context" "fmt" - "strings" "sync" "google.golang.org/grpc/balancer" @@ -66,19 +65,20 @@ type ccBalancerWrapper struct { } // newCCBalancerWrapper creates a new balancer wrapper in idle state. The -// underlying balancer is not created until the switchTo() method is invoked. +// underlying balancer is not created until the updateClientConnState() method +// is invoked. func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { ctx, cancel := context.WithCancel(cc.ctx) ccb := &ccBalancerWrapper{ cc: cc, opts: balancer.BuildOptions{ - DialCreds: cc.dopts.copts.TransportCredentials, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - Authority: cc.authority, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, + DialCreds: cc.dopts.copts.TransportCredentials, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParent: cc.channelz, + Target: cc.parsedTarget, }, serializer: grpcsync.NewCallbackSerializer(ctx), serializerCancel: cancel, @@ -97,6 +97,11 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat if ctx.Err() != nil || ccb.balancer == nil { return } + name := gracefulswitch.ChildName(ccs.BalancerConfig) + if ccb.curBalancerName != name { + ccb.curBalancerName = name + channelz.Infof(logger, ccb.cc.channelz, "Channel switches to new LB policy %q", name) + } err := ccb.balancer.UpdateClientConnState(*ccs) if logger.V(2) && err != nil { logger.Infof("error from balancer.UpdateClientConnState: %v", err) @@ -120,54 +125,6 @@ func (ccb *ccBalancerWrapper) resolverError(err error) { }) } -// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the -// LB policy identified by name. -// -// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the -// first good update from the name resolver, it determines the LB policy to use -// and invokes the switchTo() method. Upon receipt of every subsequent update -// from the name resolver, it invokes this method. -// -// the ccBalancerWrapper keeps track of the current LB policy name, and skips -// the graceful balancer switching process if the name does not change. -func (ccb *ccBalancerWrapper) switchTo(name string) { - ccb.serializer.Schedule(func(ctx context.Context) { - if ctx.Err() != nil || ccb.balancer == nil { - return - } - // TODO: Other languages use case-sensitive balancer registries. We should - // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. - if strings.EqualFold(ccb.curBalancerName, name) { - return - } - ccb.buildLoadBalancingPolicy(name) - }) -} - -// buildLoadBalancingPolicy performs the following: -// - retrieve a balancer builder for the given name. Use the default LB -// policy, pick_first, if no LB policy with name is found in the registry. -// - instruct the gracefulswitch balancer to switch to the above builder. This -// will actually build the new balancer. -// - update the `curBalancerName` field -// -// Must be called from a serializer callback. 
-func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { - builder := balancer.Get(name) - if builder == nil { - channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) - builder = newPickfirstBuilder() - } else { - channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) - } - - if err := ccb.balancer.SwitchTo(builder); err != nil { - channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) - return - } - ccb.curBalancerName = builder.Name() -} - // close initiates async shutdown of the wrapper. cc.mu must be held when // calling this function. To determine the wrapper has finished shutting down, // the channel should block on ccb.serializer.Done() without cc.mu held. @@ -175,7 +132,7 @@ func (ccb *ccBalancerWrapper) close() { ccb.mu.Lock() ccb.closed = true ccb.mu.Unlock() - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") + channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing") ccb.serializer.Schedule(func(context.Context) { if ccb.balancer == nil { return @@ -212,7 +169,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer } ac, err := ccb.cc.newAddrConnLocked(addrs, opts) if err != nil { - channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) + channelz.Warningf(logger, ccb.cc.channelz, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } acbw := &acBalancerWrapper{ @@ -241,6 +198,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { ccb.cc.mu.Lock() defer ccb.cc.mu.Unlock() + if ccb.cc.conns == nil { + // The CC has been closed; ignore this update. + return + } ccb.mu.Lock() if ccb.closed { @@ -304,7 +265,7 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { } func (acbw *acBalancerWrapper) String() string { - return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) + return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelz.ID) } func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 856c75dd..63c639e4 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // source: grpc/binlog/v1/binarylog.proto diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index f6e815e6..423be7b4 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -31,13 +31,13 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" - "google.golang.org/grpc/internal/pretty" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -67,12 +67,14 @@ var ( errConnDrain = errors.New("grpc: the connection is drained") // errConnClosing indicates that the connection is closing. errConnClosing = errors.New("grpc: the connection is closing") - // errConnIdling indicates the the connection is being closed as the channel + // errConnIdling indicates the connection is being closed as the channel // is moving to an idle mode due to inactivity. errConnIdling = errors.New("grpc: the connection is closing due to channel idleness") // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" + // PickFirstBalancerName is the name of the pick_first balancer. + PickFirstBalancerName = pickfirst.Name ) // The following errors are returned from Dial and DialContext @@ -101,11 +103,6 @@ const ( defaultReadBufSize = 32 * 1024 ) -// Dial creates a client connection to the given target. -func Dial(target string, opts ...DialOption) (*ClientConn, error) { - return DialContext(context.Background(), target, opts...) -} - type defaultConfigSelector struct { sc *ServiceConfig } @@ -117,13 +114,23 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires }, nil } -// newClient returns a new client in idle mode. -func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) { +// NewClient creates a new gRPC "channel" for the target URI provided. No I/O +// is performed. Use of the ClientConn for RPCs will automatically cause it to +// connect. Connect may be used to manually create a connection, but for most +// users this is unnecessary. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. e.g. to use dns +// resolver, a "dns:///" prefix should be applied to the target. +// +// The DialOptions returned by WithBlock, WithTimeout, +// WithReturnConnectionError, and FailOnNonTempDialError are ignored by this +// function. +func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, conns: make(map[*addrConn]struct{}), dopts: defaultDialOptions(), - czData: new(channelzData), } cc.retryThrottler.Store((*retryThrottler)(nil)) @@ -148,6 +155,16 @@ func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) for _, opt := range opts { opt.apply(&cc.dopts) } + + // Determine the resolver to use. 
+ if err := cc.initParsedTargetAndResolverBuilder(); err != nil { + return nil, err + } + + for _, opt := range globalPerTargetDialOptions { + opt.DialOptionForTarget(cc.parsedTarget.URL).apply(&cc.dopts) + } + chainUnaryClientInterceptors(cc) chainStreamClientInterceptors(cc) @@ -156,7 +173,7 @@ func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) } if cc.dopts.defaultServiceConfigRawJSON != nil { - scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) + scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON, cc.dopts.maxCallAttempts) if scpr.Err != nil { return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) } @@ -164,26 +181,17 @@ func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) } cc.mkp = cc.dopts.copts.KeepaliveParams - // Register ClientConn with channelz. + if err = cc.initAuthority(); err != nil { + return nil, err + } + + // Register ClientConn with channelz. Note that this is only done after + // channel creation cannot fail. cc.channelzRegistration(target) + channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", cc.parsedTarget) + channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority) - // TODO: Ideally it should be impossible to error from this function after - // channelz registration. This will require removing some channelz logs - // from the following functions that can error. Errors can be returned to - // the user, and successful logs can be emitted here, after the checks have - // passed and channelz is subsequently registered. - - // Determine the resolver to use. - if err := cc.parseTargetAndFindResolver(); err != nil { - channelz.RemoveEntry(cc.channelzID) - return nil, err - } - if err = cc.determineAuthority(); err != nil { - channelz.RemoveEntry(cc.channelzID) - return nil, err - } - - cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. @@ -191,39 +199,36 @@ func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) return cc, nil } -// DialContext creates a client connection to the given target. By default, it's -// a non-blocking dial (the function won't wait for connections to be -// established, and connecting happens in the background). To make it a blocking -// dial, use WithBlock() dial option. +// Dial calls DialContext(context.Background(), target, opts...). // -// In the non-blocking case, the ctx does not act against the connection. It -// only controls the setup steps. +// Deprecated: use NewClient instead. Will be supported throughout 1.x. +func Dial(target string, opts ...DialOption) (*ClientConn, error) { + return DialContext(context.Background(), target, opts...) +} + +// DialContext calls NewClient and then exits idle mode. If WithBlock(true) is +// used, it calls Connect and WaitForStateChange until either the context +// expires or the state of the ClientConn is Ready. // -// In the blocking case, ctx can be used to cancel or expire the pending -// connection. Once this function returns, the cancellation and expiration of -// ctx will be noop. Users should call ClientConn.Close to terminate all the -// pending operations after this function returns. 
+// One subtle difference between NewClient and Dial and DialContext is that the +// former uses "dns" as the default name resolver, while the latter use +// "passthrough" for backward compatibility. This distinction should not matter +// to most users, but could matter to legacy users that specify a custom dialer +// and expect it to receive the target string directly. // -// The target name syntax is defined in -// https://github.com/grpc/grpc/blob/master/doc/naming.md. -// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. +// Deprecated: use NewClient instead. Will be supported throughout 1.x. func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { - cc, err := newClient(target, opts...) + // At the end of this method, we kick the channel out of idle, rather than + // waiting for the first rpc. + opts = append([]DialOption{withDefaultScheme("passthrough")}, opts...) + cc, err := NewClient(target, opts...) if err != nil { return nil, err } // We start the channel off in idle mode, but kick it out of idle now, - // instead of waiting for the first RPC. Other gRPC implementations do wait - // for the first RPC to kick the channel out of idle. But doing so would be - // a major behavior change for our users who are used to seeing the channel - // active after Dial. - // - // Taking this approach of kicking it out of idle at the end of this method - // allows us to share the code between channel creation and exiting idle - // mode. This will also make it easy for us to switch to starting the - // channel off in idle, i.e. by making newClient exported. - + // instead of waiting for the first RPC. This is the legacy behavior of + // Dial. defer func() { if err != nil { cc.Close() @@ -291,17 +296,17 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * // addTraceEvent is a helper method to add a trace event on the channel. If the // channel is a nested one, the same event is also added on the parent channel. func (cc *ClientConn) addTraceEvent(msg string) { - ted := &channelz.TraceEventDesc{ + ted := &channelz.TraceEvent{ Desc: fmt.Sprintf("Channel %s", msg), Severity: channelz.CtInfo, } - if cc.dopts.channelzParentID != nil { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg), + if cc.dopts.channelzParent != nil { + ted.Parent = &channelz.TraceEvent{ + Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelz.ID, msg), Severity: channelz.CtInfo, } } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + channelz.AddTraceEvent(logger, cc.channelz, 0, ted) } type idler ClientConn @@ -418,14 +423,15 @@ func (cc *ClientConn) validateTransportCredentials() error { } // channelzRegistration registers the newly created ClientConn with channelz and -// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`. -// A channelz trace event is emitted for ClientConn creation. If the newly -// created ClientConn is a nested one, i.e a valid parent ClientConn ID is -// specified via a dial option, the trace event is also added to the parent. +// stores the returned identifier in `cc.channelz`. A channelz trace event is +// emitted for ClientConn creation. If the newly created ClientConn is a nested +// one, i.e a valid parent ClientConn ID is specified via a dial option, the +// trace event is also added to the parent. // // Doesn't grab cc.mu as this method is expected to be called only at Dial time. 
func (cc *ClientConn) channelzRegistration(target string) { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + parentChannel, _ := cc.dopts.channelzParent.(*channelz.Channel) + cc.channelz = channelz.RegisterChannel(parentChannel, target) cc.addTraceEvent("created") } @@ -492,11 +498,11 @@ func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStr } // newConnectivityStateManager creates a connectivityStateManager with -// the specified id. -func newConnectivityStateManager(ctx context.Context, id *channelz.Identifier) *connectivityStateManager { +// the specified channel. +func newConnectivityStateManager(ctx context.Context, channel *channelz.Channel) *connectivityStateManager { return &connectivityStateManager{ - channelzID: id, - pubSub: grpcsync.NewPubSub(ctx), + channelz: channel, + pubSub: grpcsync.NewPubSub(ctx), } } @@ -510,7 +516,7 @@ type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} - channelzID *channelz.Identifier + channelz *channelz.Channel pubSub *grpcsync.PubSub } @@ -527,9 +533,10 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { return } csm.state = state + csm.channelz.ChannelMetrics.State.Store(&state) csm.pubSub.Publish(state) - channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) + channelz.Infof(logger, csm.channelz, "Channel Connectivity change to %v", state) if csm.notifyChan != nil { // There are other goroutines waiting on this channel. close(csm.notifyChan) @@ -583,12 +590,12 @@ type ClientConn struct { cancel context.CancelFunc // Cancelled on close. // The following are initialized at dial time, and are read-only after that. - target string // User's dial target. - parsedTarget resolver.Target // See parseTargetAndFindResolver(). - authority string // See determineAuthority(). - dopts dialOptions // Default and user specified dial options. - channelzID *channelz.Identifier // Channelz identifier for the channel. - resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). + target string // User's dial target. + parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). + authority string // See initAuthority(). + dopts dialOptions // Default and user specified dial options. + channelz *channelz.Channel // Channelz object. + resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). idlenessMgr *idle.Manager // The following provide their own synchronization, and therefore don't @@ -596,7 +603,6 @@ type ClientConn struct { csMgr *connectivityStateManager pickerWrapper *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector - czData *channelzData retryThrottler atomic.Value // Updated from service config. // mu protects the following fields.
@@ -690,7 +696,7 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { var emptyServiceConfig *ServiceConfig func init() { - cfg := parseServiceConfig("{}") + cfg := parseServiceConfig("{}", defaultMaxCallAttempts) if cfg.Err != nil { panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) } @@ -707,15 +713,15 @@ func init() { } } -func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { +func (cc *ClientConn) maybeApplyDefaultServiceConfig() { if cc.sc != nil { - cc.applyServiceConfigAndBalancer(cc.sc, nil, addrs) + cc.applyServiceConfigAndBalancer(cc.sc, nil) return } if cc.dopts.defaultServiceConfig != nil { - cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}, addrs) + cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}) } else { - cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}, addrs) + cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}) } } @@ -733,7 +739,7 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) // May need to apply the initial service config in case the resolver // doesn't support service configs, or doesn't provide a service config // with the new addresses. - cc.maybeApplyDefaultServiceConfig(nil) + cc.maybeApplyDefaultServiceConfig() cc.balancerWrapper.resolverError(err) @@ -744,10 +750,10 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) var ret error if cc.dopts.disableServiceConfig { - channelz.Infof(logger, cc.channelzID, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig) - cc.maybeApplyDefaultServiceConfig(s.Addresses) + channelz.Infof(logger, cc.channelz, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig) + cc.maybeApplyDefaultServiceConfig() } else if s.ServiceConfig == nil { - cc.maybeApplyDefaultServiceConfig(s.Addresses) + cc.maybeApplyDefaultServiceConfig() // TODO: do we need to apply a failing LB policy if there is no // default, per the error handling design? } else { @@ -755,12 +761,12 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) configSelector := iresolver.GetConfigSelector(s) if configSelector != nil { if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 { - channelz.Infof(logger, cc.channelzID, "method configs in service config will be ignored due to presence of config selector") + channelz.Infof(logger, cc.channelz, "method configs in service config will be ignored due to presence of config selector") } } else { configSelector = &defaultConfigSelector{sc} } - cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) + cc.applyServiceConfigAndBalancer(sc, configSelector) } else { ret = balancer.ErrBadResolverState if cc.sc == nil { @@ -775,7 +781,7 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) var balCfg serviceconfig.LoadBalancingConfig if cc.sc != nil && cc.sc.lbConfig != nil { - balCfg = cc.sc.lbConfig.cfg + balCfg = cc.sc.lbConfig } bw := cc.balancerWrapper cc.mu.Unlock() @@ -834,22 +840,20 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer. 
addrs: copyAddressesWithoutBalancerAttributes(addrs), scopts: opts, dopts: cc.dopts, - czData: new(channelzData), + channelz: channelz.RegisterSubChannel(cc.channelz, ""), resetBackoff: make(chan struct{}), stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) + // Start with our address set to the first address; this may be updated if + // we connect to different addresses. + ac.channelz.ChannelMetrics.Target.Store(&addrs[0].Addr) - var err error - ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") - if err != nil { - return nil, err - } - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{ Desc: "Subchannel created", Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()), + Parent: &channelz.TraceEvent{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelz.ID), Severity: channelz.CtInfo, }, }) @@ -872,38 +876,27 @@ func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { ac.tearDown(err) } -func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { - return &channelz.ChannelInternalMetric{ - State: cc.GetState(), - Target: cc.target, - CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted), - CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded), - CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed), - LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)), - } -} - // Target returns the target string of the ClientConn. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func (cc *ClientConn) Target() string { return cc.target } +// CanonicalTarget returns the canonical target string of the ClientConn. +func (cc *ClientConn) CanonicalTarget() string { + return cc.parsedTarget.String() +} + func (cc *ClientConn) incrCallsStarted() { - atomic.AddInt64(&cc.czData.callsStarted, 1) - atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano()) + cc.channelz.ChannelMetrics.CallsStarted.Add(1) + cc.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano()) } func (cc *ClientConn) incrCallsSucceeded() { - atomic.AddInt64(&cc.czData.callsSucceeded, 1) + cc.channelz.ChannelMetrics.CallsSucceeded.Add(1) } func (cc *ClientConn) incrCallsFailed() { - atomic.AddInt64(&cc.czData.callsFailed, 1) + cc.channelz.ChannelMetrics.CallsFailed.Add(1) } // connect starts creating a transport. @@ -946,10 +939,14 @@ func equalAddresses(a, b []resolver.Address) bool { // updateAddrs updates ac.addrs with the new addresses list and handles active // connections or connection attempts. 
func (ac *addrConn) updateAddrs(addrs []resolver.Address) { - ac.mu.Lock() - channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs)) - addrs = copyAddressesWithoutBalancerAttributes(addrs) + limit := len(addrs) + if limit > 5 { + limit = 5 + } + channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit]) + + ac.mu.Lock() if equalAddresses(ac.addrs, addrs) { ac.mu.Unlock() return @@ -1067,7 +1064,7 @@ func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method st }) } -func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { +func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector) { if sc == nil { // should never reach here. return @@ -1088,17 +1085,6 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel } else { cc.retryThrottler.Store((*retryThrottler)(nil)) } - - var newBalancerName string - if cc.sc == nil || (cc.sc.lbConfig == nil && cc.sc.LB == nil) { - // No service config or no LB policy specified in config. - newBalancerName = PickFirstBalancerName - } else if cc.sc.lbConfig != nil { - newBalancerName = cc.sc.lbConfig.name - } else { // cc.sc.LB != nil - newBalancerName = *cc.sc.LB - } - cc.balancerWrapper.switchTo(newBalancerName) } func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { @@ -1174,7 +1160,7 @@ func (cc *ClientConn) Close() error { // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from being // deleted right away. - channelz.RemoveEntry(cc.channelzID) + channelz.RemoveEntry(cc.channelz.ID) return nil } @@ -1195,6 +1181,10 @@ type addrConn struct { // is received, transport is closed, ac has been torn down). transport transport.ClientTransport // The current transport. + // This mutex is used on the RPC path, so its usage should be minimized as + // much as possible. + // TODO: Find a lock-free way to retrieve the transport and state from the + // addrConn. mu sync.Mutex curAddr resolver.Address // The current address. addrs []resolver.Address // All addresses that the resolver resolved to. @@ -1206,8 +1196,7 @@ type addrConn struct { backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} - channelzID *channelz.Identifier - czData *channelzData + channelz *channelz.SubChannel } // Note: this requires a lock on ac.mu. 
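The hunks above make subchannel registration infallible (channelz.RegisterSubChannel no longer returns an error), cap the updateAddrs log line at the first five addresses, and add a public ClientConn.CanonicalTarget accessor alongside the now-stable Target. A brief usage sketch of the accessor pair, assuming grpc.NewClient from the grpc-go version vendored here (the address is illustrative):

package main

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// NewClient only creates the channel; no connection is attempted here.
	cc, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer cc.Close()

	// Target returns the string passed at creation time; CanonicalTarget
	// returns the parsed, scheme-qualified form ("dns:///localhost:50051"
	// under NewClient's default "dns" scheme).
	fmt.Println(cc.Target())
	fmt.Println(cc.CanonicalTarget())
}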
@@ -1219,10 +1208,11 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) close(ac.stateChan) ac.stateChan = make(chan struct{}) ac.state = s + ac.channelz.ChannelMetrics.State.Store(&s) if lastErr == nil { - channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) + channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v", s) } else { - channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) + channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) } ac.acbw.updateState(s, lastErr) } @@ -1320,6 +1310,7 @@ func (ac *addrConn) resetTransport() { func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { var firstConnErr error for _, addr := range addrs { + ac.channelz.ChannelMetrics.Target.Store(&addr.Addr) if ctx.Err() != nil { return errConnClosing } @@ -1335,7 +1326,7 @@ func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, c } ac.mu.Unlock() - channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) + channelz.Infof(logger, ac.channelz, "Subchannel picks a new address %q to connect", addr.Addr) err := ac.createTransport(ctx, addr, copts, connectDeadline) if err == nil { @@ -1388,7 +1379,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, connectCtx, cancel := context.WithDeadline(ctx, connectDeadline) defer cancel() - copts.ChannelzParentID = ac.channelzID + copts.ChannelzParent = ac.channelz newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) if err != nil { @@ -1397,7 +1388,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, } // newTr is either nil, or closed. hcancel() - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) + channelz.Warningf(logger, ac.channelz, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) return err } @@ -1469,7 +1460,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { // The health package is not imported to set health check function. // // TODO: add a link to the health check doc in the error message. 
- channelz.Error(logger, ac.channelzID, "Health check is requested but health check function is not set.") + channelz.Error(logger, ac.channelz, "Health check is requested but health check function is not set.") return } @@ -1499,9 +1490,9 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) if err != nil { if status.Code(err) == codes.Unimplemented { - channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") + channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled") } else { - channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err) + channelz.Errorf(logger, ac.channelz, "Health checking failed: %v", err) } } }() @@ -1566,18 +1557,18 @@ func (ac *addrConn) tearDown(err error) { ac.cancel() ac.curAddr = resolver.Address{} - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{ Desc: "Subchannel deleted", Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()), + Parent: &channelz.TraceEvent{ + Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelz.ID), Severity: channelz.CtInfo, }, }) // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from // being deleted right away. - channelz.RemoveEntry(ac.channelzID) + channelz.RemoveEntry(ac.channelz.ID) ac.mu.Unlock() // We have to release the lock before the call to GracefulClose/Close here @@ -1604,39 +1595,6 @@ func (ac *addrConn) tearDown(err error) { } } -func (ac *addrConn) getState() connectivity.State { - ac.mu.Lock() - defer ac.mu.Unlock() - return ac.state -} - -func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric { - ac.mu.Lock() - addr := ac.curAddr.Addr - ac.mu.Unlock() - return &channelz.ChannelInternalMetric{ - State: ac.getState(), - Target: addr, - CallsStarted: atomic.LoadInt64(&ac.czData.callsStarted), - CallsSucceeded: atomic.LoadInt64(&ac.czData.callsSucceeded), - CallsFailed: atomic.LoadInt64(&ac.czData.callsFailed), - LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)), - } -} - -func (ac *addrConn) incrCallsStarted() { - atomic.AddInt64(&ac.czData.callsStarted, 1) - atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano()) -} - -func (ac *addrConn) incrCallsSucceeded() { - atomic.AddInt64(&ac.czData.callsSucceeded, 1) -} - -func (ac *addrConn) incrCallsFailed() { - atomic.AddInt64(&ac.czData.callsFailed, 1) -} - type retryThrottler struct { max float64 thresh float64 @@ -1674,12 +1632,17 @@ func (rt *retryThrottler) successfulRPC() { } } -type channelzChannel struct { - cc *ClientConn +func (ac *addrConn) incrCallsStarted() { + ac.channelz.ChannelMetrics.CallsStarted.Add(1) + ac.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano()) } -func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { - return c.cc.channelzMetric() +func (ac *addrConn) incrCallsSucceeded() { + ac.channelz.ChannelMetrics.CallsSucceeded.Add(1) +} + +func (ac *addrConn) incrCallsFailed() { + ac.channelz.ChannelMetrics.CallsFailed.Add(1) } // ErrClientConnTimeout indicates that the ClientConn cannot establish the @@ 
-1713,22 +1676,19 @@ func (cc *ClientConn) connectionError() error { return cc.lastConnectionError } -// parseTargetAndFindResolver parses the user's dial target and stores the -// parsed target in `cc.parsedTarget`. +// initParsedTargetAndResolverBuilder parses the user's dial target and stores +// the parsed target in `cc.parsedTarget`. // // The resolver to use is determined based on the scheme in the parsed target // and the same is stored in `cc.resolverBuilder`. // // Doesn't grab cc.mu as this method is expected to be called only at Dial time. -func (cc *ClientConn) parseTargetAndFindResolver() error { - channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) +func (cc *ClientConn) initParsedTargetAndResolverBuilder() error { + logger.Infof("original dial target is: %q", cc.target) var rb resolver.Builder parsedTarget, err := parseTarget(cc.target) - if err != nil { - channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) - } else { - channelz.Infof(logger, cc.channelzID, "parsed dial target is: %#v", parsedTarget) + if err == nil { rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget @@ -1740,17 +1700,19 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { // We are here because the user's dial target did not contain a scheme or // specified an unregistered scheme. We should fallback to the default // scheme, except when a custom dialer is specified in which case, we should - // always use passthrough scheme. - defScheme := resolver.GetDefaultScheme() - channelz.Infof(logger, cc.channelzID, "fallback to scheme %q", defScheme) + // always use passthrough scheme. For either case, we need to respect any overridden + // global defaults set by the user. + defScheme := cc.dopts.defaultScheme + if internal.UserSetDefaultScheme { + defScheme = resolver.GetDefaultScheme() + } + canonicalTarget := defScheme + ":///" + cc.target parsedTarget, err = parseTarget(canonicalTarget) if err != nil { - channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) return err } - channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) @@ -1772,6 +1734,8 @@ func parseTarget(target string) (resolver.Target, error) { return resolver.Target{URL: *u}, nil } +// encodeAuthority escapes the authority string based on valid chars defined in +// https://datatracker.ietf.org/doc/html/rfc3986#section-3.2. func encodeAuthority(authority string) string { const upperhex = "0123456789ABCDEF" @@ -1788,7 +1752,7 @@ func encodeAuthority(authority string) string { return false case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters return false - case ':', '[', ']', '@': // Authority related delimeters + case ':', '[', ']', '@': // Authority related delimiters return false } // Everything else must be escaped. @@ -1838,7 +1802,7 @@ func encodeAuthority(authority string) string { // credentials do not match the authority configured through the dial option. // // Doesn't grab cc.mu as this method is expected to be called only at Dial time. -func (cc *ClientConn) determineAuthority() error { +func (cc *ClientConn) initAuthority() error { dopts := cc.dopts // Historically, we had two options for users to specify the serverName or // authority for a channel. 
One was through the transport credentials @@ -1871,6 +1835,5 @@ func (cc *ClientConn) determineAuthority() error { } else { cc.authority = encodeAuthority(endpoint) } - channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) return nil } diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh deleted file mode 100644 index 4cdc6ba7..00000000 --- a/vendor/google.golang.org/grpc/codegen.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -# This script serves as an example to demonstrate how to generate the gRPC-Go -# interface and the related messages from .proto file. -# -# It assumes the installation of i) Google proto buffer compiler at -# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen -# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have -# not, please install them first. -# -# We recommend running this script at $GOPATH/src. -# -# If this is not what you need, feel free to make your own scripts. Again, this -# script is for demonstration purpose. -# -proto=$1 -protoc --go_out=plugins=grpc:. $proto diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 08476ad1..0b42c302 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -235,7 +235,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { if ci >= _maxCode { - return fmt.Errorf("invalid code: %q", ci) + return fmt.Errorf("invalid code: %d", ci) } *c = Code(ci) diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index ca4d0331..38cb5cf0 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // source: grpc/gcp/altscontext.proto diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index 4a407495..55fc7f65 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // source: grpc/gcp/handshaker.proto @@ -331,6 +331,11 @@ type StartClientHandshakeReq struct { RpcVersions *RpcProtocolVersions `protobuf:"bytes,9,opt,name=rpc_versions,json=rpcVersions,proto3" json:"rpc_versions,omitempty"` // (Optional) Maximum frame size supported by the client. MaxFrameSize uint32 `protobuf:"varint,10,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"` + // (Optional) An access token created by the caller only intended for use in + // ALTS connections. The access token that should be used to authenticate to + // the peer. The access token MUST be strongly bound to the ALTS credentials + // used to establish the connection that the token is sent over. 
+ AccessToken string `protobuf:"bytes,11,opt,name=access_token,json=accessToken,proto3" json:"access_token,omitempty"` } func (x *StartClientHandshakeReq) Reset() { @@ -435,6 +440,13 @@ func (x *StartClientHandshakeReq) GetMaxFrameSize() uint32 { return 0 } +func (x *StartClientHandshakeReq) GetAccessToken() string { + if x != nil { + return x.AccessToken + } + return "" +} + type ServerHandshakeParameters struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -446,6 +458,11 @@ type ServerHandshakeParameters struct { // (Optional) A list of local identities supported by the server, if // specified. Otherwise, the handshaker chooses a default local identity. LocalIdentities []*Identity `protobuf:"bytes,2,rep,name=local_identities,json=localIdentities,proto3" json:"local_identities,omitempty"` + // A token created by the caller only intended for use in + // ALTS connections. The token should be used to authenticate to + // the peer. The token MUST be strongly bound to the ALTS credentials + // used to establish the connection that the token is sent over. + Token *string `protobuf:"bytes,3,opt,name=token,proto3,oneof" json:"token,omitempty"` } func (x *ServerHandshakeParameters) Reset() { @@ -494,6 +511,13 @@ func (x *ServerHandshakeParameters) GetLocalIdentities() []*Identity { return nil } +func (x *ServerHandshakeParameters) GetToken() string { + if x != nil && x.Token != nil { + return *x.Token + } + return "" +} + type StartServerHandshakeReq struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1047,7 +1071,7 @@ var file_grpc_gcp_handshaker_proto_rawDesc = []byte{ 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, - 0x22, 0xd3, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x22, 0xf6, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x5b, 0x0a, 0x1b, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, @@ -1084,135 +1108,139 @@ var file_grpc_gcp_handshaker_proto_rawDesc = []byte{ 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, - 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x85, 0x01, 0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, - 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, - 0x3d, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0f, 0x6c, - 
0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x22, 0xa5, - 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, - 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x33, 0x0a, 0x15, 0x61, 0x70, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, - 0x6d, 0x0a, 0x14, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, - 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x68, 0x61, 0x6e, 0x64, 0x73, - 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x19, - 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, - 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e, 0x64, 0x70, - 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, - 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, - 0x74, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, - 0x74, 0x12, 0x40, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, - 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, - 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, - 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0x6b, 0x0a, 0x18, 0x48, 0x61, 0x6e, - 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, - 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, - 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x62, 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, - 0x6e, 0x64, 0x73, 0x68, 
0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, - 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, - 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, - 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4d, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x0d, 0x48, - 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x46, 0x0a, 0x0c, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xaa, 0x01, 0x0a, 0x19, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, + 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, + 0x73, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xa5, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, + 0x65, 0x71, 0x12, 0x33, 0x0a, 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x6d, 0x0a, 0x14, 0x68, 0x61, 0x6e, 0x64, 0x73, + 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, + 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, + 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x13, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0d, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0f, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, + 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x0c, 0x72, 0x70, 0x63, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, + 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, + 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, + 0x65, 0x1a, 0x6b, 0x0a, 0x18, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, + 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x62, + 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, + 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, + 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x4d, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, + 0x72, 0x52, 0x65, 0x71, 0x12, 0x46, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, + 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, - 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 
0x65, 0x72, 0x5f, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, - 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x04, - 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, - 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, - 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, 0x65, - 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, - 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, - 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x37, - 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, - 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x49, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, - 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, - 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6b, - 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x49, - 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72, 0x52, 0x70, - 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, - 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, - 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 
0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, - 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, - 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, - 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72, 0x61, 0x6d, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46, 0x72, 0x61, - 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x6e, - 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, 0x72, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x32, - 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, - 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e, 0x44, 0x53, - 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, - 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02, 0x2a, 0x45, - 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, 0x52, 0x4f, + 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, + 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a, + 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48, + 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, + 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, + 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, + 0x65, 0x79, 0x44, 0x61, 
0x74, 0x61, 0x12, 0x37, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, + 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65, + 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e, + 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, + 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72, 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, + 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, + 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61, + 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a, + 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, + 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, + 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, + 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61, + 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, + 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e, 0x44, 0x53, 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 
0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, - 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, - 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x44, 0x6f, - 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, - 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, - 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, - 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x61, 0x6c, - 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48, 0x61, 0x6e, - 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, - 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, - 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, 0x70, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, + 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02, 0x2a, 0x45, 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54, + 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, + 0x43, 0x50, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a, + 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x44, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, + 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, + 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1410,6 +1438,7 @@ func file_grpc_gcp_handshaker_proto_init() { (*Identity_ServiceAccount)(nil), (*Identity_Hostname)(nil), } + file_grpc_gcp_handshaker_proto_msgTypes[3].OneofWrappers = []interface{}{} 
file_grpc_gcp_handshaker_proto_msgTypes[6].OneofWrappers = []interface{}{ (*HandshakerReq_ClientStart)(nil), (*HandshakerReq_ServerStart)(nil), diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go index ba1c46f6..358074b6 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v4.25.2 // source: grpc/gcp/handshaker.proto @@ -32,8 +32,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( HandshakerService_DoHandshake_FullMethodName = "/grpc.gcp.HandshakerService/DoHandshake" @@ -49,7 +49,7 @@ type HandshakerServiceClient interface { // messages with next. Each time client sends a request, the handshaker // service expects to respond. Client does not have to wait for service's // response before sending next request. - DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) + DoHandshake(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[HandshakerReq, HandshakerResp], error) } type handshakerServiceClient struct { @@ -60,36 +60,18 @@ func NewHandshakerServiceClient(cc grpc.ClientConnInterface) HandshakerServiceCl return &handshakerServiceClient{cc} } -func (c *handshakerServiceClient) DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) { - stream, err := c.cc.NewStream(ctx, &HandshakerService_ServiceDesc.Streams[0], HandshakerService_DoHandshake_FullMethodName, opts...) +func (c *handshakerServiceClient) DoHandshake(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[HandshakerReq, HandshakerResp], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &HandshakerService_ServiceDesc.Streams[0], HandshakerService_DoHandshake_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &handshakerServiceDoHandshakeClient{stream} + x := &grpc.GenericClientStream[HandshakerReq, HandshakerResp]{ClientStream: stream} return x, nil } -type HandshakerService_DoHandshakeClient interface { - Send(*HandshakerReq) error - Recv() (*HandshakerResp, error) - grpc.ClientStream -} - -type handshakerServiceDoHandshakeClient struct { - grpc.ClientStream -} - -func (x *handshakerServiceDoHandshakeClient) Send(m *HandshakerReq) error { - return x.ClientStream.SendMsg(m) -} - -func (x *handshakerServiceDoHandshakeClient) Recv() (*HandshakerResp, error) { - m := new(HandshakerResp) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type HandshakerService_DoHandshakeClient = grpc.BidiStreamingClient[HandshakerReq, HandshakerResp] // HandshakerServiceServer is the server API for HandshakerService service. 
// All implementations must embed UnimplementedHandshakerServiceServer @@ -101,7 +83,7 @@ type HandshakerServiceServer interface { // messages with next. Each time client sends a request, the handshaker // service expects to respond. Client does not have to wait for service's // response before sending next request. - DoHandshake(HandshakerService_DoHandshakeServer) error + DoHandshake(grpc.BidiStreamingServer[HandshakerReq, HandshakerResp]) error mustEmbedUnimplementedHandshakerServiceServer() } @@ -109,7 +91,7 @@ type HandshakerServiceServer interface { type UnimplementedHandshakerServiceServer struct { } -func (UnimplementedHandshakerServiceServer) DoHandshake(HandshakerService_DoHandshakeServer) error { +func (UnimplementedHandshakerServiceServer) DoHandshake(grpc.BidiStreamingServer[HandshakerReq, HandshakerResp]) error { return status.Errorf(codes.Unimplemented, "method DoHandshake not implemented") } func (UnimplementedHandshakerServiceServer) mustEmbedUnimplementedHandshakerServiceServer() {} @@ -126,30 +108,11 @@ func RegisterHandshakerServiceServer(s grpc.ServiceRegistrar, srv HandshakerServ } func _HandshakerService_DoHandshake_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(HandshakerServiceServer).DoHandshake(&handshakerServiceDoHandshakeServer{stream}) + return srv.(HandshakerServiceServer).DoHandshake(&grpc.GenericServerStream[HandshakerReq, HandshakerResp]{ServerStream: stream}) } -type HandshakerService_DoHandshakeServer interface { - Send(*HandshakerResp) error - Recv() (*HandshakerReq, error) - grpc.ServerStream -} - -type handshakerServiceDoHandshakeServer struct { - grpc.ServerStream -} - -func (x *handshakerServiceDoHandshakeServer) Send(m *HandshakerResp) error { - return x.ServerStream.SendMsg(m) -} - -func (x *handshakerServiceDoHandshakeServer) Recv() (*HandshakerReq, error) { - m := new(HandshakerReq) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type HandshakerService_DoHandshakeServer = grpc.BidiStreamingServer[HandshakerReq, HandshakerResp] // HandshakerService_ServiceDesc is the grpc.ServiceDesc for HandshakerService service. // It's only intended for direct use with grpc.RegisterService, diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index 3e53b2b1..18cc9cfb 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // source: grpc/gcp/transport_security_common.proto diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 5feac3aa..665e790b 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -28,9 +28,9 @@ import ( "fmt" "net" - "github.com/golang/protobuf/proto" "google.golang.org/grpc/attributes" icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/protobuf/proto" ) // PerRPCCredentials defines the common interface for the credentials which need to @@ -237,7 +237,7 @@ func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { } // CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one. -// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method +// It returns success if 1) the condition is satisfied or 2) AuthInfo struct does not implement GetCommonAuthInfo() method // or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility. // // This API is experimental. diff --git a/vendor/google.golang.org/grpc/credentials/google/xds.go b/vendor/google.golang.org/grpc/credentials/google/xds.go index 2c5c8b9e..cccb2227 100644 --- a/vendor/google.golang.org/grpc/credentials/google/xds.go +++ b/vendor/google.golang.org/grpc/credentials/google/xds.go @@ -25,7 +25,7 @@ import ( "strings" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/xds" ) const cfeClusterNamePrefix = "google_cfe_" @@ -63,7 +63,7 @@ func clusterName(ctx context.Context) string { if chi.Attributes == nil { return "" } - cluster, _ := internal.GetXDSHandshakeClusterName(chi.Attributes) + cluster, _ := xds.GetXDSHandshakeClusterName(chi.Attributes) return cluster } diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 5dafd34e..41143585 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -27,9 +27,13 @@ import ( "net/url" "os" + "google.golang.org/grpc/grpclog" credinternal "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/envconfig" ) +var logger = grpclog.Component("credentials") + // TLSInfo contains the auth information for a TLS authenticated connection. // It implements the AuthInfo interface. type TLSInfo struct { @@ -112,6 +116,22 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon conn.Close() return nil, nil, ctx.Err() } + + // The negotiated protocol can be either of the following: + // 1. h2: When the server supports ALPN. Only HTTP/2 can be negotiated since + // it is the only protocol advertised by the client during the handshake. + // The tls library ensures that the server chooses a protocol advertised + // by the client. + // 2. "" (empty string): If the server doesn't support ALPN. ALPN is a requirement + // for using HTTP/2 over TLS. We can terminate the connection immediately. 
+ np := conn.ConnectionState().NegotiatedProtocol + if np == "" { + if envconfig.EnforceALPNEnabled { + conn.Close() + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + } + logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName) + } tlsInfo := TLSInfo{ State: conn.ConnectionState(), CommonAuthInfo: CommonAuthInfo{ @@ -131,8 +151,20 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) conn.Close() return nil, nil, err } + cs := conn.ConnectionState() + // The negotiated application protocol can be empty only if the client doesn't + // support ALPN. In such cases, we can close the connection since ALPN is required + // for using HTTP/2 over TLS. + if cs.NegotiatedProtocol == "" { + if envconfig.EnforceALPNEnabled { + conn.Close() + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + } else if logger.V(2) { + logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases") + } + } tlsInfo := TLSInfo{ - State: conn.ConnectionState(), + State: cs, CommonAuthInfo: CommonAuthInfo{ SecurityLevel: PrivacyAndIntegrity, }, diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index ba242618..f5453d48 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -21,6 +21,7 @@ package grpc import ( "context" "net" + "net/url" "time" "google.golang.org/grpc/backoff" @@ -36,6 +37,11 @@ import ( "google.golang.org/grpc/stats" ) +const ( + // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#limits-on-retries-and-hedges + defaultMaxCallAttempts = 5 +) + func init() { internal.AddGlobalDialOptions = func(opt ...DialOption) { globalDialOptions = append(globalDialOptions, opt...) @@ -43,6 +49,14 @@ func init() { internal.ClearGlobalDialOptions = func() { globalDialOptions = nil } + internal.AddGlobalPerTargetDialOptions = func(opt any) { + if ptdo, ok := opt.(perTargetDialOption); ok { + globalPerTargetDialOptions = append(globalPerTargetDialOptions, ptdo) + } + } + internal.ClearGlobalPerTargetDialOptions = func() { + globalPerTargetDialOptions = nil + } internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption internal.DisableGlobalDialOptions = newDisableGlobalDialOptions @@ -68,7 +82,7 @@ type dialOptions struct { binaryLogger binarylog.Logger copts transport.ConnectOptions callOptions []CallOption - channelzParentID *channelz.Identifier + channelzParent channelz.Identifier disableServiceConfig bool disableRetry bool disableHealthCheck bool @@ -79,6 +93,8 @@ type dialOptions struct { resolvers []resolver.Builder idleTimeout time.Duration recvBufferPool SharedBufferPool + defaultScheme string + maxCallAttempts int } // DialOption configures how we set up the connection. @@ -88,6 +104,19 @@ type DialOption interface { var globalDialOptions []DialOption +// perTargetDialOption takes a parsed target and returns a dial option to apply. +// +// This gets called after NewClient() parses the target, and allows per target +// configuration set through a returned DialOption. The DialOption will not take +// effect if specifies a resolver builder, as that Dial Option is factored in +// while parsing target. 
+type perTargetDialOption interface { + // DialOption returns a Dial Option to apply. + DialOptionForTarget(parsedTarget url.URL) DialOption +} + +var globalPerTargetDialOptions []perTargetDialOption + // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. // @@ -154,9 +183,7 @@ func WithSharedWriteBuffer(val bool) DialOption { } // WithWriteBufferSize determines how much data can be batched before doing a -// write on the wire. The corresponding memory allocation for this buffer will -// be twice the size to keep syscalls low. The default value for this buffer is -// 32KB. +// write on the wire. The default value for this buffer is 32KB. // // Zero or negative values will disable the write buffer such that each write // will be on underlying connection. Note: A Send call may not directly @@ -301,6 +328,9 @@ func withBackoff(bs internalbackoff.Strategy) DialOption { // // Use of this feature is not recommended. For more information, please see: // https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// +// Deprecated: this DialOption is not supported by NewClient. +// Will be supported throughout 1.x. func WithBlock() DialOption { return newFuncDialOption(func(o *dialOptions) { o.block = true @@ -315,10 +345,8 @@ func WithBlock() DialOption { // Use of this feature is not recommended. For more information, please see: // https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md // -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Deprecated: this DialOption is not supported by NewClient. +// Will be supported throughout 1.x. func WithReturnConnectionError() DialOption { return newFuncDialOption(func(o *dialOptions) { o.block = true @@ -388,8 +416,8 @@ func WithCredentialsBundle(b credentials.Bundle) DialOption { // WithTimeout returns a DialOption that configures a timeout for dialing a // ClientConn initially. This is valid if and only if WithBlock() is present. // -// Deprecated: use DialContext instead of Dial and context.WithTimeout -// instead. Will be supported throughout 1.x. +// Deprecated: this DialOption is not supported by NewClient. +// Will be supported throughout 1.x. func WithTimeout(d time.Duration) DialOption { return newFuncDialOption(func(o *dialOptions) { o.timeout = d @@ -471,9 +499,8 @@ func withBinaryLogger(bl binarylog.Logger) DialOption { // Use of this feature is not recommended. For more information, please see: // https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md // -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// Deprecated: this DialOption is not supported by NewClient. +// This API may be changed or removed in a // later release. func FailOnNonTempDialError(f bool) DialOption { return newFuncDialOption(func(o *dialOptions) { @@ -555,9 +582,9 @@ func WithAuthority(a string) DialOption { // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. 
-func WithChannelzParentID(id *channelz.Identifier) DialOption { +func WithChannelzParentID(c channelz.Identifier) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.channelzParentID = id + o.channelzParent = c }) } @@ -602,12 +629,22 @@ func WithDisableRetry() DialOption { }) } +// MaxHeaderListSizeDialOption is a DialOption that specifies the maximum +// (uncompressed) size of header list that the client is prepared to accept. +type MaxHeaderListSizeDialOption struct { + MaxHeaderListSize uint32 +} + +func (o MaxHeaderListSizeDialOption) apply(do *dialOptions) { + do.copts.MaxHeaderListSize = &o.MaxHeaderListSize +} + // WithMaxHeaderListSize returns a DialOption that specifies the maximum // (uncompressed) size of header list that the client is prepared to accept. func WithMaxHeaderListSize(s uint32) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.MaxHeaderListSize = &s - }) + return MaxHeaderListSizeDialOption{ + MaxHeaderListSize: s, + } } // WithDisableHealthCheck disables the LB channel health checking for all @@ -645,10 +682,12 @@ func defaultDialOptions() dialOptions { healthCheckFunc: internal.HealthCheckFunc, idleTimeout: 30 * time.Minute, recvBufferPool: nopBufferPool{}, + defaultScheme: "dns", + maxCallAttempts: defaultMaxCallAttempts, } } -// withGetMinConnectDeadline specifies the function that clientconn uses to +// withMinConnectDeadline specifies the function that clientconn uses to // get minConnectDeadline. This can be used to make connection attempts happen // faster/slower. // @@ -659,6 +698,14 @@ func withMinConnectDeadline(f func() time.Duration) DialOption { }) } +// withDefaultScheme is used to allow Dial to use "passthrough" as the default +// name resolver, while NewClient uses "dns" otherwise. +func withDefaultScheme(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.defaultScheme = s + }) +} + // WithResolvers allows a list of resolver implementations to be registered // locally with the ClientConn without needing to be globally registered via // resolver.Register. They will be matched against the scheme used for the @@ -694,6 +741,23 @@ func WithIdleTimeout(d time.Duration) DialOption { }) } +// WithMaxCallAttempts returns a DialOption that configures the maximum number +// of attempts per call (including retries and hedging) using the channel. +// Service owners may specify a higher value for these parameters, but higher +// values will be treated as equal to the maximum value by the client +// implementation. This mitigates security concerns related to the service +// config being transferred to the client via DNS. +// +// A value of 5 will be used if this dial option is not set or n < 2. +func WithMaxCallAttempts(n int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + if n < 2 { + n = defaultMaxCallAttempts + } + o.maxCallAttempts = n + }) +} + // WithRecvBufferPool returns a DialOption that configures the ClientConn // to use the provided shared buffer pool for parsing incoming messages. Depending // on the application's workload, this could result in reduced memory allocation. diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 5bf880d4..38b88350 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // source: grpc/health/v1/health.proto diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index 4c46c098..51b736ba 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v4.25.2 // source: grpc/health/v1/health.proto @@ -32,8 +32,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 const ( Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" @@ -43,6 +43,10 @@ const ( // HealthClient is the client API for Health service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// Health is gRPC's mechanism for checking whether a server is able to handle +// RPCs. Its semantics are documented in +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md. type HealthClient interface { // Check gets the health of the specified service. If the requested service // is unknown, the call will fail with status NOT_FOUND. If the caller does @@ -81,8 +85,9 @@ func NewHealthClient(cc grpc.ClientConnInterface) HealthClient { } func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(HealthCheckResponse) - err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -90,11 +95,12 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts . } func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { - stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, opts...) + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &healthWatchClient{stream} + x := &healthWatchClient{ClientStream: stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -124,6 +130,10 @@ func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { // HealthServer is the server API for Health service. // All implementations should embed UnimplementedHealthServer // for forward compatibility +// +// Health is gRPC's mechanism for checking whether a server is able to handle +// RPCs. Its semantics are documented in +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md. type HealthServer interface { // Check gets the health of the specified service. If the requested service // is unknown, the call will fail with status NOT_FOUND. 
If the caller does @@ -198,7 +208,7 @@ func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { if err := stream.RecvMsg(m); err != nil { return err } - return srv.(HealthServer).Watch(m, &healthWatchServer{stream}) + return srv.(HealthServer).Watch(m, &healthWatchServer{ServerStream: stream}) } type Health_WatchServer interface { diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index fed1c011..b15cf482 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -25,10 +25,10 @@ package backoff import ( "context" "errors" + "math/rand" "time" grpcbackoff "google.golang.org/grpc/backoff" - "google.golang.org/grpc/internal/grpcrand" ) // Strategy defines the methodology for backing off after a grpc connection @@ -67,7 +67,7 @@ func (bc Exponential) Backoff(retries int) time.Duration { } // Randomize backoff delays so that if a cluster of requests start at // the same time, they won't operate in lockstep. - backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1) + backoff *= 1 + bc.Config.Jitter*(rand.Float64()*2-1) if backoff < 0 { return 0 } diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go new file mode 100644 index 00000000..13821a92 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package gracefulswitch + +import ( + "encoding/json" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/serviceconfig" +) + +type lbConfig struct { + serviceconfig.LoadBalancingConfig + + childBuilder balancer.Builder + childConfig serviceconfig.LoadBalancingConfig +} + +func ChildName(l serviceconfig.LoadBalancingConfig) string { + return l.(*lbConfig).childBuilder.Name() +} + +// ParseConfig parses a child config list and returns a LB config for the +// gracefulswitch Balancer. +// +// cfg is expected to be a json.RawMessage containing a JSON array of LB policy +// names + configs as the format of the "loadBalancingConfig" field in +// ServiceConfig. It returns a type that should be passed to +// UpdateClientConnState in the BalancerConfig field. +func ParseConfig(cfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var lbCfg []map[string]json.RawMessage + if err := json.Unmarshal(cfg, &lbCfg); err != nil { + return nil, err + } + for i, e := range lbCfg { + if len(e) != 1 { + return nil, fmt.Errorf("expected a JSON struct with one entry; received entry %v at index %d", e, i) + } + + var name string + var jsonCfg json.RawMessage + for name, jsonCfg = range e { + } + + builder := balancer.Get(name) + if builder == nil { + // Skip unregistered balancer names. 
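+			// For example, given [{"made_up_policy":{}},{"round_robin":{}}],
+			// the unregistered "made_up_policy" entry is passed over and
+			// "round_robin" is selected below.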
+ continue + } + + parser, ok := builder.(balancer.ConfigParser) + if !ok { + // This is a valid child with no config. + return &lbConfig{childBuilder: builder}, nil + } + + cfg, err := parser.ParseConfig(jsonCfg) + if err != nil { + return nil, fmt.Errorf("error parsing config for policy %q: %v", name, err) + } + return &lbConfig{childBuilder: builder, childConfig: cfg}, nil + } + + return nil, fmt.Errorf("no supported policies found in config: %v", string(cfg)) +} diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go index 3c594e6e..73bb4c4e 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -94,14 +94,23 @@ func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { // process is not complete when this method returns. This method must be called // synchronously alongside the rest of the balancer.Balancer methods this // Graceful Switch Balancer implements. +// +// Deprecated: use ParseConfig and pass a parsed config to UpdateClientConnState +// to cause the Balancer to automatically change to the new child when necessary. func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { + _, err := gsb.switchTo(builder) + return err +} + +func (gsb *Balancer) switchTo(builder balancer.Builder) (*balancerWrapper, error) { gsb.mu.Lock() if gsb.closed { gsb.mu.Unlock() - return errBalancerClosed + return nil, errBalancerClosed } bw := &balancerWrapper{ - gsb: gsb, + builder: builder, + gsb: gsb, lastState: balancer.State{ ConnectivityState: connectivity.Connecting, Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), @@ -129,7 +138,7 @@ func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { gsb.balancerCurrent = nil } gsb.mu.Unlock() - return balancer.ErrBadResolverState + return nil, balancer.ErrBadResolverState } // This write doesn't need to take gsb.mu because this field never gets read @@ -138,7 +147,7 @@ func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { // bw.Balancer field will never be forwarded to until this SwitchTo() // function returns. bw.Balancer = newBalancer - return nil + return bw, nil } // Returns nil if the graceful switch balancer is closed. @@ -152,12 +161,32 @@ func (gsb *Balancer) latestBalancer() *balancerWrapper { } // UpdateClientConnState forwards the update to the latest balancer created. +// +// If the state's BalancerConfig is the config returned by a call to +// gracefulswitch.ParseConfig, then this function will automatically SwitchTo +// the balancer indicated by the config before forwarding its config to it, if +// necessary. func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { // The resolver data is only relevant to the most recent LB Policy. balToUpdate := gsb.latestBalancer() + gsbCfg, ok := state.BalancerConfig.(*lbConfig) + if ok { + // Switch to the child in the config unless it is already active. + if balToUpdate == nil || gsbCfg.childBuilder.Name() != balToUpdate.builder.Name() { + var err error + balToUpdate, err = gsb.switchTo(gsbCfg.childBuilder) + if err != nil { + return fmt.Errorf("could not switch to new child balancer: %w", err) + } + } + // Unwrap the child balancer's config. 
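+		// The child only ever sees the config produced by its own parser,
+		// never the gracefulswitch wrapper type.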
+ state.BalancerConfig = gsbCfg.childConfig + } + if balToUpdate == nil { return errBalancerClosed } + // Perform this call without gsb.mu to prevent deadlocks if the child calls // back into the channel. The latest balancer can never be closed during a // call from the channel, even without gsb.mu held. @@ -169,6 +198,10 @@ func (gsb *Balancer) ResolverError(err error) { // The resolver data is only relevant to the most recent LB Policy. balToUpdate := gsb.latestBalancer() if balToUpdate == nil { + gsb.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(err), + }) return } // Perform this call without gsb.mu to prevent deadlocks if the child calls @@ -261,7 +294,8 @@ func (gsb *Balancer) Close() { // graceful switch logic. type balancerWrapper struct { balancer.Balancer - gsb *Balancer + gsb *Balancer + builder balancer.Builder lastState balancer.State subconns map[balancer.SubConn]bool // subconns created by this balancer diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index e8456a77..aa4505a8 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -65,7 +65,7 @@ type TruncatingMethodLogger struct { callID uint64 idWithinCallGen *callIDGenerator - sink Sink // TODO(blog): make this plugable. + sink Sink // TODO(blog): make this pluggable. } // NewTruncatingMethodLogger returns a new truncating method logger. @@ -80,7 +80,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { callID: idGen.next(), idWithinCallGen: &callIDGenerator{}, - sink: DefaultSink, // TODO(blog): make it plugable. + sink: DefaultSink, // TODO(blog): make it pluggable. } } @@ -397,7 +397,7 @@ func metadataKeyOmit(key string) bool { switch key { case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te": return true - case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users. + case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users. return false } return strings.HasPrefix(key, "grpc-") diff --git a/vendor/google.golang.org/grpc/internal/channelz/channel.go b/vendor/google.golang.org/grpc/internal/channelz/channel.go new file mode 100644 index 00000000..d7e9e1d5 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/channel.go @@ -0,0 +1,255 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "sync/atomic" + + "google.golang.org/grpc/connectivity" +) + +// Channel represents a channel within channelz, which includes metrics and +// internal channelz data, such as channelz id, child list, etc. +type Channel struct { + Entity + // ID is the channelz id of this channel. + ID int64 + // RefName is the human readable reference string of this channel. 
+	RefName string
+
+	closeCalled bool
+	nestedChans map[int64]string
+	subChans map[int64]string
+	Parent *Channel
+	trace *ChannelTrace
+	// traceRefCount is the number of trace events that reference this channel.
+	// Non-zero traceRefCount means the trace of this channel cannot be deleted.
+	traceRefCount int32
+
+	ChannelMetrics ChannelMetrics
+}
+
+// Implemented to make Channel implement the Identifier interface used for
+// nesting.
+func (c *Channel) channelzIdentifier() {}
+
+func (c *Channel) String() string {
+	if c.Parent == nil {
+		return fmt.Sprintf("Channel #%d", c.ID)
+	}
+	return fmt.Sprintf("%s Channel #%d", c.Parent, c.ID)
+}
+
+func (c *Channel) id() int64 {
+	return c.ID
+}
+
+func (c *Channel) SubChans() map[int64]string {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+	return copyMap(c.subChans)
+}
+
+func (c *Channel) NestedChans() map[int64]string {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+	return copyMap(c.nestedChans)
+}
+
+func (c *Channel) Trace() *ChannelTrace {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+	return c.trace.copy()
+}
+
+type ChannelMetrics struct {
+	// The current connectivity state of the channel.
+	State atomic.Pointer[connectivity.State]
+	// The target this channel originally tried to connect to. May be absent.
+	Target atomic.Pointer[string]
+	// The number of calls started on the channel.
+	CallsStarted atomic.Int64
+	// The number of calls that have completed with an OK status.
+	CallsSucceeded atomic.Int64
+	// The number of calls that have completed with a non-OK status.
+	CallsFailed atomic.Int64
+	// The last time a call was started on the channel.
+	LastCallStartedTimestamp atomic.Int64
+}
+
+// CopyFrom copies the metrics in o to c. For testing only.
+func (c *ChannelMetrics) CopyFrom(o *ChannelMetrics) {
+	c.State.Store(o.State.Load())
+	c.Target.Store(o.Target.Load())
+	c.CallsStarted.Store(o.CallsStarted.Load())
+	c.CallsSucceeded.Store(o.CallsSucceeded.Load())
+	c.CallsFailed.Store(o.CallsFailed.Load())
+	c.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load())
+}
+
+// Equal returns true iff the metrics of c are the same as the metrics of o.
+// For testing only.
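+// Pointer-valued fields (State and Target) are compared by pointee value,
+// after first checking that their nil-ness matches.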
+func (c *ChannelMetrics) Equal(o any) bool {
+	oc, ok := o.(*ChannelMetrics)
+	if !ok {
+		return false
+	}
+	if (c.State.Load() == nil) != (oc.State.Load() == nil) {
+		return false
+	}
+	if c.State.Load() != nil && *c.State.Load() != *oc.State.Load() {
+		return false
+	}
+	if (c.Target.Load() == nil) != (oc.Target.Load() == nil) {
+		return false
+	}
+	if c.Target.Load() != nil && *c.Target.Load() != *oc.Target.Load() {
+		return false
+	}
+	return c.CallsStarted.Load() == oc.CallsStarted.Load() &&
+		c.CallsFailed.Load() == oc.CallsFailed.Load() &&
+		c.CallsSucceeded.Load() == oc.CallsSucceeded.Load() &&
+		c.LastCallStartedTimestamp.Load() == oc.LastCallStartedTimestamp.Load()
+}
+
+func strFromPointer(s *string) string {
+	if s == nil {
+		return ""
+	}
+	return *s
+}
+
+func (c *ChannelMetrics) String() string {
+	return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v",
+		c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(),
+	)
+}
+
+func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics {
+	c := &ChannelMetrics{}
+	c.State.Store(&state)
+	c.Target.Store(&target)
+	c.CallsStarted.Store(started)
+	c.CallsSucceeded.Store(succeeded)
+	c.CallsFailed.Store(failed)
+	c.LastCallStartedTimestamp.Store(timestamp)
+	return c
+}
+
+func (c *Channel) addChild(id int64, e entry) {
+	switch v := e.(type) {
+	case *SubChannel:
+		c.subChans[id] = v.RefName
+	case *Channel:
+		c.nestedChans[id] = v.RefName
+	default:
+		logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
+	}
+}
+
+func (c *Channel) deleteChild(id int64) {
+	delete(c.subChans, id)
+	delete(c.nestedChans, id)
+	c.deleteSelfIfReady()
+}
+
+func (c *Channel) triggerDelete() {
+	c.closeCalled = true
+	c.deleteSelfIfReady()
+}
+
+func (c *Channel) getParentID() int64 {
+	if c.Parent == nil {
+		return -1
+	}
+	return c.Parent.ID
+}
+
+// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means
+// deleting the channel reference from its parent's child list.
+//
+// In order for a channel to be deleted from the tree, it must meet two criteria: removal of the
+// corresponding gRPC object has been invoked, and the channel does not have any children left.
+//
+// The returned boolean value indicates whether the channel has been successfully deleted from the tree.
+func (c *Channel) deleteSelfFromTree() (deleted bool) {
+	if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
+		return false
+	}
+	// not top channel
+	if c.Parent != nil {
+		c.Parent.deleteChild(c.ID)
+	}
+	return true
+}
+
+// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means
+// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the
+// channel, and its memory will be garbage collected.
+//
+// The trace reference count of the channel must be 0 in order to be deleted from the map. The
+// channel tracing gRFC specifies that as long as some other trace holds a reference to an entity,
+// the trace of the referenced entity must not be deleted. In order to release the resource allocated
+// by grpc, the reference to the grpc object is reset to a dummy object.
+//
+// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
+//
+// It returns a bool to indicate whether the channel can be safely deleted from the map.
+func (c *Channel) deleteSelfFromMap() (delete bool) {
+	return c.getTraceRefCount() == 0
+}
+
+// deleteSelfIfReady tries to delete the channel itself from the channelz database.
+// The delete process includes two steps:
+// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
+// parent's child list.
+// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
+// will return entry not found error.
+func (c *Channel) deleteSelfIfReady() {
+	if !c.deleteSelfFromTree() {
+		return
+	}
+	if !c.deleteSelfFromMap() {
+		return
+	}
+	db.deleteEntry(c.ID)
+	c.trace.clear()
+}
+
+func (c *Channel) getChannelTrace() *ChannelTrace {
+	return c.trace
+}
+
+func (c *Channel) incrTraceRefCount() {
+	atomic.AddInt32(&c.traceRefCount, 1)
+}
+
+func (c *Channel) decrTraceRefCount() {
+	atomic.AddInt32(&c.traceRefCount, -1)
+}
+
+func (c *Channel) getTraceRefCount() int {
+	i := atomic.LoadInt32(&c.traceRefCount)
+	return int(i)
+}
+
+func (c *Channel) getRefName() string {
+	return c.RefName
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
new file mode 100644
index 00000000..dfe18b08
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
@@ -0,0 +1,402 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+	"time"
+)
+
+// entry represents a node in the channelz database.
+type entry interface {
+	// addChild adds a child e, whose channelz id is id, to the child list.
+	addChild(id int64, e entry)
+	// deleteChild deletes the child with channelz id id from the child list.
+	deleteChild(id int64)
+	// triggerDelete tries to delete self from channelz database. However, if
+	// child list is not empty, then deletion from the database is on hold until
+	// the last child is deleted from database.
+	triggerDelete()
+	// deleteSelfIfReady checks whether triggerDelete() has been called before,
+	// and whether child list is now empty. If both conditions are met, then
+	// delete self from database.
+	deleteSelfIfReady()
+	// getParentID returns parent ID of the entry. 0 value parent ID means no parent.
+	getParentID() int64
+	Entity
+}
+
+// channelMap is the storage data structure for channelz.
+//
+// Methods of channelMap can be divided into two categories with respect to
+// locking.
+//
+// 1. Methods that acquire the global lock.
+// 2. Methods that can only be called when the global lock is held.
+//
+// Methods of the second type must always be called from within a method of
+// the first type.
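+//
+// For example, addChannel (first category) acquires c.mu itself, while
+// findEntry (second category) documents that c.mu must already be held by
+// the caller.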
+type channelMap struct {
+	mu sync.RWMutex
+	topLevelChannels map[int64]struct{}
+	channels map[int64]*Channel
+	subChannels map[int64]*SubChannel
+	sockets map[int64]*Socket
+	servers map[int64]*Server
+}
+
+func newChannelMap() *channelMap {
+	return &channelMap{
+		topLevelChannels: make(map[int64]struct{}),
+		channels: make(map[int64]*Channel),
+		subChannels: make(map[int64]*SubChannel),
+		sockets: make(map[int64]*Socket),
+		servers: make(map[int64]*Server),
+	}
+}
+
+func (c *channelMap) addServer(id int64, s *Server) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	s.cm = c
+	c.servers[id] = s
+}
+
+func (c *channelMap) addChannel(id int64, cn *Channel, isTopChannel bool, pid int64) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	cn.trace.cm = c
+	c.channels[id] = cn
+	if isTopChannel {
+		c.topLevelChannels[id] = struct{}{}
+	} else if p := c.channels[pid]; p != nil {
+		p.addChild(id, cn)
+	} else {
+		logger.Infof("channel %d references invalid parent ID %d", id, pid)
+	}
+}
+
+func (c *channelMap) addSubChannel(id int64, sc *SubChannel, pid int64) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	sc.trace.cm = c
+	c.subChannels[id] = sc
+	if p := c.channels[pid]; p != nil {
+		p.addChild(id, sc)
+	} else {
+		logger.Infof("subchannel %d references invalid parent ID %d", id, pid)
+	}
+}
+
+func (c *channelMap) addSocket(s *Socket) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	s.cm = c
+	c.sockets[s.ID] = s
+	if s.Parent == nil {
+		logger.Infof("normal socket %d has no parent", s.ID)
+	}
+	s.Parent.(entry).addChild(s.ID, s)
+}
+
+// removeEntry triggers the removal of an entry, which may not actually delete
+// the entry, if it has to wait on the deletion of its children and until no other
+// entity's channel trace references it. It may lead to a chain of entry
+// deletion. For example, deleting the last socket of a gracefully shutting down
+// server will lead to the server being also deleted.
+func (c *channelMap) removeEntry(id int64) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.findEntry(id).triggerDelete()
+}
+
+// tracedChannel represents tracing operations which are present on both
+// channels and subChannels.
+type tracedChannel interface {
+	getChannelTrace() *ChannelTrace
+	incrTraceRefCount()
+	decrTraceRefCount()
+	getRefName() string
+}
+
+// c.mu must be held by the caller.
+func (c *channelMap) decrTraceRefCount(id int64) {
+	e := c.findEntry(id)
+	if v, ok := e.(tracedChannel); ok {
+		v.decrTraceRefCount()
+		e.deleteSelfIfReady()
+	}
+}
+
+// c.mu must be held by the caller.
+func (c *channelMap) findEntry(id int64) entry {
+	if v, ok := c.channels[id]; ok {
+		return v
+	}
+	if v, ok := c.subChannels[id]; ok {
+		return v
+	}
+	if v, ok := c.servers[id]; ok {
+		return v
+	}
+	if v, ok := c.sockets[id]; ok {
+		return v
+	}
+	return &dummyEntry{idNotFound: id}
+}
+
+// c.mu must be held by the caller.
+//
+// deleteEntry deletes an entry from the channelMap. Before calling this method,
+// the caller must check that this entry is ready to be deleted, i.e. removeEntry()
+// has been called on it and no children still exist.
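+// If no entry with the given id exists, a dummyEntry carrying the id is
+// returned so that callers can still log a useful message.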
+func (c *channelMap) deleteEntry(id int64) entry { + if v, ok := c.sockets[id]; ok { + delete(c.sockets, id) + return v + } + if v, ok := c.subChannels[id]; ok { + delete(c.subChannels, id) + return v + } + if v, ok := c.channels[id]; ok { + delete(c.channels, id) + delete(c.topLevelChannels, id) + return v + } + if v, ok := c.servers[id]; ok { + delete(c.servers, id) + return v + } + return &dummyEntry{idNotFound: id} +} + +func (c *channelMap) traceEvent(id int64, desc *TraceEvent) { + c.mu.Lock() + defer c.mu.Unlock() + child := c.findEntry(id) + childTC, ok := child.(tracedChannel) + if !ok { + return + } + childTC.getChannelTrace().append(&traceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) + if desc.Parent != nil { + parent := c.findEntry(child.getParentID()) + var chanType RefChannelType + switch child.(type) { + case *Channel: + chanType = RefChannel + case *SubChannel: + chanType = RefSubChannel + } + if parentTC, ok := parent.(tracedChannel); ok { + parentTC.getChannelTrace().append(&traceEvent{ + Desc: desc.Parent.Desc, + Severity: desc.Parent.Severity, + Timestamp: time.Now(), + RefID: id, + RefName: childTC.getRefName(), + RefType: chanType, + }) + childTC.incrTraceRefCount() + } + } +} + +type int64Slice []int64 + +func (s int64Slice) Len() int { return len(s) } +func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } + +func copyMap(m map[int64]string) map[int64]string { + n := make(map[int64]string) + for k, v := range m { + n[k] = v + } + return n +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) { + if maxResults <= 0 { + maxResults = EntriesPerPage + } + c.mu.RLock() + defer c.mu.RUnlock() + l := int64(len(c.topLevelChannels)) + ids := make([]int64, 0, l) + + for k := range c.topLevelChannels { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + end := true + var t []*Channel + for _, v := range ids[idx:] { + if len(t) == maxResults { + end = false + break + } + if cn, ok := c.channels[v]; ok { + t = append(t, cn) + } + } + return t, end +} + +func (c *channelMap) getServers(id int64, maxResults int) ([]*Server, bool) { + if maxResults <= 0 { + maxResults = EntriesPerPage + } + c.mu.RLock() + defer c.mu.RUnlock() + ids := make([]int64, 0, len(c.servers)) + for k := range c.servers { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + end := true + var s []*Server + for _, v := range ids[idx:] { + if len(s) == maxResults { + end = false + break + } + if svr, ok := c.servers[v]; ok { + s = append(s, svr) + } + } + return s, end +} + +func (c *channelMap) getServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) { + if maxResults <= 0 { + maxResults = EntriesPerPage + } + c.mu.RLock() + defer c.mu.RUnlock() + svr, ok := c.servers[id] + if !ok { + // server with id doesn't exist. 
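+		// Returning end=true tells the caller that pagination is complete
+		// and there are no further sockets to query.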
+		return nil, true
+	}
+	svrskts := svr.sockets
+	ids := make([]int64, 0, len(svrskts))
+	sks := make([]*Socket, 0, min(len(svrskts), maxResults))
+	for k := range svrskts {
+		ids = append(ids, k)
+	}
+	sort.Sort(int64Slice(ids))
+	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
+	end := true
+	for _, v := range ids[idx:] {
+		if len(sks) == maxResults {
+			end = false
+			break
+		}
+		if ns, ok := c.sockets[v]; ok {
+			sks = append(sks, ns)
+		}
+	}
+	return sks, end
+}
+
+func (c *channelMap) getChannel(id int64) *Channel {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.channels[id]
+}
+
+func (c *channelMap) getSubChannel(id int64) *SubChannel {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.subChannels[id]
+}
+
+func (c *channelMap) getSocket(id int64) *Socket {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.sockets[id]
+}
+
+func (c *channelMap) getServer(id int64) *Server {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.servers[id]
+}
+
+type dummyEntry struct {
+	// dummyEntry is a fake entry to handle entry not found case.
+	idNotFound int64
+	Entity
+}
+
+func (d *dummyEntry) String() string {
+	return fmt.Sprintf("non-existent entity #%d", d.idNotFound)
+}
+
+func (d *dummyEntry) ID() int64 { return d.idNotFound }
+
+func (d *dummyEntry) addChild(id int64, e entry) {
+	// Note: It is possible for a normal program to reach here under a race
+	// condition. For example, there could be a race between ClientConn.Close()
+	// info being propagated to addrConn and http2Client. ClientConn.Close()
+	// cancels the context, causing http2Client to error. The error is then
+	// caught by the transport monitor before addrConn.tearDown() is called
+	// inside ClientConn.Close(). Therefore, the addrConn will create a new
+	// transport, and by the time the new transport is registered in channelz,
+	// its parent addrConn could have already been torn down and deleted from
+	// channelz tracking, which leads to the code here.
+	logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
+}
+
+func (d *dummyEntry) deleteChild(id int64) {
+	// It is possible for a normal program to reach here under a race condition.
+	// Refer to the example described in addChild().
+	logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
+}
+
+func (d *dummyEntry) triggerDelete() {
+	logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
+}
+
+func (*dummyEntry) deleteSelfIfReady() {
+	// Code should not reach here. deleteSelfIfReady is always called on an existing entry.
+}
+
+func (*dummyEntry) getParentID() int64 {
+	return 0
+}
+
+// Entity is implemented by all channelz types.
+type Entity interface {
+	isEntity()
+	fmt.Stringer
+	id() int64
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
index fc094f34..03e24e15 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -16,47 +16,32 @@
  *
  */
 
-// Package channelz defines APIs for enabling channelz service, entry
+// Package channelz defines internal APIs for enabling channelz service, entry
 // registration/deletion, and accessing channelz data. It also defines channelz
 // metric struct formats.
-//
-// All APIs in this package are experimental.
package channelz import ( - "errors" - "sort" - "sync" "sync/atomic" "time" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" ) -const ( - defaultMaxTraceEntry int32 = 30 -) - var ( // IDGen is the global channelz entity ID generator. It should not be used // outside this package except by tests. IDGen IDGenerator - db dbWrapper - // EntryPerPage defines the number of channelz entries to be shown on a web page. - EntryPerPage = int64(50) - curState int32 - maxTraceEntry = defaultMaxTraceEntry + db *channelMap = newChannelMap() + // EntriesPerPage defines the number of channelz entries to be shown on a web page. + EntriesPerPage = 50 + curState int32 ) // TurnOn turns on channelz data collection. func TurnOn() { - if !IsOn() { - db.set(newChannelMap()) - IDGen.Reset() - atomic.StoreInt32(&curState, 1) - } + atomic.StoreInt32(&curState, 1) } func init() { @@ -70,49 +55,15 @@ func IsOn() bool { return atomic.LoadInt32(&curState) == 1 } -// SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). -// Setting it to 0 will disable channel tracing. -func SetMaxTraceEntry(i int32) { - atomic.StoreInt32(&maxTraceEntry, i) -} - -// ResetMaxTraceEntryToDefault resets the maximum number of trace entry per entity to default. -func ResetMaxTraceEntryToDefault() { - atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry) -} - -func getMaxTraceEntry() int { - i := atomic.LoadInt32(&maxTraceEntry) - return int(i) -} - -// dbWarpper wraps around a reference to internal channelz data storage, and -// provide synchronized functionality to set and get the reference. -type dbWrapper struct { - mu sync.RWMutex - DB *channelMap -} - -func (d *dbWrapper) set(db *channelMap) { - d.mu.Lock() - d.DB = db - d.mu.Unlock() -} - -func (d *dbWrapper) get() *channelMap { - d.mu.RLock() - defer d.mu.RUnlock() - return d.DB -} - // GetTopChannels returns a slice of top channel's ChannelMetric, along with a // boolean indicating whether there's more top channels to be queried for. // -// The arg id specifies that only top channel with id at or above it will be included -// in the result. The returned slice is up to a length of the arg maxResults or -// EntryPerPage if maxResults is zero, and is sorted in ascending id order. -func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { - return db.get().GetTopChannels(id, maxResults) +// The arg id specifies that only top channel with id at or above it will be +// included in the result. The returned slice is up to a length of the arg +// maxResults or EntriesPerPage if maxResults is zero, and is sorted in ascending +// id order. +func GetTopChannels(id int64, maxResults int) ([]*Channel, bool) { + return db.getTopChannels(id, maxResults) } // GetServers returns a slice of server's ServerMetric, along with a @@ -120,73 +71,69 @@ func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { // // The arg id specifies that only server with id at or above it will be included // in the result. The returned slice is up to a length of the arg maxResults or -// EntryPerPage if maxResults is zero, and is sorted in ascending id order. -func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) { - return db.get().GetServers(id, maxResults) +// EntriesPerPage if maxResults is zero, and is sorted in ascending id order. 
+func GetServers(id int64, maxResults int) ([]*Server, bool) { + return db.getServers(id, maxResults) } // GetServerSockets returns a slice of server's (identified by id) normal socket's -// SocketMetric, along with a boolean indicating whether there's more sockets to +// SocketMetrics, along with a boolean indicating whether there's more sockets to // be queried for. // // The arg startID specifies that only sockets with id at or above it will be // included in the result. The returned slice is up to a length of the arg maxResults -// or EntryPerPage if maxResults is zero, and is sorted in ascending id order. -func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { - return db.get().GetServerSockets(id, startID, maxResults) +// or EntriesPerPage if maxResults is zero, and is sorted in ascending id order. +func GetServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) { + return db.getServerSockets(id, startID, maxResults) } -// GetChannel returns the ChannelMetric for the channel (identified by id). -func GetChannel(id int64) *ChannelMetric { - return db.get().GetChannel(id) +// GetChannel returns the Channel for the channel (identified by id). +func GetChannel(id int64) *Channel { + return db.getChannel(id) } -// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id). -func GetSubChannel(id int64) *SubChannelMetric { - return db.get().GetSubChannel(id) +// GetSubChannel returns the SubChannel for the subchannel (identified by id). +func GetSubChannel(id int64) *SubChannel { + return db.getSubChannel(id) } -// GetSocket returns the SocketInternalMetric for the socket (identified by id). -func GetSocket(id int64) *SocketMetric { - return db.get().GetSocket(id) +// GetSocket returns the Socket for the socket (identified by id). +func GetSocket(id int64) *Socket { + return db.getSocket(id) } // GetServer returns the ServerMetric for the server (identified by id). -func GetServer(id int64) *ServerMetric { - return db.get().GetServer(id) +func GetServer(id int64) *Server { + return db.getServer(id) } // RegisterChannel registers the given channel c in the channelz database with -// ref as its reference name, and adds it to the child list of its parent -// (identified by pid). pid == nil means no parent. +// target as its target and reference name, and adds it to the child list of its +// parent. parent == nil means no parent. // // Returns a unique channelz identifier assigned to this channel. // // If channelz is not turned ON, the channelz database is not mutated. 
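+//
+// For example (illustrative): a top-level channel is registered as
+// RegisterChannel(nil, target), while a nested channel passes its parent
+// *Channel; only parent == nil marks the channel as top-level.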
-func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { +func RegisterChannel(parent *Channel, target string) *Channel { id := IDGen.genID() - var parent int64 - isTopChannel := true - if pid != nil { - isTopChannel = false - parent = pid.Int() - } if !IsOn() { - return newIdentifer(RefChannel, id, pid) + return &Channel{ID: id} } - cn := &channel{ - refName: ref, - c: c, - subChans: make(map[int64]string), + isTopChannel := parent == nil + + cn := &Channel{ + ID: id, + RefName: target, nestedChans: make(map[int64]string), - id: id, - pid: parent, - trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, + subChans: make(map[int64]string), + Parent: parent, + trace: &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())}, } - db.get().addChannel(id, cn, isTopChannel, parent) - return newIdentifer(RefChannel, id, pid) + cn.ChannelMetrics.Target.Store(&target) + db.addChannel(id, cn, isTopChannel, cn.getParentID()) + return cn } // RegisterSubChannel registers the given subChannel c in the channelz database @@ -196,555 +143,67 @@ func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { // Returns a unique channelz identifier assigned to this subChannel. // // If channelz is not turned ON, the channelz database is not mutated. -func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) { - if pid == nil { - return nil, errors.New("a SubChannel's parent id cannot be nil") - } +func RegisterSubChannel(parent *Channel, ref string) *SubChannel { id := IDGen.genID() - if !IsOn() { - return newIdentifer(RefSubChannel, id, pid), nil + sc := &SubChannel{ + ID: id, + RefName: ref, + parent: parent, } - sc := &subChannel{ - refName: ref, - c: c, - sockets: make(map[int64]string), - id: id, - pid: pid.Int(), - trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, + if !IsOn() { + return sc } - db.get().addSubChannel(id, sc, pid.Int()) - return newIdentifer(RefSubChannel, id, pid), nil + + sc.sockets = make(map[int64]string) + sc.trace = &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())} + db.addSubChannel(id, sc, parent.ID) + return sc } // RegisterServer registers the given server s in channelz database. It returns // the unique channelz tracking id assigned to this server. // // If channelz is not turned ON, the channelz database is not mutated. -func RegisterServer(s Server, ref string) *Identifier { +func RegisterServer(ref string) *Server { id := IDGen.genID() if !IsOn() { - return newIdentifer(RefServer, id, nil) + return &Server{ID: id} } - svr := &server{ - refName: ref, - s: s, + svr := &Server{ + RefName: ref, sockets: make(map[int64]string), listenSockets: make(map[int64]string), - id: id, + ID: id, } - db.get().addServer(id, svr) - return newIdentifer(RefServer, id, nil) + db.addServer(id, svr) + return svr } -// RegisterListenSocket registers the given listen socket s in channelz database -// with ref as its reference name, and add it to the child list of its parent -// (identified by pid). It returns the unique channelz tracking id assigned to -// this listen socket. -// -// If channelz is not turned ON, the channelz database is not mutated. 
-func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { - if pid == nil { - return nil, errors.New("a ListenSocket's parent id cannot be 0") - } - id := IDGen.genID() - if !IsOn() { - return newIdentifer(RefListenSocket, id, pid), nil - } - - ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()} - db.get().addListenSocket(id, ls, pid.Int()) - return newIdentifer(RefListenSocket, id, pid), nil -} - -// RegisterNormalSocket registers the given normal socket s in channelz database +// RegisterSocket registers the given normal socket s in channelz database // with ref as its reference name, and adds it to the child list of its parent -// (identified by pid). It returns the unique channelz tracking id assigned to -// this normal socket. +// (identified by skt.Parent, which must be set). It returns the unique channelz +// tracking id assigned to this normal socket. // // If channelz is not turned ON, the channelz database is not mutated. -func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { - if pid == nil { - return nil, errors.New("a NormalSocket's parent id cannot be 0") +func RegisterSocket(skt *Socket) *Socket { + skt.ID = IDGen.genID() + if IsOn() { + db.addSocket(skt) } - id := IDGen.genID() - if !IsOn() { - return newIdentifer(RefNormalSocket, id, pid), nil - } - - ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()} - db.get().addNormalSocket(id, ns, pid.Int()) - return newIdentifer(RefNormalSocket, id, pid), nil + return skt } // RemoveEntry removes an entry with unique channelz tracking id to be id from // channelz database. // // If channelz is not turned ON, this function is a no-op. -func RemoveEntry(id *Identifier) { +func RemoveEntry(id int64) { if !IsOn() { return } - db.get().removeEntry(id.Int()) -} - -// TraceEventDesc is what the caller of AddTraceEvent should provide to describe -// the event to be added to the channel trace. -// -// The Parent field is optional. It is used for an event that will be recorded -// in the entity's parent trace. -type TraceEventDesc struct { - Desc string - Severity Severity - Parent *TraceEventDesc -} - -// AddTraceEvent adds trace related to the entity with specified id, using the -// provided TraceEventDesc. -// -// If channelz is not turned ON, this will simply log the event descriptions. -func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) { - // Log only the trace description associated with the bottom most entity. - switch desc.Severity { - case CtUnknown, CtInfo: - l.InfoDepth(depth+1, withParens(id)+desc.Desc) - case CtWarning: - l.WarningDepth(depth+1, withParens(id)+desc.Desc) - case CtError: - l.ErrorDepth(depth+1, withParens(id)+desc.Desc) - } - - if getMaxTraceEntry() == 0 { - return - } - if IsOn() { - db.get().traceEvent(id.Int(), desc) - } -} - -// channelMap is the storage data structure for channelz. -// Methods of channelMap can be divided in two two categories with respect to locking. -// 1. Methods acquire the global lock. -// 2. Methods that can only be called when global lock is held. -// A second type of method need always to be called inside a first type of method. 
-type channelMap struct { - mu sync.RWMutex - topLevelChannels map[int64]struct{} - servers map[int64]*server - channels map[int64]*channel - subChannels map[int64]*subChannel - listenSockets map[int64]*listenSocket - normalSockets map[int64]*normalSocket -} - -func newChannelMap() *channelMap { - return &channelMap{ - topLevelChannels: make(map[int64]struct{}), - channels: make(map[int64]*channel), - listenSockets: make(map[int64]*listenSocket), - normalSockets: make(map[int64]*normalSocket), - servers: make(map[int64]*server), - subChannels: make(map[int64]*subChannel), - } -} - -func (c *channelMap) addServer(id int64, s *server) { - c.mu.Lock() - s.cm = c - c.servers[id] = s - c.mu.Unlock() -} - -func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) { - c.mu.Lock() - cn.cm = c - cn.trace.cm = c - c.channels[id] = cn - if isTopChannel { - c.topLevelChannels[id] = struct{}{} - } else { - c.findEntry(pid).addChild(id, cn) - } - c.mu.Unlock() -} - -func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) { - c.mu.Lock() - sc.cm = c - sc.trace.cm = c - c.subChannels[id] = sc - c.findEntry(pid).addChild(id, sc) - c.mu.Unlock() -} - -func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) { - c.mu.Lock() - ls.cm = c - c.listenSockets[id] = ls - c.findEntry(pid).addChild(id, ls) - c.mu.Unlock() -} - -func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) { - c.mu.Lock() - ns.cm = c - c.normalSockets[id] = ns - c.findEntry(pid).addChild(id, ns) - c.mu.Unlock() -} - -// removeEntry triggers the removal of an entry, which may not indeed delete the entry, if it has to -// wait on the deletion of its children and until no other entity's channel trace references it. -// It may lead to a chain of entry deletion. For example, deleting the last socket of a gracefully -// shutting down server will lead to the server being also deleted. -func (c *channelMap) removeEntry(id int64) { - c.mu.Lock() - c.findEntry(id).triggerDelete() - c.mu.Unlock() -} - -// c.mu must be held by the caller -func (c *channelMap) decrTraceRefCount(id int64) { - e := c.findEntry(id) - if v, ok := e.(tracedChannel); ok { - v.decrTraceRefCount() - e.deleteSelfIfReady() - } -} - -// c.mu must be held by the caller. -func (c *channelMap) findEntry(id int64) entry { - var v entry - var ok bool - if v, ok = c.channels[id]; ok { - return v - } - if v, ok = c.subChannels[id]; ok { - return v - } - if v, ok = c.servers[id]; ok { - return v - } - if v, ok = c.listenSockets[id]; ok { - return v - } - if v, ok = c.normalSockets[id]; ok { - return v - } - return &dummyEntry{idNotFound: id} -} - -// c.mu must be held by the caller -// deleteEntry simply deletes an entry from the channelMap. Before calling this -// method, caller must check this entry is ready to be deleted, i.e removeEntry() -// has been called on it, and no children still exist. -// Conditionals are ordered by the expected frequency of deletion of each entity -// type, in order to optimize performance. 
-func (c *channelMap) deleteEntry(id int64) { - var ok bool - if _, ok = c.normalSockets[id]; ok { - delete(c.normalSockets, id) - return - } - if _, ok = c.subChannels[id]; ok { - delete(c.subChannels, id) - return - } - if _, ok = c.channels[id]; ok { - delete(c.channels, id) - delete(c.topLevelChannels, id) - return - } - if _, ok = c.listenSockets[id]; ok { - delete(c.listenSockets, id) - return - } - if _, ok = c.servers[id]; ok { - delete(c.servers, id) - return - } -} - -func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) { - c.mu.Lock() - child := c.findEntry(id) - childTC, ok := child.(tracedChannel) - if !ok { - c.mu.Unlock() - return - } - childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) - if desc.Parent != nil { - parent := c.findEntry(child.getParentID()) - var chanType RefChannelType - switch child.(type) { - case *channel: - chanType = RefChannel - case *subChannel: - chanType = RefSubChannel - } - if parentTC, ok := parent.(tracedChannel); ok { - parentTC.getChannelTrace().append(&TraceEvent{ - Desc: desc.Parent.Desc, - Severity: desc.Parent.Severity, - Timestamp: time.Now(), - RefID: id, - RefName: childTC.getRefName(), - RefType: chanType, - }) - childTC.incrTraceRefCount() - } - } - c.mu.Unlock() -} - -type int64Slice []int64 - -func (s int64Slice) Len() int { return len(s) } -func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } - -func copyMap(m map[int64]string) map[int64]string { - n := make(map[int64]string) - for k, v := range m { - n[k] = v - } - return n -} - -func min(a, b int64) int64 { - if a < b { - return a - } - return b -} - -func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { - if maxResults <= 0 { - maxResults = EntryPerPage - } - c.mu.RLock() - l := int64(len(c.topLevelChannels)) - ids := make([]int64, 0, l) - cns := make([]*channel, 0, min(l, maxResults)) - - for k := range c.topLevelChannels { - ids = append(ids, k) - } - sort.Sort(int64Slice(ids)) - idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) - count := int64(0) - var end bool - var t []*ChannelMetric - for i, v := range ids[idx:] { - if count == maxResults { - break - } - if cn, ok := c.channels[v]; ok { - cns = append(cns, cn) - t = append(t, &ChannelMetric{ - NestedChans: copyMap(cn.nestedChans), - SubChans: copyMap(cn.subChans), - }) - count++ - } - if i == len(ids[idx:])-1 { - end = true - break - } - } - c.mu.RUnlock() - if count == 0 { - end = true - } - - for i, cn := range cns { - t[i].ChannelData = cn.c.ChannelzMetric() - t[i].ID = cn.id - t[i].RefName = cn.refName - t[i].Trace = cn.trace.dumpData() - } - return t, end -} - -func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) { - if maxResults <= 0 { - maxResults = EntryPerPage - } - c.mu.RLock() - l := int64(len(c.servers)) - ids := make([]int64, 0, l) - ss := make([]*server, 0, min(l, maxResults)) - for k := range c.servers { - ids = append(ids, k) - } - sort.Sort(int64Slice(ids)) - idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) - count := int64(0) - var end bool - var s []*ServerMetric - for i, v := range ids[idx:] { - if count == maxResults { - break - } - if svr, ok := c.servers[v]; ok { - ss = append(ss, svr) - s = append(s, &ServerMetric{ - ListenSockets: copyMap(svr.listenSockets), - }) - count++ - } - if i == len(ids[idx:])-1 { - end = true - break - } - } - c.mu.RUnlock() 
- if count == 0 { - end = true - } - - for i, svr := range ss { - s[i].ServerData = svr.s.ChannelzMetric() - s[i].ID = svr.id - s[i].RefName = svr.refName - } - return s, end -} - -func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { - if maxResults <= 0 { - maxResults = EntryPerPage - } - var svr *server - var ok bool - c.mu.RLock() - if svr, ok = c.servers[id]; !ok { - // server with id doesn't exist. - c.mu.RUnlock() - return nil, true - } - svrskts := svr.sockets - l := int64(len(svrskts)) - ids := make([]int64, 0, l) - sks := make([]*normalSocket, 0, min(l, maxResults)) - for k := range svrskts { - ids = append(ids, k) - } - sort.Sort(int64Slice(ids)) - idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) - count := int64(0) - var end bool - for i, v := range ids[idx:] { - if count == maxResults { - break - } - if ns, ok := c.normalSockets[v]; ok { - sks = append(sks, ns) - count++ - } - if i == len(ids[idx:])-1 { - end = true - break - } - } - c.mu.RUnlock() - if count == 0 { - end = true - } - s := make([]*SocketMetric, 0, len(sks)) - for _, ns := range sks { - sm := &SocketMetric{} - sm.SocketData = ns.s.ChannelzMetric() - sm.ID = ns.id - sm.RefName = ns.refName - s = append(s, sm) - } - return s, end -} - -func (c *channelMap) GetChannel(id int64) *ChannelMetric { - cm := &ChannelMetric{} - var cn *channel - var ok bool - c.mu.RLock() - if cn, ok = c.channels[id]; !ok { - // channel with id doesn't exist. - c.mu.RUnlock() - return nil - } - cm.NestedChans = copyMap(cn.nestedChans) - cm.SubChans = copyMap(cn.subChans) - // cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when - // holding the lock to prevent potential data race. - chanCopy := cn.c - c.mu.RUnlock() - cm.ChannelData = chanCopy.ChannelzMetric() - cm.ID = cn.id - cm.RefName = cn.refName - cm.Trace = cn.trace.dumpData() - return cm -} - -func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric { - cm := &SubChannelMetric{} - var sc *subChannel - var ok bool - c.mu.RLock() - if sc, ok = c.subChannels[id]; !ok { - // subchannel with id doesn't exist. - c.mu.RUnlock() - return nil - } - cm.Sockets = copyMap(sc.sockets) - // sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when - // holding the lock to prevent potential data race. - chanCopy := sc.c - c.mu.RUnlock() - cm.ChannelData = chanCopy.ChannelzMetric() - cm.ID = sc.id - cm.RefName = sc.refName - cm.Trace = sc.trace.dumpData() - return cm -} - -func (c *channelMap) GetSocket(id int64) *SocketMetric { - sm := &SocketMetric{} - c.mu.RLock() - if ls, ok := c.listenSockets[id]; ok { - c.mu.RUnlock() - sm.SocketData = ls.s.ChannelzMetric() - sm.ID = ls.id - sm.RefName = ls.refName - return sm - } - if ns, ok := c.normalSockets[id]; ok { - c.mu.RUnlock() - sm.SocketData = ns.s.ChannelzMetric() - sm.ID = ns.id - sm.RefName = ns.refName - return sm - } - c.mu.RUnlock() - return nil -} - -func (c *channelMap) GetServer(id int64) *ServerMetric { - sm := &ServerMetric{} - var svr *server - var ok bool - c.mu.RLock() - if svr, ok = c.servers[id]; !ok { - c.mu.RUnlock() - return nil - } - sm.ListenSockets = copyMap(svr.listenSockets) - c.mu.RUnlock() - sm.ID = svr.id - sm.RefName = svr.refName - sm.ServerData = svr.s.ChannelzMetric() - return sm + db.removeEntry(id) } // IDGenerator is an incrementing atomic that tracks IDs for channelz entities. 
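The query methods deleted above (GetTopChannels, GetServers, GetServerSockets) all shared one pagination idiom: collect the map keys, sort them, binary-search for the first id at or above the requested start, and return at most maxResults entries together with a flag marking the final page. A minimal standalone sketch of that idiom follows; the names are hypothetical and this is not the gRPC implementation, which after this refactor lives behind db.removeEntry and the new per-entity types.

package main

import (
	"fmt"
	"sort"
)

// paginate returns up to max ids from m that are >= start, in ascending
// order, plus a flag reporting whether the final page was reached.
// Sketch of the deleted idiom only, not the gRPC code.
func paginate(m map[int64]string, start, max int64) ([]int64, bool) {
	ids := make([]int64, 0, len(m))
	for id := range m {
		ids = append(ids, id)
	}
	sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
	// Binary search for the first id at or above the requested start.
	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= start })
	rest := ids[idx:]
	if int64(len(rest)) <= max {
		return rest, true // fewer than max entries remain: last page
	}
	return rest[:max], false
}

func main() {
	entities := map[int64]string{1: "ch1", 3: "ch3", 5: "ch5", 7: "ch7"}
	page, end := paginate(entities, 4, 2)
	fmt.Println(page, end) // [5 7] true
}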
@@ -761,3 +220,11 @@ func (i *IDGenerator) Reset() { func (i *IDGenerator) genID() int64 { return atomic.AddInt64(&i.id, 1) } + +// Identifier is an opaque channelz identifier used to expose channelz symbols +// outside of grpc. Currently only implemented by Channel since no other +// types require exposure outside grpc. +type Identifier interface { + Entity + channelzIdentifier() +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/id.go b/vendor/google.golang.org/grpc/internal/channelz/id.go deleted file mode 100644 index c9a27acd..00000000 --- a/vendor/google.golang.org/grpc/internal/channelz/id.go +++ /dev/null @@ -1,75 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -import "fmt" - -// Identifier is an opaque identifier which uniquely identifies an entity in the -// channelz database. -type Identifier struct { - typ RefChannelType - id int64 - str string - pid *Identifier -} - -// Type returns the entity type corresponding to id. -func (id *Identifier) Type() RefChannelType { - return id.typ -} - -// Int returns the integer identifier corresponding to id. -func (id *Identifier) Int() int64 { - return id.id -} - -// String returns a string representation of the entity corresponding to id. -// -// This includes some information about the parent as well. Examples: -// Top-level channel: [Channel #channel-number] -// Nested channel: [Channel #parent-channel-number Channel #channel-number] -// Sub channel: [Channel #parent-channel SubChannel #subchannel-number] -func (id *Identifier) String() string { - return id.str -} - -// Equal returns true if other is the same as id. -func (id *Identifier) Equal(other *Identifier) bool { - if (id != nil) != (other != nil) { - return false - } - if id == nil && other == nil { - return true - } - return id.typ == other.typ && id.id == other.id && id.pid == other.pid -} - -// NewIdentifierForTesting returns a new opaque identifier to be used only for -// testing purposes. -func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { - return newIdentifer(typ, id, pid) -} - -func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { - str := fmt.Sprintf("%s #%d", typ, id) - if pid != nil { - str = fmt.Sprintf("%s %s", pid, str) - } - return &Identifier{typ: typ, id: id, str: str, pid: pid} -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go index f89e6f77..ee4d7212 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -26,53 +26,49 @@ import ( var logger = grpclog.Component("channelz") -func withParens(id *Identifier) string { - return "[" + id.String() + "] " -} - // Info logs and adds a trace event if channelz is on. 
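The Identifier interface added above pairs Entity with an unexported method, channelzIdentifier(). That is the standard Go way to "seal" an interface: only code inside the defining package can declare a method whose name is unexported in that package, so no outside type can satisfy it, which is what lets the comment promise that only Channel implements it. A sketch of the pattern with hypothetical names:

package sealed

import "fmt"

// token mirrors channelz.Identifier: the unexported marker method means
// only types declared in this package can satisfy the interface.
type token interface {
	fmt.Stringer
	sealedToken() // unexported: external packages cannot implement this
}

// channelToken is a hypothetical in-package implementation.
type channelToken int64

func (c channelToken) String() string { return fmt.Sprintf("Channel #%d", int64(c)) }
func (c channelToken) sealedToken()   {}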
-func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { - AddTraceEvent(l, id, 1, &TraceEventDesc{ +func Info(l grpclog.DepthLoggerV2, e Entity, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ Desc: fmt.Sprint(args...), Severity: CtInfo, }) } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { - AddTraceEvent(l, id, 1, &TraceEventDesc{ +func Infof(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ Desc: fmt.Sprintf(format, args...), Severity: CtInfo, }) } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { - AddTraceEvent(l, id, 1, &TraceEventDesc{ +func Warning(l grpclog.DepthLoggerV2, e Entity, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ Desc: fmt.Sprint(args...), Severity: CtWarning, }) } // Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { - AddTraceEvent(l, id, 1, &TraceEventDesc{ +func Warningf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ Desc: fmt.Sprintf(format, args...), Severity: CtWarning, }) } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { - AddTraceEvent(l, id, 1, &TraceEventDesc{ +func Error(l grpclog.DepthLoggerV2, e Entity, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ Desc: fmt.Sprint(args...), Severity: CtError, }) } // Errorf logs and adds a trace event if channelz is on. -func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { - AddTraceEvent(l, id, 1, &TraceEventDesc{ +func Errorf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ Desc: fmt.Sprintf(format, args...), Severity: CtError, }) diff --git a/vendor/google.golang.org/grpc/internal/channelz/server.go b/vendor/google.golang.org/grpc/internal/channelz/server.go new file mode 100644 index 00000000..cdfc49d6 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/server.go @@ -0,0 +1,119 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "sync/atomic" +) + +// Server is the channelz representation of a server. +type Server struct { + Entity + ID int64 + RefName string + + ServerMetrics ServerMetrics + + closeCalled bool + sockets map[int64]string + listenSockets map[int64]string + cm *channelMap +} + +// ServerMetrics defines a struct containing metrics for servers. +type ServerMetrics struct { + // The number of incoming calls started on the server. + CallsStarted atomic.Int64 + // The number of incoming calls that have completed with an OK status. + CallsSucceeded atomic.Int64 + // The number of incoming calls that have a completed with a non-OK status. 
+ CallsFailed atomic.Int64 + // The last time a call was started on the server. + LastCallStartedTimestamp atomic.Int64 +} + +// NewServerMetricsForTesting returns an initialized ServerMetrics. +func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *ServerMetrics { + sm := &ServerMetrics{} + sm.CallsStarted.Store(started) + sm.CallsSucceeded.Store(succeeded) + sm.CallsFailed.Store(failed) + sm.LastCallStartedTimestamp.Store(timestamp) + return sm +} + +func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) { + sm.CallsStarted.Store(o.CallsStarted.Load()) + sm.CallsSucceeded.Store(o.CallsSucceeded.Load()) + sm.CallsFailed.Store(o.CallsFailed.Load()) + sm.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load()) +} + +// ListenSockets returns the listening sockets for s. +func (s *Server) ListenSockets() map[int64]string { + db.mu.RLock() + defer db.mu.RUnlock() + return copyMap(s.listenSockets) +} + +// String returns a printable description of s. +func (s *Server) String() string { + return fmt.Sprintf("Server #%d", s.ID) +} + +func (s *Server) id() int64 { + return s.ID +} + +func (s *Server) addChild(id int64, e entry) { + switch v := e.(type) { + case *Socket: + switch v.SocketType { + case SocketTypeNormal: + s.sockets[id] = v.RefName + case SocketTypeListen: + s.listenSockets[id] = v.RefName + } + default: + logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) + } +} + +func (s *Server) deleteChild(id int64) { + delete(s.sockets, id) + delete(s.listenSockets, id) + s.deleteSelfIfReady() +} + +func (s *Server) triggerDelete() { + s.closeCalled = true + s.deleteSelfIfReady() +} + +func (s *Server) deleteSelfIfReady() { + if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { + return + } + s.cm.deleteEntry(s.ID) +} + +func (s *Server) getParentID() int64 { + return 0 +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/socket.go b/vendor/google.golang.org/grpc/internal/channelz/socket.go new file mode 100644 index 00000000..fa64834b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/socket.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "net" + "sync/atomic" + + "google.golang.org/grpc/credentials" +) + +// SocketMetrics defines the struct that the implementor of Socket interface +// should return from ChannelzMetric(). +type SocketMetrics struct { + // The number of streams that have been started. + StreamsStarted atomic.Int64 + // The number of streams that have ended successfully: + // On client side, receiving frame with eos bit set. + // On server side, sending frame with eos bit set. + StreamsSucceeded atomic.Int64 + // The number of streams that have ended unsuccessfully: + // On client side, termination without receiving frame with eos bit set. + // On server side, termination without sending frame with eos bit set. 
+ StreamsFailed atomic.Int64 + // The number of messages successfully sent on this socket. + MessagesSent atomic.Int64 + MessagesReceived atomic.Int64 + // The number of keep alives sent. This is typically implemented with HTTP/2 + // ping messages. + KeepAlivesSent atomic.Int64 + // The last time a stream was created by this endpoint. Usually unset for + // servers. + LastLocalStreamCreatedTimestamp atomic.Int64 + // The last time a stream was created by the remote endpoint. Usually unset + // for clients. + LastRemoteStreamCreatedTimestamp atomic.Int64 + // The last time a message was sent by this endpoint. + LastMessageSentTimestamp atomic.Int64 + // The last time a message was received by this endpoint. + LastMessageReceivedTimestamp atomic.Int64 +} + +// EphemeralSocketMetrics are metrics that change rapidly and are tracked +// outside of channelz. +type EphemeralSocketMetrics struct { + // The amount of window, granted to the local endpoint by the remote endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + LocalFlowControlWindow int64 + // The amount of window, granted to the remote endpoint by the local endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + RemoteFlowControlWindow int64 +} + +type SocketType string + +const ( + SocketTypeNormal = "NormalSocket" + SocketTypeListen = "ListenSocket" +) + +type Socket struct { + Entity + SocketType SocketType + ID int64 + Parent Entity + cm *channelMap + SocketMetrics SocketMetrics + EphemeralMetrics func() *EphemeralSocketMetrics + + RefName string + // The locally bound address. Immutable. + LocalAddr net.Addr + // The remote bound address. May be absent. Immutable. + RemoteAddr net.Addr + // Optional, represents the name of the remote endpoint, if different than + // the original target name. Immutable. + RemoteName string + // Immutable. + SocketOptions *SocketOptionData + // Immutable. + Security credentials.ChannelzSecurityValue +} + +func (ls *Socket) String() string { + return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID) +} + +func (ls *Socket) id() int64 { + return ls.ID +} + +func (ls *Socket) addChild(id int64, e entry) { + logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) +} + +func (ls *Socket) deleteChild(id int64) { + logger.Errorf("cannot delete a child (id = %d) from a listen socket", id) +} + +func (ls *Socket) triggerDelete() { + ls.cm.deleteEntry(ls.ID) + ls.Parent.(entry).deleteChild(ls.ID) +} + +func (ls *Socket) deleteSelfIfReady() { + logger.Errorf("cannot call deleteSelfIfReady on a listen socket") +} + +func (ls *Socket) getParentID() int64 { + return ls.Parent.id() +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go new file mode 100644 index 00000000..3b88e4cb --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go @@ -0,0 +1,151 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "sync/atomic" +) + +// SubChannel is the channelz representation of a subchannel. +type SubChannel struct { + Entity + // ID is the channelz id of this subchannel. + ID int64 + // RefName is the human readable reference string of this subchannel. + RefName string + closeCalled bool + sockets map[int64]string + parent *Channel + trace *ChannelTrace + traceRefCount int32 + + ChannelMetrics ChannelMetrics +} + +func (sc *SubChannel) String() string { + return fmt.Sprintf("%s SubChannel #%d", sc.parent, sc.ID) +} + +func (sc *SubChannel) id() int64 { + return sc.ID +} + +func (sc *SubChannel) Sockets() map[int64]string { + db.mu.RLock() + defer db.mu.RUnlock() + return copyMap(sc.sockets) +} + +func (sc *SubChannel) Trace() *ChannelTrace { + db.mu.RLock() + defer db.mu.RUnlock() + return sc.trace.copy() +} + +func (sc *SubChannel) addChild(id int64, e entry) { + if v, ok := e.(*Socket); ok && v.SocketType == SocketTypeNormal { + sc.sockets[id] = v.RefName + } else { + logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + } +} + +func (sc *SubChannel) deleteChild(id int64) { + delete(sc.sockets, id) + sc.deleteSelfIfReady() +} + +func (sc *SubChannel) triggerDelete() { + sc.closeCalled = true + sc.deleteSelfIfReady() +} + +func (sc *SubChannel) getParentID() int64 { + return sc.parent.ID +} + +// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which +// means deleting the subchannel reference from its parent's child list. +// +// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of +// the corresponding grpc object has been invoked, and the subchannel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. +func (sc *SubChannel) deleteSelfFromTree() (deleted bool) { + if !sc.closeCalled || len(sc.sockets) != 0 { + return false + } + sc.parent.deleteChild(sc.ID) + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means +// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query +// the subchannel, and its memory will be garbage collected. +// +// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (sc *SubChannel) deleteSelfFromMap() (delete bool) { + return sc.getTraceRefCount() == 0 +} + +// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. +// The delete process includes two steps: +// 1. 
delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. +func (sc *SubChannel) deleteSelfIfReady() { + if !sc.deleteSelfFromTree() { + return + } + if !sc.deleteSelfFromMap() { + return + } + db.deleteEntry(sc.ID) + sc.trace.clear() +} + +func (sc *SubChannel) getChannelTrace() *ChannelTrace { + return sc.trace +} + +func (sc *SubChannel) incrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, 1) +} + +func (sc *SubChannel) decrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, -1) +} + +func (sc *SubChannel) getTraceRefCount() int { + i := atomic.LoadInt32(&sc.traceRefCount) + return int(i) +} + +func (sc *SubChannel) getRefName() string { + return sc.RefName +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go similarity index 83% rename from vendor/google.golang.org/grpc/internal/channelz/types_linux.go rename to vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go index 1b1c4cce..5ac73ff8 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go @@ -49,3 +49,17 @@ func (s *SocketOptionData) Getsockopt(fd uintptr) { s.TCPInfo = v } } + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(socket any) *SocketOptionData { + c, ok := socket.(syscall.Conn) + if !ok { + return nil + } + data := &SocketOptionData{} + if rawConn, err := c.SyscallConn(); err == nil { + rawConn.Control(data.Getsockopt) + return data + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go similarity index 90% rename from vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go rename to vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go index 8b06eed1..d1ed8df6 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux /* * @@ -41,3 +40,8 @@ func (s *SocketOptionData) Getsockopt(fd uintptr) { logger.Warning("Channelz: socket options are not supported on non-linux environments") }) } + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(c any) *SocketOptionData { + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go new file mode 100644 index 00000000..36b86740 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go @@ -0,0 +1,204 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package channelz + +import ( + "fmt" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/grpclog" +) + +const ( + defaultMaxTraceEntry int32 = 30 +) + +var maxTraceEntry = defaultMaxTraceEntry + +// SetMaxTraceEntry sets maximum number of trace entries per entity (i.e. +// channel/subchannel). Setting it to 0 will disable channel tracing. +func SetMaxTraceEntry(i int32) { + atomic.StoreInt32(&maxTraceEntry, i) +} + +// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per +// entity to default. +func ResetMaxTraceEntryToDefault() { + atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry) +} + +func getMaxTraceEntry() int { + i := atomic.LoadInt32(&maxTraceEntry) + return int(i) +} + +// traceEvent is an internal representation of a single trace event +type traceEvent struct { + // Desc is a simple description of the trace event. + Desc string + // Severity states the severity of this trace event. + Severity Severity + // Timestamp is the event time. + Timestamp time.Time + // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is + // involved in this event. + // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside []) + RefID int64 + // RefName is the reference name for the entity that gets referenced in the event. + RefName string + // RefType indicates the referenced entity type, i.e Channel or SubChannel. + RefType RefChannelType +} + +// TraceEvent is what the caller of AddTraceEvent should provide to describe the +// event to be added to the channel trace. +// +// The Parent field is optional. It is used for an event that will be recorded +// in the entity's parent trace. +type TraceEvent struct { + Desc string + Severity Severity + Parent *TraceEvent +} + +type ChannelTrace struct { + cm *channelMap + clearCalled bool + CreationTime time.Time + EventNum int64 + mu sync.Mutex + Events []*traceEvent +} + +func (c *ChannelTrace) copy() *ChannelTrace { + return &ChannelTrace{ + CreationTime: c.CreationTime, + EventNum: c.EventNum, + Events: append(([]*traceEvent)(nil), c.Events...), + } +} + +func (c *ChannelTrace) append(e *traceEvent) { + c.mu.Lock() + if len(c.Events) == getMaxTraceEntry() { + del := c.Events[0] + c.Events = c.Events[1:] + if del.RefID != 0 { + // start recursive cleanup in a goroutine to not block the call originated from grpc. + go func() { + // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. + c.cm.mu.Lock() + c.cm.decrTraceRefCount(del.RefID) + c.cm.mu.Unlock() + }() + } + } + e.Timestamp = time.Now() + c.Events = append(c.Events, e) + c.EventNum++ + c.mu.Unlock() +} + +func (c *ChannelTrace) clear() { + if c.clearCalled { + return + } + c.clearCalled = true + c.mu.Lock() + for _, e := range c.Events { + if e.RefID != 0 { + // caller should have already held the c.cm.mu lock. + c.cm.decrTraceRefCount(e.RefID) + } + } + c.mu.Unlock() +} + +// Severity is the severity level of a trace event. +// The canonical enumeration of all valid values is here: +// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. +type Severity int + +const ( + // CtUnknown indicates unknown severity of a trace event. + CtUnknown Severity = iota + // CtInfo indicates info level severity of a trace event. + CtInfo + // CtWarning indicates warning level severity of a trace event. + CtWarning + // CtError indicates error level severity of a trace event. 
+ CtError +) + +// RefChannelType is the type of the entity being referenced in a trace event. +type RefChannelType int + +const ( + // RefUnknown indicates an unknown entity type, the zero value for this type. + RefUnknown RefChannelType = iota + // RefChannel indicates the referenced entity is a Channel. + RefChannel + // RefSubChannel indicates the referenced entity is a SubChannel. + RefSubChannel + // RefServer indicates the referenced entity is a Server. + RefServer + // RefListenSocket indicates the referenced entity is a ListenSocket. + RefListenSocket + // RefNormalSocket indicates the referenced entity is a NormalSocket. + RefNormalSocket +) + +var refChannelTypeToString = map[RefChannelType]string{ + RefUnknown: "Unknown", + RefChannel: "Channel", + RefSubChannel: "SubChannel", + RefServer: "Server", + RefListenSocket: "ListenSocket", + RefNormalSocket: "NormalSocket", +} + +func (r RefChannelType) String() string { + return refChannelTypeToString[r] +} + +// AddTraceEvent adds trace related to the entity with specified id, using the +// provided TraceEventDesc. +// +// If channelz is not turned ON, this will simply log the event descriptions. +func AddTraceEvent(l grpclog.DepthLoggerV2, e Entity, depth int, desc *TraceEvent) { + // Log only the trace description associated with the bottom most entity. + d := fmt.Sprintf("[%s]%s", e, desc.Desc) + switch desc.Severity { + case CtUnknown, CtInfo: + l.InfoDepth(depth+1, d) + case CtWarning: + l.WarningDepth(depth+1, d) + case CtError: + l.ErrorDepth(depth+1, d) + } + + if getMaxTraceEntry() == 0 { + return + } + if IsOn() { + db.traceEvent(e.id(), desc) + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go deleted file mode 100644 index 1d4020f5..00000000 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ /dev/null @@ -1,727 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -import ( - "net" - "sync" - "sync/atomic" - "time" - - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" -) - -// entry represents a node in the channelz database. -type entry interface { - // addChild adds a child e, whose channelz id is id to child list - addChild(id int64, e entry) - // deleteChild deletes a child with channelz id to be id from child list - deleteChild(id int64) - // triggerDelete tries to delete self from channelz database. However, if child - // list is not empty, then deletion from the database is on hold until the last - // child is deleted from database. - triggerDelete() - // deleteSelfIfReady check whether triggerDelete() has been called before, and whether child - // list is now empty. If both conditions are met, then delete self from database. - deleteSelfIfReady() - // getParentID returns parent ID of the entry. 0 value parent ID means no parent. 
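ChannelTrace.append in the new trace.go above keeps a bounded event log: once the buffer is at getMaxTraceEntry() capacity, the oldest event is evicted before the new one is appended, while EventNum keeps growing monotonically so readers can tell how many events ever existed. A self-contained sketch of that bounded-buffer behavior, with hypothetical names and without the cross-entity trace-refcount bookkeeping the real code does on eviction:

package trace

import (
	"sync"
	"time"
)

type event struct {
	Desc      string
	Timestamp time.Time
}

type boundedLog struct {
	mu     sync.Mutex
	max    int
	total  int64 // events ever appended, including evicted ones
	events []*event
}

func (l *boundedLog) append(e *event) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if len(l.events) == l.max {
		l.events = l.events[1:] // evict the oldest event
	}
	e.Timestamp = time.Now()
	l.events = append(l.events, e)
	l.total++
}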
- getParentID() int64 -} - -// dummyEntry is a fake entry to handle entry not found case. -type dummyEntry struct { - idNotFound int64 -} - -func (d *dummyEntry) addChild(id int64, e entry) { - // Note: It is possible for a normal program to reach here under race condition. - // For example, there could be a race between ClientConn.Close() info being propagated - // to addrConn and http2Client. ClientConn.Close() cancel the context and result - // in http2Client to error. The error info is then caught by transport monitor - // and before addrConn.tearDown() is called in side ClientConn.Close(). Therefore, - // the addrConn will create a new transport. And when registering the new transport in - // channelz, its parent addrConn could have already been torn down and deleted - // from channelz tracking, and thus reach the code here. - logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) -} - -func (d *dummyEntry) deleteChild(id int64) { - // It is possible for a normal program to reach here under race condition. - // Refer to the example described in addChild(). - logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) -} - -func (d *dummyEntry) triggerDelete() { - logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) -} - -func (*dummyEntry) deleteSelfIfReady() { - // code should not reach here. deleteSelfIfReady is always called on an existing entry. -} - -func (*dummyEntry) getParentID() int64 { - return 0 -} - -// ChannelMetric defines the info channelz provides for a specific Channel, which -// includes ChannelInternalMetric and channelz-specific data, such as channelz id, -// child list, etc. -type ChannelMetric struct { - // ID is the channelz id of this channel. - ID int64 - // RefName is the human readable reference string of this channel. - RefName string - // ChannelData contains channel internal metric reported by the channel through - // ChannelzMetric(). - ChannelData *ChannelInternalMetric - // NestedChans tracks the nested channel type children of this channel in the format of - // a map from nested channel channelz id to corresponding reference string. - NestedChans map[int64]string - // SubChans tracks the subchannel type children of this channel in the format of a - // map from subchannel channelz id to corresponding reference string. - SubChans map[int64]string - // Sockets tracks the socket type children of this channel in the format of a map - // from socket channelz id to corresponding reference string. - // Note current grpc implementation doesn't allow channel having sockets directly, - // therefore, this is field is unused. - Sockets map[int64]string - // Trace contains the most recent traced events. - Trace *ChannelTrace -} - -// SubChannelMetric defines the info channelz provides for a specific SubChannel, -// which includes ChannelInternalMetric and channelz-specific data, such as -// channelz id, child list, etc. -type SubChannelMetric struct { - // ID is the channelz id of this subchannel. - ID int64 - // RefName is the human readable reference string of this subchannel. - RefName string - // ChannelData contains subchannel internal metric reported by the subchannel - // through ChannelzMetric(). 
- ChannelData *ChannelInternalMetric - // NestedChans tracks the nested channel type children of this subchannel in the format of - // a map from nested channel channelz id to corresponding reference string. - // Note current grpc implementation doesn't allow subchannel to have nested channels - // as children, therefore, this field is unused. - NestedChans map[int64]string - // SubChans tracks the subchannel type children of this subchannel in the format of a - // map from subchannel channelz id to corresponding reference string. - // Note current grpc implementation doesn't allow subchannel to have subchannels - // as children, therefore, this field is unused. - SubChans map[int64]string - // Sockets tracks the socket type children of this subchannel in the format of a map - // from socket channelz id to corresponding reference string. - Sockets map[int64]string - // Trace contains the most recent traced events. - Trace *ChannelTrace -} - -// ChannelInternalMetric defines the struct that the implementor of Channel interface -// should return from ChannelzMetric(). -type ChannelInternalMetric struct { - // current connectivity state of the channel. - State connectivity.State - // The target this channel originally tried to connect to. May be absent - Target string - // The number of calls started on the channel. - CallsStarted int64 - // The number of calls that have completed with an OK status. - CallsSucceeded int64 - // The number of calls that have a completed with a non-OK status. - CallsFailed int64 - // The last time a call was started on the channel. - LastCallStartedTimestamp time.Time -} - -// ChannelTrace stores traced events on a channel/subchannel and related info. -type ChannelTrace struct { - // EventNum is the number of events that ever got traced (i.e. including those that have been deleted) - EventNum int64 - // CreationTime is the creation time of the trace. - CreationTime time.Time - // Events stores the most recent trace events (up to $maxTraceEntry, newer event will overwrite the - // oldest one) - Events []*TraceEvent -} - -// TraceEvent represent a single trace event -type TraceEvent struct { - // Desc is a simple description of the trace event. - Desc string - // Severity states the severity of this trace event. - Severity Severity - // Timestamp is the event time. - Timestamp time.Time - // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is - // involved in this event. - // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside []) - RefID int64 - // RefName is the reference name for the entity that gets referenced in the event. - RefName string - // RefType indicates the referenced entity type, i.e Channel or SubChannel. - RefType RefChannelType -} - -// Channel is the interface that should be satisfied in order to be tracked by -// channelz as Channel or SubChannel. -type Channel interface { - ChannelzMetric() *ChannelInternalMetric -} - -type dummyChannel struct{} - -func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric { - return &ChannelInternalMetric{} -} - -type channel struct { - refName string - c Channel - closeCalled bool - nestedChans map[int64]string - subChans map[int64]string - id int64 - pid int64 - cm *channelMap - trace *channelTrace - // traceRefCount is the number of trace events that reference this channel. - // Non-zero traceRefCount means the trace of this channel cannot be deleted. 
- traceRefCount int32 -} - -func (c *channel) addChild(id int64, e entry) { - switch v := e.(type) { - case *subChannel: - c.subChans[id] = v.refName - case *channel: - c.nestedChans[id] = v.refName - default: - logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) - } -} - -func (c *channel) deleteChild(id int64) { - delete(c.subChans, id) - delete(c.nestedChans, id) - c.deleteSelfIfReady() -} - -func (c *channel) triggerDelete() { - c.closeCalled = true - c.deleteSelfIfReady() -} - -func (c *channel) getParentID() int64 { - return c.pid -} - -// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means -// deleting the channel reference from its parent's child list. -// -// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the -// corresponding grpc object has been invoked, and the channel does not have any children left. -// -// The returned boolean value indicates whether the channel has been successfully deleted from tree. -func (c *channel) deleteSelfFromTree() (deleted bool) { - if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { - return false - } - // not top channel - if c.pid != 0 { - c.cm.findEntry(c.pid).deleteChild(c.id) - } - return true -} - -// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means -// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the -// channel, and its memory will be garbage collected. -// -// The trace reference count of the channel must be 0 in order to be deleted from the map. This is -// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, -// the trace of the referenced entity must not be deleted. In order to release the resource allocated -// by grpc, the reference to the grpc object is reset to a dummy object. -// -// deleteSelfFromMap must be called after deleteSelfFromTree returns true. -// -// It returns a bool to indicate whether the channel can be safely deleted from map. -func (c *channel) deleteSelfFromMap() (delete bool) { - if c.getTraceRefCount() != 0 { - c.c = &dummyChannel{} - return false - } - return true -} - -// deleteSelfIfReady tries to delete the channel itself from the channelz database. -// The delete process includes two steps: -// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its -// parent's child list. -// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id -// will return entry not found error. 
-func (c *channel) deleteSelfIfReady() { - if !c.deleteSelfFromTree() { - return - } - if !c.deleteSelfFromMap() { - return - } - c.cm.deleteEntry(c.id) - c.trace.clear() -} - -func (c *channel) getChannelTrace() *channelTrace { - return c.trace -} - -func (c *channel) incrTraceRefCount() { - atomic.AddInt32(&c.traceRefCount, 1) -} - -func (c *channel) decrTraceRefCount() { - atomic.AddInt32(&c.traceRefCount, -1) -} - -func (c *channel) getTraceRefCount() int { - i := atomic.LoadInt32(&c.traceRefCount) - return int(i) -} - -func (c *channel) getRefName() string { - return c.refName -} - -type subChannel struct { - refName string - c Channel - closeCalled bool - sockets map[int64]string - id int64 - pid int64 - cm *channelMap - trace *channelTrace - traceRefCount int32 -} - -func (sc *subChannel) addChild(id int64, e entry) { - if v, ok := e.(*normalSocket); ok { - sc.sockets[id] = v.refName - } else { - logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) - } -} - -func (sc *subChannel) deleteChild(id int64) { - delete(sc.sockets, id) - sc.deleteSelfIfReady() -} - -func (sc *subChannel) triggerDelete() { - sc.closeCalled = true - sc.deleteSelfIfReady() -} - -func (sc *subChannel) getParentID() int64 { - return sc.pid -} - -// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which -// means deleting the subchannel reference from its parent's child list. -// -// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of -// the corresponding grpc object has been invoked, and the subchannel does not have any children left. -// -// The returned boolean value indicates whether the channel has been successfully deleted from tree. -func (sc *subChannel) deleteSelfFromTree() (deleted bool) { - if !sc.closeCalled || len(sc.sockets) != 0 { - return false - } - sc.cm.findEntry(sc.pid).deleteChild(sc.id) - return true -} - -// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means -// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query -// the subchannel, and its memory will be garbage collected. -// -// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is -// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, -// the trace of the referenced entity must not be deleted. In order to release the resource allocated -// by grpc, the reference to the grpc object is reset to a dummy object. -// -// deleteSelfFromMap must be called after deleteSelfFromTree returns true. -// -// It returns a bool to indicate whether the channel can be safely deleted from map. -func (sc *subChannel) deleteSelfFromMap() (delete bool) { - if sc.getTraceRefCount() != 0 { - // free the grpc struct (i.e. addrConn) - sc.c = &dummyChannel{} - return false - } - return true -} - -// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. -// The delete process includes two steps: -// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from -// its parent's child list. -// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup -// by id will return entry not found error. 
-func (sc *subChannel) deleteSelfIfReady() { - if !sc.deleteSelfFromTree() { - return - } - if !sc.deleteSelfFromMap() { - return - } - sc.cm.deleteEntry(sc.id) - sc.trace.clear() -} - -func (sc *subChannel) getChannelTrace() *channelTrace { - return sc.trace -} - -func (sc *subChannel) incrTraceRefCount() { - atomic.AddInt32(&sc.traceRefCount, 1) -} - -func (sc *subChannel) decrTraceRefCount() { - atomic.AddInt32(&sc.traceRefCount, -1) -} - -func (sc *subChannel) getTraceRefCount() int { - i := atomic.LoadInt32(&sc.traceRefCount) - return int(i) -} - -func (sc *subChannel) getRefName() string { - return sc.refName -} - -// SocketMetric defines the info channelz provides for a specific Socket, which -// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc. -type SocketMetric struct { - // ID is the channelz id of this socket. - ID int64 - // RefName is the human readable reference string of this socket. - RefName string - // SocketData contains socket internal metric reported by the socket through - // ChannelzMetric(). - SocketData *SocketInternalMetric -} - -// SocketInternalMetric defines the struct that the implementor of Socket interface -// should return from ChannelzMetric(). -type SocketInternalMetric struct { - // The number of streams that have been started. - StreamsStarted int64 - // The number of streams that have ended successfully: - // On client side, receiving frame with eos bit set. - // On server side, sending frame with eos bit set. - StreamsSucceeded int64 - // The number of streams that have ended unsuccessfully: - // On client side, termination without receiving frame with eos bit set. - // On server side, termination without sending frame with eos bit set. - StreamsFailed int64 - // The number of messages successfully sent on this socket. - MessagesSent int64 - MessagesReceived int64 - // The number of keep alives sent. This is typically implemented with HTTP/2 - // ping messages. - KeepAlivesSent int64 - // The last time a stream was created by this endpoint. Usually unset for - // servers. - LastLocalStreamCreatedTimestamp time.Time - // The last time a stream was created by the remote endpoint. Usually unset - // for clients. - LastRemoteStreamCreatedTimestamp time.Time - // The last time a message was sent by this endpoint. - LastMessageSentTimestamp time.Time - // The last time a message was received by this endpoint. - LastMessageReceivedTimestamp time.Time - // The amount of window, granted to the local endpoint by the remote endpoint. - // This may be slightly out of date due to network latency. This does NOT - // include stream level or TCP level flow control info. - LocalFlowControlWindow int64 - // The amount of window, granted to the remote endpoint by the local endpoint. - // This may be slightly out of date due to network latency. This does NOT - // include stream level or TCP level flow control info. - RemoteFlowControlWindow int64 - // The locally bound address. - LocalAddr net.Addr - // The remote bound address. May be absent. - RemoteAddr net.Addr - // Optional, represents the name of the remote endpoint, if different than - // the original target name. - RemoteName string - SocketOptions *SocketOptionData - Security credentials.ChannelzSecurityValue -} - -// Socket is the interface that should be satisfied in order to be tracked by -// channelz as Socket. 
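Worth noting: this deleted SocketInternalMetric bundled slow-moving counters together with the two flow-control window sizes. The new Socket type shown earlier instead exposes those rapidly changing values through the EphemeralMetrics func() *EphemeralSocketMetrics callback, so they are sampled only when a channelz query actually asks. A hypothetical sketch of wiring such a callback to a transport (the http2Conn type and its fields are invented for illustration; EphemeralSocketMetrics is from the new socket.go above):

package transport

import "google.golang.org/grpc/internal/channelz"

// http2Conn is a hypothetical transport holding live flow-control state.
type http2Conn struct {
	sendQuota  int64
	localLimit int64
}

// ephemeralMetrics could be assigned to Socket.EphemeralMetrics: it samples
// the fast-moving window sizes on demand instead of tracking them in channelz.
func (t *http2Conn) ephemeralMetrics() *channelz.EphemeralSocketMetrics {
	return &channelz.EphemeralSocketMetrics{
		LocalFlowControlWindow:  t.localLimit,
		RemoteFlowControlWindow: t.sendQuota,
	}
}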
-type Socket interface { - ChannelzMetric() *SocketInternalMetric -} - -type listenSocket struct { - refName string - s Socket - id int64 - pid int64 - cm *channelMap -} - -func (ls *listenSocket) addChild(id int64, e entry) { - logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) -} - -func (ls *listenSocket) deleteChild(id int64) { - logger.Errorf("cannot delete a child (id = %d) from a listen socket", id) -} - -func (ls *listenSocket) triggerDelete() { - ls.cm.deleteEntry(ls.id) - ls.cm.findEntry(ls.pid).deleteChild(ls.id) -} - -func (ls *listenSocket) deleteSelfIfReady() { - logger.Errorf("cannot call deleteSelfIfReady on a listen socket") -} - -func (ls *listenSocket) getParentID() int64 { - return ls.pid -} - -type normalSocket struct { - refName string - s Socket - id int64 - pid int64 - cm *channelMap -} - -func (ns *normalSocket) addChild(id int64, e entry) { - logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) -} - -func (ns *normalSocket) deleteChild(id int64) { - logger.Errorf("cannot delete a child (id = %d) from a normal socket", id) -} - -func (ns *normalSocket) triggerDelete() { - ns.cm.deleteEntry(ns.id) - ns.cm.findEntry(ns.pid).deleteChild(ns.id) -} - -func (ns *normalSocket) deleteSelfIfReady() { - logger.Errorf("cannot call deleteSelfIfReady on a normal socket") -} - -func (ns *normalSocket) getParentID() int64 { - return ns.pid -} - -// ServerMetric defines the info channelz provides for a specific Server, which -// includes ServerInternalMetric and channelz-specific data, such as channelz id, -// child list, etc. -type ServerMetric struct { - // ID is the channelz id of this server. - ID int64 - // RefName is the human readable reference string of this server. - RefName string - // ServerData contains server internal metric reported by the server through - // ChannelzMetric(). - ServerData *ServerInternalMetric - // ListenSockets tracks the listener socket type children of this server in the - // format of a map from socket channelz id to corresponding reference string. - ListenSockets map[int64]string -} - -// ServerInternalMetric defines the struct that the implementor of Server interface -// should return from ChannelzMetric(). -type ServerInternalMetric struct { - // The number of incoming calls started on the server. - CallsStarted int64 - // The number of incoming calls that have completed with an OK status. - CallsSucceeded int64 - // The number of incoming calls that have a completed with a non-OK status. - CallsFailed int64 - // The last time a call was started on the server. - LastCallStartedTimestamp time.Time -} - -// Server is the interface to be satisfied in order to be tracked by channelz as -// Server. 
-type Server interface { - ChannelzMetric() *ServerInternalMetric -} - -type server struct { - refName string - s Server - closeCalled bool - sockets map[int64]string - listenSockets map[int64]string - id int64 - cm *channelMap -} - -func (s *server) addChild(id int64, e entry) { - switch v := e.(type) { - case *normalSocket: - s.sockets[id] = v.refName - case *listenSocket: - s.listenSockets[id] = v.refName - default: - logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) - } -} - -func (s *server) deleteChild(id int64) { - delete(s.sockets, id) - delete(s.listenSockets, id) - s.deleteSelfIfReady() -} - -func (s *server) triggerDelete() { - s.closeCalled = true - s.deleteSelfIfReady() -} - -func (s *server) deleteSelfIfReady() { - if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { - return - } - s.cm.deleteEntry(s.id) -} - -func (s *server) getParentID() int64 { - return 0 -} - -type tracedChannel interface { - getChannelTrace() *channelTrace - incrTraceRefCount() - decrTraceRefCount() - getRefName() string -} - -type channelTrace struct { - cm *channelMap - clearCalled bool - createdTime time.Time - eventCount int64 - mu sync.Mutex - events []*TraceEvent -} - -func (c *channelTrace) append(e *TraceEvent) { - c.mu.Lock() - if len(c.events) == getMaxTraceEntry() { - del := c.events[0] - c.events = c.events[1:] - if del.RefID != 0 { - // start recursive cleanup in a goroutine to not block the call originated from grpc. - go func() { - // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. - c.cm.mu.Lock() - c.cm.decrTraceRefCount(del.RefID) - c.cm.mu.Unlock() - }() - } - } - e.Timestamp = time.Now() - c.events = append(c.events, e) - c.eventCount++ - c.mu.Unlock() -} - -func (c *channelTrace) clear() { - if c.clearCalled { - return - } - c.clearCalled = true - c.mu.Lock() - for _, e := range c.events { - if e.RefID != 0 { - // caller should have already held the c.cm.mu lock. - c.cm.decrTraceRefCount(e.RefID) - } - } - c.mu.Unlock() -} - -// Severity is the severity level of a trace event. -// The canonical enumeration of all valid values is here: -// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. -type Severity int - -const ( - // CtUnknown indicates unknown severity of a trace event. - CtUnknown Severity = iota - // CtInfo indicates info level severity of a trace event. - CtInfo - // CtWarning indicates warning level severity of a trace event. - CtWarning - // CtError indicates error level severity of a trace event. - CtError -) - -// RefChannelType is the type of the entity being referenced in a trace event. -type RefChannelType int - -const ( - // RefUnknown indicates an unknown entity type, the zero value for this type. - RefUnknown RefChannelType = iota - // RefChannel indicates the referenced entity is a Channel. - RefChannel - // RefSubChannel indicates the referenced entity is a SubChannel. - RefSubChannel - // RefServer indicates the referenced entity is a Server. - RefServer - // RefListenSocket indicates the referenced entity is a ListenSocket. - RefListenSocket - // RefNormalSocket indicates the referenced entity is a NormalSocket. 
- RefNormalSocket -) - -var refChannelTypeToString = map[RefChannelType]string{ - RefUnknown: "Unknown", - RefChannel: "Channel", - RefSubChannel: "SubChannel", - RefServer: "Server", - RefListenSocket: "ListenSocket", - RefNormalSocket: "NormalSocket", -} - -func (r RefChannelType) String() string { - return refChannelTypeToString[r] -} - -func (c *channelTrace) dumpData() *ChannelTrace { - c.mu.Lock() - ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} - ct.Events = c.events[:len(c.events)] - c.mu.Unlock() - return ct -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go deleted file mode 100644 index 98288c3f..00000000 --- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +++ /dev/null @@ -1,37 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -import ( - "syscall" -) - -// GetSocketOption gets the socket option info of the conn. -func GetSocketOption(socket any) *SocketOptionData { - c, ok := socket.(syscall.Conn) - if !ok { - return nil - } - data := &SocketOptionData{} - if rawConn, err := c.SyscallConn(); err == nil { - rawConn.Control(data.Getsockopt) - return data - } - return nil -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go deleted file mode 100644 index b5568b22..00000000 --- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +++ /dev/null @@ -1,27 +0,0 @@ -//go:build !linux -// +build !linux - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -// GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c any) *SocketOptionData { - return nil -} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 685a3cb4..d9064871 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -28,9 +28,6 @@ import ( var ( // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). 
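GetSocketOption, relocated above from util_linux.go into syscall_linux.go, relies on the standard syscall.Conn pattern: SyscallConn() yields a RawConn whose Control method runs a callback with the raw file descriptor while the runtime keeps the fd valid. A minimal sketch of the same pattern, hypothetical and reading SO_RCVBUF rather than the TCP info channelz collects:

//go:build linux

package sockopt

import (
	"fmt"
	"net"
	"syscall"

	"golang.org/x/sys/unix"
)

// recvBufSize reads SO_RCVBUF from a connection's raw fd, using the same
// syscall.Conn / Control pattern as channelz.GetSocketOption.
func recvBufSize(conn net.Conn) (int, error) {
	sc, ok := conn.(syscall.Conn)
	if !ok {
		return 0, fmt.Errorf("connection does not expose a raw fd")
	}
	raw, err := sc.SyscallConn()
	if err != nil {
		return 0, err
	}
	var size int
	var opErr error
	if err := raw.Control(func(fd uintptr) {
		size, opErr = unix.GetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_RCVBUF)
	}); err != nil {
		return 0, err
	}
	return size, opErr
}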
TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) - // AdvertiseCompressors is set if registered compressor should be advertised - // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). - AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true) // RingHashCap indicates the maximum ring size which defaults to 4096 // entries but may be overridden by setting the environment variable // "GRPC_RING_HASH_CAP". This does not override the default bounds @@ -43,6 +40,12 @@ var ( // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS // handshakes that can be performed. ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) + // EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled + // should be rejected. The HTTP/2 protocol requires ALPN to be enabled, this + // option is present for backward compatibility. This option may be overridden + // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" + // or "false". + EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go deleted file mode 100644 index 0126d6b5..00000000 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ /dev/null @@ -1,100 +0,0 @@ -//go:build !go1.21 - -// TODO: when this file is deleted (after Go 1.20 support is dropped), delete -// all of grpcrand and call the rand package directly. - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcrand implements math/rand functions in a concurrent-safe way -// with a global random source, independent of math/rand's global source. -package grpcrand - -import ( - "math/rand" - "sync" - "time" -) - -var ( - r = rand.New(rand.NewSource(time.Now().UnixNano())) - mu sync.Mutex -) - -// Int implements rand.Int on the grpcrand global source. -func Int() int { - mu.Lock() - defer mu.Unlock() - return r.Int() -} - -// Int63n implements rand.Int63n on the grpcrand global source. -func Int63n(n int64) int64 { - mu.Lock() - defer mu.Unlock() - return r.Int63n(n) -} - -// Intn implements rand.Intn on the grpcrand global source. -func Intn(n int) int { - mu.Lock() - defer mu.Unlock() - return r.Intn(n) -} - -// Int31n implements rand.Int31n on the grpcrand global source. -func Int31n(n int32) int32 { - mu.Lock() - defer mu.Unlock() - return r.Int31n(n) -} - -// Float64 implements rand.Float64 on the grpcrand global source. -func Float64() float64 { - mu.Lock() - defer mu.Unlock() - return r.Float64() -} - -// Uint64 implements rand.Uint64 on the grpcrand global source. -func Uint64() uint64 { - mu.Lock() - defer mu.Unlock() - return r.Uint64() -} - -// Uint32 implements rand.Uint32 on the grpcrand global source. 
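The grpcrand deletion here is enabled by the toolchain bump: the wrapper existed to give older Go versions a mutex-guarded, self-seeded source, but the top-level math/rand functions are already safe for concurrent use, and since Go 1.20 the global source is seeded automatically, which is why the go1.21 variant below simply delegated to math/rand before the whole package was dropped. Callers can use the package directly:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// On Go 1.20+ the global source is auto-seeded, and the top-level
	// functions are safe for concurrent use; no wrapper is needed.
	fmt.Println(rand.Intn(10))     // pseudo-random int in [0, 10)
	fmt.Println(rand.Int63n(1000)) // pseudo-random int64 in [0, 1000)
	rand.Shuffle(4, func(i, j int) { /* swap elements i and j */ })
}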
-func Uint32() uint32 { - mu.Lock() - defer mu.Unlock() - return r.Uint32() -} - -// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. -func ExpFloat64() float64 { - mu.Lock() - defer mu.Unlock() - return r.ExpFloat64() -} - -// Shuffle implements rand.Shuffle on the grpcrand global source. -var Shuffle = func(n int, f func(int, int)) { - mu.Lock() - defer mu.Unlock() - r.Shuffle(n, f) -} diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go deleted file mode 100644 index c37299af..00000000 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go +++ /dev/null @@ -1,73 +0,0 @@ -//go:build go1.21 - -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcrand implements math/rand functions in a concurrent-safe way -// with a global random source, independent of math/rand's global source. -package grpcrand - -import "math/rand" - -// This implementation will be used for Go version 1.21 or newer. -// For older versions, the original implementation with mutex will be used. - -// Int implements rand.Int on the grpcrand global source. -func Int() int { - return rand.Int() -} - -// Int63n implements rand.Int63n on the grpcrand global source. -func Int63n(n int64) int64 { - return rand.Int63n(n) -} - -// Intn implements rand.Intn on the grpcrand global source. -func Intn(n int) int { - return rand.Intn(n) -} - -// Int31n implements rand.Int31n on the grpcrand global source. -func Int31n(n int32) int32 { - return rand.Int31n(n) -} - -// Float64 implements rand.Float64 on the grpcrand global source. -func Float64() float64 { - return rand.Float64() -} - -// Uint64 implements rand.Uint64 on the grpcrand global source. -func Uint64() uint64 { - return rand.Uint64() -} - -// Uint32 implements rand.Uint32 on the grpcrand global source. -func Uint32() uint32 { - return rand.Uint32() -} - -// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. -func ExpFloat64() float64 { - return rand.ExpFloat64() -} - -// Shuffle implements rand.Shuffle on the grpcrand global source. -var Shuffle = func(n int, f func(int, int)) { - rand.Shuffle(n, f) -} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go index 9f409096..e8d86698 100644 --- a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go @@ -20,8 +20,6 @@ package grpcutil import ( "strings" - - "google.golang.org/grpc/internal/envconfig" ) // RegisteredCompressorNames holds names of the registered compressors. @@ -40,8 +38,5 @@ func IsCompressorNameRegistered(name string) bool { // RegisteredCompressors returns a string of registered compressor names // separated by comma. 
func RegisteredCompressors() string { - if !envconfig.AdvertiseCompressors { - return "" - } return strings.Join(RegisteredCompressorNames, ",") } diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 6c7ea6a5..5d665398 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -106,6 +106,14 @@ var ( // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. ClearGlobalDialOptions func() + + // AddGlobalPerTargetDialOptions adds a PerTargetDialOption that will be + // configured for newly created ClientConns. + AddGlobalPerTargetDialOptions any // func (opt any) + // ClearGlobalPerTargetDialOptions clears the slice of global late apply + // dial options. + ClearGlobalPerTargetDialOptions func() + // JoinDialOptions combines the dial options passed as arguments into a // single dial option. JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption @@ -126,7 +134,8 @@ var ( // deleted or changed. BinaryLogger any // func(binarylog.Logger) grpc.ServerOption - // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn + // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a + // provided grpc.ClientConn. SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using @@ -184,21 +193,25 @@ var ( ChannelzTurnOffForTesting func() - // TriggerXDSResourceNameNotFoundForTesting triggers the resource-not-found - // error for a given resource type and name. This is usually triggered when - // the associated watch timer fires. For testing purposes, having this - // function makes events more predictable than relying on timer events. - TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error + // TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to + // invoke resource-not-found error for the given resource type and name. + TriggerXDSResourceNotFoundForTesting any // func(xdsclient.XDSClient, xdsresource.Type, string) error - // TriggerXDSResourceNotFoundClient invokes the testing xDS Client singleton - // to invoke resource not found for a resource type name and resource name. - TriggerXDSResourceNameNotFoundClient any // func(string, string) error - - // FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD. + // FromOutgoingContextRaw returns the un-merged, intermediary contents of + // metadata.rawMD. FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool) + + // UserSetDefaultScheme is set to true if the user has overridden the + // default resolver scheme. + UserSetDefaultScheme bool = false + + // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n + // is the number of elements. swap swaps the elements with indexes i and j. + ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) ) -// HealthChecker defines the signature of the client-side LB channel health checking function. +// HealthChecker defines the signature of the client-side LB channel health +// checking function. 
// // The implementation is expected to create a health checking RPC stream by // calling newStream(), watch for the health status of serviceName, and report diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go index 52cfab1b..dbee7a60 100644 --- a/vendor/google.golang.org/grpc/internal/pretty/pretty.go +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -24,9 +24,8 @@ import ( "encoding/json" "fmt" - protov1 "github.com/golang/protobuf/proto" "google.golang.org/protobuf/encoding/protojson" - protov2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/protoadapt" ) const jsonIndent = " " @@ -35,21 +34,14 @@ const jsonIndent = " " // // If marshal fails, it falls back to fmt.Sprintf("%+v"). func ToJSON(e any) string { - switch ee := e.(type) { - case protov1.Message: - mm := protojson.MarshalOptions{Indent: jsonIndent} - ret, err := mm.Marshal(protov1.MessageV2(ee)) - if err != nil { - // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 - // messages are not imported, and this will fail because the message - // is not found. - return fmt.Sprintf("%+v", ee) - } - return string(ret) - case protov2.Message: + if ee, ok := e.(protoadapt.MessageV1); ok { + e = protoadapt.MessageV2Of(ee) + } + + if ee, ok := e.(protoadapt.MessageV2); ok { mm := protojson.MarshalOptions{ - Multiline: true, Indent: jsonIndent, + Multiline: true, } ret, err := mm.Marshal(ee) if err != nil { @@ -59,13 +51,13 @@ func ToJSON(e any) string { return fmt.Sprintf("%+v", ee) } return string(ret) - default: - ret, err := json.MarshalIndent(ee, "", jsonIndent) - if err != nil { - return fmt.Sprintf("%+v", ee) - } - return string(ret) } + + ret, err := json.MarshalIndent(e, "", jsonIndent) + if err != nil { + return fmt.Sprintf("%+v", e) + } + return string(ret) } // FormatJSON formats the input json bytes with indentation. diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index b66dcb21..4552db16 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -24,6 +24,7 @@ import ( "context" "encoding/json" "fmt" + "math/rand" "net" "os" "strconv" @@ -35,21 +36,35 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/resolver/dns/internal" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) -// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB -// addresses from SRV records. Must not be changed after init time. -var EnableSRVLookups = false +var ( + // EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB + // addresses from SRV records. Must not be changed after init time. + EnableSRVLookups = false -var logger = grpclog.Component("dns") + // MinResolutionInterval is the minimum interval at which re-resolutions are + // allowed. This helps to prevent excessive re-resolution. + MinResolutionInterval = 30 * time.Second + + // ResolvingTimeout specifies the maximum duration for a DNS resolution request. + // If the timeout expires before a response is received, the request will be canceled. + // + // It is recommended to set this value at application startup. 
Avoid modifying this variable + // after initialization as it's not thread-safe for concurrent modification. + ResolvingTimeout = 30 * time.Second + + logger = grpclog.Component("dns") +) func init() { resolver.Register(NewBuilder()) internal.TimeAfterFunc = time.After + internal.TimeNowFunc = time.Now + internal.TimeUntilFunc = time.Until internal.NewNetResolver = newNetResolver internal.AddressDialer = addressDialer } @@ -196,12 +211,12 @@ func (d *dnsResolver) watcher() { err = d.cc.UpdateState(*state) } - var waitTime time.Duration + var nextResolutionTime time.Time if err == nil { // Success resolving, wait for the next ResolveNow. However, also wait 30 // seconds at the very least to prevent constantly re-resolving. backoffIndex = 1 - waitTime = internal.MinResolutionRate + nextResolutionTime = internal.TimeNowFunc().Add(MinResolutionInterval) select { case <-d.ctx.Done(): return @@ -210,29 +225,29 @@ func (d *dnsResolver) watcher() { } else { // Poll on an error found in DNS Resolver or an error received from // ClientConn. - waitTime = backoff.DefaultExponential.Backoff(backoffIndex) + nextResolutionTime = internal.TimeNowFunc().Add(backoff.DefaultExponential.Backoff(backoffIndex)) backoffIndex++ } select { case <-d.ctx.Done(): return - case <-internal.TimeAfterFunc(waitTime): + case <-internal.TimeAfterFunc(internal.TimeUntilFunc(nextResolutionTime)): } } } -func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { +func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) { if !EnableSRVLookups { return nil, nil } var newAddrs []resolver.Address - _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host) + _, srvs, err := d.resolver.LookupSRV(ctx, "grpclb", "tcp", d.host) if err != nil { err = handleDNSError(err, "SRV") // may become nil return nil, err } for _, s := range srvs { - lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) + lbAddrs, err := d.resolver.LookupHost(ctx, s.Target) if err != nil { err = handleDNSError(err, "A") // may become nil if err == nil { @@ -269,8 +284,8 @@ func handleDNSError(err error, lookupType string) error { return err } -func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { - ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host) +func (d *dnsResolver) lookupTXT(ctx context.Context) *serviceconfig.ParseResult { + ss, err := d.resolver.LookupTXT(ctx, txtPrefix+d.host) if err != nil { if envconfig.TXTErrIgnore { return nil @@ -297,8 +312,8 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { return d.cc.ParseServiceConfig(sc) } -func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { - addrs, err := d.resolver.LookupHost(d.ctx, d.host) +func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error) { + addrs, err := d.resolver.LookupHost(ctx, d.host) if err != nil { err = handleDNSError(err, "A") return nil, err @@ -316,8 +331,10 @@ func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { } func (d *dnsResolver) lookup() (*resolver.State, error) { - srv, srvErr := d.lookupSRV() - addrs, hostErr := d.lookupHost() + ctx, cancel := context.WithTimeout(d.ctx, ResolvingTimeout) + defer cancel() + srv, srvErr := d.lookupSRV(ctx) + addrs, hostErr := d.lookupHost(ctx) if hostErr != nil && (srvErr != nil || len(srv) == 0) { return nil, hostErr } @@ -327,7 +344,7 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) } if !d.disableServiceConfig { - state.ServiceConfig = 
d.lookupTXT() + state.ServiceConfig = d.lookupTXT(ctx) } return &state, nil } @@ -408,7 +425,7 @@ func chosenByPercentage(a *int) bool { if a == nil { return true } - return grpcrand.Intn(100)+1 <= *a + return rand.Intn(100)+1 <= *a } func canaryingSC(js string) string { diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go index c7fc557d..c0eae4f5 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go @@ -28,7 +28,7 @@ import ( // NetResolver groups the methods on net.Resolver that are used by the DNS // resolver implementation. This allows the default net.Resolver instance to be -// overidden from tests. +// overridden from tests. type NetResolver interface { LookupHost(ctx context.Context, host string) (addrs []string, err error) LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) @@ -50,16 +50,23 @@ var ( // The following vars are overridden from tests. var ( - // MinResolutionRate is the minimum rate at which re-resolutions are - // allowed. This helps to prevent excessive re-resolution. - MinResolutionRate = 30 * time.Second - // TimeAfterFunc is used by the DNS resolver to wait for the given duration - // to elapse. In non-test code, this is implemented by time.After. In test + // to elapse. In non-test code, this is implemented by time.After. In test // code, this can be used to control the amount of time the resolver is // blocked waiting for the duration to elapse. TimeAfterFunc func(time.Duration) <-chan time.Time + // TimeNowFunc is used by the DNS resolver to get the current time. + // In non-test code, this is implemented by time.Now. In test code, + // this can be used to control the current time for the resolver. + TimeNowFunc func() time.Time + + // TimeUntilFunc is used by the DNS resolver to calculate the remaining + // wait time for re-resolution. In non-test code, this is implemented by + // time.Until. In test code, this can be used to control the remaining + // time for resolver to wait for re-resolution. + TimeUntilFunc func(time.Time) time.Duration + // NewNetResolver returns the net.Resolver instance for the given target. 
NewNetResolver func(string) (NetResolver, error) diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 83c38298..3deadfb4 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -193,7 +193,7 @@ type goAway struct { code http2.ErrCode debugData []byte headsUp bool - closeConn error // if set, loopyWriter will exit, resulting in conn closure + closeConn error // if set, loopyWriter will exit with this error } func (*goAway) isTransportResponseFrame() bool { return false } @@ -336,7 +336,7 @@ func (c *controlBuffer) put(it cbItem) error { return err } -func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) { +func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { var wakeUp bool c.mu.Lock() if c.err != nil { @@ -344,7 +344,7 @@ func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, err return false, c.err } if f != nil { - if !f(it) { // f wasn't successful + if !f() { // f wasn't successful c.mu.Unlock() return false, nil } @@ -495,21 +495,22 @@ type loopyWriter struct { ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error)) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ - side: s, - cbuf: cbuf, - sendQuota: defaultWindowSize, - oiws: defaultWindowSize, - estdStreams: make(map[uint32]*outStream), - activeStreams: newOutStreamList(), - framer: fr, - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), - bdpEst: bdpEst, - conn: conn, - logger: logger, + side: s, + cbuf: cbuf, + sendQuota: defaultWindowSize, + oiws: defaultWindowSize, + estdStreams: make(map[uint32]*outStream), + activeStreams: newOutStreamList(), + framer: fr, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + bdpEst: bdpEst, + conn: conn, + logger: logger, + ssGoAwayHandler: goAwayHandler, } return l } diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index bd39ff9a..4a3ddce2 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -51,14 +51,10 @@ import ( // inside an http.Handler, or writes an HTTP error to w and returns an error. // It requires that the http Server supports HTTP/2. 
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { - if r.ProtoMajor != 2 { - msg := "gRPC requires HTTP/2" - http.Error(w, msg, http.StatusBadRequest) - return nil, errors.New(msg) - } - if r.Method != "POST" { + if r.Method != http.MethodPost { + w.Header().Set("Allow", http.MethodPost) msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) - http.Error(w, msg, http.StatusBadRequest) + http.Error(w, msg, http.StatusMethodNotAllowed) return nil, errors.New(msg) } contentType := r.Header.Get("Content-Type") @@ -69,6 +65,11 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s http.Error(w, msg, http.StatusUnsupportedMediaType) return nil, errors.New(msg) } + if r.ProtoMajor != 2 { + msg := "gRPC requires HTTP/2" + http.Error(w, msg, http.StatusHTTPVersionNotSupported) + return nil, errors.New(msg) + } if _, ok := w.(http.Flusher); !ok { msg := "gRPC requires a ResponseWriter supporting http.Flusher" http.Error(w, msg, http.StatusInternalServerError) diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index eff87996..3c63c706 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -114,11 +114,11 @@ type http2Client struct { streamQuota int64 streamsQuotaAvailable chan struct{} waitingStreams uint32 - nextID uint32 registeredCompressors string // Do not access controlBuf with mu held. mu sync.Mutex // guard the following variables + nextID uint32 state transportState activeStreams map[uint32]*Stream // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. @@ -140,9 +140,7 @@ type http2Client struct { // variable. kpDormant bool - // Fields below are for channelz metric collection. - channelzID *channelz.Identifier - czData *channelzData + channelz *channelz.Socket onClose func(GoAwayReason) @@ -319,6 +317,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts if opts.MaxHeaderListSize != nil { maxHeaderListSize = *opts.MaxHeaderListSize } + t := &http2Client{ ctx: ctx, ctxDone: ctx.Done(), // Cache Done chan. @@ -346,11 +345,25 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts maxConcurrentStreams: defaultMaxStreamsClient, streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), - czData: new(channelzData), keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), onClose: onClose, } + var czSecurity credentials.ChannelzSecurityValue + if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { + czSecurity = au.GetSecurityValue() + } + t.channelz = channelz.RegisterSocket( + &channelz.Socket{ + SocketType: channelz.SocketTypeNormal, + Parent: opts.ChannelzParent, + SocketMetrics: channelz.SocketMetrics{}, + EphemeralMetrics: t.socketMetrics, + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + SocketOptions: channelz.GetSocketOption(t.conn), + Security: czSecurity, + }) t.logger = prefixLoggerForClientTransport(t) // Add peer information to the http2client context. 
t.ctx = peer.NewContext(t.ctx, t.getPeer()) @@ -381,10 +394,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } sh.HandleConn(t.ctx, connBegin) } - t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) - if err != nil { - return nil, err - } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) go t.keepalive() @@ -399,10 +408,10 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerErrCh := make(chan error, 1) go t.reader(readerErrCh) defer func() { - if err == nil { - err = <-readerErrCh - } if err != nil { + // writerDone should be closed since the loopy goroutine + // wouldn't have started in the case this function returns an error. + close(t.writerDone) t.Close(err) } }() @@ -449,8 +458,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts if err := t.framer.writer.Flush(); err != nil { return nil, err } + // Block until the server preface is received successfully or an error occurs. + if err = <-readerErrCh; err != nil { + return nil, err + } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) if err := t.loopy.run(); !isIOError(err) { // Immediately close the connection, as the loopy writer returns // when there are no more active streams and we were draining (the @@ -508,6 +521,17 @@ func (t *http2Client) getPeer() *peer.Peer { } } +// OutgoingGoAwayHandler writes a GOAWAY to the connection. Always returns (false, err) as we want the GoAway +// to be the last frame loopy writes to the transport. +func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.mu.Lock() + defer t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(t.nextID-2, http2.ErrCodeNo, g.debugData); err != nil { + return false, err + } + return false, g.closeConn +} + func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { aud := t.createAudience(callHdr) ri := credentials.RequestInfo{ @@ -756,8 +780,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return ErrConnClosing } if channelz.IsOn() { - atomic.AddInt64(&t.czData.streamsStarted, 1) - atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.StreamsStarted.Add(1) + t.channelz.SocketMetrics.LastLocalStreamCreatedTimestamp.Store(time.Now().UnixNano()) } // If the keepalive goroutine has gone dormant, wake it up. if t.kpDormant { @@ -772,7 +796,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, firstTry := true var ch chan struct{} transportDrainRequired := false - checkForStreamQuota := func(it any) bool { + checkForStreamQuota := func() bool { if t.streamQuota <= 0 { // Can go negative if server decreases it. if firstTry { t.waitingStreams++ @@ -784,23 +808,24 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, t.waitingStreams-- } t.streamQuota-- - h := it.(*headerFrame) - h.streamID = t.nextID - t.nextID += 2 - // Drain client transport if nextID > MaxStreamID which signals gRPC that - // the connection is closed and a new one must be created for subsequent RPCs. 
- transportDrainRequired = t.nextID > MaxStreamID - - s.id = h.streamID - s.fc = &inFlow{limit: uint32(t.initialWindowSize)} t.mu.Lock() if t.state == draining || t.activeStreams == nil { // Can be niled from Close(). t.mu.Unlock() return false // Don't create a stream if the transport is already closed. } + + hdr.streamID = t.nextID + t.nextID += 2 + // Drain client transport if nextID > MaxStreamID which signals gRPC that + // the connection is closed and a new one must be created for subsequent RPCs. + transportDrainRequired = t.nextID > MaxStreamID + + s.id = hdr.streamID + s.fc = &inFlow{limit: uint32(t.initialWindowSize)} t.activeStreams[s.id] = s t.mu.Unlock() + if t.streamQuota > 0 && t.waitingStreams > 0 { select { case t.streamsQuotaAvailable <- struct{}{}: @@ -810,13 +835,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } var hdrListSizeErr error - checkForHeaderListSize := func(it any) bool { + checkForHeaderListSize := func() bool { if t.maxSendHeaderListSize == nil { return true } - hdrFrame := it.(*headerFrame) var sz int64 - for _, f := range hdrFrame.hf { + for _, f := range hdr.hf { if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) return false @@ -825,8 +849,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } for { - success, err := t.controlBuf.executeAndPut(func(it any) bool { - return checkForHeaderListSize(it) && checkForStreamQuota(it) + success, err := t.controlBuf.executeAndPut(func() bool { + return checkForHeaderListSize() && checkForStreamQuota() }, hdr) if err != nil { // Connection closed. @@ -928,16 +952,16 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. t.mu.Unlock() if channelz.IsOn() { if eosReceived { - atomic.AddInt64(&t.czData.streamsSucceeded, 1) + t.channelz.SocketMetrics.StreamsSucceeded.Add(1) } else { - atomic.AddInt64(&t.czData.streamsFailed, 1) + t.channelz.SocketMetrics.StreamsFailed.Add(1) } } }, rst: rst, rstCode: rstCode, } - addBackStreamQuota := func(any) bool { + addBackStreamQuota := func() bool { t.streamQuota++ if t.streamQuota > 0 && t.waitingStreams > 0 { select { @@ -957,7 +981,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // Close kicks off the shutdown process of the transport. This should be called // only once on a transport. Once it is called, the transport should not be -// accessed any more. +// accessed anymore. func (t *http2Client) Close(err error) { t.mu.Lock() // Make sure we only close once. @@ -982,10 +1006,13 @@ func (t *http2Client) Close(err error) { t.kpDormancyCond.Signal() } t.mu.Unlock() - t.controlBuf.finish() + // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the + // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err}) + <-t.writerDone t.cancel() t.conn.Close() - channelz.RemoveEntry(t.channelzID) + channelz.RemoveEntry(t.channelz.ID) // Append info about previous goaways if there were any, since this may be important // for understanding the root cause for this connection to be closed. 
_, goAwayDebugMessage := t.GetGoAwayReason() @@ -1090,7 +1117,7 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { // for the transport and the stream based on the current bdp // estimation. func (t *http2Client) updateFlowControl(n uint32) { - updateIWS := func(any) bool { + updateIWS := func() bool { t.initialWindowSize = int32(n) t.mu.Lock() for _, s := range t.activeStreams { @@ -1243,7 +1270,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { } updateFuncs = append(updateFuncs, updateStreamQuota) } - t.controlBuf.executeAndPut(func(any) bool { + t.controlBuf.executeAndPut(func() bool { for _, f := range updateFuncs { f() } @@ -1708,7 +1735,7 @@ func (t *http2Client) keepalive() { // keepalive timer expired. In both cases, we need to send a ping. if !outstandingPing { if channelz.IsOn() { - atomic.AddInt64(&t.czData.kpCount, 1) + t.channelz.SocketMetrics.KeepAlivesSent.Add(1) } t.controlBuf.put(p) timeoutLeft = t.kp.Timeout @@ -1738,40 +1765,23 @@ func (t *http2Client) GoAway() <-chan struct{} { return t.goAway } -func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { - s := channelz.SocketInternalMetric{ - StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), - StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), - StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), - MessagesSent: atomic.LoadInt64(&t.czData.msgSent), - MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), - KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), - LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), - LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), - LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), - LocalFlowControlWindow: int64(t.fc.getSize()), - SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.localAddr, - RemoteAddr: t.remoteAddr, - // RemoteName : +func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics { + return &channelz.EphemeralSocketMetrics{ + LocalFlowControlWindow: int64(t.fc.getSize()), + RemoteFlowControlWindow: t.getOutFlowWindow(), } - if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { - s.Security = au.GetSecurityValue() - } - s.RemoteFlowControlWindow = t.getOutFlowWindow() - return &s } func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } func (t *http2Client) IncrMsgSent() { - atomic.AddInt64(&t.czData.msgSent, 1) - atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) } func (t *http2Client) IncrMsgRecv() { - atomic.AddInt64(&t.czData.msgRecv, 1) - atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) } func (t *http2Client) getOutFlowWindow() int64 { diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 3839c1ad..b7091165 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -25,6 +25,7 @@ import ( "fmt" "io" "math" + "math/rand" "net" "net/http" "strconv" @@ -43,7 +44,6 @@ import ( "google.golang.org/grpc/codes" 
"google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -118,8 +118,7 @@ type http2Server struct { idle time.Time // Fields below are for channelz metric collection. - channelzID *channelz.Identifier - czData *channelzData + channelz *channelz.Socket bufferPool *bufferPool connectionID uint64 @@ -262,9 +261,24 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, idle: time.Now(), kep: kep, initialWindowSize: iwz, - czData: new(channelzData), bufferPool: newBufferPool(), } + var czSecurity credentials.ChannelzSecurityValue + if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { + czSecurity = au.GetSecurityValue() + } + t.channelz = channelz.RegisterSocket( + &channelz.Socket{ + SocketType: channelz.SocketTypeNormal, + Parent: config.ChannelzParent, + SocketMetrics: channelz.SocketMetrics{}, + EphemeralMetrics: t.socketMetrics, + LocalAddr: t.peer.LocalAddr, + RemoteAddr: t.peer.Addr, + SocketOptions: channelz.GetSocketOption(t.conn), + Security: czSecurity, + }, + ) t.logger = prefixLoggerForServerTransport(t) t.controlBuf = newControlBuffer(t.done) @@ -274,10 +288,6 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, updateFlowControl: t.updateFlowControl, } } - t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.peer.Addr, t.peer.LocalAddr)) - if err != nil { - return nil, err - } t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) t.framer.writer.Flush() @@ -320,8 +330,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) - t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) err := t.loopy.run() close(t.loopyWriterDone) if !isIOError(err) { @@ -334,9 +343,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // closed, would lead to a TCP RST instead of FIN, and the client // encountering errors. For more info: // https://github.com/grpc/grpc-go/issues/5358 + timer := time.NewTimer(time.Second) + defer timer.Stop() select { case <-t.readerDone: - case <-time.After(time.Second): + case <-timer.C: } t.conn.Close() } @@ -592,8 +603,8 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade } t.mu.Unlock() if channelz.IsOn() { - atomic.AddInt64(&t.czData.streamsStarted, 1) - atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.StreamsStarted.Add(1) + t.channelz.SocketMetrics.LastRemoteStreamCreatedTimestamp.Store(time.Now().UnixNano()) } s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) @@ -658,8 +669,14 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { switch frame := frame.(type) { case *http2.MetaHeadersFrame: if err := t.operateHeaders(ctx, frame, handle); err != nil { - t.Close(err) - break + // Any error processing client headers, e.g. invalid stream ID, + // is considered a protocol violation. 
+ t.controlBuf.put(&goAway{ + code: http2.ErrCodeProtocol, + debugData: []byte(err.Error()), + closeConn: err, + }) + continue } case *http2.DataFrame: t.handleData(frame) @@ -842,7 +859,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { } return nil }) - t.controlBuf.executeAndPut(func(any) bool { + t.controlBuf.executeAndPut(func() bool { for _, f := range updateFuncs { f() } @@ -996,12 +1013,13 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) } headerFields = appendHeaderFieldsFromMD(headerFields, s.header) - success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{ + hf := &headerFrame{ streamID: s.id, hf: headerFields, endStream: false, onWrite: t.setResetPingStrikes, - }) + } + success, err := t.controlBuf.executeAndPut(func() bool { return t.checkForHeaderListSize(hf) }, hf) if !success { if err != nil { return err @@ -1190,12 +1208,12 @@ func (t *http2Server) keepalive() { continue } if outstandingPing && kpTimeoutLeft <= 0 { - t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time)) + t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Timeout)) return } if !outstandingPing { if channelz.IsOn() { - atomic.AddInt64(&t.czData.kpCount, 1) + t.channelz.SocketMetrics.KeepAlivesSent.Add(1) } t.controlBuf.put(p) kpTimeoutLeft = t.kp.Timeout @@ -1235,7 +1253,7 @@ func (t *http2Server) Close(err error) { if err := t.conn.Close(); err != nil && t.logger.V(logLevel) { t.logger.Infof("Error closing underlying net.Conn during Close: %v", err) } - channelz.RemoveEntry(t.channelzID) + channelz.RemoveEntry(t.channelz.ID) // Cancel all active streams. for _, s := range streams { s.cancel() @@ -1256,9 +1274,9 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { if channelz.IsOn() { if eosReceived { - atomic.AddInt64(&t.czData.streamsSucceeded, 1) + t.channelz.SocketMetrics.StreamsSucceeded.Add(1) } else { - atomic.AddInt64(&t.czData.streamsFailed, 1) + t.channelz.SocketMetrics.StreamsFailed.Add(1) } } } @@ -1375,38 +1393,21 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { return false, nil } -func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { - s := channelz.SocketInternalMetric{ - StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), - StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), - StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), - MessagesSent: atomic.LoadInt64(&t.czData.msgSent), - MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), - KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), - LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), - LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), - LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), - LocalFlowControlWindow: int64(t.fc.getSize()), - SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.peer.LocalAddr, - RemoteAddr: t.peer.Addr, - // RemoteName : +func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics { + return &channelz.EphemeralSocketMetrics{ + LocalFlowControlWindow: int64(t.fc.getSize()), + RemoteFlowControlWindow: t.getOutFlowWindow(), } - if au, ok := t.peer.AuthInfo.(credentials.ChannelzSecurityInfo); ok { - s.Security = au.GetSecurityValue() - } - s.RemoteFlowControlWindow = t.getOutFlowWindow() - 
return &s } func (t *http2Server) IncrMsgSent() { - atomic.AddInt64(&t.czData.msgSent, 1) - atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) } func (t *http2Server) IncrMsgRecv() { - atomic.AddInt64(&t.czData.msgRecv, 1) - atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) } func (t *http2Server) getOutFlowWindow() int64 { @@ -1439,7 +1440,7 @@ func getJitter(v time.Duration) time.Duration { } // Generate a jitter between +/- 10% of the value. r := int64(v / 10) - j := grpcrand.Int63n(2*r) - r + j := rand.Int63n(2*r) - r return time.Duration(j) } diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index dc29d590..39cef3bd 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -418,10 +418,9 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu return f } -func getWriteBufferPool(writeBufferSize int) *sync.Pool { +func getWriteBufferPool(size int) *sync.Pool { writeBufferMutex.Lock() defer writeBufferMutex.Unlock() - size := writeBufferSize * 2 pool, ok := writeBufferPoolMap[size] if ok { return pool diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index b7b8fec1..4b39c0ad 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -28,6 +28,7 @@ import ( "fmt" "io" "net" + "strings" "sync" "sync/atomic" "time" @@ -303,7 +304,7 @@ func (s *Stream) isHeaderSent() bool { } // updateHeaderSent updates headerSent and returns true -// if it was alreay set. It is valid only on server-side. +// if it was already set. It is valid only on server-side. func (s *Stream) updateHeaderSent() bool { return atomic.SwapUint32(&s.headerSent, 1) == 1 } @@ -362,8 +363,12 @@ func (s *Stream) SendCompress() string { // ClientAdvertisedCompressors returns the compressor names advertised by the // client via grpc-accept-encoding header. -func (s *Stream) ClientAdvertisedCompressors() string { - return s.clientAdvertisedCompressors +func (s *Stream) ClientAdvertisedCompressors() []string { + values := strings.Split(s.clientAdvertisedCompressors, ",") + for i, v := range values { + values[i] = strings.TrimSpace(v) + } + return values } // Done returns a channel which is closed when it receives the final status @@ -566,7 +571,7 @@ type ServerConfig struct { WriteBufferSize int ReadBufferSize int SharedWriteBuffer bool - ChannelzParentID *channelz.Identifier + ChannelzParent *channelz.Server MaxHeaderListSize *uint32 HeaderTableSize *uint32 } @@ -601,8 +606,8 @@ type ConnectOptions struct { ReadBufferSize int // SharedWriteBuffer indicates whether connections should reuse write buffer SharedWriteBuffer bool - // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. - ChannelzParentID *channelz.Identifier + // ChannelzParent sets the addrConn id which initiated the creation of this client transport. + ChannelzParent *channelz.SubChannel // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. @@ -815,30 +820,6 @@ const ( GoAwayTooManyPings GoAwayReason = 2 ) -// channelzData is used to store channelz related data for http2Client and http2Server. -// These fields cannot be embedded in the original structs (e.g. http2Client), since to do atomic -// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. -// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. -type channelzData struct { - kpCount int64 - // The number of streams that have started, including already finished ones. - streamsStarted int64 - // Client side: The number of streams that have ended successfully by receiving - // EoS bit set frame from server. - // Server side: The number of streams that have ended successfully by sending - // frame with EoS bit set. - streamsSucceeded int64 - streamsFailed int64 - // lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type - // instead of time.Time since it's more costly to atomically update time.Time variable than int64 - // variable. The same goes for lastMsgSentTime and lastMsgRecvTime. - lastStreamCreatedTime int64 - msgSent int64 - msgRecv int64 - lastMsgSentTime int64 - lastMsgRecvTime int64 -} - // ContextErr converts the error from context package into a status error. func ContextErr(err error) error { switch err { diff --git a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/vendor/google.golang.org/grpc/internal/xds/xds.go similarity index 89% rename from vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go rename to vendor/google.golang.org/grpc/internal/xds/xds.go index e8b49277..024c388b 100644 --- a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go +++ b/vendor/google.golang.org/grpc/internal/xds/xds.go @@ -14,7 +14,9 @@ * limitations under the License. */ -package internal +// Package xds contains methods to Get/Set handshake cluster names. It is separated +// out from the top level /internal package to avoid circular dependencies. +package xds import ( "google.golang.org/grpc/attributes" diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go index a821ff9b..499a49c8 100644 --- a/vendor/google.golang.org/grpc/peer/peer.go +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -22,7 +22,9 @@ package peer import ( "context" + "fmt" "net" + "strings" "google.golang.org/grpc/credentials" ) @@ -39,6 +41,34 @@ type Peer struct { AuthInfo credentials.AuthInfo } +// String ensures the Peer types implements the Stringer interface in order to +// allow to print a context with a peerKey value effectively. +func (p *Peer) String() string { + if p == nil { + return "Peer<nil>" + } + sb := &strings.Builder{} + sb.WriteString("Peer{") + if p.Addr != nil { + fmt.Fprintf(sb, "Addr: '%s', ", p.Addr.String()) + } else { + fmt.Fprintf(sb, "Addr: <nil>, ") + } + if p.LocalAddr != nil { + fmt.Fprintf(sb, "LocalAddr: '%s', ", p.LocalAddr.String()) + } else { + fmt.Fprintf(sb, "LocalAddr: <nil>, ") + } + if p.AuthInfo != nil { + fmt.Fprintf(sb, "AuthInfo: '%s'", p.AuthInfo.AuthType()) + } else { + fmt.Fprintf(sb, "AuthInfo: <nil>") + } + sb.WriteString("}") + + return sb.String() +} + type peerKey struct{} // NewContext creates a new context with peer information attached.
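Note on the peer.go change above: with the new String method, a Peer attached to an RPC context renders as one structured line instead of a raw struct dump. A minimal usage sketch (not part of the vendored diff; the addresses and the printed output below are illustrative):

package main

import (
	"context"
	"fmt"
	"net"

	"google.golang.org/grpc/peer"
)

func main() {
	// Build a peer the way a server transport would before attaching it to
	// the RPC context; the concrete addresses here are made up.
	p := &peer.Peer{
		Addr:      &net.TCPAddr{IP: net.IPv4(10, 0, 0, 1), Port: 54321},
		LocalAddr: &net.TCPAddr{IP: net.IPv4(10, 0, 0, 2), Port: 8443},
	}
	ctx := peer.NewContext(context.Background(), p)

	// A handler or interceptor recovering the peer now gets a structured
	// rendering from %v, e.g.:
	// Peer{Addr: '10.0.0.1:54321', LocalAddr: '10.0.0.2:8443', AuthInfo: <nil>}
	if pr, ok := peer.FromContext(ctx); ok {
		fmt.Printf("request from %v\n", pr)
	}
}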
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index bf56faa7..bdaa2130 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -20,8 +20,9 @@ package grpc import ( "context" + "fmt" "io" - "sync" + "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" @@ -32,35 +33,43 @@ import ( "google.golang.org/grpc/status" ) +// pickerGeneration stores a picker and a channel used to signal that a picker +// newer than this one is available. +type pickerGeneration struct { + // picker is the picker produced by the LB policy. May be nil if a picker + // has never been produced. + picker balancer.Picker + // blockingCh is closed when the picker has been invalidated because there + // is a new one available. + blockingCh chan struct{} +} + // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick // actions and unblock when there's a picker update. type pickerWrapper struct { - mu sync.Mutex - done bool - blockingCh chan struct{} - picker balancer.Picker + // If pickerGen holds a nil pointer, the pickerWrapper is closed. + pickerGen atomic.Pointer[pickerGeneration] statsHandlers []stats.Handler // to record blocking picker calls } func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { - return &pickerWrapper{ - blockingCh: make(chan struct{}), + pw := &pickerWrapper{ statsHandlers: statsHandlers, } + pw.pickerGen.Store(&pickerGeneration{ + blockingCh: make(chan struct{}), + }) + return pw } -// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. +// updatePicker is called by UpdateState calls from the LB policy. It +// unblocks all blocked pick. func (pw *pickerWrapper) updatePicker(p balancer.Picker) { - pw.mu.Lock() - if pw.done { - pw.mu.Unlock() - return - } - pw.picker = p - // pw.blockingCh should never be nil. - close(pw.blockingCh) - pw.blockingCh = make(chan struct{}) - pw.mu.Unlock() + old := pw.pickerGen.Swap(&pickerGeneration{ + picker: p, + blockingCh: make(chan struct{}), + }) + close(old.blockingCh) } // doneChannelzWrapper performs the following: @@ -97,27 +106,24 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. var lastPickErr error for { - pw.mu.Lock() - if pw.done { - pw.mu.Unlock() + pg := pw.pickerGen.Load() + if pg == nil { return nil, balancer.PickResult{}, ErrClientConnClosing } - - if pw.picker == nil { - ch = pw.blockingCh + if pg.picker == nil { + ch = pg.blockingCh } - if ch == pw.blockingCh { + if ch == pg.blockingCh { // This could happen when either: // - pw.picker is nil (the previous if condition), or - // - has called pick on the current picker. - pw.mu.Unlock() + // - we have already called pick on the current picker. select { case <-ctx.Done(): var errStr string if lastPickErr != nil { errStr = "latest balancer error: " + lastPickErr.Error() } else { - errStr = ctx.Err().Error() + errStr = fmt.Sprintf("received context error while waiting for new LB policy update: %s", ctx.Err().Error()) } switch ctx.Err() { case context.DeadlineExceeded: @@ -144,9 +150,8 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } } - ch = pw.blockingCh - p := pw.picker - pw.mu.Unlock() + ch = pg.blockingCh + p := pg.picker pickResult, err := p.Pick(info) if err != nil { @@ -196,24 +201,15 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. 
} func (pw *pickerWrapper) close() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.done = true - close(pw.blockingCh) + old := pw.pickerGen.Swap(nil) + close(old.blockingCh) } // reset clears the pickerWrapper and prepares it for being used again when idle // mode is exited. func (pw *pickerWrapper) reset() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.blockingCh = make(chan struct{}) + old := pw.pickerGen.Swap(&pickerGeneration{blockingCh: make(chan struct{})}) + close(old.blockingCh) } // dropError is a wrapper error that indicates the LB policy wishes to drop the diff --git a/vendor/google.golang.org/grpc/reflection/adapt.go b/vendor/google.golang.org/grpc/reflection/adapt.go index 33b907a3..6997e474 100644 --- a/vendor/google.golang.org/grpc/reflection/adapt.go +++ b/vendor/google.golang.org/grpc/reflection/adapt.go @@ -19,10 +19,11 @@ package reflection import ( + "google.golang.org/grpc/reflection/internal" + v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" - v1alphareflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" ) // asV1Alpha returns an implementation of the v1alpha version of the reflection @@ -44,7 +45,7 @@ type v1AlphaServerStreamAdapter struct { } func (s v1AlphaServerStreamAdapter) Send(response *v1reflectionpb.ServerReflectionResponse) error { - return s.ServerReflection_ServerReflectionInfoServer.Send(v1ToV1AlphaResponse(response)) + return s.ServerReflection_ServerReflectionInfoServer.Send(internal.V1ToV1AlphaResponse(response)) } func (s v1AlphaServerStreamAdapter) Recv() (*v1reflectionpb.ServerReflectionRequest, error) { @@ -52,136 +53,5 @@ func (s v1AlphaServerStreamAdapter) Recv() (*v1reflectionpb.ServerReflectionRequ if err != nil { return nil, err } - return v1AlphaToV1Request(resp), nil -} - -func v1ToV1AlphaResponse(v1 *v1reflectionpb.ServerReflectionResponse) *v1alphareflectionpb.ServerReflectionResponse { - var v1alpha v1alphareflectionpb.ServerReflectionResponse - v1alpha.ValidHost = v1.ValidHost - if v1.OriginalRequest != nil { - v1alpha.OriginalRequest = v1ToV1AlphaRequest(v1.OriginalRequest) - } - switch mr := v1.MessageResponse.(type) { - case *v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse: - if mr != nil { - v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1alphareflectionpb.FileDescriptorResponse{ - FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), - }, - } - } - case *v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse: - if mr != nil { - v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ - AllExtensionNumbersResponse: &v1alphareflectionpb.ExtensionNumberResponse{ - BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), - ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), - }, - } - } - case *v1reflectionpb.ServerReflectionResponse_ListServicesResponse: - if mr != nil { - svcs := make([]*v1alphareflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService())) - for i, svc := range mr.ListServicesResponse.GetService() { - svcs[i] = &v1alphareflectionpb.ServiceResponse{ - Name: svc.GetName(), - } - } - v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse{ - 
ListServicesResponse: &v1alphareflectionpb.ListServiceResponse{ - Service: svcs, - }, - } - } - case *v1reflectionpb.ServerReflectionResponse_ErrorResponse: - if mr != nil { - v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1alphareflectionpb.ErrorResponse{ - ErrorCode: mr.ErrorResponse.GetErrorCode(), - ErrorMessage: mr.ErrorResponse.GetErrorMessage(), - }, - } - } - default: - // no value set - } - return &v1alpha -} - -func v1AlphaToV1Request(v1alpha *v1alphareflectionpb.ServerReflectionRequest) *v1reflectionpb.ServerReflectionRequest { - var v1 v1reflectionpb.ServerReflectionRequest - v1.Host = v1alpha.Host - switch mr := v1alpha.MessageRequest.(type) { - case *v1alphareflectionpb.ServerReflectionRequest_FileByFilename: - v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileByFilename{ - FileByFilename: mr.FileByFilename, - } - case *v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol: - v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingSymbol{ - FileContainingSymbol: mr.FileContainingSymbol, - } - case *v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension: - if mr.FileContainingExtension != nil { - v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingExtension{ - FileContainingExtension: &v1reflectionpb.ExtensionRequest{ - ContainingType: mr.FileContainingExtension.GetContainingType(), - ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), - }, - } - } - case *v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: - v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ - AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, - } - case *v1alphareflectionpb.ServerReflectionRequest_ListServices: - v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_ListServices{ - ListServices: mr.ListServices, - } - default: - // no value set - } - return &v1 -} - -func v1ToV1AlphaRequest(v1 *v1reflectionpb.ServerReflectionRequest) *v1alphareflectionpb.ServerReflectionRequest { - var v1alpha v1alphareflectionpb.ServerReflectionRequest - v1alpha.Host = v1.Host - switch mr := v1.MessageRequest.(type) { - case *v1reflectionpb.ServerReflectionRequest_FileByFilename: - if mr != nil { - v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileByFilename{ - FileByFilename: mr.FileByFilename, - } - } - case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: - if mr != nil { - v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol{ - FileContainingSymbol: mr.FileContainingSymbol, - } - } - case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: - if mr != nil { - v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension{ - FileContainingExtension: &v1alphareflectionpb.ExtensionRequest{ - ContainingType: mr.FileContainingExtension.GetContainingType(), - ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), - }, - } - } - case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: - if mr != nil { - v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ - AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, - } - } - case *v1reflectionpb.ServerReflectionRequest_ListServices: - if mr != nil { - v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_ListServices{ - ListServices: mr.ListServices, - } - } - 
default: - // no value set - } - return &v1alpha + return internal.V1AlphaToV1Request(resp), nil } diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go index 8953c9d8..666eda8e 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go @@ -21,7 +21,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // source: grpc/reflection/v1/reflection.proto diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go index d6cdd5b5..17d21fde 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go @@ -21,7 +21,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v4.25.2 // source: grpc/reflection/v1/reflection.proto @@ -36,8 +36,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 const ( ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1.ServerReflection/ServerReflectionInfo" @@ -61,11 +61,12 @@ func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClie } func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { - stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, opts...) + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &serverReflectionServerReflectionInfoClient{stream} + x := &serverReflectionServerReflectionInfoClient{ClientStream: stream} return x, nil } @@ -120,7 +121,7 @@ func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectio } func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream}) + return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{ServerStream: stream}) } type ServerReflection_ServerReflectionInfoServer interface { diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go index 929733e7..cd032ace 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // grpc/reflection/v1alpha/reflection.proto is a deprecated file. diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go index ef691406..93886e38 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v4.25.2 // grpc/reflection/v1alpha/reflection.proto is a deprecated file. @@ -33,8 +33,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 const ( ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo" @@ -58,11 +58,12 @@ func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClie } func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { - stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, opts...) + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &serverReflectionServerReflectionInfoClient{stream} + x := &serverReflectionServerReflectionInfoClient{ClientStream: stream} return x, nil } @@ -117,7 +118,7 @@ func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectio } func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream}) + return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{ServerStream: stream}) } type ServerReflection_ServerReflectionInfoServer interface { diff --git a/vendor/google.golang.org/grpc/reflection/internal/internal.go b/vendor/google.golang.org/grpc/reflection/internal/internal.go new file mode 100644 index 00000000..36ee6507 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/internal/internal.go @@ -0,0 +1,436 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains code that is shared by both reflection package and +// the test package. 
The packages are split in this way in order to avoid a +// dependency on the deprecated package github.com/golang/protobuf. +package internal + +import ( + "io" + "sort" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + v1alphareflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" +) + +// ServiceInfoProvider is an interface used to retrieve metadata about the +// services to expose. +type ServiceInfoProvider interface { + GetServiceInfo() map[string]grpc.ServiceInfo +} + +// ExtensionResolver is the interface used to query details about extensions. +// This interface is satisfied by protoregistry.GlobalTypes. +type ExtensionResolver interface { + protoregistry.ExtensionTypeResolver + RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) +} + +// ServerReflectionServer is the server API for ServerReflection service. +type ServerReflectionServer struct { + v1alphareflectiongrpc.UnimplementedServerReflectionServer + S ServiceInfoProvider + DescResolver protodesc.Resolver + ExtResolver ExtensionResolver +} + +// FileDescWithDependencies returns a slice of serialized fileDescriptors in +// wire format ([]byte). The fileDescriptors will include fd and all the +// transitive dependencies of fd with names not in sentFileDescriptors. +func (s *ServerReflectionServer) FileDescWithDependencies(fd protoreflect.FileDescriptor, sentFileDescriptors map[string]bool) ([][]byte, error) { + if fd.IsPlaceholder() { + // If the given root file is a placeholder, treat it + // as missing instead of serializing it. + return nil, protoregistry.NotFound + } + var r [][]byte + queue := []protoreflect.FileDescriptor{fd} + for len(queue) > 0 { + currentfd := queue[0] + queue = queue[1:] + if currentfd.IsPlaceholder() { + // Skip any missing files in the dependency graph. + continue + } + if sent := sentFileDescriptors[currentfd.Path()]; len(r) == 0 || !sent { + sentFileDescriptors[currentfd.Path()] = true + fdProto := protodesc.ToFileDescriptorProto(currentfd) + currentfdEncoded, err := proto.Marshal(fdProto) + if err != nil { + return nil, err + } + r = append(r, currentfdEncoded) + } + for i := 0; i < currentfd.Imports().Len(); i++ { + queue = append(queue, currentfd.Imports().Get(i)) + } + } + return r, nil +} + +// FileDescEncodingContainingSymbol finds the file descriptor containing the +// given symbol, finds all of its previously unsent transitive dependencies, +// does marshalling on them, and returns the marshalled result. The given symbol +// can be a type, a service or a method.
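FileDescWithDependencies above is a plain breadth-first walk of the import graph, keyed by file path so each descriptor is streamed at most once per reflection stream. A condensed sketch of the same traversal (fileClosure is a hypothetical helper, not part of the package; it uses only the protobuf reflection APIs imported above):

package sketch

import (
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/reflect/protoreflect"
)

// fileClosure returns fd and its transitive imports, each marshaled as a
// FileDescriptorProto, skipping placeholders and paths already in seen.
func fileClosure(fd protoreflect.FileDescriptor, seen map[string]bool) ([][]byte, error) {
	var out [][]byte
	queue := []protoreflect.FileDescriptor{fd}
	for len(queue) > 0 {
		cur := queue[0]
		queue = queue[1:]
		if cur.IsPlaceholder() || seen[cur.Path()] {
			continue // missing file, or already sent on this stream
		}
		seen[cur.Path()] = true
		b, err := proto.Marshal(protodesc.ToFileDescriptorProto(cur))
		if err != nil {
			return nil, err
		}
		out = append(out, b)
		for i := 0; i < cur.Imports().Len(); i++ {
			queue = append(queue, cur.Imports().Get(i))
		}
	}
	return out, nil
}

One nuance this sketch drops: the vendored function's len(r) == 0 || !sent check always emits at least the root file, so a response is never empty even when the root was already sent earlier on the same stream.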
+func (s *ServerReflectionServer) FileDescEncodingContainingSymbol(name string, sentFileDescriptors map[string]bool) ([][]byte, error) { + d, err := s.DescResolver.FindDescriptorByName(protoreflect.FullName(name)) + if err != nil { + return nil, err + } + return s.FileDescWithDependencies(d.ParentFile(), sentFileDescriptors) +} + +// FileDescEncodingContainingExtension finds the file descriptor containing +// given extension, finds all of its previously unsent transitive dependencies, +// does marshalling on them, and returns the marshalled result. +func (s *ServerReflectionServer) FileDescEncodingContainingExtension(typeName string, extNum int32, sentFileDescriptors map[string]bool) ([][]byte, error) { + xt, err := s.ExtResolver.FindExtensionByNumber(protoreflect.FullName(typeName), protoreflect.FieldNumber(extNum)) + if err != nil { + return nil, err + } + return s.FileDescWithDependencies(xt.TypeDescriptor().ParentFile(), sentFileDescriptors) +} + +// AllExtensionNumbersForTypeName returns all extension numbers for the given type. +func (s *ServerReflectionServer) AllExtensionNumbersForTypeName(name string) ([]int32, error) { + var numbers []int32 + s.ExtResolver.RangeExtensionsByMessage(protoreflect.FullName(name), func(xt protoreflect.ExtensionType) bool { + numbers = append(numbers, int32(xt.TypeDescriptor().Number())) + return true + }) + sort.Slice(numbers, func(i, j int) bool { + return numbers[i] < numbers[j] + }) + if len(numbers) == 0 { + // maybe return an error if given type name is not known + if _, err := s.DescResolver.FindDescriptorByName(protoreflect.FullName(name)); err != nil { + return nil, err + } + } + return numbers, nil +} + +// ListServices returns the names of services this server exposes. +func (s *ServerReflectionServer) ListServices() []*v1reflectionpb.ServiceResponse { + serviceInfo := s.S.GetServiceInfo() + resp := make([]*v1reflectionpb.ServiceResponse, 0, len(serviceInfo)) + for svc := range serviceInfo { + resp = append(resp, &v1reflectionpb.ServiceResponse{Name: svc}) + } + sort.Slice(resp, func(i, j int) bool { + return resp[i].Name < resp[j].Name + }) + return resp +} + +// ServerReflectionInfo is the reflection service handler. 
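AllExtensionNumbersForTypeName above is backed in practice by protoregistry.GlobalTypes, which satisfies the ExtensionResolver interface. A self-contained usage sketch (my.pkg.Extendable is a placeholder message name; with no such extendable message linked into the binary, the function simply returns an empty slice):

package main

import (
	"fmt"
	"sort"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/reflect/protoregistry"
)

// extensionNumbers mirrors AllExtensionNumbersForTypeName: enumerate every
// registered extension of msg and report the field numbers in sorted order.
func extensionNumbers(msg protoreflect.FullName) []int32 {
	var nums []int32
	protoregistry.GlobalTypes.RangeExtensionsByMessage(msg, func(xt protoreflect.ExtensionType) bool {
		nums = append(nums, int32(xt.TypeDescriptor().Number()))
		return true // keep ranging
	})
	sort.Slice(nums, func(i, j int) bool { return nums[i] < nums[j] })
	return nums
}

func main() {
	fmt.Println(extensionNumbers("my.pkg.Extendable"))
}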
+func (s *ServerReflectionServer) ServerReflectionInfo(stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { + sentFileDescriptors := make(map[string]bool) + for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + out := &v1reflectionpb.ServerReflectionResponse{ + ValidHost: in.Host, + OriginalRequest: in, + } + switch req := in.MessageRequest.(type) { + case *v1reflectionpb.ServerReflectionRequest_FileByFilename: + var b [][]byte + fd, err := s.DescResolver.FindFileByPath(req.FileByFilename) + if err == nil { + b, err = s.FileDescWithDependencies(fd, sentFileDescriptors) + } + if err != nil { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: + b, err := s.FileDescEncodingContainingSymbol(req.FileContainingSymbol, sentFileDescriptors) + if err != nil { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: + typeName := req.FileContainingExtension.ContainingType + extNum := req.FileContainingExtension.ExtensionNumber + b, err := s.FileDescEncodingContainingExtension(typeName, extNum, sentFileDescriptors) + if err != nil { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, + } + } + case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + extNums, err := s.AllExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) + if err != nil { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{ + BaseTypeName: req.AllExtensionNumbersOfType, + ExtensionNumber: extNums, + }, + } + } + case *v1reflectionpb.ServerReflectionRequest_ListServices: + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1reflectionpb.ListServiceResponse{ + Service: s.ListServices(), + }, + } + default: + return status.Errorf(codes.InvalidArgument, "invalid MessageRequest: %v", in.MessageRequest) + } + + if err := stream.Send(out); err != nil { + return err + } + } +} + +// V1ToV1AlphaResponse converts a v1 ServerReflectionResponse to a v1alpha. 
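The handler loop above answers one request per iteration on a bidirectional stream, and the sentFileDescriptors map makes the dependency dedupe per-stream rather than global. For orientation, a minimal client-side counterpart using the v1 stubs (the address is illustrative and error handling is deliberately terse):

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1"
)

func main() {
	conn, err := grpc.NewClient("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	stream, err := v1reflectionpb.NewServerReflectionClient(conn).ServerReflectionInfo(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// ListServices is the simplest request; the server replies with one
	// ServiceResponse per registered service, sorted by name.
	req := &v1reflectionpb.ServerReflectionRequest{
		MessageRequest: &v1reflectionpb.ServerReflectionRequest_ListServices{},
	}
	if err := stream.Send(req); err != nil {
		log.Fatal(err)
	}
	resp, err := stream.Recv()
	if err != nil {
		log.Fatal(err)
	}
	for _, svc := range resp.GetListServicesResponse().GetService() {
		fmt.Println(svc.GetName())
	}
}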
+func V1ToV1AlphaResponse(v1 *v1reflectionpb.ServerReflectionResponse) *v1alphareflectionpb.ServerReflectionResponse { + var v1alpha v1alphareflectionpb.ServerReflectionResponse + v1alpha.ValidHost = v1.ValidHost + if v1.OriginalRequest != nil { + v1alpha.OriginalRequest = V1ToV1AlphaRequest(v1.OriginalRequest) + } + switch mr := v1.MessageResponse.(type) { + case *v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1alphareflectionpb.FileDescriptorResponse{ + FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1alphareflectionpb.ExtensionNumberResponse{ + BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), + ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_ListServicesResponse: + if mr != nil { + svcs := make([]*v1alphareflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService())) + for i, svc := range mr.ListServicesResponse.GetService() { + svcs[i] = &v1alphareflectionpb.ServiceResponse{ + Name: svc.GetName(), + } + } + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1alphareflectionpb.ListServiceResponse{ + Service: svcs, + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_ErrorResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1alphareflectionpb.ErrorResponse{ + ErrorCode: mr.ErrorResponse.GetErrorCode(), + ErrorMessage: mr.ErrorResponse.GetErrorMessage(), + }, + } + } + default: + // no value set + } + return &v1alpha +} + +// V1AlphaToV1Request converts a v1alpha ServerReflectionRequest to a v1. 
+func V1AlphaToV1Request(v1alpha *v1alphareflectionpb.ServerReflectionRequest) *v1reflectionpb.ServerReflectionRequest { + var v1 v1reflectionpb.ServerReflectionRequest + v1.Host = v1alpha.Host + switch mr := v1alpha.MessageRequest.(type) { + case *v1alphareflectionpb.ServerReflectionRequest_FileByFilename: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + case *v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + case *v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension: + if mr.FileContainingExtension != nil { + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1reflectionpb.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + case *v1alphareflectionpb.ServerReflectionRequest_ListServices: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + default: + // no value set + } + return &v1 +} + +// V1ToV1AlphaRequest converts a v1 ServerReflectionRequest to a v1alpha. +func V1ToV1AlphaRequest(v1 *v1reflectionpb.ServerReflectionRequest) *v1alphareflectionpb.ServerReflectionRequest { + var v1alpha v1alphareflectionpb.ServerReflectionRequest + v1alpha.Host = v1.Host + switch mr := v1.MessageRequest.(type) { + case *v1reflectionpb.ServerReflectionRequest_FileByFilename: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1alphareflectionpb.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + } + case *v1reflectionpb.ServerReflectionRequest_ListServices: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + } + default: + // no value set + } + return &v1alpha +} + +// V1AlphaToV1Response converts a v1alpha ServerReflectionResponse to a v1. 
+func V1AlphaToV1Response(v1alpha *v1alphareflectionpb.ServerReflectionResponse) *v1reflectionpb.ServerReflectionResponse { + var v1 v1reflectionpb.ServerReflectionResponse + v1.ValidHost = v1alpha.ValidHost + if v1alpha.OriginalRequest != nil { + v1.OriginalRequest = V1AlphaToV1Request(v1alpha.OriginalRequest) + } + switch mr := v1alpha.MessageResponse.(type) { + case *v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse: + if mr != nil { + v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{ + FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), + }, + } + } + case *v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse: + if mr != nil { + v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{ + BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), + ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), + }, + } + } + case *v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse: + if mr != nil { + svcs := make([]*v1reflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService())) + for i, svc := range mr.ListServicesResponse.GetService() { + svcs[i] = &v1reflectionpb.ServiceResponse{ + Name: svc.GetName(), + } + } + v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1reflectionpb.ListServiceResponse{ + Service: svcs, + }, + } + } + case *v1alphareflectionpb.ServerReflectionResponse_ErrorResponse: + if mr != nil { + v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ + ErrorCode: mr.ErrorResponse.GetErrorCode(), + ErrorMessage: mr.ErrorResponse.GetErrorMessage(), + }, + } + } + default: + // no value set + } + return &v1 +} diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go index c3b40839..13a94e2d 100644 --- a/vendor/google.golang.org/grpc/reflection/serverreflection.go +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -37,19 +37,13 @@ To register server reflection on a gRPC server: package reflection // import "google.golang.org/grpc/reflection" import ( - "io" - "sort" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" + "google.golang.org/grpc/reflection/internal" "google.golang.org/protobuf/reflect/protodesc" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" - v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" ) @@ -158,203 +152,9 @@ func NewServerV1(opts ServerOptions) v1reflectiongrpc.ServerReflectionServer { if opts.ExtensionResolver == nil { opts.ExtensionResolver = protoregistry.GlobalTypes } - return &serverReflectionServer{ - s: opts.Services, - descResolver: opts.DescriptorResolver, - extResolver: opts.ExtensionResolver, - } -} - -type serverReflectionServer struct { - v1alphareflectiongrpc.UnimplementedServerReflectionServer - s ServiceInfoProvider - descResolver protodesc.Resolver - extResolver ExtensionResolver -} - -// 
fileDescWithDependencies returns a slice of serialized fileDescriptors in -// wire format ([]byte). The fileDescriptors will include fd and all the -// transitive dependencies of fd with names not in sentFileDescriptors. -func (s *serverReflectionServer) fileDescWithDependencies(fd protoreflect.FileDescriptor, sentFileDescriptors map[string]bool) ([][]byte, error) { - if fd.IsPlaceholder() { - // If the given root file is a placeholder, treat it - // as missing instead of serializing it. - return nil, protoregistry.NotFound - } - var r [][]byte - queue := []protoreflect.FileDescriptor{fd} - for len(queue) > 0 { - currentfd := queue[0] - queue = queue[1:] - if currentfd.IsPlaceholder() { - // Skip any missing files in the dependency graph. - continue - } - if sent := sentFileDescriptors[currentfd.Path()]; len(r) == 0 || !sent { - sentFileDescriptors[currentfd.Path()] = true - fdProto := protodesc.ToFileDescriptorProto(currentfd) - currentfdEncoded, err := proto.Marshal(fdProto) - if err != nil { - return nil, err - } - r = append(r, currentfdEncoded) - } - for i := 0; i < currentfd.Imports().Len(); i++ { - queue = append(queue, currentfd.Imports().Get(i)) - } - } - return r, nil -} - -// fileDescEncodingContainingSymbol finds the file descriptor containing the -// given symbol, finds all of its previously unsent transitive dependencies, -// does marshalling on them, and returns the marshalled result. The given symbol -// can be a type, a service or a method. -func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string, sentFileDescriptors map[string]bool) ([][]byte, error) { - d, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name)) - if err != nil { - return nil, err - } - return s.fileDescWithDependencies(d.ParentFile(), sentFileDescriptors) -} - -// fileDescEncodingContainingExtension finds the file descriptor containing -// given extension, finds all of its previously unsent transitive dependencies, -// does marshalling on them, and returns the marshalled result. -func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName string, extNum int32, sentFileDescriptors map[string]bool) ([][]byte, error) { - xt, err := s.extResolver.FindExtensionByNumber(protoreflect.FullName(typeName), protoreflect.FieldNumber(extNum)) - if err != nil { - return nil, err - } - return s.fileDescWithDependencies(xt.TypeDescriptor().ParentFile(), sentFileDescriptors) -} - -// allExtensionNumbersForTypeName returns all extension numbers for the given type. -func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([]int32, error) { - var numbers []int32 - s.extResolver.RangeExtensionsByMessage(protoreflect.FullName(name), func(xt protoreflect.ExtensionType) bool { - numbers = append(numbers, int32(xt.TypeDescriptor().Number())) - return true - }) - sort.Slice(numbers, func(i, j int) bool { - return numbers[i] < numbers[j] - }) - if len(numbers) == 0 { - // maybe return an error if given type name is not known - if _, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name)); err != nil { - return nil, err - } - } - return numbers, nil -} - -// listServices returns the names of services this server exposes. 
-func (s *serverReflectionServer) listServices() []*v1reflectionpb.ServiceResponse { - serviceInfo := s.s.GetServiceInfo() - resp := make([]*v1reflectionpb.ServiceResponse, 0, len(serviceInfo)) - for svc := range serviceInfo { - resp = append(resp, &v1reflectionpb.ServiceResponse{Name: svc}) - } - sort.Slice(resp, func(i, j int) bool { - return resp[i].Name < resp[j].Name - }) - return resp -} - -// ServerReflectionInfo is the reflection service handler. -func (s *serverReflectionServer) ServerReflectionInfo(stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { - sentFileDescriptors := make(map[string]bool) - for { - in, err := stream.Recv() - if err == io.EOF { - return nil - } - if err != nil { - return err - } - - out := &v1reflectionpb.ServerReflectionResponse{ - ValidHost: in.Host, - OriginalRequest: in, - } - switch req := in.MessageRequest.(type) { - case *v1reflectionpb.ServerReflectionRequest_FileByFilename: - var b [][]byte - fd, err := s.descResolver.FindFileByPath(req.FileByFilename) - if err == nil { - b, err = s.fileDescWithDependencies(fd, sentFileDescriptors) - } - if err != nil { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1reflectionpb.ErrorResponse{ - ErrorCode: int32(codes.NotFound), - ErrorMessage: err.Error(), - }, - } - } else { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, - } - } - case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: - b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol, sentFileDescriptors) - if err != nil { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1reflectionpb.ErrorResponse{ - ErrorCode: int32(codes.NotFound), - ErrorMessage: err.Error(), - }, - } - } else { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, - } - } - case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: - typeName := req.FileContainingExtension.ContainingType - extNum := req.FileContainingExtension.ExtensionNumber - b, err := s.fileDescEncodingContainingExtension(typeName, extNum, sentFileDescriptors) - if err != nil { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1reflectionpb.ErrorResponse{ - ErrorCode: int32(codes.NotFound), - ErrorMessage: err.Error(), - }, - } - } else { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, - } - } - case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: - extNums, err := s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) - if err != nil { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1reflectionpb.ErrorResponse{ - ErrorCode: int32(codes.NotFound), - ErrorMessage: err.Error(), - }, - } - } else { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ - AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{ - BaseTypeName: req.AllExtensionNumbersOfType, - ExtensionNumber: extNums, - }, - } - } - case *v1reflectionpb.ServerReflectionRequest_ListServices: - out.MessageResponse = 
&v1reflectionpb.ServerReflectionResponse_ListServicesResponse{ - ListServicesResponse: &v1reflectionpb.ListServiceResponse{ - Service: s.listServices(), - }, - } - default: - return status.Errorf(codes.InvalidArgument, "invalid MessageRequest: %v", in.MessageRequest) - } - - if err := stream.Send(out); err != nil { - return err - } + return &internal.ServerReflectionServer{ + S: opts.Services, + DescResolver: opts.DescriptorResolver, + ExtResolver: opts.ExtensionResolver, } } diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index a6f26c8a..3edca296 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -63,7 +63,7 @@ LEGACY_SOURCES=( # Generates only the new gRPC Service symbols SOURCES=( - $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^\(profiling/proto/service.proto\|reflection/grpc_reflection_v1alpha/reflection.proto\)$') + $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^profiling/proto/service.proto$') ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto @@ -93,7 +93,7 @@ Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing for src in ${SOURCES[@]}; do echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS}:${WORKDIR}/out \ + protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},use_generic_streams_experimental=true:${WORKDIR}/out \ -I"." \ -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ @@ -118,6 +118,6 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/ # grpc_testing_not_regenerate/*.pb.go are not re-generated, # see grpc_testing_not_regenerate/README.md for details. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate/*.pb.go cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go index 14aa6f20..ef3d6ed6 100644 --- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -18,19 +18,43 @@ // Package dns implements a dns resolver to be installed as the default resolver // in grpc. -// -// Deprecated: this package is imported by grpc and should not need to be -// imported directly by users. package dns import ( + "time" + "google.golang.org/grpc/internal/resolver/dns" "google.golang.org/grpc/resolver" ) +// SetResolvingTimeout sets the maximum duration for DNS resolution requests. +// +// This function affects the global timeout used by all channels using the DNS +// name resolver scheme. +// +// It must be called only at application startup, before any gRPC calls are +// made. Modifying this value after initialization is not thread-safe. +// +// The default value is 30 seconds. Setting the timeout too low may result in +// premature timeouts during resolution, while setting it too high may lead to +// unnecessary delays in service discovery. Choose a value appropriate for your +// specific needs and network environment. +func SetResolvingTimeout(timeout time.Duration) { + dns.ResolvingTimeout = timeout +} + // NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. 
// // Deprecated: import grpc and use resolver.Get("dns") instead. func NewBuilder() resolver.Builder { return dns.NewBuilder() } + +// SetMinResolutionInterval sets the default minimum interval at which DNS +// re-resolutions are allowed. This helps to prevent excessive re-resolution. +// +// It must be called only at application startup, before any gRPC calls are +// made. Modifying this value after initialization is not thread-safe. +func SetMinResolutionInterval(d time.Duration) { + dns.MinResolutionInterval = d +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index adf89dd9..20285451 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -29,6 +29,7 @@ import ( "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/serviceconfig" ) @@ -63,16 +64,18 @@ func Get(scheme string) Builder { } // SetDefaultScheme sets the default scheme that will be used. The default -// default scheme is "passthrough". +// scheme is initially set to "passthrough". // // NOTE: this function must only be called during initialization time (i.e. in // an init() function), and is not thread-safe. The scheme set last overrides // previously set values. func SetDefaultScheme(scheme string) { defaultScheme = scheme + internal.UserSetDefaultScheme = true } -// GetDefaultScheme gets the default scheme that will be used. +// GetDefaultScheme gets the default scheme that will be used by grpc.Dial. If +// SetDefaultScheme is never called, the default scheme used by grpc.NewClient is "dns" instead. func GetDefaultScheme() string { return defaultScheme } @@ -168,6 +171,9 @@ type BuildOptions struct { // field. In most cases though, it is not appropriate, and this field may // be ignored. Dialer func(context.Context, string) (net.Conn, error) + // Authority is the effective authority of the clientconn for which the + // resolver is built. + Authority string } // An Endpoint is one network endpoint, or server, which may have multiple @@ -281,9 +287,9 @@ func (t Target) Endpoint() string { return strings.TrimPrefix(endpoint, "/") } -// String returns a string representation of Target. +// String returns the canonical string representation of Target. func (t Target) String() string { - return t.URL.String() + return t.URL.Scheme + "://" + t.URL.Host + "/" + t.Endpoint() } // Builder creates a resolver that will be used to watch name resolution updates. diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go index c79bab12..c5fb4523 100644 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -75,6 +75,7 @@ func (ccr *ccResolverWrapper) start() error { DialCreds: ccr.cc.dopts.copts.TransportCredentials, CredsBundle: ccr.cc.dopts.copts.CredsBundle, Dialer: ccr.cc.dopts.copts.Dialer, + Authority: ccr.cc.authority, } var err error ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) @@ -96,7 +97,7 @@ func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { // finished shutting down, the channel should block on ccr.serializer.Done() // without cc.mu held. 
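The two DNS knobs added above are process-global and, per their doc comments, not safe to modify after the first gRPC call. A sketch of how an application might wire them in at startup, before building its first channel (target address illustrative):

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/resolver/dns"
)

func main() {
	// Both settings are global; set them before any channel is created.
	dns.SetResolvingTimeout(10 * time.Second)      // per-lookup ceiling (default 30s)
	dns.SetMinResolutionInterval(15 * time.Second) // throttle re-resolution bursts

	conn, err := grpc.NewClient("dns:///example.internal:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}

Related, per the resolver.go hunk above: grpc.NewClient now defaults to the "dns" scheme unless SetDefaultScheme was called, while grpc.Dial keeps "passthrough" as its default.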
func (ccr *ccResolverWrapper) close() { - channelz.Info(logger, ccr.cc.channelzID, "Closing the name resolver") + channelz.Info(logger, ccr.cc.channelz, "Closing the name resolver") ccr.mu.Lock() ccr.closed = true ccr.mu.Unlock() @@ -146,7 +147,7 @@ func (ccr *ccResolverWrapper) ReportError(err error) { return } ccr.mu.Unlock() - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + channelz.Warningf(logger, ccr.cc.channelz, "ccResolverWrapper: reporting error to cc: %v", err) ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err) } @@ -170,7 +171,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { // ParseServiceConfig is called by resolver implementations to parse a JSON // representation of the service config. func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { - return parseServiceConfig(scJSON) + return parseServiceConfig(scJSON, ccr.cc.dopts.maxCallAttempts) } // addChannelzTraceEvent adds a channelz trace event containing the new @@ -193,5 +194,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) + channelz.Infof(logger, ccr.cc.channelz, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index d17ede0f..fdd49e6e 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -744,17 +744,19 @@ type payloadInfo struct { uncompressedBytes []byte } -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { - pf, buf, err := p.recvMsg(maxReceiveMessageSize) +// recvAndDecompress reads a message from the stream, decompressing it if necessary. +// +// Cancelling the returned cancel function releases the buffer back to the pool. So the caller should cancel as soon as +// the buffer is no longer needed. +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, +) (uncompressedBuf []byte, cancel func(), err error) { + pf, compressedBuf, err := p.recvMsg(maxReceiveMessageSize) if err != nil { - return nil, err - } - if payInfo != nil { - payInfo.compressedLength = len(buf) + return nil, nil, err } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { - return nil, st.Err() + return nil, nil, st.Err() } var size int @@ -762,21 +764,35 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. 
if dc != nil { - buf, err = dc.Do(bytes.NewReader(buf)) - size = len(buf) + uncompressedBuf, err = dc.Do(bytes.NewReader(compressedBuf)) + size = len(uncompressedBuf) } else { - buf, size, err = decompress(compressor, buf, maxReceiveMessageSize) + uncompressedBuf, size, err = decompress(compressor, compressedBuf, maxReceiveMessageSize) } if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + return nil, nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } if size > maxReceiveMessageSize { // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + } + } else { + uncompressedBuf = compressedBuf + } + + if payInfo != nil { + payInfo.compressedLength = len(compressedBuf) + payInfo.uncompressedBytes = uncompressedBuf + + cancel = func() {} + } else { + cancel = func() { + p.recvBufferPool.Put(&compressedBuf) } } - return buf, nil + + return uncompressedBuf, cancel, nil } // Using compressor, decompress d, returning data and size. @@ -796,6 +812,9 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize // size is used as an estimate to size the buffer, but we // will read more data if available. // +MinRead so ReadFrom will not reallocate if size is correct. + // + // TODO: If we ensure that the buffer size is the same as the DecompressedSize, + // we can also utilize the recv buffer pool here. buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) return buf.Bytes(), int(bytesRead), err @@ -811,18 +830,15 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) + buf, cancel, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) if err != nil { return err } + defer cancel() + if err := c.Unmarshal(buf, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } - if payInfo != nil { - payInfo.uncompressedBytes = buf - } else { - p.recvBufferPool.Put(&buf) - } return nil } @@ -946,22 +962,9 @@ func setCallInfoCodec(c *callInfo) error { return nil } -// channelzData is used to store channelz related data for ClientConn, addrConn and Server. -// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic -// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. -// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. -type channelzData struct { - callsStarted int64 - callsFailed int64 - callsSucceeded int64 - // lastCallStartedTime stores the timestamp that last call starts. 
It is of int64 type instead of - // time.Time since it's more costly to atomically update time.Time variable than int64 variable. - lastCallStartedTime int64 -} - // The SupportPackageIsVersion variables are referenced from generated protocol // buffer files to ensure compatibility with the gRPC version used. The latest -// support package version is 7. +// support package version is 9. // // Older versions are kept for compatibility. // @@ -973,6 +976,7 @@ const ( SupportPackageIsVersion6 = true SupportPackageIsVersion7 = true SupportPackageIsVersion8 = true + SupportPackageIsVersion9 = true ) const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 0bf5c78b..89f8e479 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -137,8 +137,7 @@ type Server struct { serveWG sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop handlersWG sync.WaitGroup // counts active method handler goroutines - channelzID *channelz.Identifier - czData *channelzData + channelz *channelz.Server serverWorkerChannel chan func() serverWorkerChannelClose func() @@ -249,11 +248,9 @@ func SharedWriteBuffer(val bool) ServerOption { } // WriteBufferSize determines how much data can be batched before doing a write -// on the wire. The corresponding memory allocation for this buffer will be -// twice the size to keep syscalls low. The default value for this buffer is -// 32KB. Zero or negative values will disable the write buffer such that each -// write will be on underlying connection. -// Note: A Send call may not directly translate to a write. +// on the wire. The default value for this buffer is 32KB. Zero or negative +// values will disable the write buffer such that each write will be on underlying +// connection. Note: A Send call may not directly translate to a write. func WriteBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.writeBufferSize = s @@ -530,12 +527,22 @@ func ConnectionTimeout(d time.Duration) ServerOption { }) } +// MaxHeaderListSizeServerOption is a ServerOption that sets the max +// (uncompressed) size of header list that the server is prepared to accept. +type MaxHeaderListSizeServerOption struct { + MaxHeaderListSize uint32 +} + +func (o MaxHeaderListSizeServerOption) apply(so *serverOptions) { + so.maxHeaderListSize = &o.MaxHeaderListSize +} + // MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size // of header list that the server is prepared to accept. 
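The MaxHeaderListSize hunk here changes the option's representation (an exported MaxHeaderListSizeServerOption struct instead of an opaque func-based option) without changing the call-side API. A usage sketch (port and limit are illustrative):

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatal(err)
	}
	// Reject any request whose uncompressed header list exceeds 1 MiB.
	s := grpc.NewServer(grpc.MaxHeaderListSize(1 << 20))
	log.Fatal(s.Serve(lis))
}

The diff itself only shows the refactor; presumably the concrete exported type exists so other packages can recognize the option by type assertion when composing server options.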
func MaxHeaderListSize(s uint32) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.maxHeaderListSize = &s - }) + return MaxHeaderListSizeServerOption{ + MaxHeaderListSize: s, + } } // HeaderTableSize returns a ServerOption that sets the size of dynamic @@ -661,7 +668,7 @@ func NewServer(opt ...ServerOption) *Server { services: make(map[string]*serviceInfo), quit: grpcsync.NewEvent(), done: grpcsync.NewEvent(), - czData: new(channelzData), + channelz: channelz.RegisterServer(""), } chainUnaryServerInterceptors(s) chainStreamServerInterceptors(s) @@ -675,8 +682,7 @@ func NewServer(opt ...ServerOption) *Server { s.initServerWorkers() } - s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") - channelz.Info(logger, s.channelzID, "Server created") + channelz.Info(logger, s.channelz, "Server created") return s } @@ -802,20 +808,13 @@ var ErrServerStopped = errors.New("grpc: the server has been stopped") type listenSocket struct { net.Listener - channelzID *channelz.Identifier -} - -func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { - return &channelz.SocketInternalMetric{ - SocketOptions: channelz.GetSocketOption(l.Listener), - LocalAddr: l.Listener.Addr(), - } + channelz *channelz.Socket } func (l *listenSocket) Close() error { err := l.Listener.Close() - channelz.RemoveEntry(l.channelzID) - channelz.Info(logger, l.channelzID, "ListenSocket deleted") + channelz.RemoveEntry(l.channelz.ID) + channelz.Info(logger, l.channelz, "ListenSocket deleted") return err } @@ -857,7 +856,16 @@ func (s *Server) Serve(lis net.Listener) error { } }() - ls := &listenSocket{Listener: lis} + ls := &listenSocket{ + Listener: lis, + channelz: channelz.RegisterSocket(&channelz.Socket{ + SocketType: channelz.SocketTypeListen, + Parent: s.channelz, + RefName: lis.Addr().String(), + LocalAddr: lis.Addr(), + SocketOptions: channelz.GetSocketOption(lis)}, + ), + } s.lis[ls] = true defer func() { @@ -869,14 +877,8 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Unlock() }() - var err error - ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) - if err != nil { - s.mu.Unlock() - return err - } s.mu.Unlock() - channelz.Info(logger, ls.channelzID, "ListenSocket created") + channelz.Info(logger, ls.channelz, "ListenSocket created") var tempDelay time.Duration // how long to sleep on accept failure for { @@ -975,7 +977,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { WriteBufferSize: s.opts.writeBufferSize, ReadBufferSize: s.opts.readBufferSize, SharedWriteBuffer: s.opts.sharedWriteBuffer, - ChannelzParentID: s.channelzID, + ChannelzParent: s.channelz, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, } @@ -989,7 +991,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { if err != credentials.ErrConnDispatched { // Don't log on ErrConnDispatched and io.EOF to prevent log spam. 
if err != io.EOF { - channelz.Info(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + channelz.Info(logger, s.channelz, "grpc: Server.Serve failed to create ServerTransport: ", err) } c.Close() } @@ -1121,37 +1123,28 @@ func (s *Server) removeConn(addr string, st transport.ServerTransport) { } } -func (s *Server) channelzMetric() *channelz.ServerInternalMetric { - return &channelz.ServerInternalMetric{ - CallsStarted: atomic.LoadInt64(&s.czData.callsStarted), - CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded), - CallsFailed: atomic.LoadInt64(&s.czData.callsFailed), - LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)), - } -} - func (s *Server) incrCallsStarted() { - atomic.AddInt64(&s.czData.callsStarted, 1) - atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano()) + s.channelz.ServerMetrics.CallsStarted.Add(1) + s.channelz.ServerMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano()) } func (s *Server) incrCallsSucceeded() { - atomic.AddInt64(&s.czData.callsSucceeded, 1) + s.channelz.ServerMetrics.CallsSucceeded.Add(1) } func (s *Server) incrCallsFailed() { - atomic.AddInt64(&s.czData.callsFailed, 1) + s.channelz.ServerMetrics.CallsFailed.Add(1) } func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { - channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) + channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) return err } compData, err := compress(data, cp, comp) if err != nil { - channelz.Error(logger, s.channelzID, "grpc: server failed to compress response: ", err) + channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err) return err } hdr, payload := msgHeader(data, compData) @@ -1342,10 +1335,11 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } - d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + + d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { - channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } @@ -1353,6 +1347,8 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor t.IncrMsgRecv() } df := func(v any) error { + defer cancel() + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } @@ -1394,7 +1390,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor trInfo.tr.SetError() } if e := t.WriteStatus(stream, appStatus); e != nil { - channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } if len(binlogs) != 0 { if h, _ := 
stream.Header(); h.Len() > 0 { @@ -1434,7 +1430,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor } if sts, ok := status.FromError(err); ok { if e := t.WriteStatus(stream, sts); e != nil { - channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } } else { switch st := err.(type) { @@ -1762,7 +1758,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() } - channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err) } if ti != nil { ti.tr.Finish() @@ -1819,7 +1815,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() } - channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err) } if ti != nil { ti.tr.Finish() @@ -1891,8 +1887,7 @@ func (s *Server) stop(graceful bool) { s.quit.Fire() defer s.done.Fire() - s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) - + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelz.ID) }) s.mu.Lock() s.closeListenersLocked() // Wait for serving threads to be ready to exit. Only then can we be sure no @@ -2117,7 +2112,7 @@ func ClientSupportedCompressors(ctx context.Context) ([]string, error) { return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) } - return strings.Split(stream.ClientAdvertisedCompressors(), ","), nil + return stream.ClientAdvertisedCompressors(), nil } // SetTrailer sets the trailer metadata that will be sent when an RPC returns. @@ -2147,17 +2142,9 @@ func Method(ctx context.Context) (string, bool) { return s.Method(), true } -type channelzServer struct { - s *Server -} - -func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { - return c.s.channelzMetric() -} - // validateSendCompressor returns an error when given compressor name cannot be // handled by the server or the client based on the advertised compressors. 
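With ClientSupportedCompressors now returning []string (the grpc-accept-encoding header is split once at the transport instead of by every caller), compressor checks become a simple slice scan. A hedged handler-side sketch (both ClientSupportedCompressors and SetSendCompressor are experimental gRPC-Go APIs; the gzip blank import registers the compressor that validateSendCompressor will look up):

package sketch

import (
	"context"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
)

// maybeGzip opts the response into gzip only when the client advertised it
// in grpc-accept-encoding; otherwise the server stays on identity.
func maybeGzip(ctx context.Context) error {
	comps, err := grpc.ClientSupportedCompressors(ctx)
	if err != nil {
		return err
	}
	for _, c := range comps { // previously: strings.Split of a comma-joined string
		if c == "gzip" {
			return grpc.SetSendCompressor(ctx, "gzip")
		}
	}
	return nil
}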
-func validateSendCompressor(name, clientCompressors string) error { +func validateSendCompressor(name string, clientCompressors []string) error { if name == encoding.Identity { return nil } @@ -2166,7 +2153,7 @@ func validateSendCompressor(name, clientCompressors string) error { return fmt.Errorf("compressor not registered %q", name) } - for _, c := range strings.Split(clientCompressors, ",") { + for _, c := range clientCompressors { if c == name { return nil // found match } diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 0df11fc0..2671c5ef 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -25,8 +25,11 @@ import ( "reflect" "time" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancer/gracefulswitch" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" ) @@ -41,11 +44,6 @@ const maxInt = int(^uint(0) >> 1) // https://github.com/grpc/grpc/blob/master/doc/service_config.md type MethodConfig = internalserviceconfig.MethodConfig -type lbConfig struct { - name string - cfg serviceconfig.LoadBalancingConfig -} - // ServiceConfig is provided by the service provider and contains parameters for how // clients that connect to the service should behave. // @@ -55,14 +53,9 @@ type lbConfig struct { type ServiceConfig struct { serviceconfig.Config - // LB is the load balancer the service providers recommends. This is - // deprecated; lbConfigs is preferred. If lbConfig and LB are both present, - // lbConfig will be used. - LB *string - // lbConfig is the service config's load balancing configuration. If // lbConfig and LB are both present, lbConfig will be used. - lbConfig *lbConfig + lbConfig serviceconfig.LoadBalancingConfig // Methods contains a map for the methods in this service. If there is an // exact match for a method (i.e. /service/method) in the map, use the @@ -164,38 +157,55 @@ type jsonMC struct { // TODO(lyuxuan): delete this struct after cleaning up old service config implementation. 
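The service_config.go changes in this file diff route every load-balancing config through the graceful-switch parser: loadBalancingConfig stays raw JSON until parsed, the deprecated top-level LB field is removed, and a missing or unregistered policy collapses to pick_first. From application code the surface is unchanged: it is still service-config JSON, for example (endpoint illustrative):

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.NewClient("dns:///example.internal:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// Parsed by parseServiceConfig; leaving this option out now yields
		// an explicit {"pick_first":{}} config instead of a nil lbConfig.
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin": {}}]}`))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}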
type jsonSC struct { LoadBalancingPolicy *string - LoadBalancingConfig *internalserviceconfig.BalancerConfig + LoadBalancingConfig *json.RawMessage MethodConfig *[]jsonMC RetryThrottling *retryThrottlingPolicy HealthCheckConfig *healthCheckConfig } func init() { - internal.ParseServiceConfig = parseServiceConfig + internal.ParseServiceConfig = func(js string) *serviceconfig.ParseResult { + return parseServiceConfig(js, defaultMaxCallAttempts) + } } -func parseServiceConfig(js string) *serviceconfig.ParseResult { +func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { if len(js) == 0 { return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} } var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { - logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) + logger.Warningf("grpc: unmarshalling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } sc := ServiceConfig{ - LB: rsc.LoadBalancingPolicy, Methods: make(map[string]MethodConfig), retryThrottling: rsc.RetryThrottling, healthCheckConfig: rsc.HealthCheckConfig, rawJSONString: js, } - if c := rsc.LoadBalancingConfig; c != nil { - sc.lbConfig = &lbConfig{ - name: c.Name, - cfg: c.Config, + c := rsc.LoadBalancingConfig + if c == nil { + name := pickfirst.Name + if rsc.LoadBalancingPolicy != nil { + name = *rsc.LoadBalancingPolicy } + if balancer.Get(name) == nil { + name = pickfirst.Name + } + cfg := []map[string]any{{name: struct{}{}}} + strCfg, err := json.Marshal(cfg) + if err != nil { + return &serviceconfig.ParseResult{Err: fmt.Errorf("unexpected error marshaling simple LB config: %w", err)} + } + r := json.RawMessage(strCfg) + c = &r } + cfg, err := gracefulswitch.ParseConfig(*c) + if err != nil { + return &serviceconfig.ParseResult{Err: err} + } + sc.lbConfig = cfg if rsc.MethodConfig == nil { return &serviceconfig.ParseResult{Config: &sc} @@ -211,8 +221,8 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { WaitForReady: m.WaitForReady, Timeout: (*time.Duration)(m.Timeout), } - if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { - logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) + if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy, maxAttempts); err != nil { + logger.Warningf("grpc: unmarshalling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } if m.MaxRequestMessageBytes != nil { @@ -232,13 +242,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { for i, n := range *m.Name { path, err := n.generatePath() if err != nil { - logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } if _, ok := paths[path]; ok { err = errDuplicatedName - logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } paths[path] = struct{}{} @@ -257,7 +267,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { return &serviceconfig.ParseResult{Config: &sc} } -func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) { +func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p 
*internalserviceconfig.RetryPolicy, err error) { if jrp == nil { return nil, nil } @@ -271,17 +281,16 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol return nil, nil } + if jrp.MaxAttempts < maxAttempts { + maxAttempts = jrp.MaxAttempts + } rp := &internalserviceconfig.RetryPolicy{ - MaxAttempts: jrp.MaxAttempts, + MaxAttempts: maxAttempts, InitialBackoff: time.Duration(jrp.InitialBackoff), MaxBackoff: time.Duration(jrp.MaxBackoff), BackoffMultiplier: jrp.BackoffMultiplier, RetryableStatusCodes: make(map[codes.Code]bool), } - if rp.MaxAttempts > 5 { - // TODO(retry): Make the max maxAttempts configurable. - rp.MaxAttempts = 5 - } for _, code := range jrp.RetryableStatusCodes { rp.RetryableStatusCodes[code] = true } diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 4ab70e2d..fdb0bd65 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -73,9 +73,12 @@ func (*PickerUpdated) isRPCStats() {} type InPayload struct { // Client is true if this InPayload is from client side. Client bool - // Payload is the payload with original type. + // Payload is the payload with original type. This may be modified after + // the call to HandleRPC which provides the InPayload returns and must be + // copied if needed later. Payload any // Data is the serialized message payload. + // Deprecated: Data will be removed in the next release. Data []byte // Length is the size of the uncompressed payload data. Does not include any @@ -143,9 +146,12 @@ func (s *InTrailer) isRPCStats() {} type OutPayload struct { // Client is true if this OutPayload is from client side. Client bool - // Payload is the payload with original type. + // Payload is the payload with original type. This may be modified after + // the call to HandleRPC which provides the OutPayload returns and must be + // copied if needed later. Payload any // Data is the serialized message payload. + // Deprecated: Data will be removed in the next release. Data []byte // Length is the size of the uncompressed payload data. Does not include any // framing (gRPC or HTTP/2). diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 814e9983..8051ef5b 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -23,6 +23,7 @@ import ( "errors" "io" "math" + "math/rand" "strconv" "sync" "time" @@ -34,7 +35,6 @@ import ( "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" iresolver "google.golang.org/grpc/internal/resolver" @@ -516,6 +516,7 @@ func (a *csAttempt) newStream() error { return toRPCErr(nse.Err) } a.s = s + a.ctx = s.Context() a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} return nil } @@ -655,13 +656,13 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { - channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) + channelz.Infof(logger, cs.cc.channelz, "Server retry pushback specified to abort (%q).", sps[0]) cs.retryThrottler.throttle() // This counts as a failure for throttling. 
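Worth calling out from the convertRetryPolicy hunk above: the hardcoded cap of 5 retry attempts is gone, replaced by a maxAttempts argument that parseServiceConfig threads through (defaultMaxCallAttempts at the init hook). The effective value is simply the smaller of the JSON policy's maxAttempts and that ceiling; a minimal sketch of the equivalent logic:

    // The per-method JSON value can lower, but never raise, the
    // configured ceiling.
    func effectiveMaxAttempts(jsonAttempts, ceiling int) int {
        if jsonAttempts < ceiling {
            return jsonAttempts
        }
        return ceiling
    }
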
return false, err } hasPushback = true } else if len(sps) > 1 { - channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) + channelz.Warningf(logger, cs.cc.channelz, "Server retry pushback specified multiple values (%q); not retrying.", sps) cs.retryThrottler.throttle() // This counts as a failure for throttling. return false, err } @@ -698,7 +699,7 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { if max := float64(rp.MaxBackoff); cur > max { cur = max } - dur = time.Duration(grpcrand.Int63n(int64(cur))) + dur = time.Duration(rand.Int63n(int64(cur))) cs.numRetriesSincePushback++ } diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go new file mode 100644 index 00000000..8b813529 --- /dev/null +++ b/vendor/google.golang.org/grpc/stream_interfaces.go @@ -0,0 +1,152 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// ServerStreamingClient represents the client side of a server-streaming (one +// request, many responses) RPC. It is generic over the type of the response +// message. It is used in generated code. +type ServerStreamingClient[Res any] interface { + Recv() (*Res, error) + ClientStream +} + +// ServerStreamingServer represents the server side of a server-streaming (one +// request, many responses) RPC. It is generic over the type of the response +// message. It is used in generated code. +type ServerStreamingServer[Res any] interface { + Send(*Res) error + ServerStream +} + +// ClientStreamingClient represents the client side of a client-streaming (many +// requests, one response) RPC. It is generic over both the type of the request +// message stream and the type of the unary response message. It is used in +// generated code. +type ClientStreamingClient[Req any, Res any] interface { + Send(*Req) error + CloseAndRecv() (*Res, error) + ClientStream +} + +// ClientStreamingServer represents the server side of a client-streaming (many +// requests, one response) RPC. It is generic over both the type of the request +// message stream and the type of the unary response message. It is used in +// generated code. +type ClientStreamingServer[Req any, Res any] interface { + Recv() (*Req, error) + SendAndClose(*Res) error + ServerStream +} + +// BidiStreamingClient represents the client side of a bidirectional-streaming +// (many requests, many responses) RPC. It is generic over both the type of the +// request message stream and the type of the response message stream. It is +// used in generated code. +type BidiStreamingClient[Req any, Res any] interface { + Send(*Req) error + Recv() (*Res, error) + ClientStream +} + +// BidiStreamingServer represents the server side of a bidirectional-streaming +// (many requests, many responses) RPC. It is generic over both the type of the +// request message stream and the type of the response message stream. 
It is +// used in generated code. +type BidiStreamingServer[Req any, Res any] interface { + Recv() (*Req, error) + Send(*Res) error + ServerStream +} + +// GenericClientStream implements the ServerStreamingClient, ClientStreamingClient, +// and BidiStreamingClient interfaces. It is used in generated code. +type GenericClientStream[Req any, Res any] struct { + ClientStream +} + +var _ ServerStreamingClient[string] = (*GenericClientStream[int, string])(nil) +var _ ClientStreamingClient[int, string] = (*GenericClientStream[int, string])(nil) +var _ BidiStreamingClient[int, string] = (*GenericClientStream[int, string])(nil) + +// Send pushes one message into the stream of requests to be consumed by the +// server. The type of message which can be sent is determined by the Req type +// parameter of the GenericClientStream receiver. +func (x *GenericClientStream[Req, Res]) Send(m *Req) error { + return x.ClientStream.SendMsg(m) +} + +// Recv reads one message from the stream of responses generated by the server. +// The type of the message returned is determined by the Res type parameter +// of the GenericClientStream receiver. +func (x *GenericClientStream[Req, Res]) Recv() (*Res, error) { + m := new(Res) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// CloseAndRecv closes the sending side of the stream, then receives the unary +// response from the server. The type of message which it returns is determined +// by the Res type parameter of the GenericClientStream receiver. +func (x *GenericClientStream[Req, Res]) CloseAndRecv() (*Res, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(Res) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GenericServerStream implements the ServerStreamingServer, ClientStreamingServer, +// and BidiStreamingServer interfaces. It is used in generated code. +type GenericServerStream[Req any, Res any] struct { + ServerStream +} + +var _ ServerStreamingServer[string] = (*GenericServerStream[int, string])(nil) +var _ ClientStreamingServer[int, string] = (*GenericServerStream[int, string])(nil) +var _ BidiStreamingServer[int, string] = (*GenericServerStream[int, string])(nil) + +// Send pushes one message into the stream of responses to be consumed by the +// client. The type of message which can be sent is determined by the Res +// type parameter of the serverStreamServer receiver. +func (x *GenericServerStream[Req, Res]) Send(m *Res) error { + return x.ServerStream.SendMsg(m) +} + +// SendAndClose pushes the unary response to the client. The type of message +// which can be sent is determined by the Res type parameter of the +// clientStreamServer receiver. +func (x *GenericServerStream[Req, Res]) SendAndClose(m *Res) error { + return x.ServerStream.SendMsg(m) +} + +// Recv reads one message from the stream of requests generated by the client. +// The type of the message returned is determined by the Req type parameter +// of the clientStreamServer receiver. +func (x *GenericServerStream[Req, Res]) Recv() (*Req, error) { + m := new(Req) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index df85a021..bafaef99 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
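The new stream_interfaces.go above is what the generics-aware protoc-gen-go-grpc output builds on. A hedged sketch of the generated-code shape for a server-streaming call follows; greeterClient, HelloRequest, HelloReply, and the method path are placeholder names, not part of this diff:

    // Hypothetical generated client method. The returned value satisfies
    // grpc.ServerStreamingClient[HelloReply] via grpc.GenericClientStream.
    func (c *greeterClient) SayHelloStream(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HelloReply], error) {
        stream, err := c.cc.NewStream(ctx, &grpc.StreamDesc{StreamName: "SayHelloStream", ServerStreams: true}, "/demo.Greeter/SayHelloStream", opts...)
        if err != nil {
            return nil, err
        }
        x := &grpc.GenericClientStream[HelloRequest, HelloReply]{ClientStream: stream}
        if err := x.ClientStream.SendMsg(in); err != nil {
            return nil, err
        }
        if err := x.ClientStream.CloseSend(); err != nil {
            return nil, err
        }
        return x, nil
    }
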
-const Version = "1.62.0" +const Version = "1.65.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh deleted file mode 100644 index 7a33c215..00000000 --- a/vendor/google.golang.org/grpc/vet.sh +++ /dev/null @@ -1,190 +0,0 @@ -#!/bin/bash - -set -ex # Exit on error; debugging enabled. -set -o pipefail # Fail a pipe if any sub-command fails. - -# not makes sure the command passed to it does not exit with a return code of 0. -not() { - # This is required instead of the earlier (! $COMMAND) because subshells and - # pipefail don't work the same on Darwin as in Linux. - ! "$@" -} - -die() { - echo "$@" >&2 - exit 1 -} - -fail_on_output() { - tee /dev/stderr | not read -} - -# Check to make sure it's safe to modify the user's git repo. -git status --porcelain | fail_on_output - -# Undo any edits made by this script. -cleanup() { - git reset --hard HEAD -} -trap cleanup EXIT - -PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}" -go version - -if [[ "$1" = "-install" ]]; then - # Install the pinned versions as defined in module tools. - pushd ./test/tools - go install \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell - popd - if [[ -z "${VET_SKIP_PROTO}" ]]; then - if [[ "${GITHUB_ACTIONS}" = "true" ]]; then - PROTOBUF_VERSION=25.2 # a.k.a. v4.22.0 in pb.go files. - PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip - pushd /home/runner/go - wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} - unzip ${PROTOC_FILENAME} - bin/protoc --version - popd - elif not which protoc > /dev/null; then - die "Please install protoc into your path" - fi - fi - exit 0 -elif [[ "$#" -ne 0 ]]; then - die "Unknown argument(s): $*" -fi - -# - Check that generated proto files are up to date. -if [[ -z "${VET_SKIP_PROTO}" ]]; then - make proto && git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) -fi - -if [[ -n "${VET_ONLY_PROTO}" ]]; then - exit 0 -fi - -# - Ensure all source files contain a copyright message. -# (Done in two parts because Darwin "git grep" has broken support for compound -# exclusion matches.) -(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output - -# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. -not grep 'func Test[^(]' *_test.go -not grep 'func Test[^(]' test/*.go - -# - Check for typos in test function names -git grep 'func (s) ' -- "*_test.go" | not grep -v 'func (s) Test' -git grep 'func [A-Z]' -- "*_test.go" | not grep -v 'func Test\|Benchmark\|Example' - -# - Do not import x/net/context. -not git grep -l 'x/net/context' -- "*.go" - -# - Do not import math/rand for real library code. Use internal/grpcrand for -# thread safety. -git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test' - -# - Do not use "interface{}"; use "any" instead. -git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc\|grpc_testing_not_regenerate' - -# - Do not call grpclog directly. Use grpclog.Component instead. -git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' - -# - Ensure all ptypes proto packages are renamed when importing. 
-not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" - -# - Ensure all usages of grpc_testing package are renamed when importing. -not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" - -# - Ensure all xds proto imports are renamed to *pb or *grpc. -git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' - -misspell -error . - -# - gofmt, goimports, go vet, go mod tidy. -# Perform these checks on each module inside gRPC. -for MOD_FILE in $(find . -name 'go.mod'); do - MOD_DIR=$(dirname ${MOD_FILE}) - pushd ${MOD_DIR} - go vet -all ./... | fail_on_output - gofmt -s -d -l . 2>&1 | fail_on_output - goimports -l . 2>&1 | not grep -vE "\.pb\.go" - - go mod tidy -compat=1.19 - git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) - popd -done - -# - Collection of static analysis checks -SC_OUT="$(mktemp)" -staticcheck -go 1.19 -checks 'all' ./... > "${SC_OUT}" || true - -# Error for anything other than checks that need exclusions. -grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)" - -# Exclude underscore checks for generated code. -grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)\|\(grpc_testing_not_regenerate\)' - -# Error for duplicate imports not including grpc protos. -grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused -channelz/grpc_channelz_v1" -go-control-plane/envoy -grpclb/grpc_lb_v1" -health/grpc_health_v1" -interop/grpc_testing" -orca/v3" -proto/grpc_gcp" -proto/grpc_lookup_v1" -reflection/grpc_reflection_v1" -reflection/grpc_reflection_v1alpha" -XXXXX PleaseIgnoreUnused' - -# Error for any package comments not in generated code. -grep "(ST1000)" "${SC_OUT}" | not grep -v "\.pb\.go:" - -# Only ignore the following deprecated types/fields/functions and exclude -# generated code. -grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused -XXXXX Protobuf related deprecation errors: -"github.com/golang/protobuf -.pb.go: -grpc_testing_not_regenerate -: ptypes. -proto.RegisterType -XXXXX gRPC internal usage deprecation errors: -"google.golang.org/grpc -: grpc. -: v1alpha. -: v1alphareflectionpb. -BalancerAttributes is deprecated: -CredsBundle is deprecated: -Metadata is deprecated: use Attributes instead. -NewSubConn is deprecated: -OverrideServerName is deprecated: -RemoveSubConn is deprecated: -SecurityVersion is deprecated: -Target is deprecated: Use the Target field in the BuildOptions instead. 
-UpdateAddresses is deprecated: -UpdateSubConnState is deprecated: -balancer.ErrTransientFailure is deprecated: -grpc/reflection/v1alpha/reflection.proto -XXXXX xDS deprecated fields we support -.ExactMatch -.PrefixMatch -.SafeRegexMatch -.SuffixMatch -GetContainsMatch -GetExactMatch -GetMatchSubjectAltNames -GetPrefixMatch -GetSafeRegexMatch -GetSuffixMatch -GetTlsCertificateCertificateProviderInstance -GetValidationContextCertificateProviderInstance -XXXXX PleaseIgnoreUnused' - -echo SUCCESS diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index f4790237..bb2966e3 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -102,7 +102,7 @@ type decoder struct { } // newError returns an error object with position info. -func (d decoder) newError(pos int, f string, x ...interface{}) error { +func (d decoder) newError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("(line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -114,7 +114,7 @@ func (d decoder) unexpectedTokenError(tok json.Token) error { } // syntaxError returns a syntax error for given position. -func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { +func (d decoder) syntaxError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) return errors.New(head+f, x...) diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index 3f75098b..29846df2 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -25,15 +25,17 @@ const defaultIndent = " " // Format formats the message as a multiline string. // This function is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } // Marshal writes the given [proto.Message] in JSON format using default options. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func Marshal(m proto.Message) ([]byte, error) { return MarshalOptions{}.Marshal(m) } @@ -110,8 +112,9 @@ type MarshalOptions struct { // Format formats the message as a string. // This method is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. 
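The protojson comment rewording above is worth taking literally: output is unstable across builds by design (the package deliberately injects nondeterministic detail such as whitespace), so any consumer diffing serialized bytes is wrong. Compare messages, not bytes; a short illustration, assuming two arbitrary proto.Message values:

    // Wrong: golden-file or byte-level comparison of protojson output.
    //   b1, _ := protojson.Marshal(m1)
    //   b2, _ := protojson.Marshal(m2)
    //   equal := bytes.Equal(b1, b2) // may flip across rebuilds
    // Right: semantic comparison of the messages themselves.
    func equalMessages(m1, m2 proto.Message) bool {
        return proto.Equal(m1, m2)
    }
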
func (o MarshalOptions) Format(m proto.Message) string { if m == nil || !m.ProtoReflect().IsValid() { return "" // invalid syntax, but okay since this is for debugging @@ -122,8 +125,9 @@ func (o MarshalOptions) Format(m proto.Message) string { } // Marshal marshals the given [proto.Message] in the JSON format using options in -// MarshalOptions. Do not depend on the output being stable. It may change over -// time across different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { return o.marshal(nil, m) } diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index 25329b76..4b177c82 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -322,6 +322,10 @@ func (d decoder) skipJSONValue() error { if open > d.opts.RecursionLimit { return errors.New("exceeded max recursion depth") } + case json.EOF: + // This can only happen if there's a bug in Decoder.Read. + // Avoid an infinite loop if this does happen. + return errors.New("unexpected EOF") } if open == 0 { return nil diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index a45f112b..24bc98ac 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -84,7 +84,7 @@ type decoder struct { } // newError returns an error object with position info. -func (d decoder) newError(pos int, f string, x ...interface{}) error { +func (d decoder) newError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("(line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -96,7 +96,7 @@ func (d decoder) unexpectedTokenError(tok text.Token) error { } // syntaxError returns a syntax error for given position. -func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { +func (d decoder) syntaxError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) return errors.New(head+f, x...) diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 95967e81..1f57e661 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -27,15 +27,17 @@ const defaultIndent = " " // Format formats the message as a multiline string. // This function is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } // Marshal writes the given [proto.Message] in textproto format using default -// options. Do not depend on the output being stable. 
It may change over time -// across different versions of the program. +// options. Do not depend on the output being stable. Its output will change +// across different builds of your program, even when using the same version of +// the protobuf module. func Marshal(m proto.Message) ([]byte, error) { return MarshalOptions{}.Marshal(m) } @@ -84,8 +86,9 @@ type MarshalOptions struct { // Format formats the message as a string. // This method is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func (o MarshalOptions) Format(m proto.Message) string { if m == nil || !m.ProtoReflect().IsValid() { return "" // invalid syntax, but okay since this is for debugging @@ -98,8 +101,9 @@ func (o MarshalOptions) Format(m proto.Message) string { } // Marshal writes the given [proto.Message] in textproto format using options in -// MarshalOptions object. Do not depend on the output being stable. It may -// change over time across different versions of the program. +// MarshalOptions object. Do not depend on the output being stable. Its output +// will change across different builds of your program, even when using the +// same version of the protobuf module. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { return o.marshal(nil, m) } diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index a45625c8..87e46bd4 100644 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -252,6 +252,7 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record fu {rv.MethodByName("Values"), "Values"}, {rv.MethodByName("ReservedNames"), "ReservedNames"}, {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + {rv.MethodByName("IsClosed"), "IsClosed"}, }...) case protoreflect.EnumValueDescriptor: diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go b/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go new file mode 100644 index 00000000..14656b65 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go @@ -0,0 +1,12 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package editiondefaults contains the binary representation of the editions +// defaults. 
+package editiondefaults + +import _ "embed" + +//go:embed editions_defaults.binpb +var Defaults []byte diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb new file mode 100644 index 00000000..ff6a3836 Binary files /dev/null and b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go new file mode 100644 index 00000000..029a6a12 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go @@ -0,0 +1,13 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package editionssupport defines constants for editions that are supported. +package editionssupport + +import descriptorpb "google.golang.org/protobuf/types/descriptorpb" + +const ( + Minimum = descriptorpb.Edition_EDITION_PROTO2 + Maximum = descriptorpb.Edition_EDITION_2023 +) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go index d043a6eb..ea1d3e65 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -121,7 +121,7 @@ func (d *Decoder) Read() (Token, error) { case ObjectClose: if len(d.openStack) == 0 || - d.lastToken.kind == comma || + d.lastToken.kind&(Name|comma) != 0 || d.openStack[len(d.openStack)-1] != ObjectOpen { return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) } @@ -214,7 +214,7 @@ func (d *Decoder) parseNext() (Token, error) { // newSyntaxError returns an error with line and column information useful for // syntax errors. -func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error { +func (d *Decoder) newSyntaxError(pos int, f string, x ...any) error { e := errors.New(f, x...) 
line, column := d.Position(pos) return errors.New("syntax error (line %d:%d): %v", line, column, e) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 373d2083..7e87c760 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -32,6 +32,7 @@ var byteType = reflect.TypeOf(byte(0)) func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor { f := new(filedesc.Field) f.L0.ParentFile = filedesc.SurrogateProto2 + f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures for len(tag) > 0 { i := strings.IndexByte(tag, ',') if i < 0 { @@ -107,8 +108,7 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri f.L1.StringName.InitJSON(jsonName) } case s == "packed": - f.L1.HasPacked = true - f.L1.IsPacked = true + f.L1.EditionFeatures.IsPacked = true case strings.HasPrefix(s, "weak="): f.L1.IsWeak = true f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):])) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index 87853e78..099b2bf4 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -601,7 +601,7 @@ func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token { // newSyntaxError returns a syntax error with line and column information for // current position. -func (d *Decoder) newSyntaxError(f string, x ...interface{}) error { +func (d *Decoder) newSyntaxError(f string, x ...any) error { e := errors.New(f, x...) line, column := d.Position(len(d.orig) - len(d.in)) return errors.New("syntax error (line %d:%d): %v", line, column, e) diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go index 20c17b35..c2d6bd52 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/errors.go +++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go @@ -17,7 +17,7 @@ var Error = errors.New("protobuf error") // New formats a string according to the format specifier and arguments and // returns an error that has a "proto" prefix. -func New(f string, x ...interface{}) error { +func New(f string, x ...any) error { return &prefixError{s: format(f, x...)} } @@ -43,7 +43,7 @@ func (e *prefixError) Unwrap() error { // Wrap returns an error that has a "proto" prefix, the formatted string described // by the format specifier and arguments, and a suffix of err. The error wraps err. 
-func Wrap(err error, f string, x ...interface{}) error { +func Wrap(err error, f string, x ...any) error { return &wrapError{ s: format(f, x...), err: err, @@ -67,7 +67,7 @@ func (e *wrapError) Is(target error) bool { return target == Error } -func format(f string, x ...interface{}) string { +func format(f string, x ...any) string { // avoid "proto: " prefix when chaining for i := 0; i < len(x); i++ { switch e := x[i].(type) { @@ -87,3 +87,18 @@ func InvalidUTF8(name string) error { func RequiredNotSet(name string) error { return New("required field %v not set", name) } + +type SizeMismatchError struct { + Calculated, Measured int +} + +func (e *SizeMismatchError) Error() string { + return fmt.Sprintf("size mismatch (see https://github.com/golang/protobuf/issues/1609): calculated=%d, measured=%d", e.Calculated, e.Measured) +} + +func MismatchedSizeCalculation(calculated, measured int) error { + return &SizeMismatchError{ + Calculated: calculated, + Measured: measured, + } +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 193c68e8..df53ff40 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -7,6 +7,7 @@ package filedesc import ( "bytes" "fmt" + "strings" "sync" "sync/atomic" @@ -68,7 +69,7 @@ type ( Extensions Extensions Services Services - EditionFeatures FileEditionFeatures + EditionFeatures EditionFeatures } FileL2 struct { Options func() protoreflect.ProtoMessage @@ -76,10 +77,13 @@ type ( Locations SourceLocations } - FileEditionFeatures struct { + EditionFeatures struct { // IsFieldPresence is true if field_presence is EXPLICIT // https://protobuf.dev/editions/features/#field_presence IsFieldPresence bool + // IsFieldPresence is true if field_presence is LEGACY_REQUIRED + // https://protobuf.dev/editions/features/#field_presence + IsLegacyRequired bool // IsOpenEnum is true if enum_type is OPEN // https://protobuf.dev/editions/features/#enum_type IsOpenEnum bool @@ -95,6 +99,9 @@ type ( // IsJSONCompliant is true if json_format is ALLOW // https://protobuf.dev/editions/features/#json_format IsJSONCompliant bool + // GenerateLegacyUnmarshalJSON determines if the plugin generates the + // UnmarshalJSON([]byte) error method for enums. 
+ GenerateLegacyUnmarshalJSON bool } ) @@ -102,9 +109,12 @@ func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd } func (fd *File) Parent() protoreflect.Descriptor { return nil } func (fd *File) Index() int { return 0 } func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax } -func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } -func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } -func (fd *File) IsPlaceholder() bool { return false } + +// Not exported and just used to reconstruct the original FileDescriptor proto +func (fd *File) Edition() int32 { return int32(fd.L1.Edition) } +func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } +func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } +func (fd *File) IsPlaceholder() bool { return false } func (fd *File) Options() protoreflect.ProtoMessage { if f := fd.lazyInit().Options; f != nil { return f() @@ -156,6 +166,8 @@ type ( } EnumL1 struct { eagerValues bool // controls whether EnumL2.Values is already populated + + EditionFeatures EditionFeatures } EnumL2 struct { Options func() protoreflect.ProtoMessage @@ -194,6 +206,9 @@ func (ed *Enum) lazyInit() *EnumL2 { ed.L0.ParentFile.lazyInit() // implicitly initializes L2 return ed.L2 } +func (ed *Enum) IsClosed() bool { + return !ed.L1.EditionFeatures.IsOpenEnum +} func (ed *EnumValue) Options() protoreflect.ProtoMessage { if f := ed.L1.Options; f != nil { @@ -217,6 +232,8 @@ type ( Extensions Extensions IsMapEntry bool // promoted from google.protobuf.MessageOptions IsMessageSet bool // promoted from google.protobuf.MessageOptions + + EditionFeatures EditionFeatures } MessageL2 struct { Options func() protoreflect.ProtoMessage @@ -241,17 +258,12 @@ type ( StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions - HasPacked bool // promoted from google.protobuf.FieldOptions - IsPacked bool // promoted from google.protobuf.FieldOptions - HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions - EnforceUTF8 bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields Enum protoreflect.EnumDescriptor Message protoreflect.MessageDescriptor - // Edition features. 
- Presence bool + EditionFeatures EditionFeatures } Oneof struct { @@ -261,6 +273,8 @@ type ( OneofL1 struct { Options func() protoreflect.ProtoMessage Fields OneofFields // must be consistent with Message.Fields.ContainingOneof + + EditionFeatures EditionFeatures } ) @@ -310,28 +324,30 @@ func (fd *Field) Options() protoreflect.ProtoMessage { } func (fd *Field) Number() protoreflect.FieldNumber { return fd.L1.Number } func (fd *Field) Cardinality() protoreflect.Cardinality { return fd.L1.Cardinality } -func (fd *Field) Kind() protoreflect.Kind { return fd.L1.Kind } -func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } -func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } -func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } +func (fd *Field) Kind() protoreflect.Kind { + return fd.L1.Kind +} +func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } +func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } +func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } func (fd *Field) HasPresence() bool { - if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { - return fd.L1.Presence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil + if fd.L1.Cardinality == protoreflect.Repeated { + return false } - return fd.L1.Cardinality != protoreflect.Repeated && (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) + return fd.IsExtension() || fd.L1.EditionFeatures.IsFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil } func (fd *Field) HasOptionalKeyword() bool { return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional } func (fd *Field) IsPacked() bool { - if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Repeated { - switch fd.L1.Kind { - case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: - default: - return true - } + if fd.L1.Cardinality != protoreflect.Repeated { + return false } - return fd.L1.IsPacked + switch fd.L1.Kind { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + return fd.L1.EditionFeatures.IsPacked } func (fd *Field) IsExtension() bool { return false } func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } @@ -367,6 +383,10 @@ func (fd *Field) Message() protoreflect.MessageDescriptor { } return fd.L1.Message } +func (fd *Field) IsMapEntry() bool { + parent, ok := fd.L0.Parent.(protoreflect.MessageDescriptor) + return ok && parent.IsMapEntry() +} func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} @@ -378,10 +398,7 @@ func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} // WARNING: This method is exempt from the compatibility promise and may be // removed in the future without warning. 
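The Field hunks above fold the old promoted booleans (HasPacked/IsPacked, HasEnforceUTF8/EnforceUTF8, Presence) into the single resolved EditionFeatures value, and IsPacked becomes a pure function of cardinality, kind, and features. Restated compactly as a sketch, not code from the diff:

    // Only repeated, non-length-delimited fields can be packed; whether
    // they actually are now comes from the resolved edition feature set.
    func isPacked(card protoreflect.Cardinality, kind protoreflect.Kind, features EditionFeatures) bool {
        if card != protoreflect.Repeated {
            return false
        }
        switch kind {
        case protoreflect.StringKind, protoreflect.BytesKind,
            protoreflect.MessageKind, protoreflect.GroupKind:
            return false
        }
        return features.IsPacked
    }
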
func (fd *Field) EnforceUTF8() bool { - if fd.L1.HasEnforceUTF8 { - return fd.L1.EnforceUTF8 - } - return fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 + return fd.L1.EditionFeatures.IsUTF8Validated } func (od *Oneof) IsSynthetic() bool { @@ -404,16 +421,16 @@ type ( L2 *ExtensionL2 // protected by fileDesc.once } ExtensionL1 struct { - Number protoreflect.FieldNumber - Extendee protoreflect.MessageDescriptor - Cardinality protoreflect.Cardinality - Kind protoreflect.Kind + Number protoreflect.FieldNumber + Extendee protoreflect.MessageDescriptor + Cardinality protoreflect.Cardinality + Kind protoreflect.Kind + EditionFeatures EditionFeatures } ExtensionL2 struct { Options func() protoreflect.ProtoMessage StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto - IsPacked bool // promoted from google.protobuf.FieldOptions Default defaultValue Enum protoreflect.EnumDescriptor Message protoreflect.MessageDescriptor @@ -436,7 +453,16 @@ func (xd *Extension) HasPresence() bool { return xd.L1.Cardi func (xd *Extension) HasOptionalKeyword() bool { return (xd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && xd.L1.Cardinality == protoreflect.Optional) || xd.lazyInit().IsProto3Optional } -func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked } +func (xd *Extension) IsPacked() bool { + if xd.L1.Cardinality != protoreflect.Repeated { + return false + } + switch xd.L1.Kind { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + return xd.L1.EditionFeatures.IsPacked +} func (xd *Extension) IsExtension() bool { return true } func (xd *Extension) IsWeak() bool { return false } func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } @@ -517,8 +543,9 @@ func (md *Method) ProtoInternal(pragma.DoNotImplement) {} // Surrogate files are can be used to create standalone descriptors // where the syntax is only information derived from the parent file. var ( - SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}} - SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}} + SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}} + SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}} + SurrogateEdition2023 = &File{L1: FileL1{Syntax: protoreflect.Editions, Edition: Edition2023}, L2: &FileL2{}} ) type ( @@ -560,6 +587,34 @@ func (s *stringName) InitJSON(name string) { s.nameJSON = name } +// Returns true if this field is structured like the synthetic field of a proto2 +// group. This allows us to expand our treatment of delimited fields without +// breaking proto2 files that have been upgraded to editions. +func isGroupLike(fd protoreflect.FieldDescriptor) bool { + // Groups are always group types. + if fd.Kind() != protoreflect.GroupKind { + return false + } + + // Group fields are always the lowercase type name. + if strings.ToLower(string(fd.Message().Name())) != string(fd.Name()) { + return false + } + + // Groups could only be defined in the same file they're used. + if fd.Message().ParentFile() != fd.ParentFile() { + return false + } + + // Group messages are always defined in the same scope as the field. File + // level extensions will compare NULL == NULL here, which is why the file + // comparison above is necessary to ensure both come from the same file. 
+ if fd.IsExtension() { + return fd.Parent() == fd.Message().Parent() + } + return fd.ContainingMessage() == fd.Message().Parent() +} + func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName { s.once.Do(func() { if fd.IsExtension() { @@ -580,7 +635,7 @@ func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName { // Format the text name. s.nameText = string(fd.Name()) - if fd.Kind() == protoreflect.GroupKind { + if isGroupLike(fd) { s.nameText = string(fd.Message().Name()) } } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 4a1584c9..8a57d60b 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -5,6 +5,7 @@ package filedesc import ( + "fmt" "sync" "google.golang.org/protobuf/encoding/protowire" @@ -98,6 +99,7 @@ func (fd *File) unmarshalSeed(b []byte) { var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions, numServices int var posEnums, posMessages, posExtensions, posServices int + var options []byte b0 := b for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -111,8 +113,12 @@ func (fd *File) unmarshalSeed(b []byte) { switch string(v) { case "proto2": fd.L1.Syntax = protoreflect.Proto2 + fd.L1.Edition = EditionProto2 case "proto3": fd.L1.Syntax = protoreflect.Proto3 + fd.L1.Edition = EditionProto3 + case "editions": + fd.L1.Syntax = protoreflect.Editions default: panic("invalid syntax") } @@ -120,6 +126,8 @@ func (fd *File) unmarshalSeed(b []byte) { fd.L1.Path = sb.MakeString(v) case genid.FileDescriptorProto_Package_field_number: fd.L1.Package = protoreflect.FullName(sb.MakeString(v)) + case genid.FileDescriptorProto_Options_field_number: + options = v case genid.FileDescriptorProto_EnumType_field_number: if prevField != genid.FileDescriptorProto_EnumType_field_number { if numEnums > 0 { @@ -154,6 +162,13 @@ func (fd *File) unmarshalSeed(b []byte) { numServices++ } prevField = num + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FileDescriptorProto_Edition_field_number: + fd.L1.Edition = Edition(v) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] @@ -164,6 +179,14 @@ func (fd *File) unmarshalSeed(b []byte) { // If syntax is missing, it is assumed to be proto2. 
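Stepping back from the desc_init.go hunks around here: file feature resolution now happens in three layers, sketched below using the diff's own identifiers (the wrapper function itself is illustrative):

    // 1. syntax/edition tags select an Edition (proto2 and proto3 map to
    //    EditionProto2/EditionProto3); 2. getFeaturesFor loads the embedded
    //    edition defaults; 3. FileOptions.features overrides individual bits.
    func resolveFileFeatures(ed Edition, rawFeatures []byte) EditionFeatures {
        fs := getFeaturesFor(ed)
        if rawFeatures != nil {
            fs = unmarshalFeatureSet(rawFeatures, fs)
        }
        return fs
    }
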
if fd.L1.Syntax == 0 { fd.L1.Syntax = protoreflect.Proto2 + fd.L1.Edition = EditionProto2 + } + + fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition) + + // Parse editions features from options if any + if options != nil { + fd.unmarshalSeedOptions(options) } // Must allocate all declarations before parsing each descriptor type @@ -219,10 +242,33 @@ func (fd *File) unmarshalSeed(b []byte) { } } +func (fd *File) unmarshalSeedOptions(b []byte) { + for b := b; len(b) > 0; { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FileOptions_Features_field_number: + if fd.Syntax() != protoreflect.Editions { + panic(fmt.Sprintf("invalid descriptor: using edition features in a proto with syntax %s", fd.Syntax())) + } + fd.L1.EditionFeatures = unmarshalFeatureSet(v, fd.L1.EditionFeatures) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { ed.L0.ParentFile = pf ed.L0.Parent = pd ed.L0.Index = i + ed.L1.EditionFeatures = featuresFromParentDesc(ed.Parent()) var numValues int for b := b; len(b) > 0; { @@ -275,6 +321,7 @@ func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor md.L0.ParentFile = pf md.L0.Parent = pd md.L0.Index = i + md.L1.EditionFeatures = featuresFromParentDesc(md.Parent()) var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions int @@ -380,6 +427,13 @@ func (md *Message) unmarshalSeedOptions(b []byte) { case genid.MessageOptions_MessageSetWireFormat_field_number: md.L1.IsMessageSet = protowire.DecodeBool(v) } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.MessageOptions_Features_field_number: + md.L1.EditionFeatures = unmarshalFeatureSet(v, md.L1.EditionFeatures) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] @@ -391,6 +445,7 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot xd.L0.ParentFile = pf xd.L0.Parent = pd xd.L0.Index = i + xd.L1.EditionFeatures = featuresFromParentDesc(pd) for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -415,6 +470,38 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot xd.L0.FullName = appendFullName(sb, pd.FullName(), v) case genid.FieldDescriptorProto_Extendee_field_number: xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v)) + case genid.FieldDescriptorProto_Options_field_number: + xd.unmarshalOptions(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + + if xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded { + xd.L1.Kind = protoreflect.GroupKind + } +} + +func (xd *Extension) unmarshalOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldOptions_Packed_field_number: + xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldOptions_Features_field_number: + xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures) } default: m := protowire.ConsumeFieldValue(num, typ, b) @@ -447,7 +534,7 @@ func (sd *Service) unmarshalSeed(b []byte, sb 
*strs.Builder, pf *File, pd protor } var nameBuilderPool = sync.Pool{ - New: func() interface{} { return new(strs.Builder) }, + New: func() any { return new(strs.Builder) }, } func getBuilder() *strs.Builder { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 736a19a7..e56c91a8 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -45,6 +45,11 @@ func (file *File) resolveMessages() { case protoreflect.MessageKind, protoreflect.GroupKind: fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx) depIdx++ + if fd.L1.Kind == protoreflect.GroupKind && (fd.IsMap() || fd.IsMapEntry()) { + // A map field might inherit delimited encoding from a file-wide default feature. + // But maps never actually use delimited encoding. (At least for now...) + fd.L1.Kind = protoreflect.MessageKind + } } // Default is resolved here since it depends on Enum being resolved. @@ -414,6 +419,7 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref fd.L0.ParentFile = pf fd.L0.Parent = pd fd.L0.Index = i + fd.L1.EditionFeatures = featuresFromParentDesc(fd.Parent()) var rawTypeName []byte var rawOptions []byte @@ -465,6 +471,12 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref b = b[m:] } } + if fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded { + fd.L1.Kind = protoreflect.GroupKind + } + if fd.L1.EditionFeatures.IsLegacyRequired { + fd.L1.Cardinality = protoreflect.Required + } if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch fd.L1.Kind { @@ -489,13 +501,18 @@ func (fd *Field) unmarshalOptions(b []byte) { b = b[m:] switch num { case genid.FieldOptions_Packed_field_number: - fd.L1.HasPacked = true - fd.L1.IsPacked = protowire.DecodeBool(v) + fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) case genid.FieldOptions_Weak_field_number: fd.L1.IsWeak = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: - fd.L1.HasEnforceUTF8 = true - fd.L1.EnforceUTF8 = protowire.DecodeBool(v) + fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldOptions_Features_field_number: + fd.L1.EditionFeatures = unmarshalFeatureSet(v, fd.L1.EditionFeatures) } default: m := protowire.ConsumeFieldValue(num, typ, b) @@ -557,7 +574,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v case genid.FieldDescriptorProto_Options_field_number: - xd.unmarshalOptions(v) rawOptions = appendOptions(rawOptions, v) } default: @@ -577,25 +593,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions) } -func (xd *Extension) unmarshalOptions(b []byte) { - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.VarintType: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - switch num { - case genid.FieldOptions_Packed_field_number: - xd.L2.IsPacked = protowire.DecodeBool(v) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } -} - func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) { var rawMethods [][]byte var rawOptions []byte diff 
--git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go index 30db19fd..f4107c05 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go @@ -8,6 +8,7 @@ package filedesc import ( "fmt" + "strings" "sync" "google.golang.org/protobuf/internal/descfmt" @@ -198,6 +199,16 @@ func (p *Fields) lazyInit() *Fields { if _, ok := p.byText[d.TextName()]; !ok { p.byText[d.TextName()] = d } + if isGroupLike(d) { + lowerJSONName := strings.ToLower(d.JSONName()) + if _, ok := p.byJSON[lowerJSONName]; !ok { + p.byJSON[lowerJSONName] = d + } + lowerTextName := strings.ToLower(d.TextName()) + if _, ok := p.byText[lowerTextName]; !ok { + p.byText[lowerTextName] = d + } + } if _, ok := p.byNum[d.Number()]; !ok { p.byNum[d.Number()] = d } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go new file mode 100644 index 00000000..11f5f356 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -0,0 +1,156 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "fmt" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/editiondefaults" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/reflect/protoreflect" +) + +var defaultsCache = make(map[Edition]EditionFeatures) +var defaultsKeys = []Edition{} + +func init() { + unmarshalEditionDefaults(editiondefaults.Defaults) + SurrogateProto2.L1.EditionFeatures = getFeaturesFor(EditionProto2) + SurrogateProto3.L1.EditionFeatures = getFeaturesFor(EditionProto3) + SurrogateEdition2023.L1.EditionFeatures = getFeaturesFor(Edition2023) +} + +func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { + for len(b) > 0 { + num, _, n := protowire.ConsumeTag(b) + b = b[n:] + switch num { + case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v) + default: + panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num)) + } + } + return parent +} + +func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FeatureSet_FieldPresence_field_number: + parent.IsFieldPresence = v == genid.FeatureSet_EXPLICIT_enum_value || v == genid.FeatureSet_LEGACY_REQUIRED_enum_value + parent.IsLegacyRequired = v == genid.FeatureSet_LEGACY_REQUIRED_enum_value + case genid.FeatureSet_EnumType_field_number: + parent.IsOpenEnum = v == genid.FeatureSet_OPEN_enum_value + case genid.FeatureSet_RepeatedFieldEncoding_field_number: + parent.IsPacked = v == genid.FeatureSet_PACKED_enum_value + case genid.FeatureSet_Utf8Validation_field_number: + parent.IsUTF8Validated = v == genid.FeatureSet_VERIFY_enum_value + case genid.FeatureSet_MessageEncoding_field_number: + parent.IsDelimitedEncoded = v == genid.FeatureSet_DELIMITED_enum_value + case genid.FeatureSet_JsonFormat_field_number: + parent.IsJSONCompliant = v == genid.FeatureSet_ALLOW_enum_value + 
default: + panic(fmt.Sprintf("unknown field number %d while unmarshalling FeatureSet", num)) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + parent = unmarshalGoFeature(v, parent) + } + } + } + + return parent +} + +func featuresFromParentDesc(parentDesc protoreflect.Descriptor) EditionFeatures { + var parentFS EditionFeatures + switch p := parentDesc.(type) { + case *File: + parentFS = p.L1.EditionFeatures + case *Message: + parentFS = p.L1.EditionFeatures + default: + panic(fmt.Sprintf("unknown parent type %T", parentDesc)) + } + return parentFS +} + +func unmarshalEditionDefault(b []byte) { + var ed Edition + var fs EditionFeatures + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number: + ed = Edition(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number: + fs = unmarshalFeatureSet(v, fs) + case genid.FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number: + fs = unmarshalFeatureSet(v, fs) + } + } + } + defaultsCache[ed] = fs + defaultsKeys = append(defaultsKeys, ed) +} + +func unmarshalEditionDefaults(b []byte) { + for len(b) > 0 { + num, _, n := protowire.ConsumeTag(b) + b = b[n:] + switch num { + case genid.FeatureSetDefaults_Defaults_field_number: + def, m := protowire.ConsumeBytes(b) + b = b[m:] + unmarshalEditionDefault(def) + case genid.FeatureSetDefaults_MinimumEdition_field_number, + genid.FeatureSetDefaults_MaximumEdition_field_number: + // We don't care about the minimum and maximum editions. If the + // edition we are looking for later on is not in the cache we know + // it is outside of the range between minimum and maximum edition.
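The editions.go functions above decode the embedded FeatureSetDefaults blob by hand with the public protowire package instead of depending on generated descriptor code. A self-contained sketch of the same consume-tag/consume-value loop; parseFlags and its field numbers are made up for illustration:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// parseFlags walks a wire-format buffer the same way unmarshalFeatureSet
// does: read a tag, then consume the value according to its wire type.
// Field 1 is assumed to be a bool varint, field 2 a nested bytes payload.
func parseFlags(b []byte) (flag bool, nested []byte, err error) {
	for len(b) > 0 {
		num, typ, n := protowire.ConsumeTag(b)
		if n < 0 {
			return false, nil, protowire.ParseError(n)
		}
		b = b[n:]
		switch typ {
		case protowire.VarintType:
			v, m := protowire.ConsumeVarint(b)
			if m < 0 {
				return false, nil, protowire.ParseError(m)
			}
			b = b[m:]
			if num == 1 {
				flag = protowire.DecodeBool(v)
			}
		case protowire.BytesType:
			v, m := protowire.ConsumeBytes(b)
			if m < 0 {
				return false, nil, protowire.ParseError(m)
			}
			b = b[m:]
			if num == 2 {
				nested = v
			}
		default:
			return false, nil, fmt.Errorf("unexpected wire type %v", typ)
		}
	}
	return flag, nested, nil
}

func main() {
	var b []byte
	b = protowire.AppendTag(b, 1, protowire.VarintType)
	b = protowire.AppendVarint(b, protowire.EncodeBool(true))
	fmt.Println(parseFlags(b)) // true [] <nil>
}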
+ _, m := protowire.ConsumeVarint(b) + b = b[m:] + default: + panic(fmt.Sprintf("unknown field number %d while unmarshalling EditionDefault", num)) + } + } +} + +func getFeaturesFor(ed Edition) EditionFeatures { + match := EditionUnknown + for _, key := range defaultsKeys { + if key > ed { + break + } + match = key + } + if match == EditionUnknown { + panic(fmt.Sprintf("unsupported edition: %v", ed)) + } + return defaultsCache[match] +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go index 28240ebc..bfb3b841 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go @@ -63,6 +63,7 @@ func (e PlaceholderEnum) Options() protoreflect.ProtoMessage { return des func (e PlaceholderEnum) Values() protoreflect.EnumValueDescriptors { return emptyEnumValues } func (e PlaceholderEnum) ReservedNames() protoreflect.Names { return emptyNames } func (e PlaceholderEnum) ReservedRanges() protoreflect.EnumRanges { return emptyEnumRanges } +func (e PlaceholderEnum) IsClosed() bool { return false } func (e PlaceholderEnum) ProtoType(protoreflect.EnumDescriptor) { return } func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go index f0e38c4e..ba83fea4 100644 --- a/vendor/google.golang.org/protobuf/internal/filetype/build.go +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -68,7 +68,7 @@ type Builder struct { // and for input and output messages referenced by service methods. // Dependencies must come after declarations, but the ordering of // dependencies themselves is unspecified. - GoTypes []interface{} + GoTypes []any // DependencyIndexes is an ordered list of indexes into GoTypes for the // dependencies of messages, extensions, or services. @@ -268,7 +268,7 @@ func (x depIdxs) Get(i, j int32) int32 { type ( resolverByIndex struct { - goTypes []interface{} + goTypes []any depIdxs depIdxs fileRegistry } diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 8f94230e..f30ab6b5 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -18,6 +18,22 @@ const ( Edition_enum_name = "Edition" ) +// Enum values for google.protobuf.Edition. +const ( + Edition_EDITION_UNKNOWN_enum_value = 0 + Edition_EDITION_LEGACY_enum_value = 900 + Edition_EDITION_PROTO2_enum_value = 998 + Edition_EDITION_PROTO3_enum_value = 999 + Edition_EDITION_2023_enum_value = 1000 + Edition_EDITION_2024_enum_value = 1001 + Edition_EDITION_1_TEST_ONLY_enum_value = 1 + Edition_EDITION_2_TEST_ONLY_enum_value = 2 + Edition_EDITION_99997_TEST_ONLY_enum_value = 99997 + Edition_EDITION_99998_TEST_ONLY_enum_value = 99998 + Edition_EDITION_99999_TEST_ONLY_enum_value = 99999 + Edition_EDITION_MAX_enum_value = 2147483647 +) + // Names for google.protobuf.FileDescriptorSet. const ( FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" @@ -213,6 +229,12 @@ const ( ExtensionRangeOptions_VerificationState_enum_name = "VerificationState" ) +// Enum values for google.protobuf.ExtensionRangeOptions.VerificationState.
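getFeaturesFor above is a floor lookup: defaultsKeys is kept in ascending edition order, and the newest edition that does not exceed the requested one wins; anything below the smallest key panics as unsupported. A small sketch of the same rule over plain ints (floorDefault is a hypothetical name):

package main

import "fmt"

// floorDefault returns the value attached to the largest key <= want,
// mirroring getFeaturesFor: keys must be sorted ascending, and a miss
// below the smallest key means the edition is unsupported.
func floorDefault(keys []int, values map[int]string, want int) (string, bool) {
	match, found := 0, false
	for _, k := range keys {
		if k > want {
			break
		}
		match, found = k, true
	}
	if !found {
		return "", false // outside the supported minimum/maximum range
	}
	return values[match], true
}

func main() {
	keys := []int{998, 999, 1000} // proto2, proto3, 2023
	vals := map[int]string{998: "proto2 defaults", 999: "proto3 defaults", 1000: "2023 defaults"}
	fmt.Println(floorDefault(keys, vals, 1001)) // a later edition falls back to the newest known defaults
	fmt.Println(floorDefault(keys, vals, 900))  // below the minimum: not found
}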
+const ( + ExtensionRangeOptions_DECLARATION_enum_value = 0 + ExtensionRangeOptions_UNVERIFIED_enum_value = 1 +) + // Names for google.protobuf.ExtensionRangeOptions.Declaration. const ( ExtensionRangeOptions_Declaration_message_name protoreflect.Name = "Declaration" @@ -297,12 +319,41 @@ const ( FieldDescriptorProto_Type_enum_name = "Type" ) +// Enum values for google.protobuf.FieldDescriptorProto.Type. +const ( + FieldDescriptorProto_TYPE_DOUBLE_enum_value = 1 + FieldDescriptorProto_TYPE_FLOAT_enum_value = 2 + FieldDescriptorProto_TYPE_INT64_enum_value = 3 + FieldDescriptorProto_TYPE_UINT64_enum_value = 4 + FieldDescriptorProto_TYPE_INT32_enum_value = 5 + FieldDescriptorProto_TYPE_FIXED64_enum_value = 6 + FieldDescriptorProto_TYPE_FIXED32_enum_value = 7 + FieldDescriptorProto_TYPE_BOOL_enum_value = 8 + FieldDescriptorProto_TYPE_STRING_enum_value = 9 + FieldDescriptorProto_TYPE_GROUP_enum_value = 10 + FieldDescriptorProto_TYPE_MESSAGE_enum_value = 11 + FieldDescriptorProto_TYPE_BYTES_enum_value = 12 + FieldDescriptorProto_TYPE_UINT32_enum_value = 13 + FieldDescriptorProto_TYPE_ENUM_enum_value = 14 + FieldDescriptorProto_TYPE_SFIXED32_enum_value = 15 + FieldDescriptorProto_TYPE_SFIXED64_enum_value = 16 + FieldDescriptorProto_TYPE_SINT32_enum_value = 17 + FieldDescriptorProto_TYPE_SINT64_enum_value = 18 +) + // Full and short names for google.protobuf.FieldDescriptorProto.Label. const ( FieldDescriptorProto_Label_enum_fullname = "google.protobuf.FieldDescriptorProto.Label" FieldDescriptorProto_Label_enum_name = "Label" ) +// Enum values for google.protobuf.FieldDescriptorProto.Label. +const ( + FieldDescriptorProto_LABEL_OPTIONAL_enum_value = 1 + FieldDescriptorProto_LABEL_REPEATED_enum_value = 3 + FieldDescriptorProto_LABEL_REQUIRED_enum_value = 2 +) + // Names for google.protobuf.OneofDescriptorProto. 
const ( OneofDescriptorProto_message_name protoreflect.Name = "OneofDescriptorProto" @@ -474,7 +525,6 @@ const ( FileOptions_CcGenericServices_field_name protoreflect.Name = "cc_generic_services" FileOptions_JavaGenericServices_field_name protoreflect.Name = "java_generic_services" FileOptions_PyGenericServices_field_name protoreflect.Name = "py_generic_services" - FileOptions_PhpGenericServices_field_name protoreflect.Name = "php_generic_services" FileOptions_Deprecated_field_name protoreflect.Name = "deprecated" FileOptions_CcEnableArenas_field_name protoreflect.Name = "cc_enable_arenas" FileOptions_ObjcClassPrefix_field_name protoreflect.Name = "objc_class_prefix" @@ -497,7 +547,6 @@ const ( FileOptions_CcGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_generic_services" FileOptions_JavaGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generic_services" FileOptions_PyGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.py_generic_services" - FileOptions_PhpGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_generic_services" FileOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.deprecated" FileOptions_CcEnableArenas_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_enable_arenas" FileOptions_ObjcClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.objc_class_prefix" @@ -523,7 +572,6 @@ const ( FileOptions_CcGenericServices_field_number protoreflect.FieldNumber = 16 FileOptions_JavaGenericServices_field_number protoreflect.FieldNumber = 17 FileOptions_PyGenericServices_field_number protoreflect.FieldNumber = 18 - FileOptions_PhpGenericServices_field_number protoreflect.FieldNumber = 42 FileOptions_Deprecated_field_number protoreflect.FieldNumber = 23 FileOptions_CcEnableArenas_field_number protoreflect.FieldNumber = 31 FileOptions_ObjcClassPrefix_field_number protoreflect.FieldNumber = 36 @@ -543,6 +591,13 @@ const ( FileOptions_OptimizeMode_enum_name = "OptimizeMode" ) +// Enum values for google.protobuf.FileOptions.OptimizeMode. +const ( + FileOptions_SPEED_enum_value = 1 + FileOptions_CODE_SIZE_enum_value = 2 + FileOptions_LITE_RUNTIME_enum_value = 3 +) + // Names for google.protobuf.MessageOptions. 
const ( MessageOptions_message_name protoreflect.Name = "MessageOptions" @@ -599,6 +654,7 @@ const ( FieldOptions_Targets_field_name protoreflect.Name = "targets" FieldOptions_EditionDefaults_field_name protoreflect.Name = "edition_defaults" FieldOptions_Features_field_name protoreflect.Name = "features" + FieldOptions_FeatureSupport_field_name protoreflect.Name = "feature_support" FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" @@ -613,6 +669,7 @@ const ( FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets" FieldOptions_EditionDefaults_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults" FieldOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.features" + FieldOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.feature_support" FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" ) @@ -630,6 +687,7 @@ const ( FieldOptions_Targets_field_number protoreflect.FieldNumber = 19 FieldOptions_EditionDefaults_field_number protoreflect.FieldNumber = 20 FieldOptions_Features_field_number protoreflect.FieldNumber = 21 + FieldOptions_FeatureSupport_field_number protoreflect.FieldNumber = 22 FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -639,24 +697,59 @@ const ( FieldOptions_CType_enum_name = "CType" ) +// Enum values for google.protobuf.FieldOptions.CType. +const ( + FieldOptions_STRING_enum_value = 0 + FieldOptions_CORD_enum_value = 1 + FieldOptions_STRING_PIECE_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.JSType. const ( FieldOptions_JSType_enum_fullname = "google.protobuf.FieldOptions.JSType" FieldOptions_JSType_enum_name = "JSType" ) +// Enum values for google.protobuf.FieldOptions.JSType. +const ( + FieldOptions_JS_NORMAL_enum_value = 0 + FieldOptions_JS_STRING_enum_value = 1 + FieldOptions_JS_NUMBER_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.OptionRetention. const ( FieldOptions_OptionRetention_enum_fullname = "google.protobuf.FieldOptions.OptionRetention" FieldOptions_OptionRetention_enum_name = "OptionRetention" ) +// Enum values for google.protobuf.FieldOptions.OptionRetention. +const ( + FieldOptions_RETENTION_UNKNOWN_enum_value = 0 + FieldOptions_RETENTION_RUNTIME_enum_value = 1 + FieldOptions_RETENTION_SOURCE_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.OptionTargetType. const ( FieldOptions_OptionTargetType_enum_fullname = "google.protobuf.FieldOptions.OptionTargetType" FieldOptions_OptionTargetType_enum_name = "OptionTargetType" ) +// Enum values for google.protobuf.FieldOptions.OptionTargetType. +const ( + FieldOptions_TARGET_TYPE_UNKNOWN_enum_value = 0 + FieldOptions_TARGET_TYPE_FILE_enum_value = 1 + FieldOptions_TARGET_TYPE_EXTENSION_RANGE_enum_value = 2 + FieldOptions_TARGET_TYPE_MESSAGE_enum_value = 3 + FieldOptions_TARGET_TYPE_FIELD_enum_value = 4 + FieldOptions_TARGET_TYPE_ONEOF_enum_value = 5 + FieldOptions_TARGET_TYPE_ENUM_enum_value = 6 + FieldOptions_TARGET_TYPE_ENUM_ENTRY_enum_value = 7 + FieldOptions_TARGET_TYPE_SERVICE_enum_value = 8 + FieldOptions_TARGET_TYPE_METHOD_enum_value = 9 +) + // Names for google.protobuf.FieldOptions.EditionDefault. 
const ( FieldOptions_EditionDefault_message_name protoreflect.Name = "EditionDefault" @@ -678,6 +771,33 @@ const ( FieldOptions_EditionDefault_Value_field_number protoreflect.FieldNumber = 2 ) +// Names for google.protobuf.FieldOptions.FeatureSupport. +const ( + FieldOptions_FeatureSupport_message_name protoreflect.Name = "FeatureSupport" + FieldOptions_FeatureSupport_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport" +) + +// Field names for google.protobuf.FieldOptions.FeatureSupport. +const ( + FieldOptions_FeatureSupport_EditionIntroduced_field_name protoreflect.Name = "edition_introduced" + FieldOptions_FeatureSupport_EditionDeprecated_field_name protoreflect.Name = "edition_deprecated" + FieldOptions_FeatureSupport_DeprecationWarning_field_name protoreflect.Name = "deprecation_warning" + FieldOptions_FeatureSupport_EditionRemoved_field_name protoreflect.Name = "edition_removed" + + FieldOptions_FeatureSupport_EditionIntroduced_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_introduced" + FieldOptions_FeatureSupport_EditionDeprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_deprecated" + FieldOptions_FeatureSupport_DeprecationWarning_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.deprecation_warning" + FieldOptions_FeatureSupport_EditionRemoved_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_removed" +) + +// Field numbers for google.protobuf.FieldOptions.FeatureSupport. +const ( + FieldOptions_FeatureSupport_EditionIntroduced_field_number protoreflect.FieldNumber = 1 + FieldOptions_FeatureSupport_EditionDeprecated_field_number protoreflect.FieldNumber = 2 + FieldOptions_FeatureSupport_DeprecationWarning_field_number protoreflect.FieldNumber = 3 + FieldOptions_FeatureSupport_EditionRemoved_field_number protoreflect.FieldNumber = 4 +) + // Names for google.protobuf.OneofOptions. 
const ( OneofOptions_message_name protoreflect.Name = "OneofOptions" @@ -740,11 +860,13 @@ const ( EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" EnumValueOptions_Features_field_name protoreflect.Name = "features" EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" + EnumValueOptions_FeatureSupport_field_name protoreflect.Name = "feature_support" EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features" EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact" + EnumValueOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.feature_support" EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" ) @@ -753,6 +875,7 @@ const ( EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2 EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3 + EnumValueOptions_FeatureSupport_field_number protoreflect.FieldNumber = 4 EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -813,6 +936,13 @@ const ( MethodOptions_IdempotencyLevel_enum_name = "IdempotencyLevel" ) +// Enum values for google.protobuf.MethodOptions.IdempotencyLevel. +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN_enum_value = 0 + MethodOptions_NO_SIDE_EFFECTS_enum_value = 1 + MethodOptions_IDEMPOTENT_enum_value = 2 +) + // Names for google.protobuf.UninterpretedOption. const ( UninterpretedOption_message_name protoreflect.Name = "UninterpretedOption" @@ -909,36 +1039,79 @@ const ( FeatureSet_FieldPresence_enum_name = "FieldPresence" ) +// Enum values for google.protobuf.FeatureSet.FieldPresence. +const ( + FeatureSet_FIELD_PRESENCE_UNKNOWN_enum_value = 0 + FeatureSet_EXPLICIT_enum_value = 1 + FeatureSet_IMPLICIT_enum_value = 2 + FeatureSet_LEGACY_REQUIRED_enum_value = 3 +) + // Full and short names for google.protobuf.FeatureSet.EnumType. const ( FeatureSet_EnumType_enum_fullname = "google.protobuf.FeatureSet.EnumType" FeatureSet_EnumType_enum_name = "EnumType" ) +// Enum values for google.protobuf.FeatureSet.EnumType. +const ( + FeatureSet_ENUM_TYPE_UNKNOWN_enum_value = 0 + FeatureSet_OPEN_enum_value = 1 + FeatureSet_CLOSED_enum_value = 2 +) + // Full and short names for google.protobuf.FeatureSet.RepeatedFieldEncoding. const ( FeatureSet_RepeatedFieldEncoding_enum_fullname = "google.protobuf.FeatureSet.RepeatedFieldEncoding" FeatureSet_RepeatedFieldEncoding_enum_name = "RepeatedFieldEncoding" ) +// Enum values for google.protobuf.FeatureSet.RepeatedFieldEncoding. +const ( + FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN_enum_value = 0 + FeatureSet_PACKED_enum_value = 1 + FeatureSet_EXPANDED_enum_value = 2 +) + // Full and short names for google.protobuf.FeatureSet.Utf8Validation. const ( FeatureSet_Utf8Validation_enum_fullname = "google.protobuf.FeatureSet.Utf8Validation" FeatureSet_Utf8Validation_enum_name = "Utf8Validation" ) +// Enum values for google.protobuf.FeatureSet.Utf8Validation. 
+const ( + FeatureSet_UTF8_VALIDATION_UNKNOWN_enum_value = 0 + FeatureSet_VERIFY_enum_value = 2 + FeatureSet_NONE_enum_value = 3 +) + // Full and short names for google.protobuf.FeatureSet.MessageEncoding. const ( FeatureSet_MessageEncoding_enum_fullname = "google.protobuf.FeatureSet.MessageEncoding" FeatureSet_MessageEncoding_enum_name = "MessageEncoding" ) +// Enum values for google.protobuf.FeatureSet.MessageEncoding. +const ( + FeatureSet_MESSAGE_ENCODING_UNKNOWN_enum_value = 0 + FeatureSet_LENGTH_PREFIXED_enum_value = 1 + FeatureSet_DELIMITED_enum_value = 2 +) + // Full and short names for google.protobuf.FeatureSet.JsonFormat. const ( FeatureSet_JsonFormat_enum_fullname = "google.protobuf.FeatureSet.JsonFormat" FeatureSet_JsonFormat_enum_name = "JsonFormat" ) +// Enum values for google.protobuf.FeatureSet.JsonFormat. +const ( + FeatureSet_JSON_FORMAT_UNKNOWN_enum_value = 0 + FeatureSet_ALLOW_enum_value = 1 + FeatureSet_LEGACY_BEST_EFFORT_enum_value = 2 +) + // Names for google.protobuf.FeatureSetDefaults. const ( FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults" @@ -971,17 +1144,20 @@ const ( // Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. const ( - FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition" - FeatureSetDefaults_FeatureSetEditionDefault_Features_field_name protoreflect.Name = "features" + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition" + FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_name protoreflect.Name = "overridable_features" + FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_name protoreflect.Name = "fixed_features" - FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition" - FeatureSetDefaults_FeatureSetEditionDefault_Features_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features" + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition" + FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features" + FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features" ) // Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. const ( - FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3 - FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number protoreflect.FieldNumber = 2 + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3 + FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number protoreflect.FieldNumber = 4 + FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number protoreflect.FieldNumber = 5 ) // Names for google.protobuf.SourceCodeInfo. @@ -1085,3 +1261,10 @@ const ( GeneratedCodeInfo_Annotation_Semantic_enum_fullname = "google.protobuf.GeneratedCodeInfo.Annotation.Semantic" GeneratedCodeInfo_Annotation_Semantic_enum_name = "Semantic" ) + +// Enum values for google.protobuf.GeneratedCodeInfo.Annotation.Semantic. 
+const ( + GeneratedCodeInfo_Annotation_NONE_enum_value = 0 + GeneratedCodeInfo_Annotation_SET_enum_value = 1 + GeneratedCodeInfo_Annotation_ALIAS_enum_value = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go new file mode 100644 index 00000000..9a652a2b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto" + +// Names for google.protobuf.GoFeatures. +const ( + GoFeatures_message_name protoreflect.Name = "GoFeatures" + GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures" +) + +// Field names for google.protobuf.GoFeatures. +const ( + GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" + + GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum" +) + +// Field numbers for google.protobuf.GoFeatures. +const ( + GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go index 1a38944b..ad6f80c4 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go @@ -18,6 +18,11 @@ const ( NullValue_enum_name = "NullValue" ) +// Enum values for google.protobuf.NullValue. +const ( + NullValue_NULL_VALUE_enum_value = 0 +) + // Names for google.protobuf.Struct. const ( Struct_message_name protoreflect.Name = "Struct" diff --git a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go index e0f75fea..49bc73e2 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go @@ -18,6 +18,13 @@ const ( Syntax_enum_name = "Syntax" ) +// Enum values for google.protobuf.Syntax. +const ( + Syntax_SYNTAX_PROTO2_enum_value = 0 + Syntax_SYNTAX_PROTO3_enum_value = 1 + Syntax_SYNTAX_EDITIONS_enum_value = 2 +) + // Names for google.protobuf.Type. const ( Type_message_name protoreflect.Name = "Type" @@ -105,12 +112,43 @@ const ( Field_Kind_enum_name = "Kind" ) +// Enum values for google.protobuf.Field.Kind. +const ( + Field_TYPE_UNKNOWN_enum_value = 0 + Field_TYPE_DOUBLE_enum_value = 1 + Field_TYPE_FLOAT_enum_value = 2 + Field_TYPE_INT64_enum_value = 3 + Field_TYPE_UINT64_enum_value = 4 + Field_TYPE_INT32_enum_value = 5 + Field_TYPE_FIXED64_enum_value = 6 + Field_TYPE_FIXED32_enum_value = 7 + Field_TYPE_BOOL_enum_value = 8 + Field_TYPE_STRING_enum_value = 9 + Field_TYPE_GROUP_enum_value = 10 + Field_TYPE_MESSAGE_enum_value = 11 + Field_TYPE_BYTES_enum_value = 12 + Field_TYPE_UINT32_enum_value = 13 + Field_TYPE_ENUM_enum_value = 14 + Field_TYPE_SFIXED32_enum_value = 15 + Field_TYPE_SFIXED64_enum_value = 16 + Field_TYPE_SINT32_enum_value = 17 + Field_TYPE_SINT64_enum_value = 18 +) + // Full and short names for google.protobuf.Field.Cardinality. 
const ( Field_Cardinality_enum_fullname = "google.protobuf.Field.Cardinality" Field_Cardinality_enum_name = "Cardinality" ) +// Enum values for google.protobuf.Field.Cardinality. +const ( + Field_CARDINALITY_UNKNOWN_enum_value = 0 + Field_CARDINALITY_OPTIONAL_enum_value = 1 + Field_CARDINALITY_REQUIRED_enum_value = 2 + Field_CARDINALITY_REPEATED_enum_value = 3 +) + // Names for google.protobuf.Enum. const ( Enum_message_name protoreflect.Name = "Enum" diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go index a371f98d..5d5771c2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -22,13 +22,13 @@ type Export struct{} // NewError formats a string according to the format specifier and arguments and // returns an error that has a "proto" prefix. -func (Export) NewError(f string, x ...interface{}) error { +func (Export) NewError(f string, x ...any) error { return errors.New(f, x...) } // enum is any enum type generated by protoc-gen-go // and must be a named int32 type. -type enum = interface{} +type enum = any // EnumOf returns the protoreflect.Enum interface over e. // It returns nil if e is nil. @@ -81,7 +81,7 @@ func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNu // message is any message type generated by protoc-gen-go // and must be a pointer to a named struct type. -type message = interface{} +type message = any // legacyMessageWrapper wraps a v2 message as a v1 message. type legacyMessageWrapper struct{ m protoreflect.ProtoMessage } diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go index bff041ed..f29e6a8f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go +++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -68,7 +68,7 @@ func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error { } for _, x := range *ext { ei := getExtensionFieldInfo(x.Type()) - if ei.funcs.isInit == nil { + if ei.funcs.isInit == nil || x.isUnexpandedLazy() { continue } v := x.Value() diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index e74cefdc..4bb0a7a2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -21,26 +21,18 @@ type extensionFieldInfo struct { validation validationInfo } -var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo - func getExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { if xi, ok := xt.(*ExtensionInfo); ok { xi.lazyInit() return xi.info } - return legacyLoadExtensionFieldInfo(xt) -} - -// legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt. -func legacyLoadExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { - if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok { - return xi.(*extensionFieldInfo) - } - e := makeExtensionFieldInfo(xt.TypeDescriptor()) - if e, ok := legacyMessageTypeCache.LoadOrStore(xt, e); ok { - return e.(*extensionFieldInfo) - } - return e + // Ideally we'd cache the resulting *extensionFieldInfo so we don't have to + // recompute this metadata repeatedly. 
But without support for something like + weak references, such a cache would pin temporary values (like dynamic + extension types, constructed for the duration of a user request) to the + heap forever, causing memory usage of the cache to grow unbounded. + See discussion in https://github.com/golang/protobuf/issues/1521. + return makeExtensionFieldInfo(xt.TypeDescriptor()) } func makeExtensionFieldInfo(xd protoreflect.ExtensionDescriptor) *extensionFieldInfo { @@ -107,6 +99,28 @@ func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool { return false } +// isUnexpandedLazy returns true if the ExtensionField is lazy and not +// yet expanded, which means it's present and already checked for +// initialized required fields. +func (f *ExtensionField) isUnexpandedLazy() bool { + return f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 +} + +// lazyBuffer retrieves the buffer for a lazy extension if it's not yet expanded. +// +// The returned buffer has to be kept over whatever operation we're planning, +// as re-retrieving it will fail after the message is lazily decoded. +func (f *ExtensionField) lazyBuffer() []byte { + // This function might be in the critical path, so check the atomic without + // taking a lock first, then only take the lock if needed. + if !f.isUnexpandedLazy() { + return nil + } + f.lazy.mu.Lock() + defer f.lazy.mu.Unlock() + return f.lazy.b +} + func (f *ExtensionField) lazyInit() { f.lazy.mu.Lock() defer f.lazy.mu.Unlock() diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 3fadd241..78ee47e4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -233,9 +233,15 @@ func sizeMessageInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { } func appendMessageInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + calculatedSize := f.mi.sizePointer(p.Elem(), opts) b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(f.mi.sizePointer(p.Elem(), opts))) - return f.mi.marshalAppendPointer(b, p.Elem(), opts) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) + b, err := f.mi.marshalAppendPointer(b, p.Elem(), opts) + if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } + return b, err } func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { @@ -262,14 +268,21 @@ func isInitMessageInfo(p pointer, f *coderFieldInfo) error { return f.mi.checkInitializedPointer(p.Elem()) } -func sizeMessage(m proto.Message, tagsize int, _ marshalOptions) int { - return protowire.SizeBytes(proto.Size(m)) + tagsize +func sizeMessage(m proto.Message, tagsize int, opts marshalOptions) int { + return protowire.SizeBytes(opts.Options().Size(m)) + tagsize } func appendMessage(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { + mopts := opts.Options() + calculatedSize := mopts.Size(m) b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendVarint(b, uint64(proto.Size(m))) - return opts.Options().MarshalAppend(b, m) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) + b, err := mopts.MarshalAppend(b, m) + if measuredSize := len(b) - before;
calculatedSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } + return b, err } func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { @@ -405,8 +418,8 @@ func consumeGroupType(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf return f.mi.unmarshalPointer(b, p.Elem(), f.num, opts) } -func sizeGroup(m proto.Message, tagsize int, _ marshalOptions) int { - return 2*tagsize + proto.Size(m) +func sizeGroup(m proto.Message, tagsize int, opts marshalOptions) int { + return 2*tagsize + opts.Options().Size(m) } func appendGroup(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { @@ -482,10 +495,14 @@ func appendMessageSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshal b = protowire.AppendVarint(b, f.wiretag) siz := f.mi.sizePointer(v, opts) b = protowire.AppendVarint(b, uint64(siz)) + before := len(b) b, err = f.mi.marshalAppendPointer(b, v, opts) if err != nil { return b, err } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } } return b, nil } @@ -520,28 +537,34 @@ func isInitMessageSliceInfo(p pointer, f *coderFieldInfo) error { return nil } -func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, _ marshalOptions) int { +func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, opts marshalOptions) int { + mopts := opts.Options() s := p.PointerSlice() n := 0 for _, v := range s { m := asMessage(v.AsValueOf(goType.Elem())) - n += protowire.SizeBytes(proto.Size(m)) + tagsize + n += protowire.SizeBytes(mopts.Size(m)) + tagsize } return n } func appendMessageSlice(b []byte, p pointer, wiretag uint64, goType reflect.Type, opts marshalOptions) ([]byte, error) { + mopts := opts.Options() s := p.PointerSlice() var err error for _, v := range s { m := asMessage(v.AsValueOf(goType.Elem())) b = protowire.AppendVarint(b, wiretag) - siz := proto.Size(m) + siz := mopts.Size(m) b = protowire.AppendVarint(b, uint64(siz)) - b, err = opts.Options().MarshalAppend(b, m) + before := len(b) + b, err = mopts.MarshalAppend(b, m) if err != nil { return b, err } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } } return b, nil } @@ -582,11 +605,12 @@ func isInitMessageSlice(p pointer, goType reflect.Type) error { // Slices of messages func sizeMessageSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { + mopts := opts.Options() list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() - n += protowire.SizeBytes(proto.Size(m)) + tagsize + n += protowire.SizeBytes(mopts.Size(m)) + tagsize } return n } @@ -597,13 +621,17 @@ func appendMessageSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() b = protowire.AppendVarint(b, wiretag) - siz := proto.Size(m) + siz := mopts.Size(m) b = protowire.AppendVarint(b, uint64(siz)) + before := len(b) var err error b, err = mopts.MarshalAppend(b, m) if err != nil { return b, err } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } } return b, nil } @@ -651,11 +679,12 @@ var coderMessageSliceValue = valueCoderFuncs{ } func sizeGroupSliceValue(listv protoreflect.Value, tagsize 
int, opts marshalOptions) int { + mopts := opts.Options() list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() - n += 2*tagsize + proto.Size(m) + n += 2*tagsize + mopts.Size(m) } return n } @@ -738,12 +767,13 @@ func makeGroupSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) } } -func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, _ marshalOptions) int { +func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, opts marshalOptions) int { + mopts := opts.Options() s := p.PointerSlice() n := 0 for _, v := range s { m := asMessage(v.AsValueOf(messageType.Elem())) - n += 2*tagsize + proto.Size(m) + n += 2*tagsize + mopts.Size(m) } return n } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index 111b9d16..fb35f0ba 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -9,6 +9,7 @@ import ( "sort" "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" ) @@ -240,11 +241,16 @@ func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coder size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) size += mapi.valFuncs.size(val, mapValTagSize, opts) b = protowire.AppendVarint(b, uint64(size)) + before := len(b) b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts) if err != nil { return nil, err } - return mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts) + b, err = mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts) + if measuredSize := len(b) - before; size != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(size, measuredSize) + } + return b, err } else { key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey() val := pointerOfValue(valrv) @@ -259,7 +265,12 @@ func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coder } b = protowire.AppendVarint(b, mapi.valWiretag) b = protowire.AppendVarint(b, uint64(valSize)) - return f.mi.marshalAppendPointer(b, val, opts) + before := len(b) + b, err = f.mi.marshalAppendPointer(b, val, opts) + if measuredSize := len(b) - before; valSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(valSize, measuredSize) + } + return b, err } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go index b7a23faf..7a16ec13 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go @@ -26,6 +26,15 @@ func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) } num, _ := protowire.DecodeTag(xi.wiretag) size += messageset.SizeField(num) + if fullyLazyExtensions(opts) { + // Don't expand the extension, instead use the buffer to calculate size + if lb := x.lazyBuffer(); lb != nil { + // We got hold of the buffer, so it's still lazy. + // Don't count the tag size in the extension buffer, it's already added. 
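The codec_field.go and codec_map.go hunks above all repeat one guard: compute the length prefix with Size under the same options as the marshal, write it, marshal, then compare the bytes actually written against the prediction and fail with a mismatched-size error rather than emit a corrupt length-delimited record. A sketch of the same guard over the public proto/protowire API; appendLengthPrefixed is invented, and the vendored code uses an internal errors.MismatchedSizeCalculation helper rather than fmt.Errorf:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

// appendLengthPrefixed writes tag, length, and body for a sub-message,
// then cross-checks that the marshaled size matches the size used for
// the length prefix (it can drift if the message mutates concurrently).
func appendLengthPrefixed(b []byte, num protowire.Number, m proto.Message, opts proto.MarshalOptions) ([]byte, error) {
	calculated := opts.Size(m)
	b = protowire.AppendTag(b, num, protowire.BytesType)
	b = protowire.AppendVarint(b, uint64(calculated))
	before := len(b)
	b, err := opts.MarshalAppend(b, m)
	if err != nil {
		return nil, err
	}
	if measured := len(b) - before; measured != calculated {
		return nil, fmt.Errorf("size mismatch: calculated %d, measured %d", calculated, measured)
	}
	return b, nil
}

func main() {
	b, err := appendLengthPrefixed(nil, 1, durationpb.New(0), proto.MarshalOptions{})
	fmt.Println(len(b), err) // 2 <nil>: tag byte plus a zero length prefix
}

Passing opts through to Size (instead of the old bare proto.Size) matters for the same reason: the size used for the prefix must be computed under exactly the options the marshal will use.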
+ size += protowire.SizeTag(messageset.FieldMessage) + len(lb) - xi.tagsize + continue + } + } size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts) } @@ -85,6 +94,19 @@ func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts ma xi := getExtensionFieldInfo(x.Type()) num, _ := protowire.DecodeTag(xi.wiretag) b = messageset.AppendFieldStart(b, num) + + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + // The tag inside the lazy buffer is a different tag (the extension + // number), but what we need here is the tag for FieldMessage: + b = protowire.AppendVarint(b, protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType)) + b = append(b, lb[xi.tagsize:]...) + b = messageset.AppendFieldEnd(b) + return b, nil + } + } + b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts) if err != nil { return b, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go index 576dcf3a..13077751 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go @@ -197,7 +197,7 @@ func fieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, return getMessageInfo(ft), makeMessageFieldCoder(fd, ft) case fd.Kind() == protoreflect.GroupKind: return getMessageInfo(ft), makeGroupFieldCoder(fd, ft) - case fd.Syntax() == protoreflect.Proto3 && fd.ContainingOneof() == nil: + case !fd.HasPresence() && fd.ContainingOneof() == nil: // Populated oneof fields always encode even if set to the zero value, // which normally are not encoded in proto3. switch fd.Kind() { diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index 185ef2ef..e06ece55 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -14,7 +14,7 @@ import ( // unwrapper unwraps the value to the underlying value. // This is implemented by List and Map. type unwrapper interface { - protoUnwrap() interface{} + protoUnwrap() any } // A Converter converts to/from Go reflect.Value types and protobuf protoreflect.Value types.
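In marshalMessageSetField above, the lazily retained buffer starts with the extension's own tag, but a message-set item frames the payload under the item's `message` field, so the leading tag is swapped and the size accounting subtracts the old tag size. A tiny sketch of that reframing; reframe is a hypothetical helper, and the message-set `message` field is number 3:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// reframe swaps the leading tag of a length-delimited record from
// oldNum to newNum, the way the message-set marshal path reuses a lazy
// extension buffer under the item's `message` field.
func reframe(lazy []byte, oldNum, newNum protowire.Number) []byte {
	oldTagSize := protowire.SizeTag(oldNum)
	out := protowire.AppendTag(nil, newNum, protowire.BytesType)
	return append(out, lazy[oldTagSize:]...) // keep length prefix and body as-is
}

func main() {
	// A lazy buffer for extension field 1000: tag, length, two body bytes.
	lazy := protowire.AppendTag(nil, 1000, protowire.BytesType)
	lazy = protowire.AppendVarint(lazy, 2)
	lazy = append(lazy, 0x08, 0x01)

	out := reframe(lazy, 1000, 3) // reframed under field 3 ("message")
	num, typ, _ := protowire.ConsumeTag(out)
	fmt.Println(num, typ, len(lazy), len(out)) // 3 2 5 4: the new tag is shorter
}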
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go index f8913651..18cb96fd 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go @@ -136,6 +136,6 @@ func (ls *listReflect) NewElement() protoreflect.Value { func (ls *listReflect) IsValid() bool { return !ls.v.IsNil() } -func (ls *listReflect) protoUnwrap() interface{} { +func (ls *listReflect) protoUnwrap() any { return ls.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go index f30b0a05..304244a6 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -116,6 +116,6 @@ func (ms *mapReflect) NewValue() protoreflect.Value { func (ms *mapReflect) IsValid() bool { return !ms.v.IsNil() } -func (ms *mapReflect) protoUnwrap() interface{} { +func (ms *mapReflect) protoUnwrap() any { return ms.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index 845c67d6..febd2122 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -49,8 +49,11 @@ func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) { return 0 } if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() { - if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 { - return int(size) + // The size cache contains the size + 1, to allow the + // zero value to be invalid, while also allowing for a + // 0 size to be cached. + if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size > 0 { + return int(size - 1) } } return mi.sizePointerSlow(p, opts) @@ -60,7 +63,7 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int if flags.ProtoLegacy && mi.isMessageSet { size = sizeMessageSet(mi, p, opts) if mi.sizecacheOffset.IsValid() { - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) } return size } @@ -84,13 +87,16 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int } } if mi.sizecacheOffset.IsValid() { - if size > math.MaxInt32 { + if size > (math.MaxInt32 - 1) { // The size is too large for the int32 sizecache field. // We will need to recompute the size when encoding; // unfortunately expensive, but better than invalid output. - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1) + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), 0) } else { - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + // The size cache contains the size + 1, to allow the + // zero value to be invalid, while also allowing for a + // 0 size to be cached. + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) } } return size @@ -149,6 +155,14 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt return b, nil } +// fullyLazyExtensions returns true if we should attempt to keep extensions lazy over size and marshal. 
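The encode.go hunk above biases the sizecache by one: a stored 0 now means "nothing cached", and size+1 means the size is cached, so an empty message's size of 0 becomes cacheable and the old -1 sentinel goes away. A tiny sketch of the scheme with the same atomics (sizeCache is an invented stand-in):

package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

// sizeCache stores size+1 so that the zero value of the field means
// "not cached" while a cached size of 0 is still representable.
type sizeCache struct{ v int32 }

func (c *sizeCache) store(size int) {
	if size > math.MaxInt32-1 { // size+1 must still fit in an int32
		atomic.StoreInt32(&c.v, 0) // too large: leave uncached, recompute at encode time
		return
	}
	atomic.StoreInt32(&c.v, int32(size+1))
}

func (c *sizeCache) load() (size int, ok bool) {
	if v := atomic.LoadInt32(&c.v); v > 0 {
		return int(v - 1), true
	}
	return 0, false
}

func main() {
	var c sizeCache
	fmt.Println(c.load()) // 0 false: nothing cached yet
	c.store(0)
	fmt.Println(c.load()) // 0 true: an empty message's size is now cacheable
}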
+func fullyLazyExtensions(opts marshalOptions) bool { + // When deterministic marshaling is requested, force an unmarshal for lazy + // extensions to produce a deterministic result, instead of passing through + // bytes lazily that may or may not match what Go Protobuf would produce. + return opts.flags&piface.MarshalDeterministic == 0 +} + func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) { if ext == nil { return 0 @@ -158,6 +172,14 @@ func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marsha if xi.funcs.size == nil { continue } + if fullyLazyExtensions(opts) { + // Don't expand the extension, instead use the buffer to calculate size + if lb := x.lazyBuffer(); lb != nil { + // We got hold of the buffer, so it's still lazy. + n += len(lb) + continue + } + } n += xi.funcs.size(x.Value(), xi.tagsize, opts) } return n @@ -176,6 +198,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, var err error for _, x := range *ext { xi := getExtensionFieldInfo(x.Type()) + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + b = append(b, lb...) + continue + } + } b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) } return b, err @@ -191,6 +220,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, for _, k := range keys { x := (*ext)[int32(k)] xi := getExtensionFieldInfo(x.Type()) + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + b = append(b, lb...) + continue + } + } b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) if err != nil { return b, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go index cb25b0ba..e31249f6 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go @@ -53,7 +53,7 @@ type ExtensionInfo struct { // type returned by InterfaceOf may not be identical. // // Deprecated: Use InterfaceOf(xt.Zero()) instead. - ExtensionType interface{} + ExtensionType any // Field is the field number of the extension. 
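fullyLazyExtensions above only permits the wire-bytes passthrough when deterministic output was not requested, since lazily retained bytes may be ordered differently from what Go protobuf would itself emit. Seen from the public API, requesting deterministic output is what forces lazy extensions to be expanded and re-encoded; a small sketch of the two marshal modes (the structpb message here is just a stand-in carrier):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	m, _ := structpb.NewStruct(map[string]any{"b": 2, "a": 1})

	// Default marshal: output ordering is unspecified, which is what
	// allows cached wire bytes to be passed through untouched.
	fast, _ := proto.Marshal(m)

	// Deterministic marshal: stable ordering is required, so cached wire
	// bytes cannot simply be copied and values are re-encoded canonically.
	canonical, _ := proto.MarshalOptions{Deterministic: true}.Marshal(m)

	fmt.Println(len(fast), len(canonical)) // same size, possibly different byte order
}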
// @@ -95,16 +95,16 @@ func (xi *ExtensionInfo) New() protoreflect.Value { func (xi *ExtensionInfo) Zero() protoreflect.Value { return xi.lazyInit().Zero() } -func (xi *ExtensionInfo) ValueOf(v interface{}) protoreflect.Value { +func (xi *ExtensionInfo) ValueOf(v any) protoreflect.Value { return xi.lazyInit().PBValueOf(reflect.ValueOf(v)) } -func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) interface{} { +func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) any { return xi.lazyInit().GoValueOf(v).Interface() } func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool { return xi.lazyInit().IsValidPB(v) } -func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool { +func (xi *ExtensionInfo) IsValidInterface(v any) bool { return xi.lazyInit().IsValidGo(reflect.ValueOf(v)) } func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go index c2a803bb..81b2b1a7 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go @@ -97,7 +97,7 @@ func (e *legacyEnumWrapper) Number() protoreflect.EnumNumber { func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum { return e } -func (e *legacyEnumWrapper) protoUnwrap() interface{} { +func (e *legacyEnumWrapper) protoUnwrap() any { v := reflect.New(e.goTyp).Elem() v.SetInt(int64(e.num)) return v.Interface() @@ -167,6 +167,7 @@ func aberrantLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor { ed := &filedesc.Enum{L2: new(filedesc.EnumL2)} ed.L0.FullName = AberrantDeriveFullName(t) // e.g., github_com.user.repo.MyEnum ed.L0.ParentFile = filedesc.SurrogateProto3 + ed.L1.EditionFeatures = ed.L0.ParentFile.L1.EditionFeatures ed.L2.Values.List = append(ed.L2.Values.List, filedesc.EnumValue{}) // TODO: Use the presence of a UnmarshalJSON method to determine proto2? 
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 87b30d05..6e8677ee 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -118,7 +118,7 @@ func (xi *ExtensionInfo) initFromLegacy() { xd.L1.Number = protoreflect.FieldNumber(xi.Field) xd.L1.Cardinality = fd.L1.Cardinality xd.L1.Kind = fd.L1.Kind - xd.L2.IsPacked = fd.L1.IsPacked + xd.L1.EditionFeatures = fd.L1.EditionFeatures xd.L2.Default = fd.L1.Default xd.L1.Extendee = Export{}.MessageDescriptorOf(xi.ExtendedType) xd.L2.Enum = ed diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go index 9ab09108..b649f112 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go @@ -7,7 +7,7 @@ package impl import ( "bytes" "compress/gzip" - "io/ioutil" + "io" "sync" "google.golang.org/protobuf/internal/filedesc" @@ -51,7 +51,7 @@ func legacyLoadFileDesc(b []byte) protoreflect.FileDescriptor { if err != nil { panic(err) } - b2, err := ioutil.ReadAll(zr) + b2, err := io.ReadAll(zr) if err != nil { panic(err) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 2ab2c629..bf0b6049 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -204,6 +204,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName } } + md.L1.EditionFeatures = md.L0.ParentFile.L1.EditionFeatures // Obtain a list of oneof wrapper types. 
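A pattern across the legacy_*.go hunks above: descriptors synthesized from pre-editions generated code now seed L1.EditionFeatures from their parent (surrogate file, to message, to oneof/field), so every level carries a fully resolved feature set. A sketch of that inheritance shape with invented types:

package main

import "fmt"

// features is a stand-in for the resolved EditionFeatures set.
type features struct {
	fieldPresence bool
	packed        bool
}

type file struct{ feats features }
type message struct{ feats features }
type field struct{ feats features }

// newMessage starts from the enclosing file's resolved features, the
// way the aberrant loaders copy ParentFile features in.
func newMessage(f *file) *message { return &message{feats: f.feats} }

// newField likewise inherits from its message before any per-field
// override (such as an explicit packed option) is applied.
func newField(m *message, packedOverride *bool) *field {
	fd := &field{feats: m.feats}
	if packedOverride != nil {
		fd.feats.packed = *packedOverride
	}
	return fd
}

func main() {
	proto3 := &file{feats: features{fieldPresence: false, packed: true}}
	md := newMessage(proto3)
	fd := newField(md, nil)
	fmt.Printf("%+v\n", fd.feats) // inherited unchanged: {fieldPresence:false packed:true}
}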
var oneofWrappers []reflect.Type methods := make([]reflect.Method, 0, 2) @@ -215,7 +216,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName } for _, fn := range methods { for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { + if vs, ok := v.Interface().([]any); ok { for _, v := range vs { oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) } @@ -250,6 +251,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName od := &md.L2.Oneofs.List[n] od.L0.FullName = md.FullName().Append(protoreflect.Name(tag)) od.L0.ParentFile = md.L0.ParentFile + od.L1.EditionFeatures = md.L1.EditionFeatures od.L0.Parent = md od.L0.Index = n @@ -260,6 +262,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName aberrantAppendField(md, f.Type, tag, "", "") fd := &md.L2.Fields.List[len(md.L2.Fields.List)-1] fd.L1.ContainingOneof = od + fd.L1.EditionFeatures = od.L1.EditionFeatures od.L1.Fields.List = append(od.L1.Fields.List, fd) } } @@ -307,14 +310,14 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, fd.L0.Parent = md fd.L0.Index = n - if fd.L1.IsWeak || fd.L1.HasPacked { + if fd.L1.IsWeak || fd.L1.EditionFeatures.IsPacked { fd.L1.Options = func() protoreflect.ProtoMessage { opts := descopts.Field.ProtoReflect().New() if fd.L1.IsWeak { opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) } - if fd.L1.HasPacked { - opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.IsPacked)) + if fd.L1.EditionFeatures.IsPacked { + opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.EditionFeatures.IsPacked)) } return opts.Interface() } @@ -344,6 +347,7 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, md2.L0.ParentFile = md.L0.ParentFile md2.L0.Parent = md md2.L0.Index = n + md2.L1.EditionFeatures = md.L1.EditionFeatures md2.L1.IsMapEntry = true md2.L2.Options = func() protoreflect.ProtoMessage { @@ -563,6 +567,6 @@ func (m aberrantMessage) IsValid() bool { func (m aberrantMessage) ProtoMethods() *protoiface.Methods { return aberrantProtoMethods } -func (m aberrantMessage) protoUnwrap() interface{} { +func (m aberrantMessage) protoUnwrap() any { return m.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 629bacdc..019399d4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -35,7 +35,7 @@ type MessageInfo struct { Exporter exporter // OneofWrappers is list of pointers to oneof wrapper struct types. - OneofWrappers []interface{} + OneofWrappers []any initMu sync.Mutex // protects all unexported fields initDone uint32 @@ -47,7 +47,7 @@ type MessageInfo struct { // exporter is a function that returns a reference to the ith field of v, // where v is a pointer to a struct. It returns nil if it does not support // exporting the requested field (e.g., already exported). -type exporter func(v interface{}, i int) interface{} +type exporter func(v any, i int) any // getMessageInfo returns the MessageInfo for any message type that // is generated by our implementation of protoc-gen-go (for v2 and on). 
@@ -201,7 +201,7 @@ fieldLoop: } for _, fn := range methods { for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { + if vs, ok := v.Interface().([]any); ok { oneofWrappers = vs } } @@ -256,7 +256,7 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { type mapEntryType struct { desc protoreflect.MessageDescriptor - valType interface{} // zero value of enum or message type + valType any // zero value of enum or message type } func (mt mapEntryType) New() protoreflect.Message { diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index d9ea010b..ecb4623d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -20,7 +20,7 @@ type reflectMessageInfo struct { // fieldTypes contains the zero value of an enum or message field. // For lists, it contains the element type. // For maps, it contains the entry value type. - fieldTypes map[protoreflect.FieldNumber]interface{} + fieldTypes map[protoreflect.FieldNumber]any // denseFields is a subset of fields where: // 0 < fieldDesc.Number() < len(denseFields) @@ -28,7 +28,7 @@ type reflectMessageInfo struct { denseFields []*fieldInfo // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs. - rangeInfos []interface{} // either *fieldInfo or *oneofInfo + rangeInfos []any // either *fieldInfo or *oneofInfo getUnknown func(pointer) protoreflect.RawFields setUnknown func(pointer, protoreflect.RawFields) @@ -224,7 +224,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { } if ft != nil { if mi.fieldTypes == nil { - mi.fieldTypes = make(map[protoreflect.FieldNumber]interface{}) + mi.fieldTypes = make(map[protoreflect.FieldNumber]any) } mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() } @@ -247,39 +247,39 @@ func (m *extensionMap) Range(f func(protoreflect.FieldDescriptor, protoreflect.V } } } -func (m *extensionMap) Has(xt protoreflect.ExtensionType) (ok bool) { +func (m *extensionMap) Has(xd protoreflect.ExtensionTypeDescriptor) (ok bool) { if m == nil { return false } - xd := xt.TypeDescriptor() x, ok := (*m)[int32(xd.Number())] if !ok { return false } + if x.isUnexpandedLazy() { + // Avoid calling x.Value(), which triggers a lazy unmarshal. 
+ return true + } switch { case xd.IsList(): return x.Value().List().Len() > 0 case xd.IsMap(): return x.Value().Map().Len() > 0 - case xd.Message() != nil: - return x.Value().Message().IsValid() } return true } -func (m *extensionMap) Clear(xt protoreflect.ExtensionType) { - delete(*m, int32(xt.TypeDescriptor().Number())) +func (m *extensionMap) Clear(xd protoreflect.ExtensionTypeDescriptor) { + delete(*m, int32(xd.Number())) } -func (m *extensionMap) Get(xt protoreflect.ExtensionType) protoreflect.Value { - xd := xt.TypeDescriptor() +func (m *extensionMap) Get(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value { if m != nil { if x, ok := (*m)[int32(xd.Number())]; ok { return x.Value() } } - return xt.Zero() + return xd.Type().Zero() } -func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) { - xd := xt.TypeDescriptor() +func (m *extensionMap) Set(xd protoreflect.ExtensionTypeDescriptor, v protoreflect.Value) { + xt := xd.Type() isValid := true switch { case !xt.IsValidValue(v): @@ -292,7 +292,7 @@ func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) isValid = v.Message().IsValid() } if !isValid { - panic(fmt.Sprintf("%v: assigning invalid value", xt.TypeDescriptor().FullName())) + panic(fmt.Sprintf("%v: assigning invalid value", xd.FullName())) } if *m == nil { @@ -302,16 +302,15 @@ func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) x.Set(xt, v) (*m)[int32(xd.Number())] = x } -func (m *extensionMap) Mutable(xt protoreflect.ExtensionType) protoreflect.Value { - xd := xt.TypeDescriptor() +func (m *extensionMap) Mutable(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value { if xd.Kind() != protoreflect.MessageKind && xd.Kind() != protoreflect.GroupKind && !xd.IsList() && !xd.IsMap() { panic("invalid Mutable on field with non-composite type") } if x, ok := (*m)[int32(xd.Number())]; ok { return x.Value() } - v := xt.New() - m.Set(xt, v) + v := xd.Type().New() + m.Set(xd, v) return v } @@ -394,7 +393,7 @@ var ( // MessageOf returns a reflective view over a message. The input must be a // pointer to a named Go struct. If the provided type has a ProtoReflect method, // it must be implemented by calling this method. -func (mi *MessageInfo) MessageOf(m interface{}) protoreflect.Message { +func (mi *MessageInfo) MessageOf(m any) protoreflect.Message { if reflect.TypeOf(m) != mi.GoReflectType { panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) } @@ -422,13 +421,13 @@ func (m *messageIfaceWrapper) Reset() { func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message { return (*messageReflectWrapper)(m) } -func (m *messageIfaceWrapper) protoUnwrap() interface{} { +func (m *messageIfaceWrapper) protoUnwrap() any { return m.p.AsIfaceOf(m.mi.GoReflectType.Elem()) } // checkField verifies that the provided field descriptor is valid. // Exactly one of the returned values is populated. 
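Internally the extension map is now keyed by protoreflect.ExtensionTypeDescriptor instead of ExtensionType, but the public proto.{Has,Get,Set}Extension API that funnels into these methods is unchanged. A small usage sketch against descriptorpb.FeatureSet and the gofeaturespb.E_Go extension, both of which appear later in this diff:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/gofeaturespb"
)

func main() {
	fs := &descriptorpb.FeatureSet{}
	fmt.Println(proto.HasExtension(fs, gofeaturespb.E_Go)) // false: unpopulated

	proto.SetExtension(fs, gofeaturespb.E_Go, &gofeaturespb.GoFeatures{
		LegacyUnmarshalJsonEnum: proto.Bool(true),
	})
	fmt.Println(proto.HasExtension(fs, gofeaturespb.E_Go)) // true

	got := proto.GetExtension(fs, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures)
	fmt.Println(got.GetLegacyUnmarshalJsonEnum()) // true
}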
-func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionType) { +func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionTypeDescriptor) { var fi *fieldInfo if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) { fi = mi.denseFields[n] @@ -457,7 +456,7 @@ func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, if !ok { panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName())) } - return nil, xtd.Type() + return nil, xtd } panic(fmt.Sprintf("field %v is invalid", fd.FullName())) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 5e736c60..986322b1 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -538,6 +538,6 @@ func isZero(v reflect.Value) bool { } return true default: - panic(&reflect.ValueError{"reflect.Value.IsZero", v.Kind()}) + panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()}) } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go index 741d6e5b..99dc23c6 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go @@ -23,12 +23,13 @@ func (m *messageState) New() protoreflect.Message { func (m *messageState) Interface() protoreflect.ProtoMessage { return m.protoUnwrap().(protoreflect.ProtoMessage) } -func (m *messageState) protoUnwrap() interface{} { +func (m *messageState) protoUnwrap() any { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageState) ProtoMethods() *protoiface.Methods { - m.messageInfo().init() - return &m.messageInfo().methods + mi := m.messageInfo() + mi.init() + return &mi.methods } // ProtoMessageInfo is a pseudo-internal API for allowing the v1 code @@ -41,8 +42,9 @@ func (m *messageState) ProtoMessageInfo() *MessageInfo { } func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - m.messageInfo().init() - for _, ri := range m.messageInfo().rangeInfos { + mi := m.messageInfo() + mi.init() + for _, ri := range mi.rangeInfos { switch ri := ri.(type) { case *fieldInfo: if ri.has(m.pointer()) { @@ -52,77 +54,86 @@ func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.V } case *oneofInfo: if n := ri.which(m.pointer()); n > 0 { - fi := m.messageInfo().fields[n] + fi := mi.fields[n] if !f(fi.fieldDesc, fi.get(m.pointer())) { return } } } } - m.messageInfo().extensionMap(m.pointer()).Range(f) + mi.extensionMap(m.pointer()).Range(f) } func (m *messageState) Has(fd protoreflect.FieldDescriptor) bool { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.has(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Has(xt) + return mi.extensionMap(m.pointer()).Has(xd) } } func (m *messageState) Clear(fd protoreflect.FieldDescriptor) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { 
fi.clear(m.pointer()) } else { - m.messageInfo().extensionMap(m.pointer()).Clear(xt) + mi.extensionMap(m.pointer()).Clear(xd) } } func (m *messageState) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.get(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Get(xt) + return mi.extensionMap(m.pointer()).Get(xd) } } func (m *messageState) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { fi.set(m.pointer(), v) } else { - m.messageInfo().extensionMap(m.pointer()).Set(xt, v) + mi.extensionMap(m.pointer()).Set(xd, v) } } func (m *messageState) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.mutable(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) + return mi.extensionMap(m.pointer()).Mutable(xd) } } func (m *messageState) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.newField() } else { - return xt.New() + return xd.Type().New() } } func (m *messageState) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { - m.messageInfo().init() - if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + mi := m.messageInfo() + mi.init() + if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { return od.Fields().ByNumber(oi.which(m.pointer())) } panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) } func (m *messageState) GetUnknown() protoreflect.RawFields { - m.messageInfo().init() - return m.messageInfo().getUnknown(m.pointer()) + mi := m.messageInfo() + mi.init() + return mi.getUnknown(m.pointer()) } func (m *messageState) SetUnknown(b protoreflect.RawFields) { - m.messageInfo().init() - m.messageInfo().setUnknown(m.pointer(), b) + mi := m.messageInfo() + mi.init() + mi.setUnknown(m.pointer(), b) } func (m *messageState) IsValid() bool { return !m.pointer().IsNil() @@ -143,12 +154,13 @@ func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage { } return (*messageIfaceWrapper)(m) } -func (m *messageReflectWrapper) protoUnwrap() interface{} { +func (m *messageReflectWrapper) protoUnwrap() any { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods { - m.messageInfo().init() - return &m.messageInfo().methods + mi := m.messageInfo() + mi.init() + return &mi.methods } // ProtoMessageInfo is a pseudo-internal API for allowing the v1 code @@ -161,8 +173,9 @@ func (m *messageReflectWrapper) ProtoMessageInfo() *MessageInfo { } func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - m.messageInfo().init() - for _, ri := range m.messageInfo().rangeInfos { + mi := m.messageInfo() + mi.init() + for _, ri := range mi.rangeInfos { switch ri := ri.(type) { case *fieldInfo: if 
ri.has(m.pointer()) { @@ -172,77 +185,86 @@ func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, proto } case *oneofInfo: if n := ri.which(m.pointer()); n > 0 { - fi := m.messageInfo().fields[n] + fi := mi.fields[n] if !f(fi.fieldDesc, fi.get(m.pointer())) { return } } } } - m.messageInfo().extensionMap(m.pointer()).Range(f) + mi.extensionMap(m.pointer()).Range(f) } func (m *messageReflectWrapper) Has(fd protoreflect.FieldDescriptor) bool { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.has(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Has(xt) + return mi.extensionMap(m.pointer()).Has(xd) } } func (m *messageReflectWrapper) Clear(fd protoreflect.FieldDescriptor) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { fi.clear(m.pointer()) } else { - m.messageInfo().extensionMap(m.pointer()).Clear(xt) + mi.extensionMap(m.pointer()).Clear(xd) } } func (m *messageReflectWrapper) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.get(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Get(xt) + return mi.extensionMap(m.pointer()).Get(xd) } } func (m *messageReflectWrapper) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { fi.set(m.pointer(), v) } else { - m.messageInfo().extensionMap(m.pointer()).Set(xt, v) + mi.extensionMap(m.pointer()).Set(xd, v) } } func (m *messageReflectWrapper) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.mutable(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) + return mi.extensionMap(m.pointer()).Mutable(xd) } } func (m *messageReflectWrapper) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.newField() } else { - return xt.New() + return xd.Type().New() } } func (m *messageReflectWrapper) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { - m.messageInfo().init() - if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + mi := m.messageInfo() + mi.init() + if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { return od.Fields().ByNumber(oi.which(m.pointer())) } panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) } func (m *messageReflectWrapper) GetUnknown() protoreflect.RawFields { - m.messageInfo().init() - return m.messageInfo().getUnknown(m.pointer()) + mi := m.messageInfo() + mi.init() + return mi.getUnknown(m.pointer()) } func (m *messageReflectWrapper) SetUnknown(b protoreflect.RawFields) { - m.messageInfo().init() - m.messageInfo().setUnknown(m.pointer(), b) + mi := 
m.messageInfo() + mi.init() + mi.setUnknown(m.pointer(), b) } func (m *messageReflectWrapper) IsValid() bool { return !m.pointer().IsNil() diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 517e9443..da685e8a 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -16,7 +16,7 @@ import ( const UnsafeEnabled = false // Pointer is an opaque pointer type. -type Pointer interface{} +type Pointer any // offset represents the offset to a struct field, accessible from a pointer. // The offset is the field index into a struct. @@ -62,7 +62,7 @@ func pointerOfValue(v reflect.Value) pointer { } // pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v interface{}) pointer { +func pointerOfIface(v any) pointer { return pointer{v: reflect.ValueOf(v)} } @@ -93,7 +93,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value { // AsIfaceOf treats p as a pointer to an object of type t and returns the value. // It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) interface{} { +func (p pointer) AsIfaceOf(t reflect.Type) any { return p.AsValueOf(t).Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 4b020e31..5f20ca5d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -50,7 +50,7 @@ func pointerOfValue(v reflect.Value) pointer { } // pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v interface{}) pointer { +func pointerOfIface(v any) pointer { type ifaceHeader struct { Type unsafe.Pointer Data unsafe.Pointer @@ -80,7 +80,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value { // AsIfaceOf treats p as a pointer to an object of type t and returns the value. // It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) interface{} { +func (p pointer) AsIfaceOf(t reflect.Type) any { // TODO: Use tricky unsafe magic to directly create ifaceHeader. return p.AsValueOf(t).Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go index 1665a68e..a1f09162 100644 --- a/vendor/google.golang.org/protobuf/internal/order/range.go +++ b/vendor/google.golang.org/protobuf/internal/order/range.go @@ -18,7 +18,7 @@ type messageField struct { } var messageFieldPool = sync.Pool{ - New: func() interface{} { return new([]messageField) }, + New: func() any { return new([]messageField) }, } type ( @@ -69,7 +69,7 @@ type mapEntry struct { } var mapEntryPool = sync.Pool{ - New: func() interface{} { return new([]mapEntry) }, + New: func() any { return new([]mapEntry) }, } type ( diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings.go b/vendor/google.golang.org/protobuf/internal/strs/strings.go index 0b74e765..a6e7df24 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings.go @@ -17,7 +17,7 @@ import ( // EnforceUTF8 reports whether to enforce strict UTF-8 validation. 
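The sync.Pool constructors above are the same any migration; for reference, a self-contained pool of the same shape (pooling a pointer to a slice so Put/Get recycle the slice header together with its backing array):

package main

import (
	"fmt"
	"sync"
)

// Same shape as messageFieldPool/mapEntryPool above: New returns any
// (formerly interface{}), and the pooled value is a *[]T.
var fieldsPool = sync.Pool{
	New: func() any { return new([]int) },
}

func main() {
	p := fieldsPool.Get().(*[]int)
	*p = append((*p)[:0], 1, 2, 3) // reuses capacity from a previous cycle, if any
	fmt.Println(*p)
	fieldsPool.Put(p)
}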
func EnforceUTF8(fd protoreflect.FieldDescriptor) bool { - if flags.ProtoLegacy { + if flags.ProtoLegacy || fd.Syntax() == protoreflect.Editions { if fd, ok := fd.(interface{ EnforceUTF8() bool }); ok { return fd.EnforceUTF8() } diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index d8f48faf..dbbf1f68 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 32 - Patch = 0 + Minor = 34 + Patch = 2 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index e5b03b56..d75a6534 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -51,6 +51,8 @@ type UnmarshalOptions struct { // Unmarshal parses the wire-format message in b and places the result in m. // The provided message must be mutable (e.g., a non-nil pointer to a message). +// +// See the [UnmarshalOptions] type if you need more control. func Unmarshal(b []byte, m Message) error { _, err := UnmarshalOptions{RecursionLimit: protowire.DefaultRecursionLimit}.unmarshal(b, m.ProtoReflect()) return err diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index 4fed202f..1f847bcc 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -5,12 +5,17 @@ package proto import ( + "errors" + "fmt" + "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/runtime/protoiface" + + protoerrors "google.golang.org/protobuf/internal/errors" ) // MarshalOptions configures the marshaler. @@ -70,7 +75,32 @@ type MarshalOptions struct { UseCachedSize bool } +// flags turns the specified MarshalOptions (user-facing) into +// protoiface.MarshalInputFlags (used internally by the marshaler). +// +// See impl.marshalOptions.Options for the inverse operation. +func (o MarshalOptions) flags() protoiface.MarshalInputFlags { + var flags protoiface.MarshalInputFlags + + // Note: o.AllowPartial is always forced to true by MarshalOptions.marshal, + // which is why it is not a part of MarshalInputFlags. + + if o.Deterministic { + flags |= protoiface.MarshalDeterministic + } + + if o.UseCachedSize { + flags |= protoiface.MarshalUseCachedSize + } + + return flags +} + // Marshal returns the wire-format encoding of m. +// +// This is the most common entry point for encoding a Protobuf message. +// +// See the [MarshalOptions] type if you need more control. func Marshal(m Message) ([]byte, error) { // Treat nil message interface as an empty message; nothing to output. if m == nil { @@ -116,6 +146,9 @@ func emptyBytesForMessage(m Message) []byte { // MarshalAppend appends the wire-format encoding of m to b, // returning the result. +// +// This is a less common entry point than [Marshal], which is only needed if you +// need to supply your own buffers for performance reasons. func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { // Treat nil message interface as an empty message; nothing to append. 
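The new flags() helper centralizes the MarshalOptions to protoiface flag translation used by marshal here and, further down in size.go, by Size. From the caller's side it is invisible; a typical use of the options struct, for reference:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	m, err := structpb.NewStruct(map[string]any{"b": 1.0, "a": 2.0})
	if err != nil {
		panic(err)
	}
	// Deterministic is translated to protoiface.MarshalDeterministic by flags().
	opts := proto.MarshalOptions{Deterministic: true}
	buf := make([]byte, 0, opts.Size(m)) // pre-size the buffer
	buf, err = opts.MarshalAppend(buf, m) // the "bring your own buffer" entry point
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded %d bytes\n", len(buf))
}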
if m == nil { @@ -145,12 +178,7 @@ func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoifac in := protoiface.MarshalInput{ Message: m, Buf: b, - } - if o.Deterministic { - in.Flags |= protoiface.MarshalDeterministic - } - if o.UseCachedSize { - in.Flags |= protoiface.MarshalUseCachedSize + Flags: o.flags(), } if methods.Size != nil { sout := methods.Size(protoiface.SizeInput{ @@ -168,6 +196,10 @@ func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoifac out.Buf, err = o.marshalMessageSlow(b, m) } if err != nil { + var mismatch *protoerrors.SizeMismatchError + if errors.As(err, &mismatch) { + return out, fmt.Errorf("marshaling %s: %v", string(m.Descriptor().FullName()), err) + } return out, err } if allowPartial { diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index 17899a3a..d248f292 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -11,18 +11,21 @@ import ( // HasExtension reports whether an extension field is populated. // It returns false if m is invalid or if xt does not extend m. func HasExtension(m Message, xt protoreflect.ExtensionType) bool { - // Treat nil message interface as an empty message; no populated fields. - if m == nil { + // Treat nil message interface or descriptor as an empty message; no populated + // fields. + if m == nil || xt == nil { return false } // As a special-case, we report invalid or mismatching descriptors // as always not being populated (since they aren't). - if xt == nil || m.ProtoReflect().Descriptor() != xt.TypeDescriptor().ContainingMessage() { + mr := m.ProtoReflect() + xd := xt.TypeDescriptor() + if mr.Descriptor() != xd.ContainingMessage() { return false } - return m.ProtoReflect().Has(xt.TypeDescriptor()) + return mr.Has(xd) } // ClearExtension clears an extension field such that subsequent @@ -36,7 +39,7 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) { // If the field is unpopulated, it returns the default value for // scalars and an immutable, empty value for lists or messages. // It panics if xt does not extend m. -func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { +func GetExtension(m Message, xt protoreflect.ExtensionType) any { // Treat nil message interface as an empty message; return the default. if m == nil { return xt.InterfaceOf(xt.Zero()) @@ -48,7 +51,7 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { // SetExtension stores the value of an extension field. // It panics if m is invalid, xt does not extend m, or if the type of v // is invalid for the specified extension field. -func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { +func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { xd := xt.TypeDescriptor() pv := xt.ValueOf(v) @@ -75,7 +78,7 @@ func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { // It returns immediately if f returns false. // While iterating, mutating operations may only be performed // on the current extension field. -func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) { +func RangeExtensions(m Message, f func(protoreflect.ExtensionType, any) bool) { // Treat nil message interface as an empty message; nothing to range over.
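The errors.As branch added to marshal above attaches the message name to the internal SizeMismatchError, which is raised below in messageset.go when the precomputed size disagrees with the bytes actually written. The detection pattern in isolation, with a stand-in error type since the real one lives in an internal package:

package main

import (
	"errors"
	"fmt"
)

// sizeMismatchError stands in for protoerrors.SizeMismatchError,
// which is internal to the protobuf module.
type sizeMismatchError struct{ calculated, measured int }

func (e *sizeMismatchError) Error() string {
	return fmt.Sprintf("size mismatch (calculated %d, measured %d)", e.calculated, e.measured)
}

func main() {
	err := fmt.Errorf("inner marshal failed: %w", &sizeMismatchError{4, 5})
	var mismatch *sizeMismatchError
	if errors.As(err, &mismatch) { // walks the wrap chain, like the code above
		fmt.Printf("marshaling pkg.Message: %v\n", err)
	}
}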
if m == nil { return diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go index 312d5d45..575d1483 100644 --- a/vendor/google.golang.org/protobuf/proto/messageset.go +++ b/vendor/google.golang.org/protobuf/proto/messageset.go @@ -47,11 +47,16 @@ func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]b func (o MarshalOptions) marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { b = messageset.AppendFieldStart(b, fd.Number()) b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType) - b = protowire.AppendVarint(b, uint64(o.Size(value.Message().Interface()))) + calculatedSize := o.Size(value.Message().Interface()) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) b, err := o.marshalMessage(b, value.Message()) if err != nil { return b, err } + if measuredSize := len(b) - before; calculatedSize != measuredSize { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } b = messageset.AppendFieldEnd(b) return b, nil } diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go index f1692b49..052fb5ae 100644 --- a/vendor/google.golang.org/protobuf/proto/size.go +++ b/vendor/google.golang.org/protobuf/proto/size.go @@ -34,6 +34,7 @@ func (o MarshalOptions) size(m protoreflect.Message) (size int) { if methods != nil && methods.Size != nil { out := methods.Size(protoiface.SizeInput{ Message: m, + Flags: o.flags(), }) return out.Size } @@ -42,6 +43,7 @@ func (o MarshalOptions) size(m protoreflect.Message) (size int) { // This case is mainly used for legacy types with a Marshal method. out, _ := methods.Marshal(protoiface.MarshalInput{ Message: m, + Flags: o.flags(), }) return len(out.Buf) } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index baa0cc62..8fbecb4f 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -13,6 +13,7 @@ package protodesc import ( + "google.golang.org/protobuf/internal/editionssupport" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/pragma" @@ -91,15 +92,17 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot switch fd.GetSyntax() { case "proto2", "": f.L1.Syntax = protoreflect.Proto2 + f.L1.Edition = filedesc.EditionProto2 case "proto3": f.L1.Syntax = protoreflect.Proto3 + f.L1.Edition = filedesc.EditionProto3 case "editions": f.L1.Syntax = protoreflect.Editions f.L1.Edition = fromEditionProto(fd.GetEdition()) default: return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) } - if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < SupportedEditionsMinimum || fd.GetEdition() > SupportedEditionsMaximum) { + if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) { return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) } f.L1.Path = fd.GetName() @@ -114,9 +117,7 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot opts = proto.Clone(opts).(*descriptorpb.FileOptions) f.L2.Options = func() protoreflect.ProtoMessage { return opts } } - if f.L1.Syntax == 
protoreflect.Editions { - initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures()) - } + initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures()) f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) for _, i := range fd.GetPublicDependency() { @@ -219,10 +220,10 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot if err := validateEnumDeclarations(f.L1.Enums.List, fd.GetEnumType()); err != nil { return nil, err } - if err := validateMessageDeclarations(f.L1.Messages.List, fd.GetMessageType()); err != nil { + if err := validateMessageDeclarations(f, f.L1.Messages.List, fd.GetMessageType()); err != nil { return nil, err } - if err := validateExtensionDeclarations(f.L1.Extensions.List, fd.GetExtension()); err != nil { + if err := validateExtensionDeclarations(f, f.L1.Extensions.List, fd.GetExtension()); err != nil { return nil, err } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index aff6fd49..85617554 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -28,6 +28,7 @@ func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProt opts = proto.Clone(opts).(*descriptorpb.EnumOptions) e.L2.Options = func() protoreflect.ProtoMessage { return opts } } + e.L1.EditionFeatures = mergeEditionFeatures(parent, ed.GetOptions().GetFeatures()) for _, s := range ed.GetReservedName() { e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s)) } @@ -68,6 +69,7 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { return nil, err } + m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures()) if opts := md.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.MessageOptions) m.L2.Options = func() protoreflect.ProtoMessage { return opts } @@ -114,6 +116,27 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt return ms, nil } +// canBePacked returns whether the field can use packed encoding: +// https://protobuf.dev/programming-guides/encoding/#packed +func canBePacked(fd *descriptorpb.FieldDescriptorProto) bool { + if fd.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + return false // not a repeated field + } + + switch protoreflect.Kind(fd.GetType()) { + case protoreflect.MessageKind, protoreflect.GroupKind: + return false // not a scalar type field + + case protoreflect.StringKind, protoreflect.BytesKind: + // string and bytes can explicitly not be declared as packed, + // see https://protobuf.dev/programming-guides/encoding/#packed + return false + + default: + return true + } +} + func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (fs []filedesc.Field, err error) { fs = make([]filedesc.Field, len(fds)) // allocate up-front to ensure stable pointers for i, fd := range fds { @@ -121,13 +144,15 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc if f.L0, err = r.makeBase(f, parent, fd.GetName(), i, sb); err != nil { return nil, err } + f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures()) f.L1.IsProto3Optional = fd.GetProto3Optional() if opts := fd.GetOptions(); opts != nil { opts = 
proto.Clone(opts).(*descriptorpb.FieldOptions) f.L1.Options = func() protoreflect.ProtoMessage { return opts } f.L1.IsWeak = opts.GetWeak() - f.L1.HasPacked = opts.Packed != nil - f.L1.IsPacked = opts.GetPacked() + if opts.Packed != nil { + f.L1.EditionFeatures.IsPacked = opts.GetPacked() + } } f.L1.Number = protoreflect.FieldNumber(fd.GetNumber()) f.L1.Cardinality = protoreflect.Cardinality(fd.GetLabel()) @@ -138,28 +163,12 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc f.L1.StringName.InitJSON(fd.GetJsonName()) } - if f.Base.L0.ParentFile.Syntax() == protoreflect.Editions { - f.L1.Presence = resolveFeatureHasFieldPresence(f.Base.L0.ParentFile, fd) - // We reuse the existing field because the old option `[packed = - // true]` is mutually exclusive with the editions feature. - if fd.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REPEATED { - f.L1.HasPacked = true - f.L1.IsPacked = resolveFeatureRepeatedFieldEncodingPacked(f.Base.L0.ParentFile, fd) - } + if f.L1.EditionFeatures.IsLegacyRequired { + f.L1.Cardinality = protoreflect.Required + } - // We pretend this option is always explicitly set because the only - // use of HasEnforceUTF8 is to determine whether to use EnforceUTF8 - // or to return the appropriate default. - // When using editions we either parse the option or resolve the - // appropriate default here (instead of later when this option is - // requested from the descriptor). - // In proto2/proto3 syntax HasEnforceUTF8 might be false. - f.L1.HasEnforceUTF8 = true - f.L1.EnforceUTF8 = resolveFeatureEnforceUTF8(f.Base.L0.ParentFile, fd) - - if f.L1.Kind == protoreflect.MessageKind && resolveFeatureDelimitedEncoding(f.Base.L0.ParentFile, fd) { - f.L1.Kind = protoreflect.GroupKind - } + if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded { + f.L1.Kind = protoreflect.GroupKind } } return fs, nil @@ -172,6 +181,7 @@ func (r descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDesc if o.L0, err = r.makeBase(o, parent, od.GetName(), i, sb); err != nil { return nil, err } + o.L1.EditionFeatures = mergeEditionFeatures(parent, od.GetOptions().GetFeatures()) if opts := od.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.OneofOptions) o.L1.Options = func() protoreflect.ProtoMessage { return opts } @@ -188,10 +198,13 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript if x.L0, err = r.makeBase(x, parent, xd.GetName(), i, sb); err != nil { return nil, err } + x.L1.EditionFeatures = mergeEditionFeatures(parent, xd.GetOptions().GetFeatures()) if opts := xd.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.FieldOptions) x.L2.Options = func() protoreflect.ProtoMessage { return opts } - x.L2.IsPacked = opts.GetPacked() + if opts.Packed != nil { + x.L1.EditionFeatures.IsPacked = opts.GetPacked() + } } x.L1.Number = protoreflect.FieldNumber(xd.GetNumber()) x.L1.Cardinality = protoreflect.Cardinality(xd.GetLabel()) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index 27d7e350..f3cebab2 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -46,6 +46,11 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), 
partialName(fd.GetTypeName()), f.IsWeak()); err != nil { return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) } + if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) { + // A map field might inherit delimited encoding from a file-wide default feature. + // But maps never actually use delimited encoding. (At least for now...) + f.L1.Kind = protoreflect.MessageKind + } if fd.DefaultValue != nil { v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable) if err != nil { @@ -276,8 +281,8 @@ func unmarshalDefault(s string, fd protoreflect.FieldDescriptor, allowUnresolvab } else if err != nil { return v, ev, err } - if fd.Syntax() == protoreflect.Proto3 { - return v, ev, errors.New("cannot be specified under proto3 semantics") + if !fd.HasPresence() { + return v, ev, errors.New("cannot be specified with implicit field presence") } if fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind || fd.Cardinality() == protoreflect.Repeated { return v, ev, errors.New("cannot be specified on composite types") diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index 9af1d564..6de31c2e 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -45,11 +45,11 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri if allowAlias && !foundAlias { return errors.New("enum %q allows aliases, but none were found", e.FullName()) } - if e.Syntax() == protoreflect.Proto3 { + if !e.IsClosed() { if v := e.Values().Get(0); v.Number() != 0 { - return errors.New("enum %q using proto3 semantics must have zero number for the first value", v.FullName()) + return errors.New("enum %q using open semantics must have zero number for the first value", v.FullName()) } - // Verify that value names in proto3 do not conflict if the + // Verify that value names in open enums do not conflict if the // case-insensitive prefix is removed. 
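The validator now speaks in terms of open versus closed enums rather than proto3 versus proto2 syntax, matching the IsClosed accessor added to protoreflect.EnumDescriptor further down in this diff. A quick demonstration with a generated proto3 enum, which is open by definition:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// proto3 enums use open semantics: unknown numbers stay representable.
	nv := structpb.NullValue(42) // not a declared value, still a valid NullValue
	d := nv.Descriptor()
	fmt.Println(d.IsClosed())               // false
	fmt.Println(d.Values().Get(0).Number()) // 0: the first value must be zero, as checked above
}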
// See protoc v3.8.0: src/google/protobuf/descriptor.cc:4991-5055 names := map[string]protoreflect.EnumValueDescriptor{} @@ -58,7 +58,7 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri v1 := e.Values().Get(i) s := strs.EnumValueName(strs.TrimEnumPrefix(string(v1.Name()), prefix)) if v2, ok := names[s]; ok && v1.Number() != v2.Number() { - return errors.New("enum %q using proto3 semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) + return errors.New("enum %q using open semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) } names[s] = v1 } @@ -80,7 +80,9 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri return nil } -func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { +func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { + // There are a few limited exceptions only for proto3 + isProto3 := file.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3) for i, md := range mds { m := &ms[i] @@ -107,25 +109,13 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if isMessageSet && !flags.ProtoLegacy { return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName()) } - if isMessageSet && (m.Syntax() != protoreflect.Proto2 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { + if isMessageSet && (isProto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { return errors.New("message %q is an invalid proto1 MessageSet", m.FullName()) } - if m.Syntax() == protoreflect.Proto3 { + if isProto3 { if m.ExtensionRanges().Len() > 0 { return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName()) } - // Verify that field names in proto3 do not conflict if lowercased - // with all underscores removed. 
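validateMessageDeclarations now receives the file so the remaining proto3-only checks key off the edition rather than each descriptor's Syntax(). These checks fire when descriptors are built from a FileDescriptorProto; a sketch (file contents mine) of one of them tripping:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// A proto3 message declaring an extension range trips the
	// "using proto3 semantics cannot have extension ranges" check above.
	fd := &descriptorpb.FileDescriptorProto{
		Name:   proto.String("bad.proto"),
		Syntax: proto.String("proto3"),
		MessageType: []*descriptorpb.DescriptorProto{{
			Name: proto.String("M"),
			ExtensionRange: []*descriptorpb.DescriptorProto_ExtensionRange{{
				Start: proto.Int32(100), End: proto.Int32(200),
			}},
		}},
	}
	_, err := protodesc.NewFile(fd, new(protoregistry.Files))
	fmt.Println(err != nil) // true: validation rejects the file
}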
- // See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847 - names := map[string]protoreflect.FieldDescriptor{} - for i := 0; i < m.Fields().Len(); i++ { - f1 := m.Fields().Get(i) - s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1) - if f2, ok := names[s]; ok { - return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name()) - } - names[s] = f1 - } } for j, fd := range md.GetField() { @@ -149,7 +139,7 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc return errors.New("message field %q may not have extendee: %q", f.FullName(), fd.GetExtendee()) } if f.L1.IsProto3Optional { - if f.Syntax() != protoreflect.Proto3 { + if !isProto3 { return errors.New("message field %q under proto3 optional semantics must be specified in the proto3 syntax", f.FullName()) } if f.Cardinality() != protoreflect.Optional { @@ -162,26 +152,29 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if f.IsWeak() && !flags.ProtoLegacy { return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) } - if f.IsWeak() && (f.Syntax() != protoreflect.Proto2 || !isOptionalMessage(f) || f.ContainingOneof() != nil) { + if f.IsWeak() && (!f.HasPresence() || !isOptionalMessage(f) || f.ContainingOneof() != nil) { return errors.New("message field %q may only be weak for an optional message", f.FullName()) } if f.IsPacked() && !isPackable(f) { return errors.New("message field %q is not packable", f.FullName()) } - if err := checkValidGroup(f); err != nil { + if err := checkValidGroup(file, f); err != nil { return errors.New("message field %q is an invalid group: %v", f.FullName(), err) } if err := checkValidMap(f); err != nil { return errors.New("message field %q is an invalid map: %v", f.FullName(), err) } - if f.Syntax() == protoreflect.Proto3 { + if isProto3 { if f.Cardinality() == protoreflect.Required { return errors.New("message field %q using proto3 semantics cannot be required", f.FullName()) } - if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().Syntax() != protoreflect.Proto3 { - return errors.New("message field %q using proto3 semantics may only depend on a proto3 enum", f.FullName()) + if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() { + return errors.New("message field %q using proto3 semantics may only depend on open enums", f.FullName()) } } + if f.Cardinality() == protoreflect.Optional && !f.HasPresence() && f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() { + return errors.New("message field %q with implicit presence may only use open enums", f.FullName()) + } } seenSynthetic := false // synthetic oneofs for proto3 optional must come after real oneofs for j := range md.GetOneofDecl() { @@ -215,17 +208,17 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if err := validateEnumDeclarations(m.L1.Enums.List, md.GetEnumType()); err != nil { return err } - if err := validateMessageDeclarations(m.L1.Messages.List, md.GetNestedType()); err != nil { + if err := validateMessageDeclarations(file, m.L1.Messages.List, md.GetNestedType()); err != nil { return err } - if err := validateExtensionDeclarations(m.L1.Extensions.List, md.GetExtension()); err != nil { + if err := validateExtensionDeclarations(file, m.L1.Extensions.List, md.GetExtension()); err != nil { return err } } return nil } -func validateExtensionDeclarations(xs 
[]filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error { +func validateExtensionDeclarations(f *filedesc.File, xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error { for i, xd := range xds { x := &xs[i] // NOTE: Avoid using the IsValid method since extensions to MessageSet @@ -267,13 +260,13 @@ func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb. if x.IsPacked() && !isPackable(x) { return errors.New("extension field %q is not packable", x.FullName()) } - if err := checkValidGroup(x); err != nil { + if err := checkValidGroup(f, x); err != nil { return errors.New("extension field %q is an invalid group: %v", x.FullName(), err) } if md := x.Message(); md != nil && md.IsMapEntry() { return errors.New("extension field %q cannot be a map entry", x.FullName()) } - if x.Syntax() == protoreflect.Proto3 { + if f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3) { switch x.ContainingMessage().FullName() { case (*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName(): case (*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName(): @@ -309,21 +302,25 @@ func isPackable(fd protoreflect.FieldDescriptor) bool { // checkValidGroup reports whether fd is a valid group according to the same // rules that protoc imposes. -func checkValidGroup(fd protoreflect.FieldDescriptor) error { +func checkValidGroup(f *filedesc.File, fd protoreflect.FieldDescriptor) error { md := fd.Message() switch { case fd.Kind() != protoreflect.GroupKind: return nil - case fd.Syntax() != protoreflect.Proto2: - return errors.New("invalid under proto2 semantics") + case f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3): + return errors.New("invalid under proto3 semantics") case md == nil || md.IsPlaceholder(): return errors.New("message must be resolvable") - case fd.FullName().Parent() != md.FullName().Parent(): - return errors.New("message and field must be declared in the same scope") - case !unicode.IsUpper(rune(md.Name()[0])): - return errors.New("message name must start with an uppercase") - case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): - return errors.New("field name must be lowercased form of the message name") + } + if f.L1.Edition < fromEditionProto(descriptorpb.Edition_EDITION_2023) { + switch { + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case !unicode.IsUpper(rune(md.Name()[0])): + return errors.New("message name must start with an uppercase") + case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): + return errors.New("field name must be lowercased form of the message name") + } } return nil } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index 7352926c..804830ed 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -5,29 +5,24 @@ package protodesc import ( - _ "embed" "fmt" "os" "sync" + "google.golang.org/protobuf/internal/editiondefaults" "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/descriptorpb" + gofeaturespb "google.golang.org/protobuf/types/gofeaturespb" ) -const ( - SupportedEditionsMinimum = descriptorpb.Edition_EDITION_PROTO2 - 
SupportedEditionsMaximum = descriptorpb.Edition_EDITION_2023 ) - -//go:embed editions_defaults.binpb -var binaryEditionDefaults []byte var defaults = &descriptorpb.FeatureSetDefaults{} var defaultsCacheMu sync.Mutex var defaultsCache = make(map[filedesc.Edition]*descriptorpb.FeatureSet) func init() { - err := proto.Unmarshal(binaryEditionDefaults, defaults) + err := proto.Unmarshal(editiondefaults.Defaults, defaults) if err != nil { fmt.Fprintf(os.Stderr, "unmarshal editions defaults: %v\n", err) os.Exit(1) @@ -67,53 +62,74 @@ func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet { fmt.Fprintf(os.Stderr, "internal error: unsupported edition %v (did you forget to update the embedded defaults (i.e. the bootstrap descriptor proto)?)\n", edpb) os.Exit(1) } - fs := defaults.GetDefaults()[0].GetFeatures() + fsed := defaults.GetDefaults()[0] // Using a linear search for now. // Editions are guaranteed to be sorted and thus we could use a binary search. // Given that there are only a handful of editions (with one more per year) // there is not much reason to use a binary search. for _, def := range defaults.GetDefaults() { if def.GetEdition() <= edpb { - fs = def.GetFeatures() + fsed = def } else { break } } + fs := proto.Clone(fsed.GetFixedFeatures()).(*descriptorpb.FeatureSet) + proto.Merge(fs, fsed.GetOverridableFeatures()) defaultsCache[ed] = fs return fs } -func resolveFeatureHasFieldPresence(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { - fs := fieldDesc.GetOptions().GetFeatures() - if fs == nil || fs.FieldPresence == nil { - return fileDesc.L1.EditionFeatures.IsFieldPresence +// mergeEditionFeatures merges the parent and child feature sets. This function +// should be used when initializing Go descriptors from descriptor protos, which +// is why the parent is a filedesc.EditionFeatures (Go representation) while +// the child is a descriptorpb.FeatureSet (protoc representation). +// Any feature set by the child overwrites what is set by the parent.
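Before mergeEditionFeatures below, note the idiom getFeatureSetFor above now relies on: the effective defaults are assembled from the split FixedFeatures/OverridableFeatures fields of FeatureSetDefaults by cloning one and merging the other. The Clone-then-Merge pattern in isolation:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fixed := &descriptorpb.FeatureSet{
		FieldPresence: descriptorpb.FeatureSet_EXPLICIT.Enum(),
	}
	overridable := &descriptorpb.FeatureSet{
		EnumType: descriptorpb.FeatureSet_OPEN.Enum(),
	}
	// Clone so the cached result does not alias the defaults message;
	// Merge overlays every field that overridable has set.
	fs := proto.Clone(fixed).(*descriptorpb.FeatureSet)
	proto.Merge(fs, overridable)
	fmt.Println(fs.GetFieldPresence(), fs.GetEnumType()) // EXPLICIT OPEN
}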
+func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorpb.FeatureSet) filedesc.EditionFeatures { + var parentFS filedesc.EditionFeatures + switch p := parentDesc.(type) { + case *filedesc.File: + parentFS = p.L1.EditionFeatures + case *filedesc.Message: + parentFS = p.L1.EditionFeatures + default: + panic(fmt.Sprintf("unknown parent type %T", parentDesc)) + } + if child == nil { + return parentFS + } + if fp := child.FieldPresence; fp != nil { + parentFS.IsFieldPresence = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED || + *fp == descriptorpb.FeatureSet_EXPLICIT + parentFS.IsLegacyRequired = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED + } + if et := child.EnumType; et != nil { + parentFS.IsOpenEnum = *et == descriptorpb.FeatureSet_OPEN } - return fs.GetFieldPresence() == descriptorpb.FeatureSet_LEGACY_REQUIRED || - fs.GetFieldPresence() == descriptorpb.FeatureSet_EXPLICIT -} -func resolveFeatureRepeatedFieldEncodingPacked(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { - fs := fieldDesc.GetOptions().GetFeatures() - if fs == nil || fs.RepeatedFieldEncoding == nil { - return fileDesc.L1.EditionFeatures.IsPacked + if rfe := child.RepeatedFieldEncoding; rfe != nil { + parentFS.IsPacked = *rfe == descriptorpb.FeatureSet_PACKED } - return fs.GetRepeatedFieldEncoding() == descriptorpb.FeatureSet_PACKED -} -func resolveFeatureEnforceUTF8(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { - fs := fieldDesc.GetOptions().GetFeatures() - if fs == nil || fs.Utf8Validation == nil { - return fileDesc.L1.EditionFeatures.IsUTF8Validated + if utf8val := child.Utf8Validation; utf8val != nil { + parentFS.IsUTF8Validated = *utf8val == descriptorpb.FeatureSet_VERIFY } - return fs.GetUtf8Validation() == descriptorpb.FeatureSet_VERIFY -} -func resolveFeatureDelimitedEncoding(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { - fs := fieldDesc.GetOptions().GetFeatures() - if fs == nil || fs.MessageEncoding == nil { - return fileDesc.L1.EditionFeatures.IsDelimitedEncoded + if me := child.MessageEncoding; me != nil { + parentFS.IsDelimitedEncoded = *me == descriptorpb.FeatureSet_DELIMITED } - return fs.GetMessageEncoding() == descriptorpb.FeatureSet_DELIMITED + + if jf := child.JsonFormat; jf != nil { + parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW + } + + if goFeatures, ok := proto.GetExtension(child, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures); ok && goFeatures != nil { + if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil { + parentFS.GenerateLegacyUnmarshalJSON = *luje + } + } + + return parentFS } // initFileDescFromFeatureSet initializes editions related fields in fd based @@ -122,56 +138,8 @@ func resolveFeatureDelimitedEncoding(fileDesc *filedesc.File, fieldDesc *descrip // before calling this function. 
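Each nested scope (file, message, field, oneof, extension, as wired up in desc_init.go above) resolves its features by starting from the parent's resolved set and overriding only what the child explicitly sets. The rule in miniature, with invented names:

package main

import "fmt"

// Sketch of the override rule; the type and field names are invented.
// nil means "inherit from the parent", non-nil means "the child
// explicitly set this feature", exactly as in mergeEditionFeatures.
type features struct{ packed, openEnum bool }

func merge(parent features, packed, openEnum *bool) features {
	out := parent
	if packed != nil {
		out.packed = *packed
	}
	if openEnum != nil {
		out.openEnum = *openEnum
	}
	return out
}

func main() {
	file := features{packed: true, openEnum: true}
	no := false
	field := merge(file, &no, nil) // opts out of packing, inherits open enums
	fmt.Println(field)             // {false true}
}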
func initFileDescFromFeatureSet(fd *filedesc.File, fs *descriptorpb.FeatureSet) { dfs := getFeatureSetFor(fd.L1.Edition) - if fs == nil { - fs = &descriptorpb.FeatureSet{} - } - - var fieldPresence descriptorpb.FeatureSet_FieldPresence - if fp := fs.FieldPresence; fp != nil { - fieldPresence = *fp - } else { - fieldPresence = *dfs.FieldPresence - } - fd.L1.EditionFeatures.IsFieldPresence = fieldPresence == descriptorpb.FeatureSet_LEGACY_REQUIRED || - fieldPresence == descriptorpb.FeatureSet_EXPLICIT - - var enumType descriptorpb.FeatureSet_EnumType - if et := fs.EnumType; et != nil { - enumType = *et - } else { - enumType = *dfs.EnumType - } - fd.L1.EditionFeatures.IsOpenEnum = enumType == descriptorpb.FeatureSet_OPEN - - var respeatedFieldEncoding descriptorpb.FeatureSet_RepeatedFieldEncoding - if rfe := fs.RepeatedFieldEncoding; rfe != nil { - respeatedFieldEncoding = *rfe - } else { - respeatedFieldEncoding = *dfs.RepeatedFieldEncoding - } - fd.L1.EditionFeatures.IsPacked = respeatedFieldEncoding == descriptorpb.FeatureSet_PACKED - - var isUTF8Validated descriptorpb.FeatureSet_Utf8Validation - if utf8val := fs.Utf8Validation; utf8val != nil { - isUTF8Validated = *utf8val - } else { - isUTF8Validated = *dfs.Utf8Validation - } - fd.L1.EditionFeatures.IsUTF8Validated = isUTF8Validated == descriptorpb.FeatureSet_VERIFY - - var messageEncoding descriptorpb.FeatureSet_MessageEncoding - if me := fs.MessageEncoding; me != nil { - messageEncoding = *me - } else { - messageEncoding = *dfs.MessageEncoding - } - fd.L1.EditionFeatures.IsDelimitedEncoded = messageEncoding == descriptorpb.FeatureSet_DELIMITED - - var jsonFormat descriptorpb.FeatureSet_JsonFormat - if jf := fs.JsonFormat; jf != nil { - jsonFormat = *jf - } else { - jsonFormat = *dfs.JsonFormat - } - fd.L1.EditionFeatures.IsJSONCompliant = jsonFormat == descriptorpb.FeatureSet_ALLOW + // initialize the featureset with the defaults + fd.L1.EditionFeatures = mergeEditionFeatures(fd, dfs) + // overwrite any options explicitly specified + fd.L1.EditionFeatures = mergeEditionFeatures(fd, fs) } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb b/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb deleted file mode 100644 index 1a8610a8..00000000 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb +++ /dev/null @@ -1,4 +0,0 @@ - -  (0 -  (0 -  (0 ( \ No newline at end of file diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go index 9d6e0542..a5de8d40 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -73,6 +73,16 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() { p.Syntax = proto.String(file.Syntax().String()) } + if file.Syntax() == protoreflect.Editions { + desc := file + if fileImportDesc, ok := file.(protoreflect.FileImport); ok { + desc = fileImportDesc.FileDescriptor + } + + if editionsInterface, ok := desc.(interface{ Edition() int32 }); ok { + p.Edition = descriptorpb.Edition(editionsInterface.Edition()).Enum() + } + } return p } @@ -153,6 +163,18 @@ func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.Fi if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() { p.Proto3Optional = proto.Bool(true) } + if field.Syntax() 
== protoreflect.Editions { + // Editions have no group keyword, this type is only set so that downstream users continue + // treating this as delimited encoding. + if p.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP { + p.Type = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE.Enum() + } + // Editions have no required keyword, this label is only set so that downstream users continue + // treating it as required. + if p.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED { + p.Label = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum() + } + } if field.HasDefault() { def, err := defval.Marshal(field.Default(), field.DefaultEnumValue(), field.Kind(), defval.Descriptor) if err != nil && field.DefaultEnumValue() != nil { diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index ec6572df..c85bfaa5 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -161,7 +161,7 @@ const ( // IsValid reports whether the syntax is valid. func (s Syntax) IsValid() bool { switch s { - case Proto2, Proto3: + case Proto2, Proto3, Editions: return true default: return false @@ -175,6 +175,8 @@ func (s Syntax) String() string { return "proto2" case Proto3: return "proto3" + case Editions: + return "editions" default: return fmt.Sprintf("", s) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index 0c045db6..ea154eec 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -160,8 +160,6 @@ func (p *SourcePath) appendFileOptions(b []byte) []byte { b = p.appendSingularField(b, "java_generic_services", nil) case 18: b = p.appendSingularField(b, "py_generic_services", nil) - case 42: - b = p.appendSingularField(b, "php_generic_services", nil) case 23: b = p.appendSingularField(b, "deprecated", nil) case 31: @@ -375,6 +373,8 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte { b = p.appendRepeatedField(b, "edition_defaults", (*SourcePath).appendFieldOptions_EditionDefault) case 21: b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 22: + b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -485,6 +485,8 @@ func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 3: b = p.appendSingularField(b, "debug_redact", nil) + case 4: + b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -521,6 +523,23 @@ func (p *SourcePath) appendFieldOptions_EditionDefault(b []byte) []byte { return b } +func (p *SourcePath) appendFieldOptions_FeatureSupport(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "edition_introduced", nil) + case 2: + b = p.appendSingularField(b, "edition_deprecated", nil) + case 3: + b = p.appendSingularField(b, "deprecation_warning", nil) + case 4: + b = p.appendSingularField(b, "edition_removed", nil) + } + return b +} + 
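With editions now a first-class Syntax value (see the IsValid and String cases above), code that enumerates or switches on syntax keeps working:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	for _, s := range []protoreflect.Syntax{
		protoreflect.Proto2, protoreflect.Proto3, protoreflect.Editions,
	} {
		fmt.Println(s.String(), s.IsValid()) // proto2 true, proto3 true, editions true
	}
}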
func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { if len(*p) == 0 { return b diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 60ff62b4..cd8fadba 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -510,7 +510,7 @@ type ExtensionType interface { // // ValueOf is more extensive than protoreflect.ValueOf for a given field's // value as it has more type information available. - ValueOf(interface{}) Value + ValueOf(any) Value // InterfaceOf completely unwraps the Value to the underlying Go type. // InterfaceOf panics if the input is nil or does not represent the @@ -519,13 +519,13 @@ type ExtensionType interface { // // InterfaceOf is able to unwrap the Value further than Value.Interface // as it has more type information available. - InterfaceOf(Value) interface{} + InterfaceOf(Value) any // IsValidValue reports whether the Value is valid to assign to the field. IsValidValue(Value) bool // IsValidInterface reports whether the input is valid to assign to the field. - IsValidInterface(interface{}) bool + IsValidInterface(any) bool } // EnumDescriptor describes an enum and @@ -544,6 +544,12 @@ type EnumDescriptor interface { // ReservedRanges is a list of reserved ranges of enum numbers. ReservedRanges() EnumRanges + // IsClosed reports whether this enum uses closed semantics. + // See https://protobuf.dev/programming-guides/enum/#definitions. + // Note: the Go protobuf implementation is not spec compliant and treats + // all enums as open enums. + IsClosed() bool + isEnumDescriptor } type isEnumDescriptor interface{ ProtoType(EnumDescriptor) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go index 7ced876f..75f83a2a 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go @@ -32,11 +32,11 @@ const ( type value struct { pragma.DoNotCompare // 0B - typ valueType // 8B - num uint64 // 8B - str string // 16B - bin []byte // 24B - iface interface{} // 16B + typ valueType // 8B + num uint64 // 8B + str string // 16B + bin []byte // 24B + iface any // 16B } func valueOfString(v string) Value { @@ -45,7 +45,7 @@ func valueOfString(v string) Value { func valueOfBytes(v []byte) Value { return Value{typ: bytesType, bin: v} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { return Value{typ: ifaceType, iface: v} } @@ -55,6 +55,6 @@ func (v Value) getString() string { func (v Value) getBytes() []byte { return v.bin } -func (v Value) getIface() interface{} { +func (v Value) getIface() any { return v.iface } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 16030973..9fe83cef 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -69,8 +69,8 @@ import ( // composite Value. Modifying an empty, read-only value panics. type Value value -// The protoreflect API uses a custom Value union type instead of interface{} -// to keep the future open for performance optimizations. 
Using an interface{} +// The protoreflect API uses a custom Value union type instead of any +// to keep the future open for performance optimizations. Using an any // always incurs an allocation for primitives (e.g., int64) since it needs to // be boxed on the heap (as interfaces can only contain pointers natively). // Instead, we represent the Value union as a flat struct that internally keeps @@ -85,7 +85,7 @@ type Value value // ValueOf returns a Value initialized with the concrete value stored in v. // This panics if the type does not match one of the allowed types in the // Value union. -func ValueOf(v interface{}) Value { +func ValueOf(v any) Value { switch v := v.(type) { case nil: return Value{} @@ -192,10 +192,10 @@ func (v Value) IsValid() bool { return v.typ != nilType } -// Interface returns v as an interface{}. +// Interface returns v as an any. // // Invariant: v == ValueOf(v).Interface() -func (v Value) Interface() interface{} { +func (v Value) Interface() any { switch v.typ { case nilType: return nil @@ -406,8 +406,8 @@ func (k MapKey) IsValid() bool { return Value(k).IsValid() } -// Interface returns k as an interface{}. -func (k MapKey) Interface() interface{} { +// Interface returns k as an any. +func (k MapKey) Interface() any { return Value(k).Interface() } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index b1fdbe3e..7f3583ea 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -45,7 +45,7 @@ var ( // typeOf returns a pointer to the Go type information. // The pointer is comparable and equal if and only if the types are identical. -func typeOf(t interface{}) unsafe.Pointer { +func typeOf(t any) unsafe.Pointer { return (*ifaceHeader)(unsafe.Pointer(&t)).Type } @@ -80,7 +80,7 @@ func valueOfBytes(v []byte) Value { p := (*sliceHeader)(unsafe.Pointer(&v)) return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { p := (*ifaceHeader)(unsafe.Pointer(&v)) return Value{typ: p.Type, ptr: p.Data} } @@ -93,7 +93,7 @@ func (v Value) getBytes() (x []byte) { *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} return x } -func (v Value) getIface() (x interface{}) { +func (v Value) getIface() (x any) { *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} return x } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go index 43547011..f7d38699 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -15,7 +15,7 @@ import ( type ( ifaceHeader struct { - _ [0]interface{} // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. + _ [0]any // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. Type unsafe.Pointer Data unsafe.Pointer } @@ -37,7 +37,7 @@ var ( // typeOf returns a pointer to the Go type information. // The pointer is comparable and equal if and only if the types are identical. 
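The rewritten comment above carries the design rationale behind the `interface{}` → `any` churn in this package: `any` is only a spelling change (a predeclared alias for `interface{}` since Go 1.18), while the real optimization is the flat union struct, which stores primitives inline instead of boxing them on the heap as an interface value would. A self-contained sketch of that pattern under those assumptions — names here are illustrative, not the vendored internals:

```go
package main

import "fmt"

type kind uint8

const (
	kindInt64 kind = iota
	kindString
)

// value packs an int64 or a string without an interface field, so storing an
// int64 costs no heap allocation (an `any` holding an int64 would box it).
type value struct {
	k   kind
	num int64
	str string
}

func valueOfInt64(v int64) value   { return value{k: kindInt64, num: v} }
func valueOfString(v string) value { return value{k: kindString, str: v} }

func (v value) String() string {
	switch v.k {
	case kindInt64:
		return fmt.Sprintf("int64(%d)", v.num)
	default:
		return fmt.Sprintf("string(%q)", v.str)
	}
}

func main() {
	fmt.Println(valueOfInt64(42), valueOfString("hi"))
}
```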
-func typeOf(t interface{}) unsafe.Pointer { +func typeOf(t any) unsafe.Pointer { return (*ifaceHeader)(unsafe.Pointer(&t)).Type } @@ -70,7 +70,7 @@ func valueOfString(v string) Value { func valueOfBytes(v []byte) Value { return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { p := (*ifaceHeader)(unsafe.Pointer(&v)) return Value{typ: p.Type, ptr: p.Data} } @@ -81,7 +81,7 @@ func (v Value) getString() string { func (v Value) getBytes() []byte { return unsafe.Slice((*byte)(v.ptr), v.num) } -func (v Value) getIface() (x interface{}) { +func (v Value) getIface() (x any) { *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} return x } diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 6267dc52..de177733 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -95,7 +95,7 @@ type Files struct { // multiple files. Only top-level declarations are registered. // Note that enum values are in the top-level since that are in the same // scope as the parent enum. - descsByName map[protoreflect.FullName]interface{} + descsByName map[protoreflect.FullName]any filesByPath map[string][]protoreflect.FileDescriptor numFiles int } @@ -117,7 +117,7 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { defer globalMutex.Unlock() } if r.descsByName == nil { - r.descsByName = map[protoreflect.FullName]interface{}{ + r.descsByName = map[protoreflect.FullName]any{ "": &packageDescriptor{}, } r.filesByPath = make(map[string][]protoreflect.FileDescriptor) @@ -485,7 +485,7 @@ type Types struct { } type ( - typesByName map[protoreflect.FullName]interface{} + typesByName map[protoreflect.FullName]any extensionsByMessage map[protoreflect.FullName]extensionsByNumber extensionsByNumber map[protoreflect.FieldNumber]protoreflect.ExtensionType ) @@ -570,7 +570,7 @@ func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error { return nil } -func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error { +func (r *Types) register(kind string, desc protoreflect.Descriptor, typ any) error { name := desc.FullName() prev := r.typesByName[name] if prev != nil { @@ -841,7 +841,7 @@ func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(p } } -func typeName(t interface{}) string { +func typeName(t any) string { switch t.(type) { case protoreflect.EnumType: return "enum" @@ -854,7 +854,7 @@ func typeName(t interface{}) string { } } -func amendErrorWithCaller(err error, prev, curr interface{}) error { +func amendErrorWithCaller(err error, prev, curr any) error { prevPkg := goPackage(prev) currPkg := goPackage(curr) if prevPkg == "" || currPkg == "" || prevPkg == currPkg { @@ -863,7 +863,7 @@ func amendErrorWithCaller(err error, prev, curr interface{}) error { return errors.New("%s\n\tpreviously from: %q\n\tcurrently from: %q", err, prevPkg, currPkg) } -func goPackage(v interface{}) string { +func goPackage(v any) string { switch d := v.(type) { case protoreflect.EnumType: v = d.Descriptor() diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 38daa858..9403eb07 100644 --- 
a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -54,6 +54,9 @@ type Edition int32 const ( // A placeholder for an unknown edition value. Edition_EDITION_UNKNOWN Edition = 0 + // A placeholder edition for specifying default behaviors *before* a feature + // was first introduced. This is effectively an "infinite past". + Edition_EDITION_LEGACY Edition = 900 // Legacy syntax "editions". These pre-date editions, but behave much like // distinct editions. These can't be used to specify the edition of proto // files, but feature definitions must supply proto2/proto3 defaults for @@ -64,6 +67,7 @@ const ( // should not be depended on, but they will always be time-ordered for easy // comparison. Edition_EDITION_2023 Edition = 1000 + Edition_EDITION_2024 Edition = 1001 // Placeholder editions for testing feature resolution. These should not be // used or relyed on outside of tests. Edition_EDITION_1_TEST_ONLY Edition = 1 @@ -71,31 +75,41 @@ const ( Edition_EDITION_99997_TEST_ONLY Edition = 99997 Edition_EDITION_99998_TEST_ONLY Edition = 99998 Edition_EDITION_99999_TEST_ONLY Edition = 99999 + // Placeholder for specifying unbounded edition support. This should only + // ever be used by plugins that can expect to never require any changes to + // support a new edition. + Edition_EDITION_MAX Edition = 2147483647 ) // Enum value maps for Edition. var ( Edition_name = map[int32]string{ - 0: "EDITION_UNKNOWN", - 998: "EDITION_PROTO2", - 999: "EDITION_PROTO3", - 1000: "EDITION_2023", - 1: "EDITION_1_TEST_ONLY", - 2: "EDITION_2_TEST_ONLY", - 99997: "EDITION_99997_TEST_ONLY", - 99998: "EDITION_99998_TEST_ONLY", - 99999: "EDITION_99999_TEST_ONLY", + 0: "EDITION_UNKNOWN", + 900: "EDITION_LEGACY", + 998: "EDITION_PROTO2", + 999: "EDITION_PROTO3", + 1000: "EDITION_2023", + 1001: "EDITION_2024", + 1: "EDITION_1_TEST_ONLY", + 2: "EDITION_2_TEST_ONLY", + 99997: "EDITION_99997_TEST_ONLY", + 99998: "EDITION_99998_TEST_ONLY", + 99999: "EDITION_99999_TEST_ONLY", + 2147483647: "EDITION_MAX", } Edition_value = map[string]int32{ "EDITION_UNKNOWN": 0, + "EDITION_LEGACY": 900, "EDITION_PROTO2": 998, "EDITION_PROTO3": 999, "EDITION_2023": 1000, + "EDITION_2024": 1001, "EDITION_1_TEST_ONLY": 1, "EDITION_2_TEST_ONLY": 2, "EDITION_99997_TEST_ONLY": 99997, "EDITION_99998_TEST_ONLY": 99998, "EDITION_99999_TEST_ONLY": 99999, + "EDITION_MAX": 2147483647, } ) @@ -954,21 +968,21 @@ type FeatureSet_Utf8Validation int32 const ( FeatureSet_UTF8_VALIDATION_UNKNOWN FeatureSet_Utf8Validation = 0 - FeatureSet_NONE FeatureSet_Utf8Validation = 1 FeatureSet_VERIFY FeatureSet_Utf8Validation = 2 + FeatureSet_NONE FeatureSet_Utf8Validation = 3 ) // Enum value maps for FeatureSet_Utf8Validation. var ( FeatureSet_Utf8Validation_name = map[int32]string{ 0: "UTF8_VALIDATION_UNKNOWN", - 1: "NONE", 2: "VERIFY", + 3: "NONE", } FeatureSet_Utf8Validation_value = map[string]int32{ "UTF8_VALIDATION_UNKNOWN": 0, - "NONE": 1, "VERIFY": 2, + "NONE": 3, } ) @@ -1643,12 +1657,12 @@ type FieldDescriptorProto struct { // If true, this is a proto3 "optional". When a proto3 field is optional, it // tracks presence regardless of field type. // - // When proto3_optional is true, this field must be belong to a oneof to - // signal to old proto3 clients that presence is tracked for this field. This - // oneof is known as a "synthetic" oneof, and this field must be its sole - // member (each proto3 optional field gets its own synthetic oneof). 
Synthetic - // oneofs exist in the descriptor only, and do not generate any API. Synthetic - // oneofs must be ordered after all "real" oneofs. + // When proto3_optional is true, this field must belong to a oneof to signal + // to old proto3 clients that presence is tracked for this field. This oneof + // is known as a "synthetic" oneof, and this field must be its sole member + // (each proto3 optional field gets its own synthetic oneof). Synthetic oneofs + // exist in the descriptor only, and do not generate any API. Synthetic oneofs + // must be ordered after all "real" oneofs. // // For message fields, proto3_optional doesn't create any semantic change, // since non-repeated message fields always track presence. However it still @@ -2168,12 +2182,16 @@ type FileOptions struct { // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. - // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. + // A proto2 file can set this to true to opt in to UTF-8 checking for Java, + // which will throw an exception if invalid UTF-8 is parsed from the wire or + // assigned to a string field. + // + // TODO: clarify exactly what kinds of field types this option + // applies to, and update these docs accordingly. + // + // Proto3 files already perform these checks. Setting the option explicitly to + // false has no effect: it cannot be used to opt proto3 files out of UTF-8 + // checks. JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` // Sets the Go package where structs generated from this .proto will be @@ -2195,7 +2213,6 @@ type FileOptions struct { CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` - PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` // Is this file deprecated? 
// Depending on the target platform, this can emit Deprecated annotations // for everything in the file, or it will be completely ignored; in the very @@ -2244,7 +2261,6 @@ const ( Default_FileOptions_CcGenericServices = bool(false) Default_FileOptions_JavaGenericServices = bool(false) Default_FileOptions_PyGenericServices = bool(false) - Default_FileOptions_PhpGenericServices = bool(false) Default_FileOptions_Deprecated = bool(false) Default_FileOptions_CcEnableArenas = bool(true) ) @@ -2352,13 +2368,6 @@ func (x *FileOptions) GetPyGenericServices() bool { return Default_FileOptions_PyGenericServices } -func (x *FileOptions) GetPhpGenericServices() bool { - if x != nil && x.PhpGenericServices != nil { - return *x.PhpGenericServices - } - return Default_FileOptions_PhpGenericServices -} - func (x *FileOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -2472,10 +2481,6 @@ type MessageOptions struct { // for the message, or it will be completely ignored; in the very least, // this is a formalization for deprecating messages. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - // // Whether the message is an automatically generated map entry type for the // maps field. // @@ -2496,6 +2501,10 @@ type MessageOptions struct { // use a native map in the target language to hold the keys and values. // The reflection APIs in such implementations still need to work as // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` // Enable the legacy handling of JSON field name conflicts. This lowercases // and strips underscored from the fields before comparison in proto3 only. @@ -2655,19 +2664,11 @@ type FieldOptions struct { // call from multiple threads concurrently, while non-const methods continue // to require exclusive access. // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - // - // As of May 2022, lazy verifies the contents of the byte stream during - // parsing. An invalid byte stream will cause the overall parsing to fail. + // Note that lazy message fields are still eagerly verified to check + // ill-formed wireformat or missing required fields. Calling IsInitialized() + // on the outer message would fail if the inner message has missing required + // fields. Failed verification would result in parsing failure (except when + // uninitialized messages are acceptable). 
Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` // unverified_lazy does no correctness checks on the byte stream. This should // only be used where lazy with verification is prohibitive for performance @@ -2687,7 +2688,8 @@ type FieldOptions struct { Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"` // Any features defined in the specific edition. - Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` + Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` + FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2819,6 +2821,13 @@ func (x *FieldOptions) GetFeatures() *FeatureSet { return nil } +func (x *FieldOptions) GetFeatureSupport() *FieldOptions_FeatureSupport { + if x != nil { + return x.FeatureSupport + } + return nil +} + func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -3003,6 +3012,8 @@ type EnumValueOptions struct { // out when using debug formats, e.g. when the field contains sensitive // credentials. DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + // Information about the support window of a feature value. + FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,4,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -3066,6 +3077,13 @@ func (x *EnumValueOptions) GetDebugRedact() bool { return Default_EnumValueOptions_DebugRedact } +func (x *EnumValueOptions) GetFeatureSupport() *FieldOptions_FeatureSupport { + if x != nil { + return x.FeatureSupport + } + return nil +} + func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -3976,6 +3994,88 @@ func (x *FieldOptions_EditionDefault) GetValue() string { return "" } +// Information about the support window of a feature. +type FieldOptions_FeatureSupport struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The edition that this feature was first available in. In editions + // earlier than this one, the default assigned to EDITION_LEGACY will be + // used, and proto files will not be able to override it. + EditionIntroduced *Edition `protobuf:"varint,1,opt,name=edition_introduced,json=editionIntroduced,enum=google.protobuf.Edition" json:"edition_introduced,omitempty"` + // The edition this feature becomes deprecated in. Using this after this + // edition may trigger warnings. 
+ EditionDeprecated *Edition `protobuf:"varint,2,opt,name=edition_deprecated,json=editionDeprecated,enum=google.protobuf.Edition" json:"edition_deprecated,omitempty"` + // The deprecation warning text if this feature is used after the edition it + // was marked deprecated in. + DeprecationWarning *string `protobuf:"bytes,3,opt,name=deprecation_warning,json=deprecationWarning" json:"deprecation_warning,omitempty"` + // The edition this feature is no longer available in. In editions after + // this one, the last default assigned will be used, and proto files will + // not be able to override it. + EditionRemoved *Edition `protobuf:"varint,4,opt,name=edition_removed,json=editionRemoved,enum=google.protobuf.Edition" json:"edition_removed,omitempty"` +} + +func (x *FieldOptions_FeatureSupport) Reset() { + *x = FieldOptions_FeatureSupport{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldOptions_FeatureSupport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldOptions_FeatureSupport) ProtoMessage() {} + +func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOptions_FeatureSupport.ProtoReflect.Descriptor instead. +func (*FieldOptions_FeatureSupport) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} +} + +func (x *FieldOptions_FeatureSupport) GetEditionIntroduced() Edition { + if x != nil && x.EditionIntroduced != nil { + return *x.EditionIntroduced + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_FeatureSupport) GetEditionDeprecated() Edition { + if x != nil && x.EditionDeprecated != nil { + return *x.EditionDeprecated + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_FeatureSupport) GetDeprecationWarning() string { + if x != nil && x.DeprecationWarning != nil { + return *x.DeprecationWarning + } + return "" +} + +func (x *FieldOptions_FeatureSupport) GetEditionRemoved() Edition { + if x != nil && x.EditionRemoved != nil { + return *x.EditionRemoved + } + return Edition_EDITION_UNKNOWN +} + // The name of the uninterpreted option. Each string represents a segment in // a dot-separated name. is_extension is true iff a segment represents an // extension (denoted with parentheses in options specs in .proto files). 
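The new `FieldOptions_FeatureSupport` message gives each feature value an explicit support window (`edition_introduced` through `edition_removed`), and since edition numbers are documented above as time-ordered, a consumer can gate feature use with plain comparisons. A hedged sketch of such a check, using only the generated `descriptorpb` API from this update — the `supportedIn` helper is hypothetical:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

// supportedIn reports whether a feature value is usable in the given edition,
// based on the introduced/removed window carried by FeatureSupport. Unset
// bounds (EDITION_UNKNOWN) are treated as open-ended.
func supportedIn(fs *descriptorpb.FieldOptions_FeatureSupport, ed descriptorpb.Edition) bool {
	if fs.GetEditionIntroduced() != descriptorpb.Edition_EDITION_UNKNOWN && ed < fs.GetEditionIntroduced() {
		return false
	}
	if fs.GetEditionRemoved() != descriptorpb.Edition_EDITION_UNKNOWN && ed >= fs.GetEditionRemoved() {
		return false
	}
	return true
}

func main() {
	fs := &descriptorpb.FieldOptions_FeatureSupport{
		EditionIntroduced: descriptorpb.Edition_EDITION_2023.Enum(),
		EditionDeprecated: descriptorpb.Edition_EDITION_2024.Enum(),
	}
	fmt.Println(supportedIn(fs, descriptorpb.Edition_EDITION_PROTO2)) // false: predates introduction
	fmt.Println(supportedIn(fs, descriptorpb.Edition_EDITION_2023))   // true: inside the window
}
```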
@@ -3993,7 +4093,7 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4006,7 +4106,7 @@ func (x *UninterpretedOption_NamePart) String() string { func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4045,14 +4145,17 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` - Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + // Defaults of features that can be overridden in this edition. + OverridableFeatures *FeatureSet `protobuf:"bytes,4,opt,name=overridable_features,json=overridableFeatures" json:"overridable_features,omitempty"` + // Defaults of features that can't be overridden in this edition. + FixedFeatures *FeatureSet `protobuf:"bytes,5,opt,name=fixed_features,json=fixedFeatures" json:"fixed_features,omitempty"` } func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { *x = FeatureSetDefaults_FeatureSetEditionDefault{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4065,7 +4168,7 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4088,9 +4191,16 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetEdition() Edition { return Edition_EDITION_UNKNOWN } -func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFeatures() *FeatureSet { +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetOverridableFeatures() *FeatureSet { if x != nil { - return x.Features + return x.OverridableFeatures + } + return nil +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFixedFeatures() *FeatureSet { + if x != nil { + return x.FixedFeatures } return nil } @@ -4104,7 +4214,7 @@ type SourceCodeInfo_Location struct { // location. // // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition occurs. + // the root FileDescriptorProto to the place where the definition appears. 
// For example, this path: // // [ 4, 3, 2, 7, 1 ] @@ -4196,7 +4306,7 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4209,7 +4319,7 @@ func (x *SourceCodeInfo_Location) String() string { func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4283,7 +4393,7 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4296,7 +4406,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string { func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4451,7 +4561,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xc7, 0x04, 0x0a, 0x15, 0x45, 0x78, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, @@ -4468,290 +4578,239 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x12, 0x68, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x73, 0x12, 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 
0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76, - 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94, 0x01, 0x0a, 0x0b, - 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a, 0x04, 0x08, 0x04, - 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41, - 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, - 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, - 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, - 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x08, 0x6a, - 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, - 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, - 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, - 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, - 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, - 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, - 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, - 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, - 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, - 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, - 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, - 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, - 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, - 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, - 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, - 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 
0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, - 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, - 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, - 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, - 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, - 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0xca, - 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, - 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, - 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, - 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, - 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, - 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 
0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, - 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, - 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, - 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, - 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, - 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, - 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, - 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, - 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, - 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, - 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, - 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 
0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, - 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, - 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, - 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, - 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, - 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, - 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, - 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, - 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, - 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03, 0x0a, 0x0e, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, - 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, - 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, - 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, - 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, - 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 
0x74, - 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, - 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, - 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, - 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, - 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, - 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, - 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, - 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, - 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, - 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, - 0x10, 0x0a, 0x22, 0xad, 0x0a, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, - 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, - 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, - 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, - 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, - 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, - 0x5f, 0x6c, 0x61, 
0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, - 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, - 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, - 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, - 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, - 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, - 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, + 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, + 0x01, 0x02, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x94, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, + 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, + 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, + 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, + 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, - 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, + 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, + 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, + 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, + 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 
0x0a, 0x0a, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, + 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, + 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, + 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, + 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, + 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, + 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, + 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, + 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, + 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, + 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, + 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, + 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, + 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, + 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, + 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 
0x76, + 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, + 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, + 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 
0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, + 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, + 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, + 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, + 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, + 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, + 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, + 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, + 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, + 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, + 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, + 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, + 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, + 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, + 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, + 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 
0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, + 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, + 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, + 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, + 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, + 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, + 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, + 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, + 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, + 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 
0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, + 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, + 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, + 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, + 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, + 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, + 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, + 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, + 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, + 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, + 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, + 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 
0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, @@ -4760,323 +4819,414 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x2f, 0x0a, - 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, - 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, - 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, - 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, - 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, - 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, - 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, - 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, - 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, - 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, - 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, - 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, - 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, - 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, - 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, - 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, - 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 
0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, - 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, - 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, - 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, - 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, - 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, - 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, - 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, - 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, - 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 
0x80, 0x80, 0x80, 0x02, 0x4a, - 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, + 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, + 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, + 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, + 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, - 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, - 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, - 0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 
0x09, - 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, - 0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, - 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, - 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, - 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, - 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, - 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, - 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, + 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x4f, 0x70, 
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, + 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, + 0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, + 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, - 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, - 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, - 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, - 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, - 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, - 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 
0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, - 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, - 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, - 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, - 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, - 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, - 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, - 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, - 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, - 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, - 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xfc, 0x09, 0x0a, 0x0a, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x8b, 0x01, 0x0a, 0x0e, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x39, 0x88, - 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, - 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, - 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, - 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x50, - 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x66, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, - 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 
0x0b, 0x12, 0x06, - 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4f, 0x50, - 0x45, 0x4e, 0x18, 0xe7, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x92, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, - 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x42, 0x27, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, - 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, - 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0x52, 0x15, 0x72, - 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x12, 0x78, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, - 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6, 0x07, - 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0x52, 0x0e, - 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78, - 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, - 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x20, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, - 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, - 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7c, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, - 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x33, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, - 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, - 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, - 0x12, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, - 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, - 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, 0x4c, 0x44, - 0x5f, 
0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, - 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x02, 0x12, - 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, - 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10, - 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, 0x56, 0x0a, - 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, - 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, - 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, - 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x50, - 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, - 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, 0x38, 0x5f, - 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, - 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x01, 0x12, 0x0a, - 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, - 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, - 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, - 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, - 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, - 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, - 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, - 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, - 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9, - 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, - 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xfe, 0x02, 0x0a, 0x12, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, - 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x52, 0x08, 0x64, 0x65, 0x66, 
0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, - 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, + 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, - 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, - 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x1a, 0x87, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, - 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, + 0x0e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, + 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, + 0x64, 0x75, 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, + 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, + 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, + 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 
0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, + 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, + 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, + 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, + 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, + 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, + 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, + 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, + 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, + 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, + 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, + 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, + 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 
0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, + 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, + 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, + 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, + 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, + 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, + 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, + 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, + 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, + 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x64, 0x69, 0x74, 
0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, + 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, + 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, + 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, + 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, + 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, + 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, + 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, + 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, + 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, + 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 
0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, + 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, + 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, + 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, + 0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, + 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, + 0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, + 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, + 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, + 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, + 0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, + 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, + 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, + 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 
0x45, 0x18, 0xe6, + 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, + 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, + 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, + 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xb2, 0x01, + 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, + 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, + 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, + 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, + 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, + 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, + 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, + 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, + 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, + 0x50, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, + 0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, + 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, + 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, + 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, + 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, + 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, + 0x4e, 
0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, + 0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, + 0x08, 0x01, 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, + 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, + 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, + 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, + 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, + 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, + 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, + 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, + 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, + 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, + 0x10, 0xe8, 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x04, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, + 0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, + 0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, - 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, - 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, - 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, - 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, - 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, - 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, - 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, - 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, - 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, - 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, - 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, - 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 
0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, - 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, - 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, - 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, - 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, - 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, - 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xea, 0x01, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, - 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, + 0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, + 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, + 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, + 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, + 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, + 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, + 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, + 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, + 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 
0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, + 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, + 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, + 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, + 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, + 0x10, 0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, + 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, + 0x45, 0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, - 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, - 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, - 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, - 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, - 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, - 0x10, 0x9f, 0x8d, 0x06, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, - 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, - 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, + 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, + 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, + 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, + 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, + 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, + 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, + 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, + 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, } var ( @@ -5092,8 +5242,8 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { } var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 32) -var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33) +var file_google_protobuf_descriptor_proto_goTypes = []any{ (Edition)(0), // 0: google.protobuf.Edition (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type @@ -5139,10 +5289,11 @@ var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration 
(*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault - (*UninterpretedOption_NamePart)(nil), // 45: google.protobuf.UninterpretedOption.NamePart - (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 46: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - (*SourceCodeInfo_Location)(nil), // 47: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 48: google.protobuf.GeneratedCodeInfo.Annotation + (*FieldOptions_FeatureSupport)(nil), // 45: google.protobuf.FieldOptions.FeatureSupport + (*UninterpretedOption_NamePart)(nil), // 46: google.protobuf.UninterpretedOption.NamePart + (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 47: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + (*SourceCodeInfo_Location)(nil), // 48: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 49: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ 18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto @@ -5187,40 +5338,46 @@ var file_google_protobuf_descriptor_proto_depIdxs = []int32{ 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 42: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 43: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 44: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 45: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 46: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 47: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 48: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 49: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 50: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 9, // 51: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 36, // 52: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 53: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 45, // 54: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 10, // 55: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence - 11, // 56: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType - 12, // 57: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding - 13, // 58: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation - 14, // 59: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding - 15, // 
60: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat - 46, // 61: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - 0, // 62: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition - 0, // 63: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition - 47, // 64: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 48, // 65: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 20, // 66: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 0, // 67: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition - 0, // 68: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition - 36, // 69: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features:type_name -> google.protobuf.FeatureSet - 16, // 70: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 71, // [71:71] is the sub-list for method output_type - 71, // [71:71] is the sub-list for method input_type - 71, // [71:71] is the sub-list for extension type_name - 71, // [71:71] is the sub-list for extension extendee - 0, // [0:71] is the sub-list for field type_name + 45, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 35, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet + 45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence + 11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType + 12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding + 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> 
google.protobuf.FeatureSet.Utf8Validation + 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding + 15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat + 47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition + 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition + 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition + 0, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet + 36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet + 16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 77, // [77:77] is the sub-list for method output_type + 77, // [77:77] is the sub-list for method input_type + 77, // [77:77] is the sub-list for extension type_name + 77, // [77:77] is the sub-list for extension extendee + 0, // [0:77] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -5229,7 +5386,7 @@ func file_google_protobuf_descriptor_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*FileDescriptorSet); i { case 0: return &v.state @@ -5241,7 +5398,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*FileDescriptorProto); i { case 0: return &v.state @@ -5253,7 +5410,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto); i { case 0: return &v.state @@ -5265,7 +5422,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any { 
switch v := v.(*ExtensionRangeOptions); i { case 0: return &v.state @@ -5279,7 +5436,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*FieldDescriptorProto); i { case 0: return &v.state @@ -5291,7 +5448,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*OneofDescriptorProto); i { case 0: return &v.state @@ -5303,7 +5460,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*EnumDescriptorProto); i { case 0: return &v.state @@ -5315,7 +5472,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*EnumValueDescriptorProto); i { case 0: return &v.state @@ -5327,7 +5484,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*ServiceDescriptorProto); i { case 0: return &v.state @@ -5339,7 +5496,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*MethodDescriptorProto); i { case 0: return &v.state @@ -5351,7 +5508,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*FileOptions); i { case 0: return &v.state @@ -5365,7 +5522,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*MessageOptions); i { case 0: return &v.state @@ -5379,7 +5536,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*FieldOptions); i { case 0: return &v.state @@ -5393,7 +5550,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*OneofOptions); i { case 0: return &v.state @@ -5407,7 +5564,7 @@ 
func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*EnumOptions); i { case 0: return &v.state @@ -5421,7 +5578,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*EnumValueOptions); i { case 0: return &v.state @@ -5435,7 +5592,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*ServiceOptions); i { case 0: return &v.state @@ -5449,7 +5606,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any { switch v := v.(*MethodOptions); i { case 0: return &v.state @@ -5463,7 +5620,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any { switch v := v.(*UninterpretedOption); i { case 0: return &v.state @@ -5475,7 +5632,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any { switch v := v.(*FeatureSet); i { case 0: return &v.state @@ -5489,7 +5646,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any { switch v := v.(*FeatureSetDefaults); i { case 0: return &v.state @@ -5501,7 +5658,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any { switch v := v.(*SourceCodeInfo); i { case 0: return &v.state @@ -5513,7 +5670,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any { switch v := v.(*GeneratedCodeInfo); i { case 0: return &v.state @@ -5525,7 +5682,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto_ExtensionRange); i { case 0: return &v.state @@ -5537,7 +5694,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - 
file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto_ReservedRange); i { case 0: return &v.state @@ -5549,7 +5706,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any { switch v := v.(*ExtensionRangeOptions_Declaration); i { case 0: return &v.state @@ -5561,7 +5718,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any { switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { case 0: return &v.state @@ -5573,7 +5730,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any { switch v := v.(*FieldOptions_EditionDefault); i { case 0: return &v.state @@ -5585,7 +5742,19 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any { + switch v := v.(*FieldOptions_FeatureSupport); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any { switch v := v.(*UninterpretedOption_NamePart); i { case 0: return &v.state @@ -5597,7 +5766,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any { switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { case 0: return &v.state @@ -5609,7 +5778,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any { switch v := v.(*SourceCodeInfo_Location); i { case 0: return &v.state @@ -5621,7 +5790,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any { switch v := v.(*GeneratedCodeInfo_Annotation); i { case 0: return &v.state @@ -5640,7 +5809,7 @@ func file_google_protobuf_descriptor_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, NumEnums: 17, - NumMessages: 32, + NumMessages: 33, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go new file mode 100644 index 00000000..a2ca940c --- /dev/null 
+++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -0,0 +1,181 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2023 Google Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/go_features.proto + +package gofeaturespb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +type GoFeatures struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether or not to generate the deprecated UnmarshalJSON method for enums. + LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"` +} + +func (x *GoFeatures) Reset() { + *x = GoFeatures{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_go_features_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GoFeatures) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GoFeatures) ProtoMessage() {} + +func (x *GoFeatures) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_go_features_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GoFeatures.ProtoReflect.Descriptor instead. +func (*GoFeatures) Descriptor() ([]byte, []int) { + return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0} +} + +func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { + if x != nil && x.LegacyUnmarshalJsonEnum != nil { + return *x.LegacyUnmarshalJsonEnum + } + return false +} + +var file_google_protobuf_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FeatureSet)(nil), + ExtensionType: (*GoFeatures)(nil), + Field: 1002, + Name: "pb.go", + Tag: "bytes,1002,opt,name=go", + Filename: "google/protobuf/go_features.proto", + }, +} + +// Extension fields to descriptorpb.FeatureSet. 
+var ( + // optional pb.GoFeatures go = 1002; + E_Go = &file_google_protobuf_go_features_proto_extTypes[0] +) + +var File_google_protobuf_go_features_proto protoreflect.FileDescriptor + +var file_google_protobuf_go_features_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x47, 0x6f, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, + 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01, + 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, + 0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, + 0xe7, 0x07, 0xb2, 0x01, 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, + 0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, + 0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, + 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, + 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, + 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, + 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, +} + +var ( + file_google_protobuf_go_features_proto_rawDescOnce sync.Once + file_google_protobuf_go_features_proto_rawDescData = file_google_protobuf_go_features_proto_rawDesc +) + +func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { + file_google_protobuf_go_features_proto_rawDescOnce.Do(func() { + file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_go_features_proto_rawDescData) + }) + return file_google_protobuf_go_features_proto_rawDescData +} + +var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_go_features_proto_goTypes = []any{ + (*GoFeatures)(nil), // 0: pb.GoFeatures + (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet +} +var file_google_protobuf_go_features_proto_depIdxs = []int32{ + 1, // 0: pb.go:extendee -> google.protobuf.FeatureSet + 0, 
// 1: pb.go:type_name -> pb.GoFeatures + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_go_features_proto_init() } +func file_google_protobuf_go_features_proto_init() { + if File_google_protobuf_go_features_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*GoFeatures); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_go_features_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_protobuf_go_features_proto_goTypes, + DependencyIndexes: file_google_protobuf_go_features_proto_depIdxs, + MessageInfos: file_google_protobuf_go_features_proto_msgTypes, + ExtensionInfos: file_google_protobuf_go_features_proto_extTypes, + }.Build() + File_google_protobuf_go_features_proto = out.File + file_google_protobuf_go_features_proto_rawDesc = nil + file_google_protobuf_go_features_proto_goTypes = nil + file_google_protobuf_go_features_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 9de51be5..7172b43d 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -445,7 +445,7 @@ func file_google_protobuf_any_proto_rawDescGZIP() []byte { } var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_any_proto_goTypes = []interface{}{ +var file_google_protobuf_any_proto_goTypes = []any{ (*Any)(nil), // 0: google.protobuf.Any } var file_google_protobuf_any_proto_depIdxs = []int32{ @@ -462,7 +462,7 @@ func file_google_protobuf_any_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Any); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index df709a8d..1b71bcd9 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -323,7 +323,7 @@ func file_google_protobuf_duration_proto_rawDescGZIP() []byte { } var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_duration_proto_goTypes = []interface{}{ +var file_google_protobuf_duration_proto_goTypes = []any{ (*Duration)(nil), // 0: google.protobuf.Duration } var file_google_protobuf_duration_proto_depIdxs = []int32{ @@ -340,7 +340,7 @@ func file_google_protobuf_duration_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + 
file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Duration); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go index 9a7277ba..d87b4fb8 100644 --- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -115,7 +115,7 @@ func file_google_protobuf_empty_proto_rawDescGZIP() []byte { } var file_google_protobuf_empty_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_empty_proto_goTypes = []interface{}{ +var file_google_protobuf_empty_proto_goTypes = []any{ (*Empty)(nil), // 0: google.protobuf.Empty } var file_google_protobuf_empty_proto_depIdxs = []int32{ @@ -132,7 +132,7 @@ func file_google_protobuf_empty_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Empty); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index e8789cb3..ac1e91bb 100644 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -537,7 +537,7 @@ func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte { } var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_field_mask_proto_goTypes = []interface{}{ +var file_google_protobuf_field_mask_proto_goTypes = []any{ (*FieldMask)(nil), // 0: google.protobuf.FieldMask } var file_google_protobuf_field_mask_proto_depIdxs = []int32{ @@ -554,7 +554,7 @@ func file_google_protobuf_field_mask_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*FieldMask); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go index d2bac8b8..d45361cb 100644 --- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -49,11 +49,11 @@ // The standard Go "encoding/json" package has functionality to serialize // arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and // ListValue.AsSlice methods can convert the protobuf message representation into -// a form represented by interface{}, map[string]interface{}, and []interface{}. +// a form represented by any, map[string]any, and []any. // This form can be used with other packages that operate on such data structures // and also directly with the standard json package. // -// In order to convert the interface{}, map[string]interface{}, and []interface{} +// In order to convert the any, map[string]any, and []any // forms back as Value, Struct, and ListValue messages, use the NewStruct, // NewList, and NewValue constructor functions. 
// @@ -88,28 +88,28 @@ // // To construct a Value message representing the above JSON object: // -// m, err := structpb.NewValue(map[string]interface{}{ +// m, err := structpb.NewValue(map[string]any{ // "firstName": "John", // "lastName": "Smith", // "isAlive": true, // "age": 27, -// "address": map[string]interface{}{ +// "address": map[string]any{ // "streetAddress": "21 2nd Street", // "city": "New York", // "state": "NY", // "postalCode": "10021-3100", // }, -// "phoneNumbers": []interface{}{ -// map[string]interface{}{ +// "phoneNumbers": []any{ +// map[string]any{ // "type": "home", // "number": "212 555-1234", // }, -// map[string]interface{}{ +// map[string]any{ // "type": "office", // "number": "646 555-4567", // }, // }, -// "children": []interface{}{}, +// "children": []any{}, // "spouse": nil, // }) // if err != nil { @@ -197,7 +197,7 @@ type Struct struct { // NewStruct constructs a Struct from a general-purpose Go map. // The map keys must be valid UTF-8. // The map values are converted using NewValue. -func NewStruct(v map[string]interface{}) (*Struct, error) { +func NewStruct(v map[string]any) (*Struct, error) { x := &Struct{Fields: make(map[string]*Value, len(v))} for k, v := range v { if !utf8.ValidString(k) { @@ -214,9 +214,9 @@ func NewStruct(v map[string]interface{}) (*Struct, error) { // AsMap converts x to a general-purpose Go map. // The map values are converted by calling Value.AsInterface. -func (x *Struct) AsMap() map[string]interface{} { +func (x *Struct) AsMap() map[string]any { f := x.GetFields() - vs := make(map[string]interface{}, len(f)) + vs := make(map[string]any, len(f)) for k, v := range f { vs[k] = v.AsInterface() } @@ -306,13 +306,13 @@ type Value struct { // ║ float32, float64 │ stored as NumberValue ║ // ║ string │ stored as StringValue; must be valid UTF-8 ║ // ║ []byte │ stored as StringValue; base64-encoded ║ -// ║ map[string]interface{} │ stored as StructValue ║ -// ║ []interface{} │ stored as ListValue ║ +// ║ map[string]any │ stored as StructValue ║ +// ║ []any │ stored as ListValue ║ // ╚════════════════════════╧════════════════════════════════════════════╝ // // When converting an int64 or uint64 to a NumberValue, numeric precision loss // is possible since they are stored as a float64. -func NewValue(v interface{}) (*Value, error) { +func NewValue(v any) (*Value, error) { switch v := v.(type) { case nil: return NewNullValue(), nil @@ -342,13 +342,13 @@ func NewValue(v interface{}) (*Value, error) { case []byte: s := base64.StdEncoding.EncodeToString(v) return NewStringValue(s), nil - case map[string]interface{}: + case map[string]any: v2, err := NewStruct(v) if err != nil { return nil, err } return NewStructValue(v2), nil - case []interface{}: + case []any: v2, err := NewList(v) if err != nil { return nil, err @@ -396,7 +396,7 @@ func NewListValue(v *ListValue) *Value { // // Floating-point values (i.e., "NaN", "Infinity", and "-Infinity") are // converted as strings to remain compatible with MarshalJSON. -func (x *Value) AsInterface() interface{} { +func (x *Value) AsInterface() any { switch v := x.GetKind().(type) { case *Value_NumberValue: if v != nil { @@ -580,7 +580,7 @@ type ListValue struct { // NewList constructs a ListValue from a general-purpose Go slice. // The slice elements are converted using NewValue. 
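// A hedged illustration of the any-based API above (not from the upstream docs):
// a round trip through NewList and AsSlice. Numbers come back as float64 because
// Value stores them as NumberValue.
//
//	lv, err := structpb.NewList([]any{"a", 1, true})
//	if err != nil {
//		panic(err) // handle the error appropriately
//	}
//	_ = lv.AsSlice() // []any{"a", float64(1), true}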
-func NewList(v []interface{}) (*ListValue, error) { +func NewList(v []any) (*ListValue, error) { x := &ListValue{Values: make([]*Value, len(v))} for i, v := range v { var err error @@ -594,9 +594,9 @@ func NewList(v []interface{}) (*ListValue, error) { // AsSlice converts x to a general-purpose Go slice. // The slice elements are converted by calling Value.AsInterface. -func (x *ListValue) AsSlice() []interface{} { +func (x *ListValue) AsSlice() []any { vals := x.GetValues() - vs := make([]interface{}, len(vals)) + vs := make([]any, len(vals)) for i, v := range vals { vs[i] = v.AsInterface() } @@ -716,7 +716,7 @@ func file_google_protobuf_struct_proto_rawDescGZIP() []byte { var file_google_protobuf_struct_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_protobuf_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_google_protobuf_struct_proto_goTypes = []interface{}{ +var file_google_protobuf_struct_proto_goTypes = []any{ (NullValue)(0), // 0: google.protobuf.NullValue (*Struct)(nil), // 1: google.protobuf.Struct (*Value)(nil), // 2: google.protobuf.Value @@ -743,7 +743,7 @@ func file_google_protobuf_struct_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Struct); i { case 0: return &v.state @@ -755,7 +755,7 @@ func file_google_protobuf_struct_proto_init() { return nil } } - file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Value); i { case 0: return &v.state @@ -767,7 +767,7 @@ func file_google_protobuf_struct_proto_init() { return nil } } - file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ListValue); i { case 0: return &v.state @@ -780,7 +780,7 @@ func file_google_protobuf_struct_proto_init() { } } } - file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{ (*Value_NullValue)(nil), (*Value_NumberValue)(nil), (*Value_StringValue)(nil), diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 81511a33..83a5a645 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -332,7 +332,7 @@ func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { } var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_timestamp_proto_goTypes = []interface{}{ +var file_google_protobuf_timestamp_proto_goTypes = []any{ (*Timestamp)(nil), // 0: google.protobuf.Timestamp } var file_google_protobuf_timestamp_proto_depIdxs = []int32{ @@ -349,7 +349,7 @@ func file_google_protobuf_timestamp_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Timestamp); i { case 0: return &v.state diff --git 
a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index 762a8713..e473f826 100644 --- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -605,7 +605,7 @@ func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte { } var file_google_protobuf_wrappers_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_google_protobuf_wrappers_proto_goTypes = []interface{}{ +var file_google_protobuf_wrappers_proto_goTypes = []any{ (*DoubleValue)(nil), // 0: google.protobuf.DoubleValue (*FloatValue)(nil), // 1: google.protobuf.FloatValue (*Int64Value)(nil), // 2: google.protobuf.Int64Value @@ -630,7 +630,7 @@ func file_google_protobuf_wrappers_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*DoubleValue); i { case 0: return &v.state @@ -642,7 +642,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*FloatValue); i { case 0: return &v.state @@ -654,7 +654,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Int64Value); i { case 0: return &v.state @@ -666,7 +666,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*UInt64Value); i { case 0: return &v.state @@ -678,7 +678,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*Int32Value); i { case 0: return &v.state @@ -690,7 +690,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*UInt32Value); i { case 0: return &v.state @@ -702,7 +702,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*BoolValue); i { case 0: return &v.state @@ -714,7 +714,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*StringValue); i { case 0: return &v.state @@ -726,7 +726,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - 
file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*BytesValue); i { case 0: return &v.state diff --git a/vendor/gopkg.in/ini.v1/.editorconfig b/vendor/gopkg.in/ini.v1/.editorconfig new file mode 100644 index 00000000..4a2d9180 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/.editorconfig @@ -0,0 +1,12 @@ +# http://editorconfig.org + +root = true + +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true + +[*_test.go] +trim_trailing_whitespace = false diff --git a/vendor/gopkg.in/ini.v1/.gitignore b/vendor/gopkg.in/ini.v1/.gitignore index 12411127..588388bd 100644 --- a/vendor/gopkg.in/ini.v1/.gitignore +++ b/vendor/gopkg.in/ini.v1/.gitignore @@ -4,3 +4,4 @@ ini.sublime-workspace testdata/conf_reflect.ini .idea /.vscode +.DS_Store diff --git a/vendor/gopkg.in/ini.v1/.golangci.yml b/vendor/gopkg.in/ini.v1/.golangci.yml new file mode 100644 index 00000000..631e3692 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/.golangci.yml @@ -0,0 +1,27 @@ +linters-settings: + staticcheck: + checks: [ + "all", + "-SA1019" # There are valid use cases of strings.Title + ] + nakedret: + max-func-lines: 0 # Disallow any unnamed return statement + +linters: + enable: + - deadcode + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - structcheck + - typecheck + - unused + - varcheck + - nakedret + - gofmt + - rowserrcheck + - unconvert + - goimports + - unparam diff --git a/vendor/gopkg.in/ini.v1/README.md b/vendor/gopkg.in/ini.v1/README.md index 5d65658b..30606d97 100644 --- a/vendor/gopkg.in/ini.v1/README.md +++ b/vendor/gopkg.in/ini.v1/README.md @@ -1,6 +1,6 @@ # INI -[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/go-ini/ini/Go?logo=github&style=for-the-badge)](https://github.com/go-ini/ini/actions?query=workflow%3AGo) +[![GitHub Workflow Status](https://img.shields.io/github/checks-status/go-ini/ini/main?logo=github&style=for-the-badge)](https://github.com/go-ini/ini/actions?query=branch%3Amain) [![codecov](https://img.shields.io/codecov/c/github/go-ini/ini/master?logo=codecov&style=for-the-badge)](https://codecov.io/gh/go-ini/ini) [![GoDoc](https://img.shields.io/badge/GoDoc-Reference-blue?style=for-the-badge&logo=go)](https://pkg.go.dev/github.com/go-ini/ini?tab=doc) [![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg?style=for-the-badge&logo=sourcegraph)](https://sourcegraph.com/github.com/go-ini/ini) @@ -24,7 +24,7 @@ Package ini provides INI file read and write functionality in Go. ## Installation -The minimum requirement of Go is **1.6**. +The minimum requirement of Go is **1.13**. ```sh $ go get gopkg.in/ini.v1 diff --git a/vendor/gopkg.in/ini.v1/codecov.yml b/vendor/gopkg.in/ini.v1/codecov.yml index fc947f23..e02ec84b 100644 --- a/vendor/gopkg.in/ini.v1/codecov.yml +++ b/vendor/gopkg.in/ini.v1/codecov.yml @@ -4,6 +4,13 @@ coverage: project: default: threshold: 1% + informational: true + patch: + default: + only_pulls: true + informational: true comment: - layout: 'diff, files' + layout: 'diff' + +github_checks: false diff --git a/vendor/gopkg.in/ini.v1/deprecated.go b/vendor/gopkg.in/ini.v1/deprecated.go index e8bda06e..48b8e66d 100644 --- a/vendor/gopkg.in/ini.v1/deprecated.go +++ b/vendor/gopkg.in/ini.v1/deprecated.go @@ -14,12 +14,9 @@ package ini -const ( +var ( // Deprecated: Use "DefaultSection" instead.
DEFAULT_SECTION = DefaultSection -) - -var ( // Deprecated: AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. AllCapsUnderscore = SnackCase ) diff --git a/vendor/gopkg.in/ini.v1/error.go b/vendor/gopkg.in/ini.v1/error.go index d88347c5..f66bc94b 100644 --- a/vendor/gopkg.in/ini.v1/error.go +++ b/vendor/gopkg.in/ini.v1/error.go @@ -32,3 +32,18 @@ func IsErrDelimiterNotFound(err error) bool { func (err ErrDelimiterNotFound) Error() string { return fmt.Sprintf("key-value delimiter not found: %s", err.Line) } + +// ErrEmptyKeyName indicates that a key name is missing where one is required. +type ErrEmptyKeyName struct { + Line string +} + +// IsErrEmptyKeyName returns true if the given error is an instance of ErrEmptyKeyName. +func IsErrEmptyKeyName(err error) bool { + _, ok := err.(ErrEmptyKeyName) + return ok +} + +func (err ErrEmptyKeyName) Error() string { + return fmt.Sprintf("empty key name: %s", err.Line) +} diff --git a/vendor/gopkg.in/ini.v1/file.go b/vendor/gopkg.in/ini.v1/file.go index f95606f9..f8b22408 100644 --- a/vendor/gopkg.in/ini.v1/file.go +++ b/vendor/gopkg.in/ini.v1/file.go @@ -55,6 +55,9 @@ func newFile(dataSources []dataSource, opts LoadOptions) *File { if len(opts.KeyValueDelimiterOnWrite) == 0 { opts.KeyValueDelimiterOnWrite = "=" } + if len(opts.ChildSectionDelimiter) == 0 { + opts.ChildSectionDelimiter = "." + } return &File{ BlockMode: true, @@ -82,7 +85,7 @@ func (f *File) NewSection(name string) (*Section, error) { return nil, errors.New("empty section name") } - if f.options.Insensitive && name != DefaultSection { + if (f.options.Insensitive || f.options.InsensitiveSections) && name != DefaultSection { name = strings.ToLower(name) } @@ -139,12 +142,18 @@ func (f *File) GetSection(name string) (*Section, error) { return secs[0], err } +// HasSection returns true if the file contains a section with given name. +func (f *File) HasSection(name string) bool { + section, _ := f.GetSection(name) + return section != nil +} + // SectionsByName returns all sections with given name. func (f *File) SectionsByName(name string) ([]*Section, error) { if len(name) == 0 { name = DefaultSection } - if f.options.Insensitive { + if f.options.Insensitive || f.options.InsensitiveSections { name = strings.ToLower(name) } @@ -165,8 +174,9 @@ func (f *File) SectionsByName(name string) ([]*Section, error) { func (f *File) Section(name string) *Section { sec, err := f.GetSection(name) if err != nil { - // Note: It's OK here because the only possible error is empty section name, - // but if it's empty, this piece of code won't be executed. + if name == "" { + name = DefaultSection + } sec, _ = f.NewSection(name) return sec } @@ -236,7 +246,7 @@ func (f *File) DeleteSectionWithIndex(name string, index int) error { if len(name) == 0 { name = DefaultSection } - if f.options.Insensitive { + if f.options.Insensitive || f.options.InsensitiveSections { name = strings.ToLower(name) } @@ -299,6 +309,9 @@ func (f *File) Reload() (err error) { } return err } + if f.options.ShortCircuit { + return nil + } } return nil } @@ -329,6 +342,7 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { // Use buffer to make sure target is safe until finish encoding.
buf := bytes.NewBuffer(nil) + lastSectionIdx := len(f.sectionList) - 1 for i, sname := range f.sectionList { sec := f.SectionWithIndex(sname, f.sectionIndexes[i]) if len(sec.Comment) > 0 { @@ -347,7 +361,7 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { } } - if i > 0 || DefaultHeader { + if i > 0 || DefaultHeader || (i == 0 && strings.ToUpper(sec.name) != DefaultSection) { if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { return nil, err } @@ -358,12 +372,13 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { } } + isLastSection := i == lastSectionIdx if sec.isRawSection { if _, err := buf.WriteString(sec.rawBody); err != nil { return nil, err } - if PrettySection { + if PrettySection && !isLastSection { // Put a line between sections if _, err := buf.WriteString(LineBreak); err != nil { return nil, err @@ -429,16 +444,14 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { kname = `"""` + kname + `"""` } - for _, val := range key.ValueWithShadows() { + writeKeyValue := func(val string) (bool, error) { if _, err := buf.WriteString(kname); err != nil { - return nil, err + return false, err } if key.isBooleanType { - if kname != sec.keyList[len(sec.keyList)-1] { - buf.WriteString(LineBreak) - } - continue KeyList + buf.WriteString(LineBreak) + return true, nil } // Write out alignment spaces before "=" sign @@ -451,12 +464,31 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { val = `"""` + val + `"""` } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { val = "`" + val + "`" + } else if len(strings.TrimSpace(val)) != len(val) { + val = `"` + val + `"` } if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { + return false, err + } + return false, nil + } + + shadows := key.ValueWithShadows() + if len(shadows) == 0 { + if _, err := writeKeyValue(""); err != nil { return nil, err } } + for _, val := range shadows { + exitLoop, err := writeKeyValue(val) + if err != nil { + return nil, err + } else if exitLoop { + continue KeyList + } + } + for _, val := range key.nestedValues { if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil { return nil, err @@ -464,7 +496,7 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { } } - if PrettySection { + if PrettySection && !isLastSection { // Put a line between sections if _, err := buf.WriteString(LineBreak); err != nil { return nil, err @@ -494,7 +526,7 @@ func (f *File) WriteTo(w io.Writer) (int64, error) { // SaveToIndent writes content to file system with given value indention. func (f *File) SaveToIndent(filename, indent string) error { // Note: Because we are truncating with os.Create, - // so it's safer to save to a temporary file location and rename afte done. + // so it's safer to save to a temporary file location and rename after done. buf, err := f.writeToBuffer(indent) if err != nil { return err diff --git a/vendor/gopkg.in/ini.v1/ini.go b/vendor/gopkg.in/ini.v1/ini.go index 2961543f..99e7f865 100644 --- a/vendor/gopkg.in/ini.v1/ini.go +++ b/vendor/gopkg.in/ini.v1/ini.go @@ -1,5 +1,3 @@ -// +build go1.6 - // Copyright 2014 Unknwon // // Licensed under the Apache License, Version 2.0 (the "License"): you may @@ -25,15 +23,15 @@ import ( ) const ( - // DefaultSection is the name of default section. You can use this constant or the string literal. - // In most of cases, an empty string is all you need to access the section. 
- DefaultSection = "DEFAULT" - // Maximum allowed depth when recursively substituing variable names. depthValues = 99 ) var ( + // DefaultSection is the name of the default section. You can use this var or the string literal. + // In most cases, an empty string is all you need to access the section. + DefaultSection = "DEFAULT" + // LineBreak is the delimiter to determine or compose a new line. // This variable will be changed to "\r\n" automatically on Windows at package init time. LineBreak = "\n" @@ -71,12 +69,18 @@ type LoadOptions struct { Loose bool // Insensitive indicates whether the parser forces all section and key names to lowercase. Insensitive bool + // InsensitiveSections indicates whether the parser forces all section names to lowercase. + InsensitiveSections bool + // InsensitiveKeys indicates whether the parser forces all key names to lowercase. + InsensitiveKeys bool // IgnoreContinuation indicates whether to ignore continuation lines while parsing. IgnoreContinuation bool // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. IgnoreInlineComment bool // SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs. SkipUnrecognizableLines bool + // ShortCircuit indicates whether to ignore other configuration sources once the first available configuration source has been loaded. + ShortCircuit bool // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. // This type of keys are mostly used in my.cnf. AllowBooleanKeys bool @@ -107,8 +111,10 @@ type LoadOptions struct { UnparseableSections []string // KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:". KeyValueDelimiters string - // KeyValueDelimiters is the delimiter that are used to separate key and value output. By default, it is "=". + // KeyValueDelimiterOnWrite is the delimiter that is used to separate key and value in output. By default, it is "=". KeyValueDelimiterOnWrite string + // ChildSectionDelimiter is the delimiter that is used to separate child sections. By default, it is ".". + ChildSectionDelimiter string // PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes). PreserveSurroundedQuote bool // DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values). @@ -117,6 +123,8 @@ type LoadOptions struct { ReaderBufferSize int // AllowNonUniqueSections indicates whether to allow sections with the same name multiple times. AllowNonUniqueSections bool + // AllowDuplicateShadowValues indicates whether duplicate values for shadowed keys are kept rather than deduplicated. + AllowDuplicateShadowValues bool } // DebugFunc is the type of function called to log parse events. diff --git a/vendor/gopkg.in/ini.v1/key.go b/vendor/gopkg.in/ini.v1/key.go index 8baafd9e..a19d9f38 100644 --- a/vendor/gopkg.in/ini.v1/key.go +++ b/vendor/gopkg.in/ini.v1/key.go @@ -54,14 +54,16 @@ func (k *Key) addShadow(val string) error { return errors.New("cannot add shadow to auto-increment or boolean key") } - // Deduplicate shadows based on their values. - if k.value == val { - return nil - } - for i := range k.shadows { - if k.shadows[i].value == val { + if !k.s.f.options.AllowDuplicateShadowValues { + // Deduplicate shadows based on their values.
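// To make AllowDuplicateShadowValues concrete, a speculative example (assuming
// the shadow API shown in this file; the INI content is invented): with
// AllowShadows enabled and AllowDuplicateShadowValues left false, repeated
// identical values collapse into a single entry.
//
//	f, _ := ini.LoadSources(ini.LoadOptions{AllowShadows: true},
//		[]byte("[s]\nk = v\nk = v\nk = w\n"))
//	_ = f.Section("s").Key("k").ValueWithShadows() // ["v", "w"]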
+ if k.value == val { return nil } + for i := range k.shadows { + if k.shadows[i].value == val { + return nil + } + } } shadow := newKey(k.s, k.name, val) @@ -108,15 +110,24 @@ func (k *Key) Value() string { return k.value } -// ValueWithShadows returns raw values of key and its shadows if any. +// ValueWithShadows returns raw values of key and its shadows if any. Shadow +// keys with empty values are ignored from the returned list. func (k *Key) ValueWithShadows() []string { if len(k.shadows) == 0 { + if k.value == "" { + return []string{} + } return []string{k.value} } - vals := make([]string, len(k.shadows)+1) - vals[0] = k.value - for i := range k.shadows { - vals[i+1] = k.shadows[i].value + + vals := make([]string, 0, len(k.shadows)+1) + if k.value != "" { + vals = append(vals, k.value) + } + for _, s := range k.shadows { + if s.value != "" { + vals = append(vals, s.value) + } } return vals } @@ -781,10 +792,8 @@ func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]u return vals, err } - type Parser func(str string) (interface{}, error) - // parseTimesFormat transforms strings to times in given format. func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { vals := make([]time.Time, 0, len(strs)) @@ -801,7 +810,6 @@ func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnO return vals, err } - // doParse transforms strings to different types func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) { vals := make([]interface{}, 0, len(strs)) diff --git a/vendor/gopkg.in/ini.v1/parser.go b/vendor/gopkg.in/ini.v1/parser.go index ea6c08b0..44fc526c 100644 --- a/vendor/gopkg.in/ini.v1/parser.go +++ b/vendor/gopkg.in/ini.v1/parser.go @@ -131,7 +131,7 @@ func readKeyName(delimiters string, in []byte) (string, int, error) { // Check if key name surrounded by quotes. 
var keyQuote string if line[0] == '"' { - if len(line) > 6 && string(line[0:3]) == `"""` { + if len(line) > 6 && line[0:3] == `"""` { keyQuote = `"""` } else { keyQuote = `"` @@ -164,6 +164,10 @@ func readKeyName(delimiters string, in []byte) (string, int, error) { if endIdx < 0 { return "", -1, ErrDelimiterNotFound{line} } + if endIdx == 0 { + return "", -1, ErrEmptyKeyName{line} + } + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil } @@ -232,7 +236,7 @@ func (p *parser) readValue(in []byte, bufferSize int) (string, error) { } var valQuote string - if len(line) > 3 && string(line[0:3]) == `"""` { + if len(line) > 3 && line[0:3] == `"""` { valQuote = `"""` } else if line[0] == '`' { valQuote = "`" @@ -289,12 +293,8 @@ func (p *parser) readValue(in []byte, bufferSize int) (string, error) { hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote { line = line[1 : len(line)-1] } else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols { - if strings.Contains(line, `\;`) { - line = strings.Replace(line, `\;`, ";", -1) - } - if strings.Contains(line, `\#`) { - line = strings.Replace(line, `\#`, "#", -1) - } + line = strings.ReplaceAll(line, `\;`, ";") + line = strings.ReplaceAll(line, `\#`, "#") } else if p.options.AllowPythonMultilineValues && lastChar == '\n' { return p.readPythonMultilines(line, bufferSize) } @@ -306,15 +306,9 @@ func (p *parser) readPythonMultilines(line string, bufferSize int) (string, erro parserBufferPeekResult, _ := p.buf.Peek(bufferSize) peekBuffer := bytes.NewBuffer(parserBufferPeekResult) - indentSize := 0 for { peekData, peekErr := peekBuffer.ReadBytes('\n') - if peekErr != nil { - if peekErr == io.EOF { - p.debug("readPythonMultilines: io.EOF, peekData: %q, line: %q", string(peekData), line) - return line, nil - } - + if peekErr != nil && peekErr != io.EOF { p.debug("readPythonMultilines: failed to peek with error: %v", peekErr) return "", peekErr } @@ -333,19 +327,6 @@ func (p *parser) readPythonMultilines(line string, bufferSize int) (string, erro return line, nil } - // Determine indent size and line prefix. - currentIndentSize := len(peekMatches[1]) - if indentSize < 1 { - indentSize = currentIndentSize - p.debug("readPythonMultilines: indent size is %d", indentSize) - } - - // Make sure each line is indented at least as far as first line. - if currentIndentSize < indentSize { - p.debug("readPythonMultilines: end of value, current indent: %d, expected indent: %d, line: %q", currentIndentSize, indentSize, line) - return line, nil - } - // Advance the parser reader (buffer) in-sync with the peek buffer. _, err := p.buf.Discard(len(peekData)) if err != nil { @@ -353,8 +334,7 @@ func (p *parser) readPythonMultilines(line string, bufferSize int) (string, erro return "", err } - // Handle indented empty line. - line += "\n" + peekMatches[1][indentSize:] + peekMatches[2] + line += "\n" + peekMatches[0] } } @@ -377,7 +357,7 @@ func (f *File) parse(reader io.Reader) (err error) { // Ignore error because default section name is never empty string. 
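// A speculative aside on the ErrEmptyKeyName path added in readKeyName above
// (the input literal is invented): a line such as "= value" now yields a
// dedicated error that callers can detect, unless SkipUnrecognizableLines
// drops the line instead.
//
//	_, err := ini.Load([]byte("= value"))
//	if ini.IsErrEmptyKeyName(err) {
//		// skip or report the malformed line
//	}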
name := DefaultSection - if f.options.Insensitive { + if f.options.Insensitive || f.options.InsensitiveSections { name = strings.ToLower(DefaultSection) } section, _ := f.NewSection(name) @@ -465,11 +445,13 @@ func (f *File) parse(reader io.Reader) (err error) { // Reset auto-counter and comments p.comment.Reset() p.count = 1 + // Nested values can't span sections + isLastValueEmpty = false inUnparseableSection = false for i := range f.options.UnparseableSections { if f.options.UnparseableSections[i] == name || - (f.options.Insensitive && strings.EqualFold(f.options.UnparseableSections[i], name)) { + ((f.options.Insensitive || f.options.InsensitiveSections) && strings.EqualFold(f.options.UnparseableSections[i], name)) { inUnparseableSection = true continue } @@ -485,8 +467,9 @@ func (f *File) parse(reader io.Reader) (err error) { kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line) if err != nil { + switch { // Treat as boolean key when desired, and whole line is key name. - if IsErrDelimiterNotFound(err) { + case IsErrDelimiterNotFound(err): switch { case f.options.AllowBooleanKeys: kname, err := p.readValue(line, parserBufferSize) @@ -504,6 +487,8 @@ func (f *File) parse(reader io.Reader) (err error) { case f.options.SkipUnrecognizableLines: continue } + case IsErrEmptyKeyName(err) && f.options.SkipUnrecognizableLines: + continue } return err } diff --git a/vendor/gopkg.in/ini.v1/section.go b/vendor/gopkg.in/ini.v1/section.go index 6ba5ac29..a3615d82 100644 --- a/vendor/gopkg.in/ini.v1/section.go +++ b/vendor/gopkg.in/ini.v1/section.go @@ -66,7 +66,7 @@ func (s *Section) SetBody(body string) { func (s *Section) NewKey(name, val string) (*Key, error) { if len(name) == 0 { return nil, errors.New("error creating new key: empty key name") - } else if s.f.options.Insensitive { + } else if s.f.options.Insensitive || s.f.options.InsensitiveKeys { name = strings.ToLower(name) } @@ -109,7 +109,7 @@ func (s *Section) GetKey(name string) (*Key, error) { if s.f.BlockMode { s.f.lock.RLock() } - if s.f.options.Insensitive { + if s.f.options.Insensitive || s.f.options.InsensitiveKeys { name = strings.ToLower(name) } key := s.keys[name] @@ -121,7 +121,7 @@ func (s *Section) GetKey(name string) (*Key, error) { // Check if it is a child-section. sname := s.name for { - if i := strings.LastIndex(sname, "."); i > -1 { + if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { sname = sname[:i] sec, err := s.f.GetSection(sname) if err != nil { @@ -188,7 +188,7 @@ func (s *Section) ParentKeys() []*Key { var parentKeys []*Key sname := s.name for { - if i := strings.LastIndex(sname, "."); i > -1 { + if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { sname = sname[:i] sec, err := s.f.GetSection(sname) if err != nil { @@ -217,7 +217,7 @@ func (s *Section) KeysHash() map[string]string { defer s.f.lock.RUnlock() } - hash := map[string]string{} + hash := make(map[string]string, len(s.keysHash)) for key, value := range s.keysHash { hash[key] = value } @@ -245,7 +245,7 @@ func (s *Section) DeleteKey(name string) { // For example, "[parent.child1]" and "[parent.child12]" are child sections // of section "[parent]". func (s *Section) ChildSections() []*Section { - prefix := s.name + "." 
+ prefix := s.name + s.f.options.ChildSectionDelimiter children := make([]*Section, 0, 3) for _, name := range s.f.sectionList { if strings.HasPrefix(name, prefix) { diff --git a/vendor/gopkg.in/ini.v1/struct.go b/vendor/gopkg.in/ini.v1/struct.go index 9be40a92..a486b2fe 100644 --- a/vendor/gopkg.in/ini.v1/struct.go +++ b/vendor/gopkg.in/ini.v1/struct.go @@ -263,24 +263,21 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri return nil } -func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool) { - opts := strings.SplitN(tag, ",", 4) +func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool, extends bool) { + opts := strings.SplitN(tag, ",", 5) rawName = opts[0] - if len(opts) > 1 { - omitEmpty = opts[1] == "omitempty" + for _, opt := range opts[1:] { + omitEmpty = omitEmpty || (opt == "omitempty") + allowShadow = allowShadow || (opt == "allowshadow") + allowNonUnique = allowNonUnique || (opt == "nonunique") + extends = extends || (opt == "extends") } - if len(opts) > 2 { - allowShadow = opts[2] == "allowshadow" - } - if len(opts) > 3 { - allowNonUnique = opts[3] == "nonunique" - } - return rawName, omitEmpty, allowShadow, allowNonUnique + return rawName, omitEmpty, allowShadow, allowNonUnique, extends } // mapToField maps the given value to the matching field of the given section. // The sectionIndex is the index (if non unique sections are enabled) to which the value should be added. -func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int) error { +func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int, sectionName string) error { if val.Kind() == reflect.Ptr { val = val.Elem() } @@ -295,7 +292,7 @@ func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int) continue } - rawName, _, allowShadow, allowNonUnique := parseTagOptions(tag) + rawName, _, allowShadow, allowNonUnique, extends := parseTagOptions(tag) fieldName := s.parseFieldName(tpField.Name, rawName) if len(fieldName) == 0 || !field.CanSet() { continue @@ -303,12 +300,26 @@ func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int) isStruct := tpField.Type.Kind() == reflect.Struct isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct - isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous - if isAnonymous { + isAnonymousPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + if isAnonymousPtr { field.Set(reflect.New(tpField.Type.Elem())) } - if isAnonymous || isStruct || isStructPtr { + if extends && (isAnonymousPtr || (isStruct && tpField.Anonymous)) { + if isStructPtr && field.IsNil() { + field.Set(reflect.New(tpField.Type.Elem())) + } + fieldSection := s + if rawName != "" { + sectionName = s.name + s.f.options.ChildSectionDelimiter + rawName + if secs, err := s.f.SectionsByName(sectionName); err == nil && sectionIndex < len(secs) { + fieldSection = secs[sectionIndex] + } + } + if err := fieldSection.mapToField(field, isStrict, sectionIndex, sectionName); err != nil { + return fmt.Errorf("map to field %q: %v", fieldName, err) + } + } else if isAnonymousPtr || isStruct || isStructPtr { if secs, err := s.f.SectionsByName(fieldName); err == nil { if len(secs) <= sectionIndex { return fmt.Errorf("there are not enough sections (%d <= %d) for the field %q", len(secs), sectionIndex, fieldName) @@ -318,7 +329,7 @@ func (s *Section) mapToField(val 
reflect.Value, isStrict bool, sectionIndex int) if isStructPtr && field.IsNil() { field.Set(reflect.New(tpField.Type.Elem())) } - if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex); err != nil { + if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex, fieldName); err != nil { return fmt.Errorf("map to field %q: %v", fieldName, err) } continue @@ -357,7 +368,7 @@ func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) ( typ := val.Type().Elem() for i, sec := range secs { elem := reflect.New(typ) - if err = sec.mapToField(elem, isStrict, i); err != nil { + if err = sec.mapToField(elem, isStrict, i, sec.name); err != nil { return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err) } @@ -387,7 +398,7 @@ func (s *Section) mapTo(v interface{}, isStrict bool) error { return nil } - return s.mapToField(val, isStrict, 0) + return s.mapToField(val, isStrict, 0, s.name) } // MapTo maps section to given struct. @@ -479,7 +490,7 @@ func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, all _ = keyWithShadows.AddShadow(val) } } - key = keyWithShadows + *key = *keyWithShadows return nil } @@ -581,7 +592,7 @@ func (s *Section) reflectFrom(val reflect.Value) error { continue } - rawName, omitEmpty, allowShadow, allowNonUnique := parseTagOptions(tag) + rawName, omitEmpty, allowShadow, allowNonUnique, extends := parseTagOptions(tag) if omitEmpty && isEmptyValue(field) { continue } @@ -595,7 +606,14 @@ func (s *Section) reflectFrom(val reflect.Value) error { continue } - if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || + if extends && tpField.Anonymous && (tpField.Type.Kind() == reflect.Ptr || tpField.Type.Kind() == reflect.Struct) { + if err := s.reflectFrom(field); err != nil { + return fmt.Errorf("reflect from field %q: %v", fieldName, err) + } + continue + } + + if (tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct) || (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { // Note: The only error here is section doesn't exist. 
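// To sketch the "extends" tag option handled above (illustrative only; the
// type and field names are invented): an embedded struct tagged with
// ",extends" maps to the current section rather than to a child section.
//
//	type Connection struct {
//		Host string
//	}
//	type Config struct {
//		Connection `ini:",extends"` // Host is read from the same section
//		Port       int
//	}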
sec, err := s.f.GetSection(fieldName) diff --git a/vendor/k8s.io/client-go/discovery/BUILD.bazel b/vendor/k8s.io/client-go/discovery/BUILD.bazel index b9b95f84..0fc21b73 100644 --- a/vendor/k8s.io/client-go/discovery/BUILD.bazel +++ b/vendor/k8s.io/client-go/discovery/BUILD.bazel @@ -25,6 +25,6 @@ go_library( "//vendor/k8s.io/client-go/kubernetes/scheme", "//vendor/k8s.io/client-go/openapi", "//vendor/k8s.io/client-go/rest", - "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//proto", ], ) diff --git a/vendor/k8s.io/kube-openapi/pkg/handler3/BUILD.bazel b/vendor/k8s.io/kube-openapi/pkg/handler3/BUILD.bazel index bafa56f7..11b90b49 100644 --- a/vendor/k8s.io/kube-openapi/pkg/handler3/BUILD.bazel +++ b/vendor/k8s.io/kube-openapi/pkg/handler3/BUILD.bazel @@ -14,6 +14,6 @@ go_library( "//vendor/k8s.io/kube-openapi/pkg/cached", "//vendor/k8s.io/kube-openapi/pkg/common", "//vendor/k8s.io/kube-openapi/pkg/spec3", - "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//proto", ], ) diff --git a/vendor/modules.txt b/vendor/modules.txt index cdb1071a..b6d2ccab 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -7,29 +7,55 @@ bazel.build/protobuf # cirello.io/dynamolock v1.4.0 ## explicit; go 1.12 cirello.io/dynamolock -# cloud.google.com/go v0.112.0 -## explicit; go 1.19 +# cloud.google.com/go v0.115.0 +## explicit; go 1.20 cloud.google.com/go/internal cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -# cloud.google.com/go/compute v1.24.0 -## explicit; go 1.19 -cloud.google.com/go/compute/internal -# cloud.google.com/go/compute/metadata v0.2.3 -## explicit; go 1.19 +# cloud.google.com/go/auth v0.7.1 +## explicit; go 1.20 +cloud.google.com/go/auth +cloud.google.com/go/auth/credentials +cloud.google.com/go/auth/credentials/internal/externalaccount +cloud.google.com/go/auth/credentials/internal/externalaccountuser +cloud.google.com/go/auth/credentials/internal/gdch +cloud.google.com/go/auth/credentials/internal/impersonate +cloud.google.com/go/auth/credentials/internal/stsexchange +cloud.google.com/go/auth/grpctransport +cloud.google.com/go/auth/httptransport +cloud.google.com/go/auth/internal +cloud.google.com/go/auth/internal/credsfile +cloud.google.com/go/auth/internal/jwt +cloud.google.com/go/auth/internal/transport +cloud.google.com/go/auth/internal/transport/cert +# cloud.google.com/go/auth/oauth2adapt v0.2.3 +## explicit; go 1.20 +cloud.google.com/go/auth/oauth2adapt +# cloud.google.com/go/compute/metadata v0.5.0 +## explicit; go 1.20 cloud.google.com/go/compute/metadata -# cloud.google.com/go/iam v1.1.6 -## explicit; go 1.19 +# cloud.google.com/go/iam v1.1.11 +## explicit; go 1.20 cloud.google.com/go/iam cloud.google.com/go/iam/apiv1/iampb -# cloud.google.com/go/storage v1.36.0 -## explicit; go 1.19 +# cloud.google.com/go/storage v1.43.0 +## explicit; go 1.20 cloud.google.com/go/storage cloud.google.com/go/storage/internal cloud.google.com/go/storage/internal/apiv2 cloud.google.com/go/storage/internal/apiv2/storagepb -# github.com/ProtonMail/go-crypto v0.0.0-20220113124808-70ae35bab23f +# dario.cat/mergo v1.0.0 +## explicit; go 1.13 +dario.cat/mergo +# github.com/Microsoft/go-winio v0.6.2 +## explicit; go 1.21 +github.com/Microsoft/go-winio +github.com/Microsoft/go-winio/internal/fs +github.com/Microsoft/go-winio/internal/socket +github.com/Microsoft/go-winio/internal/stringbuffer +github.com/Microsoft/go-winio/pkg/guid +# github.com/ProtonMail/go-crypto 
v1.0.0
 ## explicit; go 1.13
 github.com/ProtonMail/go-crypto/bitcurves
 github.com/ProtonMail/go-crypto/brainpool
@@ -41,6 +67,8 @@ github.com/ProtonMail/go-crypto/openpgp/aes/keywrap
 github.com/ProtonMail/go-crypto/openpgp/armor
 github.com/ProtonMail/go-crypto/openpgp/clearsign
 github.com/ProtonMail/go-crypto/openpgp/ecdh
+github.com/ProtonMail/go-crypto/openpgp/ecdsa
+github.com/ProtonMail/go-crypto/openpgp/eddsa
 github.com/ProtonMail/go-crypto/openpgp/elgamal
 github.com/ProtonMail/go-crypto/openpgp/errors
 github.com/ProtonMail/go-crypto/openpgp/internal/algorithm
@@ -48,10 +76,10 @@ github.com/ProtonMail/go-crypto/openpgp/internal/ecc
 github.com/ProtonMail/go-crypto/openpgp/internal/encoding
 github.com/ProtonMail/go-crypto/openpgp/packet
 github.com/ProtonMail/go-crypto/openpgp/s2k
-# github.com/ProtonMail/go-mime v0.0.0-20220302105931-303f85f7fe0f
+# github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f
 ## explicit; go 1.12
 github.com/ProtonMail/go-mime
-# github.com/ProtonMail/gopenpgp/v2 v2.4.7
+# github.com/ProtonMail/gopenpgp/v2 v2.7.5
 ## explicit; go 1.15
 github.com/ProtonMail/gopenpgp/v2/armor
 github.com/ProtonMail/gopenpgp/v2/constants
@@ -71,10 +99,11 @@ github.com/authzed/authzed-go/v1
 # github.com/authzed/grpcutil v0.0.0-20240123194739-2ea1e3d2d98b
 ## explicit; go 1.20
 github.com/authzed/grpcutil
-# github.com/aws/aws-sdk-go v1.44.129
-## explicit; go 1.11
+# github.com/aws/aws-sdk-go v1.54.19
+## explicit; go 1.19
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/arn
+github.com/aws/aws-sdk-go/aws/auth/bearer
 github.com/aws/aws-sdk-go/aws/awserr
 github.com/aws/aws-sdk-go/aws/awsutil
 github.com/aws/aws-sdk-go/aws/client
@@ -128,12 +157,13 @@ github.com/aws/aws-sdk-go/service/s3/s3manager
 github.com/aws/aws-sdk-go/service/secretsmanager
 github.com/aws/aws-sdk-go/service/sso
 github.com/aws/aws-sdk-go/service/sso/ssoiface
+github.com/aws/aws-sdk-go/service/ssooidc
 github.com/aws/aws-sdk-go/service/sts
 github.com/aws/aws-sdk-go/service/sts/stsiface
 # github.com/beorn7/perks v1.0.1
 ## explicit; go 1.11
 github.com/beorn7/perks/quantile
-# github.com/bluekeyes/go-gitdiff v0.5.0
+# github.com/bluekeyes/go-gitdiff v0.7.3
 ## explicit; go 1.13
 github.com/bluekeyes/go-gitdiff/gitdiff
 # github.com/cavaliergopher/rpm v1.2.0
@@ -142,21 +172,38 @@ github.com/cavaliergopher/rpm
 # github.com/certifi/gocertifi v0.0.0-20210507211836-431795d63e8d
 ## explicit; go 1.12
 github.com/certifi/gocertifi
-# github.com/cespare/xxhash/v2 v2.2.0
+# github.com/cespare/xxhash/v2 v2.3.0
 ## explicit; go 1.11
 github.com/cespare/xxhash/v2
+# github.com/cloudflare/circl v1.3.9
+## explicit; go 1.21
+github.com/cloudflare/circl/dh/x25519
+github.com/cloudflare/circl/dh/x448
+github.com/cloudflare/circl/ecc/goldilocks
+github.com/cloudflare/circl/internal/conv
+github.com/cloudflare/circl/internal/sha3
+github.com/cloudflare/circl/math
+github.com/cloudflare/circl/math/fp25519
+github.com/cloudflare/circl/math/fp448
+github.com/cloudflare/circl/math/mlsbset
+github.com/cloudflare/circl/sign
+github.com/cloudflare/circl/sign/ed25519
+github.com/cloudflare/circl/sign/ed448
 # github.com/coreos/go-oidc/v3 v3.0.0
 ## explicit; go 1.14
 github.com/coreos/go-oidc/v3/oidc
-# github.com/davecgh/go-spew v1.1.1
+# github.com/cyphar/filepath-securejoin v0.3.0
+## explicit; go 1.20
+github.com/cyphar/filepath-securejoin
+# github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
 ## explicit
 github.com/davecgh/go-spew/spew
 # github.com/emicklei/go-restful/v3 v3.11.0
 ## explicit; go 1.13
 github.com/emicklei/go-restful/v3
 github.com/emicklei/go-restful/v3/log
-# github.com/emirpasic/gods v1.12.0
-## explicit
+# github.com/emirpasic/gods v1.18.1
+## explicit; go 1.2
 github.com/emirpasic/gods/containers
 github.com/emirpasic/gods/lists
 github.com/emirpasic/gods/lists/arraylist
@@ -175,31 +222,32 @@ github.com/fatih/color
 # github.com/felixge/httpsnoop v1.0.4
 ## explicit; go 1.13
 github.com/felixge/httpsnoop
-# github.com/fsnotify/fsnotify v1.4.9
-## explicit; go 1.13
+# github.com/fsnotify/fsnotify v1.7.0
+## explicit; go 1.17
 github.com/fsnotify/fsnotify
 # github.com/go-chi/chi v4.1.2+incompatible
 ## explicit
 github.com/go-chi/chi
 github.com/go-chi/chi/middleware
-# github.com/go-git/gcfg v1.5.0
-## explicit
+# github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376
+## explicit; go 1.13
 github.com/go-git/gcfg
 github.com/go-git/gcfg/scanner
 github.com/go-git/gcfg/token
 github.com/go-git/gcfg/types
-# github.com/go-git/go-billy/v5 v5.3.1
-## explicit; go 1.13
+# github.com/go-git/go-billy/v5 v5.5.0
+## explicit; go 1.19
 github.com/go-git/go-billy/v5
 github.com/go-git/go-billy/v5/helper/chroot
 github.com/go-git/go-billy/v5/helper/polyfill
 github.com/go-git/go-billy/v5/memfs
 github.com/go-git/go-billy/v5/osfs
 github.com/go-git/go-billy/v5/util
-# github.com/go-git/go-git/v5 v5.2.0
-## explicit; go 1.13
+# github.com/go-git/go-git/v5 v5.12.0
+## explicit; go 1.19
 github.com/go-git/go-git/v5
 github.com/go-git/go-git/v5/config
+github.com/go-git/go-git/v5/internal/path_util
 github.com/go-git/go-git/v5/internal/revision
 github.com/go-git/go-git/v5/internal/url
 github.com/go-git/go-git/v5/plumbing
@@ -214,6 +262,7 @@ github.com/go-git/go-git/v5/plumbing/format/index
 github.com/go-git/go-git/v5/plumbing/format/objfile
 github.com/go-git/go-git/v5/plumbing/format/packfile
 github.com/go-git/go-git/v5/plumbing/format/pktline
+github.com/go-git/go-git/v5/plumbing/hash
 github.com/go-git/go-git/v5/plumbing/object
 github.com/go-git/go-git/v5/plumbing/protocol/packp
 github.com/go-git/go-git/v5/plumbing/protocol/packp/capability
@@ -240,7 +289,9 @@ github.com/go-git/go-git/v5/utils/merkletrie/filesystem
 github.com/go-git/go-git/v5/utils/merkletrie/index
 github.com/go-git/go-git/v5/utils/merkletrie/internal/frame
 github.com/go-git/go-git/v5/utils/merkletrie/noder
-# github.com/go-logr/logr v1.4.1
+github.com/go-git/go-git/v5/utils/sync
+github.com/go-git/go-git/v5/utils/trace
+# github.com/go-logr/logr v1.4.2
 ## explicit; go 1.18
 github.com/go-logr/logr
 github.com/go-logr/logr/funcr
@@ -288,8 +339,8 @@ github.com/golang/groupcache/lru
 # github.com/golang/mock v1.6.0
 ## explicit; go 1.11
 github.com/golang/mock/gomock
-# github.com/golang/protobuf v1.5.3
-## explicit; go 1.9
+# github.com/golang/protobuf v1.5.4
+## explicit; go 1.17
 github.com/golang/protobuf/proto
 github.com/golang/protobuf/ptypes
 github.com/golang/protobuf/ptypes/any
@@ -341,8 +392,8 @@ github.com/google/uuid
 ## explicit; go 1.19
 github.com/googleapis/enterprise-certificate-proxy/client
 github.com/googleapis/enterprise-certificate-proxy/client/util
-# github.com/googleapis/gax-go/v2 v2.12.1
-## explicit; go 1.19
+# github.com/googleapis/gax-go/v2 v2.12.5
+## explicit; go 1.20
 github.com/googleapis/gax-go/v2
 github.com/googleapis/gax-go/v2/apierror
 github.com/googleapis/gax-go/v2/apierror/internal/proto
@@ -365,8 +416,8 @@ github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
 github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options
 github.com/grpc-ecosystem/grpc-gateway/v2/runtime
 github.com/grpc-ecosystem/grpc-gateway/v2/utilities
-# github.com/hashicorp/go-cleanhttp v0.5.1
-## explicit
+# github.com/hashicorp/go-cleanhttp v0.5.2
+## explicit; go 1.13
 github.com/hashicorp/go-cleanhttp
 # github.com/hashicorp/go-retryablehttp v0.6.8
 ## explicit; go 1.13
@@ -383,11 +434,8 @@ github.com/hashicorp/hcl/hcl/token
 github.com/hashicorp/hcl/json/parser
 github.com/hashicorp/hcl/json/scanner
 github.com/hashicorp/hcl/json/token
-# github.com/imdario/mergo v0.3.11
-## explicit; go 1.13
-github.com/imdario/mergo
-# github.com/inconshreveable/mousetrap v1.0.0
-## explicit
+# github.com/inconshreveable/mousetrap v1.1.0
+## explicit; go 1.18
 github.com/inconshreveable/mousetrap
 # github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99
 ## explicit
@@ -409,7 +457,7 @@ github.com/json-iterator/go
 # github.com/jzelinskie/stringz v0.0.0-20210414224931-d6a8ce844a70
 ## explicit; go 1.12
 github.com/jzelinskie/stringz
-# github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd
+# github.com/kevinburke/ssh_config v1.2.0
 ## explicit
 github.com/kevinburke/ssh_config
 # github.com/lib/pq v1.10.2
@@ -417,8 +465,8 @@ github.com/kevinburke/ssh_config
 github.com/lib/pq
 github.com/lib/pq/oid
 github.com/lib/pq/scram
-# github.com/magiconair/properties v1.8.1
-## explicit
+# github.com/magiconair/properties v1.8.7
+## explicit; go 1.19
 github.com/magiconair/properties
 # github.com/mailru/easyjson v0.7.7
 ## explicit; go 1.12
@@ -439,10 +487,7 @@ github.com/mattn/go-runewidth
 # github.com/matttproud/golang_protobuf_extensions v1.0.1
 ## explicit
 github.com/matttproud/golang_protobuf_extensions/pbutil
-# github.com/mitchellh/go-homedir v1.1.0
-## explicit
-github.com/mitchellh/go-homedir
-# github.com/mitchellh/mapstructure v1.4.1
+# github.com/mitchellh/mapstructure v1.5.0
 ## explicit; go 1.14
 github.com/mitchellh/mapstructure
 # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
@@ -460,13 +505,22 @@ github.com/ory/hydra-client-go/v2
 # github.com/pborman/uuid v1.2.1
 ## explicit
 github.com/pborman/uuid
-# github.com/pelletier/go-toml v1.8.1
-## explicit; go 1.12
-github.com/pelletier/go-toml
+# github.com/pelletier/go-toml/v2 v2.2.2
+## explicit; go 1.16
+github.com/pelletier/go-toml/v2
+github.com/pelletier/go-toml/v2/internal/characters
+github.com/pelletier/go-toml/v2/internal/danger
+github.com/pelletier/go-toml/v2/internal/tracker
+github.com/pelletier/go-toml/v2/unstable
+# github.com/pjbgf/sha1cd v0.3.0
+## explicit; go 1.19
+github.com/pjbgf/sha1cd
+github.com/pjbgf/sha1cd/internal
+github.com/pjbgf/sha1cd/ubc
 # github.com/pkg/errors v0.9.1
 ## explicit
 github.com/pkg/errors
-# github.com/pmezard/go-difflib v1.0.0
+# github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
 ## explicit
 github.com/pmezard/go-difflib/difflib
 # github.com/prometheus/client_golang v1.13.0
@@ -493,8 +547,8 @@ github.com/rivo/uniseg
 # github.com/robfig/cron v1.2.0
 ## explicit
 github.com/robfig/cron
-# github.com/rocky-linux/srpmproc v0.4.3
-## explicit; go 1.18
+# github.com/rocky-linux/srpmproc v0.6.4
+## explicit; go 1.21
 github.com/rocky-linux/srpmproc/modulemd
 github.com/rocky-linux/srpmproc/pb
 github.com/rocky-linux/srpmproc/pkg/blob
@@ -507,42 +561,63 @@ github.com/rocky-linux/srpmproc/pkg/misc
 github.com/rocky-linux/srpmproc/pkg/modes
 github.com/rocky-linux/srpmproc/pkg/rpmutils
 github.com/rocky-linux/srpmproc/pkg/srpmproc
-# github.com/sergi/go-diff v1.1.0
-## explicit; go 1.12
+# github.com/sagikazarmark/locafero v0.6.0
+## explicit; go 1.20
+github.com/sagikazarmark/locafero
+# github.com/sagikazarmark/slog-shim v0.1.0
+## explicit; go 1.20
+github.com/sagikazarmark/slog-shim
+# github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3
+## explicit; go 1.13
 github.com/sergi/go-diff/diffmatchpatch
-# github.com/sirupsen/logrus v1.8.1
+# github.com/sirupsen/logrus v1.9.3
 ## explicit; go 1.13
 github.com/sirupsen/logrus
-# github.com/spf13/afero v1.10.0
-## explicit; go 1.16
+# github.com/skeema/knownhosts v1.2.2
+## explicit; go 1.17
+github.com/skeema/knownhosts
+# github.com/sourcegraph/conc v0.3.0
+## explicit; go 1.19
+github.com/sourcegraph/conc
+github.com/sourcegraph/conc/internal/multierror
+github.com/sourcegraph/conc/iter
+github.com/sourcegraph/conc/panics
+# github.com/spf13/afero v1.11.0
+## explicit; go 1.19
 github.com/spf13/afero
 github.com/spf13/afero/internal/common
 github.com/spf13/afero/mem
-# github.com/spf13/cast v1.3.0
-## explicit
+# github.com/spf13/cast v1.6.0
+## explicit; go 1.19
 github.com/spf13/cast
-# github.com/spf13/cobra v1.1.3
-## explicit; go 1.12
+# github.com/spf13/cobra v1.8.1
+## explicit; go 1.15
 github.com/spf13/cobra
-# github.com/spf13/jwalterweatherman v1.0.0
-## explicit
-github.com/spf13/jwalterweatherman
 # github.com/spf13/pflag v1.0.5
 ## explicit; go 1.12
 github.com/spf13/pflag
-# github.com/spf13/viper v1.7.1
-## explicit; go 1.12
-github.com/spf13/viper
-# github.com/stretchr/objx v0.5.0
-## explicit; go 1.12
-github.com/stretchr/objx
-# github.com/stretchr/testify v1.8.4
+# github.com/spf13/viper v1.19.0
 ## explicit; go 1.20
+github.com/spf13/viper
+github.com/spf13/viper/internal/encoding
+github.com/spf13/viper/internal/encoding/dotenv
+github.com/spf13/viper/internal/encoding/hcl
+github.com/spf13/viper/internal/encoding/ini
+github.com/spf13/viper/internal/encoding/javaproperties
+github.com/spf13/viper/internal/encoding/json
+github.com/spf13/viper/internal/encoding/toml
+github.com/spf13/viper/internal/encoding/yaml
+github.com/spf13/viper/internal/features
+# github.com/stretchr/objx v0.5.2
+## explicit; go 1.20
+github.com/stretchr/objx
+# github.com/stretchr/testify v1.9.0
+## explicit; go 1.17
 github.com/stretchr/testify/assert
 github.com/stretchr/testify/mock
 github.com/stretchr/testify/require
-# github.com/subosito/gotenv v1.2.0
-## explicit
+# github.com/subosito/gotenv v1.6.0
+## explicit; go 1.18
 github.com/subosito/gotenv
 # github.com/vbauerster/mpb/v7 v7.0.2
 ## explicit; go 1.14
@@ -553,8 +628,8 @@ github.com/vbauerster/mpb/v7/internal
 # github.com/xanzy/go-gitlab v0.50.4
 ## explicit; go 1.13
 github.com/xanzy/go-gitlab
-# github.com/xanzy/ssh-agent v0.2.1
-## explicit
+# github.com/xanzy/ssh-agent v0.3.3
+## explicit; go 1.16
 github.com/xanzy/ssh-agent
 # go.opencensus.io v0.24.0
 ## explicit; go 1.13
@@ -575,16 +650,17 @@ go.opencensus.io/trace
 go.opencensus.io/trace/internal
 go.opencensus.io/trace/propagation
 go.opencensus.io/trace/tracestate
-# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0
-## explicit; go 1.20
+# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0
+## explicit; go 1.21
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal
-# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0
-## explicit; go 1.20
+# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0
+## explicit; go 1.21
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil
-# go.opentelemetry.io/otel v1.24.0
-## explicit; go 1.20
+# go.opentelemetry.io/otel v1.28.0
+## explicit; go 1.21
 go.opentelemetry.io/otel
 go.opentelemetry.io/otel/attribute
 go.opentelemetry.io/otel/baggage
@@ -596,13 +672,14 @@ go.opentelemetry.io/otel/internal/global
 go.opentelemetry.io/otel/propagation
 go.opentelemetry.io/otel/semconv/v1.17.0
 go.opentelemetry.io/otel/semconv/v1.20.0
-# go.opentelemetry.io/otel/metric v1.24.0
-## explicit; go 1.20
+go.opentelemetry.io/otel/semconv/v1.24.0
+# go.opentelemetry.io/otel/metric v1.28.0
+## explicit; go 1.21
 go.opentelemetry.io/otel/metric
 go.opentelemetry.io/otel/metric/embedded
 go.opentelemetry.io/otel/metric/noop
-# go.opentelemetry.io/otel/trace v1.24.0
-## explicit; go 1.20
+# go.opentelemetry.io/otel/trace v1.28.0
+## explicit; go 1.21
 go.opentelemetry.io/otel/trace
 go.opentelemetry.io/otel/trace/embedded
 # go.temporal.io/api v1.6.1-0.20211110205628-60c98e9cbfe2
@@ -641,11 +718,16 @@ go.temporal.io/sdk/log
 go.temporal.io/sdk/temporal
 go.temporal.io/sdk/worker
 go.temporal.io/sdk/workflow
-# go.uber.org/atomic v1.9.0
-## explicit; go 1.13
-go.uber.org/atomic
-# golang.org/x/crypto v0.19.0
+# go.uber.org/atomic v1.11.0
 ## explicit; go 1.18
+go.uber.org/atomic
+# go.uber.org/multierr v1.11.0
+## explicit; go 1.19
+go.uber.org/multierr
+# golang.org/x/crypto v0.25.0
+## explicit; go 1.20
+golang.org/x/crypto/argon2
+golang.org/x/crypto/blake2b
 golang.org/x/crypto/blowfish
 golang.org/x/crypto/cast5
 golang.org/x/crypto/chacha20
@@ -653,7 +735,6 @@ golang.org/x/crypto/chacha20poly1305
 golang.org/x/crypto/cryptobyte
 golang.org/x/crypto/cryptobyte/asn1
 golang.org/x/crypto/curve25519
-golang.org/x/crypto/curve25519/internal/field
 golang.org/x/crypto/ed25519
 golang.org/x/crypto/hkdf
 golang.org/x/crypto/internal/alias
@@ -665,11 +746,19 @@ golang.org/x/crypto/openpgp/errors
 golang.org/x/crypto/openpgp/packet
 golang.org/x/crypto/openpgp/s2k
 golang.org/x/crypto/pbkdf2
+golang.org/x/crypto/sha3
 golang.org/x/crypto/ssh
 golang.org/x/crypto/ssh/agent
 golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
 golang.org/x/crypto/ssh/knownhosts
-# golang.org/x/net v0.21.0
+# golang.org/x/exp v0.0.0-20240716175740-e3f259677ff7
+## explicit; go 1.20
+golang.org/x/exp/constraints
+golang.org/x/exp/slices
+golang.org/x/exp/slog
+golang.org/x/exp/slog/internal
+golang.org/x/exp/slog/internal/buffer
+# golang.org/x/net v0.27.0
 ## explicit; go 1.18
 golang.org/x/net/context
 golang.org/x/net/http/httpguts
@@ -680,31 +769,34 @@ golang.org/x/net/internal/socks
 golang.org/x/net/internal/timeseries
 golang.org/x/net/proxy
 golang.org/x/net/trace
-# golang.org/x/oauth2 v0.17.0
+# golang.org/x/oauth2 v0.21.0
 ## explicit; go 1.18
 golang.org/x/oauth2
 golang.org/x/oauth2/authhandler
 golang.org/x/oauth2/clientcredentials
 golang.org/x/oauth2/google
-golang.org/x/oauth2/google/internal/externalaccount
+golang.org/x/oauth2/google/externalaccount
 golang.org/x/oauth2/google/internal/externalaccountauthorizeduser
+golang.org/x/oauth2/google/internal/impersonate
 golang.org/x/oauth2/google/internal/stsexchange
 golang.org/x/oauth2/internal
 golang.org/x/oauth2/jws
 golang.org/x/oauth2/jwt
-# golang.org/x/sync v0.6.0
+# golang.org/x/sync v0.7.0
 ## explicit; go 1.18
 golang.org/x/sync/semaphore
-# golang.org/x/sys v0.17.0
+# golang.org/x/sys v0.22.0
 ## explicit; go 1.18
 golang.org/x/sys/cpu
+golang.org/x/sys/execabs
 golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
-# golang.org/x/term v0.17.0
+golang.org/x/sys/windows/registry
+# golang.org/x/term v0.22.0
 ## explicit; go 1.18
 golang.org/x/term
-# golang.org/x/text v0.14.0
+# golang.org/x/text v0.16.0
 ## explicit; go 1.18
 golang.org/x/text/encoding
 golang.org/x/text/encoding/charmap
@@ -729,10 +821,8 @@ golang.org/x/text/unicode/norm
 # golang.org/x/time v0.5.0
 ## explicit; go 1.18
 golang.org/x/time/rate
-# golang.org/x/tools v0.17.0
-## explicit; go 1.18
-# google.golang.org/api v0.166.0
-## explicit; go 1.19
+# google.golang.org/api v0.188.0
+## explicit; go 1.20
 google.golang.org/api/googleapi
 google.golang.org/api/googleapi/transport
 google.golang.org/api/iamcredentials/v1
@@ -749,35 +839,22 @@ google.golang.org/api/transport
 google.golang.org/api/transport/grpc
 google.golang.org/api/transport/http
 google.golang.org/api/transport/http/internal/propagation
-# google.golang.org/appengine v1.6.8
-## explicit; go 1.11
-google.golang.org/appengine
-google.golang.org/appengine/internal
-google.golang.org/appengine/internal/app_identity
-google.golang.org/appengine/internal/base
-google.golang.org/appengine/internal/datastore
-google.golang.org/appengine/internal/log
-google.golang.org/appengine/internal/modules
-google.golang.org/appengine/internal/remote_api
-google.golang.org/appengine/internal/urlfetch
-google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20240221002015-b0ce06bbee7c
-## explicit; go 1.19
+# google.golang.org/genproto v0.0.0-20240711142825-46eb208f015d
+## explicit; go 1.20
 google.golang.org/genproto/googleapis/type/date
 google.golang.org/genproto/googleapis/type/expr
-google.golang.org/genproto/internal
-# google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c
-## explicit; go 1.19
+# google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d
+## explicit; go 1.20
 google.golang.org/genproto/googleapis/api
 google.golang.org/genproto/googleapis/api/annotations
 google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c
-## explicit; go 1.19
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d
+## explicit; go 1.20
 google.golang.org/genproto/googleapis/rpc/code
 google.golang.org/genproto/googleapis/rpc/errdetails
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.62.0
-## explicit; go 1.19
+# google.golang.org/grpc v1.65.0
+## explicit; go 1.21
 google.golang.org/grpc
 google.golang.org/grpc/attributes
 google.golang.org/grpc/backoff
@@ -786,6 +863,7 @@ google.golang.org/grpc/balancer/base
 google.golang.org/grpc/balancer/grpclb
 google.golang.org/grpc/balancer/grpclb/grpc_lb_v1
 google.golang.org/grpc/balancer/grpclb/state
+google.golang.org/grpc/balancer/pickfirst
 google.golang.org/grpc/balancer/roundrobin
 google.golang.org/grpc/binarylog/grpc_binarylog_v1
 google.golang.org/grpc/channelz
@@ -818,7 +896,6 @@ google.golang.org/grpc/internal/credentials
 google.golang.org/grpc/internal/envconfig
 google.golang.org/grpc/internal/googlecloud
 google.golang.org/grpc/internal/grpclog
-google.golang.org/grpc/internal/grpcrand
 google.golang.org/grpc/internal/grpcsync
 google.golang.org/grpc/internal/grpcutil
 google.golang.org/grpc/internal/idle
@@ -834,12 +911,14 @@ google.golang.org/grpc/internal/status
 google.golang.org/grpc/internal/syscall
 google.golang.org/grpc/internal/transport
 google.golang.org/grpc/internal/transport/networktype
+google.golang.org/grpc/internal/xds
 google.golang.org/grpc/keepalive
 google.golang.org/grpc/metadata
 google.golang.org/grpc/peer
 google.golang.org/grpc/reflection
 google.golang.org/grpc/reflection/grpc_reflection_v1
 google.golang.org/grpc/reflection/grpc_reflection_v1alpha
+google.golang.org/grpc/reflection/internal
 google.golang.org/grpc/resolver
 google.golang.org/grpc/resolver/dns
 google.golang.org/grpc/resolver/manual
@@ -847,14 +926,16 @@ google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.32.0
-## explicit; go 1.17
+# google.golang.org/protobuf v1.34.2
+## explicit; go 1.20
 google.golang.org/protobuf/encoding/protojson
 google.golang.org/protobuf/encoding/prototext
 google.golang.org/protobuf/encoding/protowire
 google.golang.org/protobuf/internal/descfmt
 google.golang.org/protobuf/internal/descopts
 google.golang.org/protobuf/internal/detrand
+google.golang.org/protobuf/internal/editiondefaults
+google.golang.org/protobuf/internal/editionssupport
 google.golang.org/protobuf/internal/encoding/defval
 google.golang.org/protobuf/internal/encoding/json
 google.golang.org/protobuf/internal/encoding/messageset
@@ -879,6 +960,7 @@ google.golang.org/protobuf/reflect/protoregistry
 google.golang.org/protobuf/runtime/protoiface
 google.golang.org/protobuf/runtime/protoimpl
 google.golang.org/protobuf/types/descriptorpb
+google.golang.org/protobuf/types/gofeaturespb
 google.golang.org/protobuf/types/known/anypb
 google.golang.org/protobuf/types/known/durationpb
 google.golang.org/protobuf/types/known/emptypb
@@ -889,7 +971,7 @@ google.golang.org/protobuf/types/known/wrapperspb
 # gopkg.in/inf.v0 v0.9.1
 ## explicit
 gopkg.in/inf.v0
-# gopkg.in/ini.v1 v1.57.0
+# gopkg.in/ini.v1 v1.67.0
 ## explicit
 gopkg.in/ini.v1
 # gopkg.in/square/go-jose.v2 v2.5.1